| Field | Type (dataset-wide range) | Value |
|---|---|---|
| blob_id | string (len 40–40) | 23abb883b7d6f8d9b303589c1c757b509dc4d37a |
| directory_id | string (len 40–40) | 3a6a211ea0d32405497fbd6486c490bb147e25f9 |
| path | string (len 4–721) | /dependency_manager/dependency_manager/base_config_unittest.py |
| content_id | string (len 40–40) | c10d2a78ea965581534418518cd8d245dca249b5 |
| detected_licenses | list (len 0–57) | ["BSD-3-Clause"] |
| license_type | string (2 classes) | permissive |
| repo_name | string (len 5–91) | catapult-project/catapult |
| snapshot_id | string (len 40–40) | e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0 |
| revision_id | string (len 40–40) | 53102de187a48ac2cfc241fef54dcbc29c453a8e |
| branch_name | string (321 classes) | refs/heads/main |
| visit_date | timestamp[ns] (2016-08-12 09:31:09 – 2023-09-06 10:45:07) | 2021-05-25T07:37:22.832505 |
| revision_date | timestamp[ns] (2010-09-28 14:01:40 – 2023-09-06 06:22:19) | 2021-05-24T08:01:49 |
| committer_date | timestamp[ns] (2010-09-28 14:01:40 – 2023-09-06 06:22:19) | 2021-05-25T06:07:38 |
| github_id | int64 (426 – 681M) | 33,947,548 |
| star_events_count | int64 (101 – 243k) | 2,032 |
| fork_events_count | int64 (0 – 110k) | 742 |
| gha_license_id | string (23 classes) | BSD-3-Clause |
| gha_event_created_at | timestamp[ns] (2012-06-28 18:51:49 – 2023-09-14 21:59:16, nullable) | 2022-08-26T16:01:18 |
| gha_created_at | timestamp[ns] (2008-02-11 22:55:26 – 2023-08-10 11:14:58, nullable) | 2015-04-14T17:49:05 |
| gha_language | string (147 classes) | HTML |
| src_encoding | string (26 classes) | UTF-8 |
| language | string (2 classes) | Python |
| is_vendor | bool | false |
| is_generated | bool | false |
| length_bytes | int64 (6 – 10.2M) | 81,060 |
| extension | string (115 classes) | py |
| filename | string (len 3–113) | base_config_unittest.py |
| content | string (len 6–10.2M) | (the Python source reproduced below) |
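
The row's `content` field holds the full source file, reproduced after this note. Below is a minimal sketch of reading rows with this schema, assuming a Hugging Face `datasets`-style dataset; the field names come from the table above, and the dataset identifier `org/dataset` is a placeholder, not the real id.

```python
# Minimal sketch, not an official loader: field names are taken from the
# schema table above; the dataset id below is a placeholder.
from datasets import load_dataset

ds = load_dataset("org/dataset", split="train", streaming=True)
row = next(iter(ds))

print(row["repo_name"], row["path"])         # e.g. catapult-project/catapult ...
print(row["language"], row["length_bytes"])  # e.g. Python 81060
print(row["content"][:120])                  # start of the source file below
```
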
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# pylint: disable=unused-argument

import os
import unittest

from py_utils import cloud_storage
import mock
from pyfakefs import fake_filesystem_unittest
from pyfakefs import fake_filesystem
from pyfakefs import fake_filesystem_glob

import dependency_manager
from dependency_manager import uploader
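

# Tests for creating, writing, and updating BaseConfig files, run against a
# fake filesystem (pyfakefs) with the cloud_storage module mocked out.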
class BaseConfigCreationAndUpdateUnittests(fake_filesystem_unittest.TestCase):
  def setUp(self):
    self.addTypeEqualityFunc(uploader.CloudStorageUploader,
                             uploader.CloudStorageUploader.__eq__)
    self.setUpPyfakefs()
    self.dependencies = {
        'dep1': {'cloud_storage_bucket': 'bucket1',
                 'cloud_storage_base_folder': 'dependencies_folder',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash11',
                         'download_path': '../../relative/dep1/path1'},
                     'plat2': {
                         'cloud_storage_hash': 'hash12',
                         'download_path': '../../relative/dep1/path2'}}},
        'dep2': {'cloud_storage_bucket': 'bucket2',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash21',
                         'download_path': '../../relative/dep2/path1'},
                     'plat2': {
                         'cloud_storage_hash': 'hash22',
                         'download_path': '../../relative/dep2/path2'}}}}
    self.expected_file_lines = [
        # pylint: disable=bad-continuation
        '{', '"config_type": "BaseConfig",', '"dependencies": {',
        '"dep1": {', '"cloud_storage_base_folder": "dependencies_folder",',
        '"cloud_storage_bucket": "bucket1",', '"file_info": {',
        '"plat1": {', '"cloud_storage_hash": "hash11",',
        '"download_path": "../../relative/dep1/path1"', '},',
        '"plat2": {', '"cloud_storage_hash": "hash12",',
        '"download_path": "../../relative/dep1/path2"', '}', '}', '},',
        '"dep2": {', '"cloud_storage_bucket": "bucket2",', '"file_info": {',
        '"plat1": {', '"cloud_storage_hash": "hash21",',
        '"download_path": "../../relative/dep2/path1"', '},',
        '"plat2": {', '"cloud_storage_hash": "hash22",',
        '"download_path": "../../relative/dep2/path2"', '}', '}', '}',
        '}', '}']
    self.file_path = os.path.abspath(os.path.join(
        'path', 'to', 'config', 'file'))

    self.new_dep_path = 'path/to/new/dep'
    self.fs.CreateFile(self.new_dep_path)
    self.new_dep_hash = 'A23B56B7F23E798601F'
    self.new_dependencies = {
        'dep1': {'cloud_storage_bucket': 'bucket1',
                 'cloud_storage_base_folder': 'dependencies_folder',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash11',
                         'download_path': '../../relative/dep1/path1'},
                     'plat2': {
                         'cloud_storage_hash': self.new_dep_hash,
                         'download_path': '../../relative/dep1/path2'}}},
        'dep2': {'cloud_storage_bucket': 'bucket2',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash21',
                         'download_path': '../../relative/dep2/path1'},
                     'plat2': {
                         'cloud_storage_hash': 'hash22',
                         'download_path': '../../relative/dep2/path2'}}}}
    self.new_bucket = 'bucket1'
    self.new_remote_path = 'dependencies_folder/dep1_%s' % self.new_dep_hash
    self.new_pending_upload = uploader.CloudStorageUploader(
        self.new_bucket, self.new_remote_path, self.new_dep_path)
    self.expected_new_backup_path = '.'.join([self.new_remote_path, 'old'])
    self.new_expected_file_lines = [
        # pylint: disable=bad-continuation
        '{', '"config_type": "BaseConfig",', '"dependencies": {',
        '"dep1": {', '"cloud_storage_base_folder": "dependencies_folder",',
        '"cloud_storage_bucket": "bucket1",', '"file_info": {',
        '"plat1": {', '"cloud_storage_hash": "hash11",',
        '"download_path": "../../relative/dep1/path1"', '},',
        '"plat2": {', '"cloud_storage_hash": "%s",' % self.new_dep_hash,
        '"download_path": "../../relative/dep1/path2"', '}', '}', '},',
        '"dep2": {', '"cloud_storage_bucket": "bucket2",', '"file_info": {',
        '"plat1": {', '"cloud_storage_hash": "hash21",',
        '"download_path": "../../relative/dep2/path1"', '},',
        '"plat2": {', '"cloud_storage_hash": "hash22",',
        '"download_path": "../../relative/dep2/path2"', '}', '}', '}',
        '}', '}']

    self.final_dep_path = 'path/to/final/dep'
    self.fs.CreateFile(self.final_dep_path)
    self.final_dep_hash = 'B34662F23B56B7F98601F'
    self.final_bucket = 'bucket2'
    self.final_remote_path = 'dep1_%s' % self.final_dep_hash
    self.final_pending_upload = uploader.CloudStorageUploader(
        self.final_bucket, self.final_remote_path, self.final_dep_path)
    self.expected_final_backup_path = '.'.join([self.final_remote_path,
                                                'old'])
    self.final_dependencies = {
        'dep1': {'cloud_storage_bucket': 'bucket1',
                 'cloud_storage_base_folder': 'dependencies_folder',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash11',
                         'download_path': '../../relative/dep1/path1'},
                     'plat2': {
                         'cloud_storage_hash': self.new_dep_hash,
                         'download_path': '../../relative/dep1/path2'}}},
        'dep2': {'cloud_storage_bucket': 'bucket2',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': self.final_dep_hash,
                         'download_path': '../../relative/dep2/path1'},
                     'plat2': {
                         'cloud_storage_hash': 'hash22',
                         'download_path': '../../relative/dep2/path2'}}}}
    self.final_expected_file_lines = [
        # pylint: disable=bad-continuation
        '{', '"config_type": "BaseConfig",', '"dependencies": {',
        '"dep1": {', '"cloud_storage_base_folder": "dependencies_folder",',
        '"cloud_storage_bucket": "bucket1",', '"file_info": {',
        '"plat1": {', '"cloud_storage_hash": "hash11",',
        '"download_path": "../../relative/dep1/path1"', '},',
        '"plat2": {', '"cloud_storage_hash": "%s",' % self.new_dep_hash,
        '"download_path": "../../relative/dep1/path2"', '}', '}', '},',
        '"dep2": {', '"cloud_storage_bucket": "bucket2",', '"file_info": {',
        '"plat1": {', '"cloud_storage_hash": "%s",' % self.final_dep_hash,
        '"download_path": "../../relative/dep2/path1"', '},',
        '"plat2": {', '"cloud_storage_hash": "hash22",',
        '"download_path": "../../relative/dep2/path2"', '}', '}', '}',
        '}', '}']

  def tearDown(self):
    self.tearDownPyfakefs()

  # Init is not meant to be overridden, so we should be mocking the
  # base_config's json module, even in subclasses.
  def testCreateEmptyConfig(self):
    expected_file_lines = ['{',
                           '"config_type": "BaseConfig",',
                           '"dependencies": {}',
                           '}']
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual({}, config._config_data)
    self.assertEqual(self.file_path, config._config_path)

  def testCreateEmptyConfigError(self):
    self.assertRaises(dependency_manager.EmptyConfigError,
                      dependency_manager.BaseConfig, self.file_path)

  def testCloudStorageRemotePath(self):
    dependency = 'dep_name'
    cs_hash = self.new_dep_hash
    cs_base_folder = 'dependency_remote_folder'
    expected_remote_path = '%s/%s_%s' % (cs_base_folder, dependency, cs_hash)
    remote_path = dependency_manager.BaseConfig._CloudStorageRemotePath(
        dependency, cs_hash, cs_base_folder)
    self.assertEqual(expected_remote_path, remote_path)

    # Without a base folder, the remote path is just '<dependency>_<hash>'.
    cs_base_folder = None
    expected_remote_path = '%s_%s' % (dependency, cs_hash)
    remote_path = dependency_manager.BaseConfig._CloudStorageRemotePath(
        dependency, cs_hash, cs_base_folder)
    self.assertEqual(expected_remote_path, remote_path)

  def testGetEmptyJsonDict(self):
    expected_json_dict = {'config_type': 'BaseConfig',
                          'dependencies': {}}
    json_dict = dependency_manager.BaseConfig._GetJsonDict()
    self.assertEqual(expected_json_dict, json_dict)

  def testGetNonEmptyJsonDict(self):
    expected_json_dict = {"config_type": "BaseConfig",
                          "dependencies": self.dependencies}
    json_dict = dependency_manager.BaseConfig._GetJsonDict(self.dependencies)
    self.assertEqual(expected_json_dict, json_dict)

  def testWriteEmptyConfigToFile(self):
    expected_file_lines = ['{', '"config_type": "BaseConfig",',
                           '"dependencies": {}', '}']
    self.assertFalse(os.path.exists(self.file_path))
    dependency_manager.BaseConfig._WriteConfigToFile(self.file_path)
    self.assertTrue(os.path.exists(self.file_path))
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))

  def testWriteNonEmptyConfigToFile(self):
    self.assertFalse(os.path.exists(self.file_path))
    dependency_manager.BaseConfig._WriteConfigToFile(self.file_path,
                                                     self.dependencies)
    self.assertTrue(os.path.exists(self.file_path))
    expected_file_lines = list(self.expected_file_lines)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
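
  # The tests below exercise ExecuteUpdateJobs failure paths: pending uploads
  # are seeded directly on the config, the mocked cloud_storage raises at a
  # chosen step, and afterwards the config file, dirty flag, and pending
  # uploads must be left untouched.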

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsNoOp(self, uploader_cs_mock):
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    self.assertFalse(config.ExecuteUpdateJobs())
    self.assertFalse(config._IsDirty())
    self.assertFalse(config._pending_uploads)
    self.assertEqual(self.dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsFailureOnInsertNoCSCollision(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.return_value = False
    uploader_cs_mock.Insert.side_effect = cloud_storage.CloudStorageError
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._is_dirty = True
    config._pending_uploads = [self.new_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._is_dirty)
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
                                       self.new_dep_path)]
    expected_copy_calls = []
    expected_delete_calls = []
    self.assertRaises(cloud_storage.CloudStorageError,
                      config.ExecuteUpdateJobs)
    self.assertTrue(config._is_dirty)
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)
    self.assertEqual(expected_delete_calls,
                     uploader_cs_mock.Delete.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsFailureOnInsertCSCollisionForce(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.return_value = True
    uploader_cs_mock.Insert.side_effect = cloud_storage.CloudStorageError
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._is_dirty = True
    config._pending_uploads = [self.new_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._is_dirty)
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
                                       self.new_dep_path)]
    expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
                                     self.new_remote_path,
                                     self.expected_new_backup_path),
                           mock.call(self.new_bucket, self.new_bucket,
                                     self.expected_new_backup_path,
                                     self.new_remote_path)]
    expected_delete_calls = []
    self.assertRaises(cloud_storage.CloudStorageError,
                      config.ExecuteUpdateJobs, force=True)
    self.assertTrue(config._is_dirty)
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)
    self.assertEqual(expected_delete_calls,
                     uploader_cs_mock.Delete.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsFailureOnInsertCSCollisionNoForce(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.return_value = True
    uploader_cs_mock.Insert.side_effect = cloud_storage.CloudStorageError
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._is_dirty = True
    config._pending_uploads = [self.new_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._is_dirty)
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
    expected_insert_calls = []
    expected_copy_calls = []
    expected_delete_calls = []
    self.assertRaises(cloud_storage.CloudStorageError,
                      config.ExecuteUpdateJobs)
    self.assertTrue(config._is_dirty)
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)
    self.assertEqual(expected_delete_calls,
                     uploader_cs_mock.Delete.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsFailureOnCopy(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.return_value = True
    uploader_cs_mock.Copy.side_effect = cloud_storage.CloudStorageError
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._is_dirty = True
    config._pending_uploads = [self.new_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._is_dirty)
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
    expected_insert_calls = []
    expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
                                     self.new_remote_path,
                                     self.expected_new_backup_path)]
    expected_delete_calls = []
    self.assertRaises(cloud_storage.CloudStorageError,
                      config.ExecuteUpdateJobs, force=True)
    self.assertTrue(config._is_dirty)
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)
    self.assertEqual(expected_delete_calls,
                     uploader_cs_mock.Delete.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsFailureOnSecondInsertNoCSCollision(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.return_value = False
    uploader_cs_mock.Insert.side_effect = [
        True, cloud_storage.CloudStorageError]
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._is_dirty = True
    config._pending_uploads = [self.new_pending_upload,
                               self.final_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
                             mock.call(self.final_bucket,
                                       self.final_remote_path)]
    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
                                       self.new_dep_path),
                             mock.call(self.final_bucket,
                                       self.final_remote_path,
                                       self.final_dep_path)]
    expected_copy_calls = []
    expected_delete_calls = [mock.call(self.new_bucket, self.new_remote_path)]
    self.assertRaises(cloud_storage.CloudStorageError,
                      config.ExecuteUpdateJobs)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)
    self.assertEqual(expected_delete_calls,
                     uploader_cs_mock.Delete.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsFailureOnSecondInsertCSCollisionForce(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.return_value = True
    uploader_cs_mock.Insert.side_effect = [
        True, cloud_storage.CloudStorageError]
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._is_dirty = True
    config._pending_uploads = [self.new_pending_upload,
                               self.final_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
                             mock.call(self.final_bucket,
                                       self.final_remote_path)]
    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
                                       self.new_dep_path),
                             mock.call(self.final_bucket,
                                       self.final_remote_path,
                                       self.final_dep_path)]
    expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
                                     self.new_remote_path,
                                     self.expected_new_backup_path),
                           mock.call(self.final_bucket, self.final_bucket,
                                     self.final_remote_path,
                                     self.expected_final_backup_path),
                           mock.call(self.final_bucket, self.final_bucket,
                                     self.expected_final_backup_path,
                                     self.final_remote_path),
                           mock.call(self.new_bucket, self.new_bucket,
                                     self.expected_new_backup_path,
                                     self.new_remote_path)]
    expected_delete_calls = []
    self.assertRaises(cloud_storage.CloudStorageError,
                      config.ExecuteUpdateJobs, force=True)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)
    self.assertEqual(expected_delete_calls,
                     uploader_cs_mock.Delete.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsFailureOnSecondInsertFirstCSCollisionForce(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.side_effect = [True, False, True]
    uploader_cs_mock.Insert.side_effect = [
        True, cloud_storage.CloudStorageError]
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._is_dirty = True
    config._pending_uploads = [self.new_pending_upload,
                               self.final_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
                             mock.call(self.final_bucket,
                                       self.final_remote_path)]
    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
                                       self.new_dep_path),
                             mock.call(self.final_bucket,
                                       self.final_remote_path,
                                       self.final_dep_path)]
    expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
                                     self.new_remote_path,
                                     self.expected_new_backup_path),
                           mock.call(self.new_bucket, self.new_bucket,
                                     self.expected_new_backup_path,
                                     self.new_remote_path)]
    expected_delete_calls = []
    self.assertRaises(cloud_storage.CloudStorageError,
                      config.ExecuteUpdateJobs, force=True)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)
    self.assertEqual(expected_delete_calls,
                     uploader_cs_mock.Delete.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsFailureOnFirstCSCollisionNoForce(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.side_effect = [True, False, True]
    uploader_cs_mock.Insert.side_effect = [
        True, cloud_storage.CloudStorageError]
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._is_dirty = True
    config._pending_uploads = [self.new_pending_upload,
                               self.final_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
    expected_insert_calls = []
    expected_copy_calls = []
    expected_delete_calls = []
    self.assertRaises(cloud_storage.CloudStorageError,
                      config.ExecuteUpdateJobs)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)
    self.assertEqual(expected_delete_calls,
                     uploader_cs_mock.Delete.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsFailureOnSecondCopyCSCollision(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.return_value = True
    uploader_cs_mock.Insert.return_value = True
    uploader_cs_mock.Copy.side_effect = [
        True, cloud_storage.CloudStorageError, True]
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._is_dirty = True
    config._pending_uploads = [self.new_pending_upload,
                               self.final_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
                             mock.call(self.final_bucket,
                                       self.final_remote_path)]
    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
                                       self.new_dep_path)]
    expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
                                     self.new_remote_path,
                                     self.expected_new_backup_path),
                           mock.call(self.final_bucket, self.final_bucket,
                                     self.final_remote_path,
                                     self.expected_final_backup_path),
                           mock.call(self.new_bucket, self.new_bucket,
                                     self.expected_new_backup_path,
                                     self.new_remote_path)]
    expected_delete_calls = []
    self.assertRaises(cloud_storage.CloudStorageError,
                      config.ExecuteUpdateJobs, force=True)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)
    self.assertEqual(expected_delete_calls,
                     uploader_cs_mock.Delete.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsFailureOnSecondCopyNoCSCollisionForce(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.side_effect = [False, True, False]
    uploader_cs_mock.Copy.side_effect = cloud_storage.CloudStorageError
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._is_dirty = True
    config._pending_uploads = [self.new_pending_upload,
                               self.final_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
                             mock.call(self.final_bucket,
                                       self.final_remote_path)]
    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
                                       self.new_dep_path)]
    expected_copy_calls = [mock.call(self.final_bucket, self.final_bucket,
                                     self.final_remote_path,
                                     self.expected_final_backup_path)]
    expected_delete_calls = [mock.call(self.new_bucket, self.new_remote_path)]
    self.assertRaises(cloud_storage.CloudStorageError,
                      config.ExecuteUpdateJobs, force=True)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)
    self.assertEqual(expected_delete_calls,
                     uploader_cs_mock.Delete.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsFailureOnSecondCopyNoCSCollisionNoForce(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.side_effect = [False, True, False]
    uploader_cs_mock.Copy.side_effect = cloud_storage.CloudStorageError
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._is_dirty = True
    config._pending_uploads = [self.new_pending_upload,
                               self.final_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
                             mock.call(self.final_bucket,
                                       self.final_remote_path)]
    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
                                       self.new_dep_path)]
    expected_copy_calls = []
    expected_delete_calls = [mock.call(self.new_bucket, self.new_remote_path)]
    self.assertRaises(cloud_storage.CloudStorageError,
                      config.ExecuteUpdateJobs)
    self.assertTrue(config._is_dirty)
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)
    self.assertEqual(expected_delete_calls,
                     uploader_cs_mock.Delete.call_args_list)
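
  # The remaining ExecuteUpdateJobs tests cover success paths, where pending
  # uploads are flushed to the mocked cloud_storage and the config file is
  # rewritten with the new hashes.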

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsSuccessOnePendingDepNoCloudStorageCollision(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.return_value = False
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._pending_uploads = [self.new_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._IsDirty())
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
                                       self.new_dep_path)]
    expected_copy_calls = []
    expected_delete_calls = []
    self.assertTrue(config.ExecuteUpdateJobs())
    self.assertFalse(config._IsDirty())
    self.assertFalse(config._pending_uploads)
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.new_expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertFalse(config._pending_uploads)
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)
    self.assertEqual(expected_delete_calls,
                     uploader_cs_mock.Delete.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsSuccessOnePendingDepCloudStorageCollision(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.return_value = True
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._pending_uploads = [self.new_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._IsDirty())
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
                                       self.new_dep_path)]
    expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
                                     self.new_remote_path,
                                     self.expected_new_backup_path)]
    self.assertTrue(config.ExecuteUpdateJobs(force=True))
    self.assertFalse(config._IsDirty())
    self.assertFalse(config._pending_uploads)
    self.assertEqual(self.new_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.new_expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertFalse(config._pending_uploads)
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsErrorOnePendingDepCloudStorageCollisionNoForce(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.return_value = True
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.new_dependencies.copy()
    config._is_dirty = True
    config._pending_uploads = [self.new_pending_upload]
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertTrue(config._is_dirty)
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
    expected_insert_calls = []
    expected_copy_calls = []
    self.assertRaises(dependency_manager.CloudStorageUploadConflictError,
                      config.ExecuteUpdateJobs)
    self.assertTrue(config._is_dirty)
    self.assertTrue(config._pending_uploads)
    self.assertEqual(self.new_dependencies, config._config_data)
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testExecuteUpdateJobsSuccessMultiplePendingDepsOneCloudStorageCollision(
      self, uploader_cs_mock):
    uploader_cs_mock.Exists.side_effect = [False, True]
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config._config_data = self.final_dependencies.copy()
    config._pending_uploads = [self.new_pending_upload,
                               self.final_pending_upload]
    self.assertEqual(self.final_dependencies, config._config_data)
    self.assertTrue(config._IsDirty())
    self.assertEqual(2, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
                             mock.call(self.final_bucket,
                                       self.final_remote_path)]
    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
                                       self.new_dep_path),
                             mock.call(self.final_bucket,
                                       self.final_remote_path,
                                       self.final_dep_path)]
    expected_copy_calls = [mock.call(self.final_bucket, self.final_bucket,
                                     self.final_remote_path,
                                     self.expected_final_backup_path)]
    self.assertTrue(config.ExecuteUpdateJobs(force=True))
    self.assertFalse(config._IsDirty())
    self.assertFalse(config._pending_uploads)
    self.assertEqual(self.final_dependencies, config._config_data)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.final_expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    self.assertFalse(config._pending_uploads)
    self.assertEqual(expected_insert_calls,
                     uploader_cs_mock.Insert.call_args_list)
    self.assertEqual(expected_exists_calls,
                     uploader_cs_mock.Exists.call_args_list)
    self.assertEqual(expected_copy_calls,
                     uploader_cs_mock.Copy.call_args_list)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testUpdateCloudStorageDependenciesReadOnlyConfig(
      self, uploader_cs_mock):
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path)
    with self.assertRaises(dependency_manager.ReadWriteError):
      config.AddCloudStorageDependencyUpdateJob(
          'dep', 'plat', 'path')
    with self.assertRaises(dependency_manager.ReadWriteError):
      config.AddCloudStorageDependencyUpdateJob(
          'dep', 'plat', 'path', version='1.2.3')
    with self.assertRaises(dependency_manager.ReadWriteError):
      config.AddCloudStorageDependencyUpdateJob(
          'dep', 'plat', 'path', execute_job=False)
    with self.assertRaises(dependency_manager.ReadWriteError):
      config.AddCloudStorageDependencyUpdateJob(
          'dep', 'plat', 'path', version='1.2.3', execute_job=False)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  def testUpdateCloudStorageDependenciesMissingDependency(
      self, uploader_cs_mock):
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
                      'dep', 'plat', 'path')
    self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
                      'dep', 'plat', 'path', version='1.2.3')
    self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
                      'dep', 'plat', 'path', execute_job=False)
    self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
                      'dep', 'plat', 'path', version='1.2.3', execute_job=False)

  @mock.patch('dependency_manager.uploader.cloud_storage')
  @mock.patch('dependency_manager.base_config.cloud_storage')
  def testUpdateCloudStorageDependenciesWrite(
      self, base_config_cs_mock, uploader_cs_mock):
    expected_dependencies = self.dependencies
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    self.assertFalse(config._IsDirty())
    self.assertEqual(expected_dependencies, config._config_data)
    base_config_cs_mock.CalculateHash.return_value = self.new_dep_hash
    uploader_cs_mock.Exists.return_value = False
    expected_dependencies = self.new_dependencies
    config.AddCloudStorageDependencyUpdateJob(
        'dep1', 'plat2', self.new_dep_path, execute_job=True)
    self.assertFalse(config._IsDirty())
    self.assertFalse(config._pending_uploads)
    self.assertEqual(expected_dependencies, config._config_data)
    # Check that the file contents have been updated.
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    expected_file_lines = list(self.new_expected_file_lines)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    expected_dependencies = self.final_dependencies
    base_config_cs_mock.CalculateHash.return_value = self.final_dep_hash
    config.AddCloudStorageDependencyUpdateJob(
        'dep2', 'plat1', self.final_dep_path, execute_job=True)
    self.assertFalse(config._IsDirty())
    self.assertFalse(config._pending_uploads)
    self.assertEqual(expected_dependencies, config._config_data)
    # Check that the file contents have been updated.
    expected_file_lines = list(self.final_expected_file_lines)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))

  @mock.patch('dependency_manager.uploader.cloud_storage')
  @mock.patch('dependency_manager.base_config.cloud_storage')
  def testUpdateCloudStorageDependenciesNoWrite(
      self, base_config_cs_mock, uploader_cs_mock):
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
                      'dep', 'plat', 'path')
    self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
                      'dep', 'plat', 'path', version='1.2.3')
    expected_dependencies = self.dependencies
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    self.assertFalse(config._IsDirty())
    self.assertFalse(config._pending_uploads)
    self.assertEqual(expected_dependencies, config._config_data)
    base_config_cs_mock.CalculateHash.return_value = self.new_dep_hash
    uploader_cs_mock.Exists.return_value = False
    expected_dependencies = self.new_dependencies
    config.AddCloudStorageDependencyUpdateJob(
        'dep1', 'plat2', self.new_dep_path, execute_job=False)
    self.assertTrue(config._IsDirty())
    self.assertEqual(1, len(config._pending_uploads))
    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
    self.assertEqual(expected_dependencies, config._config_data)
    # Check that the file contents have not been updated.
    expected_file_lines = list(self.expected_file_lines)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
    expected_dependencies = self.final_dependencies
    base_config_cs_mock.CalculateHash.return_value = self.final_dep_hash
    config.AddCloudStorageDependencyUpdateJob(
        'dep2', 'plat1', self.final_dep_path, execute_job=False)
    self.assertTrue(config._IsDirty())
    self.assertEqual(expected_dependencies, config._config_data)
    # Check that the file contents have not been updated.
    expected_file_lines = list(self.expected_file_lines)
    file_module = fake_filesystem.FakeFileOpen(self.fs)
    for line in file_module(self.file_path):
      self.assertEqual(expected_file_lines.pop(0), line.strip())
    self.fs.CloseOpenFile(file_module(self.file_path))
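

# Tests for reading and mutating per-platform data (buckets, base folders,
# hashes, download paths, and local paths) on an existing config file.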
class BaseConfigDataManipulationUnittests(fake_filesystem_unittest.TestCase):
  def setUp(self):
    self.addTypeEqualityFunc(uploader.CloudStorageUploader,
                             uploader.CloudStorageUploader.__eq__)
    self.setUpPyfakefs()

    self.cs_bucket = 'bucket1'
    self.cs_base_folder = 'dependencies_folder'
    self.cs_hash = 'hash12'
    self.download_path = '../../relative/dep1/path2'
    self.local_paths = ['../../../relative/local/path21',
                        '../../../relative/local/path22']
    self.platform_dict = {'cloud_storage_hash': self.cs_hash,
                          'download_path': self.download_path,
                          'local_paths': self.local_paths}
    self.dependencies = {
        'dep1': {
            'cloud_storage_bucket': self.cs_bucket,
            'cloud_storage_base_folder': self.cs_base_folder,
            'file_info': {
                'plat1': {
                    'cloud_storage_hash': 'hash11',
                    'download_path': '../../relative/dep1/path1',
                    'local_paths': ['../../../relative/local/path11',
                                    '../../../relative/local/path12']},
                'plat2': self.platform_dict
            }
        },
        'dep2': {
            'cloud_storage_bucket': 'bucket2',
            'file_info': {
                'plat1': {
                    'cloud_storage_hash': 'hash21',
                    'download_path': '../../relative/dep2/path1',
                    'local_paths': ['../../../relative/local/path31',
                                    '../../../relative/local/path32']},
                'plat2': {
                    'cloud_storage_hash': 'hash22',
                    'download_path': '../../relative/dep2/path2'}}}}
    self.file_path = os.path.abspath(os.path.join(
        'path', 'to', 'config', 'file'))

    self.expected_file_lines = [
        # pylint: disable=bad-continuation
        '{', '"config_type": "BaseConfig",', '"dependencies": {',
        '"dep1": {', '"cloud_storage_base_folder": "dependencies_folder",',
        '"cloud_storage_bucket": "bucket1",', '"file_info": {',
        '"plat1": {', '"cloud_storage_hash": "hash11",',
        '"download_path": "../../relative/dep1/path1",',
        '"local_paths": [', '"../../../relative/local/path11",',
        '"../../../relative/local/path12"', ']', '},',
        '"plat2": {', '"cloud_storage_hash": "hash12",',
        '"download_path": "../../relative/dep1/path2",',
        '"local_paths": [', '"../../../relative/local/path21",',
        '"../../../relative/local/path22"', ']',
        '}', '}', '},',
        '"dep2": {', '"cloud_storage_bucket": "bucket2",', '"file_info": {',
        '"plat1": {', '"cloud_storage_hash": "hash21",',
        '"download_path": "../../relative/dep2/path1",',
        '"local_paths": [', '"../../../relative/local/path31",',
        '"../../../relative/local/path32"', ']', '},',
        '"plat2": {', '"cloud_storage_hash": "hash22",',
        '"download_path": "../../relative/dep2/path2"', '}', '}', '}',
        '}', '}']
    self.fs.CreateFile(self.file_path,
                       contents='\n'.join(self.expected_file_lines))

  def testContaining(self):
    config = dependency_manager.BaseConfig(self.file_path)
    self.assertTrue('dep1' in config)
    self.assertTrue('dep2' in config)
    self.assertFalse('dep3' in config)

  def testAddNewDependencyNotWriteable(self):
    config = dependency_manager.BaseConfig(self.file_path)
    with self.assertRaises(dependency_manager.ReadWriteError):
      config.AddNewDependency('dep4', 'foo', 'bar')

  def testAddNewDependencyWriteableButDependencyAlreadyExists(self):
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    with self.assertRaises(ValueError):
      config.AddNewDependency('dep2', 'foo', 'bar')

  def testAddNewDependencySuccessfully(self):
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    config.AddNewDependency('dep3', 'foo', 'bar')
    self.assertTrue('dep3' in config)

  def testSetDownloadPathNotWritable(self):
    config = dependency_manager.BaseConfig(self.file_path)
    with self.assertRaises(dependency_manager.ReadWriteError):
      config.SetDownloadPath('dep2', 'plat1', '../../relative/dep1/path1')

  def testSetDownloadPathOnExistingPlatformSuccesfully(self):
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    download_path = '../../relative/dep1/foo.bar'
    config.SetDownloadPath('dep2', 'plat1', download_path)
    self.assertEqual(
        download_path,
        config._GetPlatformData('dep2', 'plat1', 'download_path'))

  def testSetDownloadPathOnNewPlatformSuccesfully(self):
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    download_path = '../../relative/dep1/foo.bar'
    config.SetDownloadPath('dep2', 'newplat', download_path)
    self.assertEqual(
        download_path,
        config._GetPlatformData('dep2', 'newplat', 'download_path'))

  def testSetPlatformDataFailureNotWritable(self):
    config = dependency_manager.BaseConfig(self.file_path)
    self.assertRaises(
        dependency_manager.ReadWriteError, config._SetPlatformData,
        'dep1', 'plat1', 'cloud_storage_bucket', 'new_bucket')
    self.assertEqual(self.dependencies, config._config_data)

  def testSetPlatformDataFailure(self):
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    self.assertRaises(ValueError, config._SetPlatformData, 'missing_dep',
                      'plat2', 'cloud_storage_bucket', 'new_bucket')
    self.assertEqual(self.dependencies, config._config_data)
    self.assertRaises(ValueError, config._SetPlatformData, 'dep1',
                      'missing_plat', 'cloud_storage_bucket', 'new_bucket')
    self.assertEqual(self.dependencies, config._config_data)

  def testSetPlatformDataCloudStorageBucketSuccess(self):
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    updated_cs_dependencies = {
        'dep1': {'cloud_storage_bucket': 'new_bucket',
                 'cloud_storage_base_folder': 'dependencies_folder',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash11',
                         'download_path': '../../relative/dep1/path1',
                         'local_paths': ['../../../relative/local/path11',
                                         '../../../relative/local/path12']},
                     'plat2': {
                         'cloud_storage_hash': 'hash12',
                         'download_path': '../../relative/dep1/path2',
                         'local_paths': ['../../../relative/local/path21',
                                         '../../../relative/local/path22']}}},
        'dep2': {'cloud_storage_bucket': 'bucket2',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash21',
                         'download_path': '../../relative/dep2/path1',
                         'local_paths': ['../../../relative/local/path31',
                                         '../../../relative/local/path32']},
                     'plat2': {
                         'cloud_storage_hash': 'hash22',
                         'download_path': '../../relative/dep2/path2'}}}}
    config._SetPlatformData('dep1', 'plat2', 'cloud_storage_bucket',
                            'new_bucket')
    self.assertEqual(updated_cs_dependencies, config._config_data)

  def testSetPlatformDataCloudStorageBaseFolderSuccess(self):
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    updated_cs_dependencies = {
        'dep1': {'cloud_storage_bucket': 'bucket1',
                 'cloud_storage_base_folder': 'new_dependencies_folder',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash11',
                         'download_path': '../../relative/dep1/path1',
                         'local_paths': ['../../../relative/local/path11',
                                         '../../../relative/local/path12']},
                     'plat2': {
                         'cloud_storage_hash': 'hash12',
                         'download_path': '../../relative/dep1/path2',
                         'local_paths': ['../../../relative/local/path21',
                                         '../../../relative/local/path22']}}},
        'dep2': {'cloud_storage_bucket': 'bucket2',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash21',
                         'download_path': '../../relative/dep2/path1',
                         'local_paths': ['../../../relative/local/path31',
                                         '../../../relative/local/path32']},
                     'plat2': {
                         'cloud_storage_hash': 'hash22',
                         'download_path': '../../relative/dep2/path2'}}}}
    config._SetPlatformData('dep1', 'plat2', 'cloud_storage_base_folder',
                            'new_dependencies_folder')
    self.assertEqual(updated_cs_dependencies, config._config_data)

  def testSetPlatformDataHashSuccess(self):
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    updated_cs_dependencies = {
        'dep1': {'cloud_storage_bucket': 'bucket1',
                 'cloud_storage_base_folder': 'dependencies_folder',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash11',
                         'download_path': '../../relative/dep1/path1',
                         'local_paths': ['../../../relative/local/path11',
                                         '../../../relative/local/path12']},
                     'plat2': {
                         'cloud_storage_hash': 'new_hash',
                         'download_path': '../../relative/dep1/path2',
                         'local_paths': ['../../../relative/local/path21',
                                         '../../../relative/local/path22']}}},
        'dep2': {'cloud_storage_bucket': 'bucket2',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash21',
                         'download_path': '../../relative/dep2/path1',
                         'local_paths': ['../../../relative/local/path31',
                                         '../../../relative/local/path32']},
                     'plat2': {
                         'cloud_storage_hash': 'hash22',
                         'download_path': '../../relative/dep2/path2'}}}}
    config._SetPlatformData('dep1', 'plat2', 'cloud_storage_hash',
                            'new_hash')
    self.assertEqual(updated_cs_dependencies, config._config_data)

  def testSetPlatformDataDownloadPathSuccess(self):
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    updated_cs_dependencies = {
        'dep1': {'cloud_storage_bucket': 'bucket1',
                 'cloud_storage_base_folder': 'dependencies_folder',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash11',
                         'download_path': '../../relative/dep1/path1',
                         'local_paths': ['../../../relative/local/path11',
                                         '../../../relative/local/path12']},
                     'plat2': {
                         'cloud_storage_hash': 'hash12',
                         'download_path': '../../new/dep1/path2',
                         'local_paths': ['../../../relative/local/path21',
                                         '../../../relative/local/path22']}}},
        'dep2': {'cloud_storage_bucket': 'bucket2',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash21',
                         'download_path': '../../relative/dep2/path1',
                         'local_paths': ['../../../relative/local/path31',
                                         '../../../relative/local/path32']},
                     'plat2': {
                         'cloud_storage_hash': 'hash22',
                         'download_path': '../../relative/dep2/path2'}}}}
    config._SetPlatformData('dep1', 'plat2', 'download_path',
                            '../../new/dep1/path2')
    self.assertEqual(updated_cs_dependencies, config._config_data)

  def testSetPlatformDataLocalPathSuccess(self):
    config = dependency_manager.BaseConfig(self.file_path, writable=True)
    updated_cs_dependencies = {
        'dep1': {'cloud_storage_bucket': 'bucket1',
                 'cloud_storage_base_folder': 'dependencies_folder',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash11',
                         'download_path': '../../relative/dep1/path1',
                         'local_paths': ['../../../relative/local/path11',
                                         '../../../relative/local/path12']},
                     'plat2': {
                         'cloud_storage_hash': 'hash12',
                         'download_path': '../../relative/dep1/path2',
                         'local_paths': ['../../new/relative/local/path21',
                                         '../../new/relative/local/path22']}}},
        'dep2': {'cloud_storage_bucket': 'bucket2',
                 'file_info': {
                     'plat1': {
                         'cloud_storage_hash': 'hash21',
                         'download_path': '../../relative/dep2/path1',
                         'local_paths': ['../../../relative/local/path31',
                                         '../../../relative/local/path32']},
                     'plat2': {
                         'cloud_storage_hash': 'hash22',
                         'download_path': '../../relative/dep2/path2'}}}}
    config._SetPlatformData('dep1', 'plat2', 'local_paths',
                            ['../../new/relative/local/path21',
                             '../../new/relative/local/path22'])
    self.assertEqual(updated_cs_dependencies, config._config_data)
def testGetPlatformDataFailure(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertRaises(ValueError, config._GetPlatformData, 'missing_dep',
'plat2', 'cloud_storage_bucket')
self.assertEqual(self.dependencies, config._config_data)
self.assertRaises(ValueError, config._GetPlatformData, 'dep1',
'missing_plat', 'cloud_storage_bucket')
self.assertEqual(self.dependencies, config._config_data)
def testGetPlatformDataDictSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertEqual(self.platform_dict,
config._GetPlatformData('dep1', 'plat2'))
self.assertEqual(self.dependencies, config._config_data)
def testGetPlatformDataCloudStorageBucketSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertEqual(self.cs_bucket, config._GetPlatformData(
'dep1', 'plat2', 'cloud_storage_bucket'))
self.assertEqual(self.dependencies, config._config_data)
def testGetPlatformDataCloudStorageBaseFolderSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertEqual(self.cs_base_folder, config._GetPlatformData(
'dep1', 'plat2', 'cloud_storage_base_folder'))
self.assertEqual(self.dependencies, config._config_data)
def testGetPlatformDataHashSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertEqual(self.cs_hash, config._GetPlatformData(
'dep1', 'plat2', 'cloud_storage_hash'))
self.assertEqual(self.dependencies, config._config_data)
def testGetPlatformDataDownloadPathSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertEqual(self.download_path, config._GetPlatformData(
'dep1', 'plat2', 'download_path'))
self.assertEqual(self.dependencies, config._config_data)
def testGetPlatformDataLocalPathSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertEqual(self.local_paths, config._GetPlatformData(
'dep1', 'plat2', 'local_paths'))
self.assertEqual(self.dependencies, config._config_data)
class BaseConfigTest(unittest.TestCase):
""" Subclassable unittests for BaseConfig.
For subclasses: override setUp, GetConfigDataFromDict,
and EndToEndExpectedConfigData as needed.
setUp must set the following properties:
    self.config_type: String returned from GetConfigType in config subclass.
self.config_class: the class for the config subclass.
self.config_module: importable module for the config subclass.
self.empty_dict: expected dictionary for an empty config, as it would be
stored in a json file.
self.one_dep_dict: example dictionary for a config with one dependency,
as it would be stored in a json file.
"""
def setUp(self):
self.config_type = 'BaseConfig'
self.config_class = dependency_manager.BaseConfig
self.config_module = 'dependency_manager.base_config'
self.empty_dict = {'config_type': self.config_type,
'dependencies': {}}
dependency_dict = {
'dep': {
'cloud_storage_base_folder': 'cs_base_folder1',
'cloud_storage_bucket': 'bucket1',
'file_info': {
'plat1_arch1': {
'cloud_storage_hash': 'hash111',
'download_path': 'download_path111',
'cs_remote_path': 'cs_path111',
'version_in_cs': 'version_111',
'local_paths': ['local_path1110', 'local_path1111']
},
'plat1_arch2': {
'cloud_storage_hash': 'hash112',
'download_path': 'download_path112',
'cs_remote_path': 'cs_path112',
'local_paths': ['local_path1120', 'local_path1121']
},
'win_arch1': {
'cloud_storage_hash': 'hash1w1',
'download_path': 'download\\path\\1w1',
'cs_remote_path': 'cs_path1w1',
'local_paths': ['local\\path\\1w10', 'local\\path\\1w11']
},
'all_the_variables': {
'cloud_storage_hash': 'hash111',
'download_path': 'download_path111',
'cs_remote_path': 'cs_path111',
'version_in_cs': 'version_111',
'path_within_archive': 'path/within/archive',
'local_paths': ['local_path1110', 'local_path1111']
}
}
}
}
self.one_dep_dict = {'config_type': self.config_type,
'dependencies': dependency_dict}
def GetConfigDataFromDict(self, config_dict):
return config_dict.get('dependencies', {})
@mock.patch('os.path')
@mock.patch('__builtin__.open')
def testInitBaseProperties(self, open_mock, path_mock):
# Init is not meant to be overridden, so we should be mocking the
# base_config's json module, even in subclasses.
json_module = 'dependency_manager.base_config.json'
with mock.patch(json_module) as json_mock:
json_mock.load.return_value = self.empty_dict.copy()
config = self.config_class('file_path')
self.assertEqual('file_path', config._config_path)
self.assertEqual(self.config_type, config.GetConfigType())
self.assertEqual(self.GetConfigDataFromDict(self.empty_dict),
config._config_data)
@mock.patch('dependency_manager.dependency_info.DependencyInfo')
@mock.patch('os.path')
@mock.patch('__builtin__.open')
def testInitWithDependencies(self, open_mock, path_mock, dep_info_mock):
# Init is not meant to be overridden, so we should be mocking the
# base_config's json module, even in subclasses.
json_module = 'dependency_manager.base_config.json'
with mock.patch(json_module) as json_mock:
json_mock.load.return_value = self.one_dep_dict
config = self.config_class('file_path')
self.assertEqual('file_path', config._config_path)
self.assertEqual(self.config_type, config.GetConfigType())
self.assertEqual(self.GetConfigDataFromDict(self.one_dep_dict),
config._config_data)
def testFormatPath(self):
self.assertEqual(None, self.config_class._FormatPath(None))
self.assertEqual('', self.config_class._FormatPath(''))
self.assertEqual('some_string',
self.config_class._FormatPath('some_string'))
expected_path = os.path.join('some', 'file', 'path')
self.assertEqual(expected_path,
self.config_class._FormatPath('some/file/path'))
self.assertEqual(expected_path,
self.config_class._FormatPath('some\\file\\path'))
@mock.patch('dependency_manager.base_config.json')
@mock.patch('dependency_manager.dependency_info.DependencyInfo')
@mock.patch('os.path.exists')
@mock.patch('__builtin__.open')
def testIterDependenciesError(
self, open_mock, exists_mock, dep_info_mock, json_mock):
# Init is not meant to be overridden, so we should be mocking the
# base_config's json module, even in subclasses.
json_mock.load.return_value = self.one_dep_dict
config = self.config_class('file_path', writable=True)
self.assertEqual(self.GetConfigDataFromDict(self.one_dep_dict),
config._config_data)
self.assertTrue(config._writable)
with self.assertRaises(dependency_manager.ReadWriteError):
for _ in config.IterDependencyInfo():
pass
@mock.patch('dependency_manager.base_config.json')
@mock.patch('dependency_manager.dependency_info.DependencyInfo')
@mock.patch('os.path.exists')
@mock.patch('__builtin__.open')
def testIterDependencies(
self, open_mock, exists_mock, dep_info_mock, json_mock):
json_mock.load.return_value = self.one_dep_dict
config = self.config_class('file_path')
self.assertEqual(self.GetConfigDataFromDict(self.one_dep_dict),
config._config_data)
expected_dep_info = ['dep_info0', 'dep_info1', 'dep_info2']
dep_info_mock.side_effect = expected_dep_info
expected_calls = [
mock.call('dep', 'plat1_arch1', 'file_path', cs_bucket='bucket1',
cs_hash='hash111', download_path='download_path111',
cs_remote_path='cs_path111',
local_paths=['local_path1110', 'local_path1111']),
        mock.call('dep', 'plat1_arch2', 'file_path', cs_bucket='bucket1',
                  cs_hash='hash112', download_path='download_path112',
                  cs_remote_path='cs_path112',
                  local_paths=['local_path1120', 'local_path1121']),
        mock.call('dep', 'win_arch1', 'file_path', cs_bucket='bucket1',
                  cs_hash='hash1w1',
                  download_path=os.path.join('download', 'path', '1w1'),
                  cs_remote_path='cs_path1w1',
                  local_paths=[os.path.join('local', 'path', '1w10'),
                               os.path.join('local', 'path', '1w11')])]
deps_seen = []
for dep_info in config.IterDependencyInfo():
deps_seen.append(dep_info)
dep_info_mock.assert_call_args(expected_calls)
self.assertItemsEqual(expected_dep_info, deps_seen)
@mock.patch('dependency_manager.base_config.json')
@mock.patch('os.path.exists')
@mock.patch('__builtin__.open')
def testIterDependenciesStaleGlob(self, open_mock, exists_mock, json_mock):
json_mock.load.return_value = self.one_dep_dict
config = self.config_class('file_path')
abspath = os.path.abspath
should_match = set(map(abspath, [
'dep_all_the_variables_0123456789abcdef0123456789abcdef01234567',
'dep_all_the_variables_123456789abcdef0123456789abcdef012345678']))
# Not testing case changes, because Windows is case-insensitive.
should_not_match = set(map(abspath, [
# A configuration that doesn't unzip shouldn't clear any stale unzips.
'dep_plat1_arch1_0123456789abcdef0123456789abcdef01234567',
# "Hash" component less than 40 characters (not a valid SHA1 hash).
'dep_all_the_variables_0123456789abcdef0123456789abcdef0123456',
# "Hash" component greater than 40 characters (not a valid SHA1 hash).
'dep_all_the_variables_0123456789abcdef0123456789abcdef012345678',
# "Hash" component not comprised of hex (not a valid SHA1 hash).
'dep_all_the_variables_0123456789gggggg0123456789gggggg01234567']))
# Create a fake filesystem just for glob to use
fake_fs = fake_filesystem.FakeFilesystem()
fake_glob = fake_filesystem_glob.FakeGlobModule(fake_fs)
for stale_dir in set.union(should_match, should_not_match):
fake_fs.CreateDirectory(stale_dir)
fake_fs.CreateFile(os.path.join(stale_dir, 'some_file'))
for dep_info in config.IterDependencyInfo():
if dep_info.platform == 'all_the_variables':
cs_info = dep_info.cloud_storage_info
actual_glob = cs_info._archive_info._stale_unzip_path_glob
actual_matches = set(fake_glob.glob(actual_glob))
self.assertItemsEqual(should_match, actual_matches)
|
cb5ff47407f722bd45df77511392f2e4e67893bc
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/refactoring/rename/epydocRenameParameter_after.py
|
914cc1cff998c6f956cd3c3c3bf78e7ebef151d7
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 120
|
py
|
epydocRenameParameter_after.py
|
def func(bar):
""" \\some comment
@param bar: The parameter value.
@type bar: Its type."""
pass
|
8f21ea31bf765262b783698bdaafa6ace9d818cf
|
5f5960d3041beb541f8abc6cb5c5bfeaf80f420d
|
/0248_Problem_1.py
|
75a3dab754a5fca31e7480ce0887e53d4ebf92c8
|
[] |
no_license
|
codereport/LeetCode
|
6a904ba915d9a0e104f9b2e0556592d9b733fcec
|
2a3d5ea3438d696690d9a30f1f3f81403128f983
|
refs/heads/master
| 2023-09-01T05:59:30.763405
| 2023-08-24T23:45:20
| 2023-08-24T23:45:20
| 120,723,684
| 125
| 46
| null | 2023-03-21T17:25:50
| 2018-02-08T06:59:28
|
C++
|
UTF-8
|
Python
| false
| false
| 191
|
py
|
0248_Problem_1.py
|
# Problem Link: https://leetcode.com/contest/weekly-contest-248/problems/build-array-from-permutation/
from typing import List
class Solution:
    # ans[i] = nums[nums[i]] for the zero-indexed permutation nums.
    def buildArray(self, nums: List[int]) -> List[int]:
        return [nums[i] for i in nums]
|
8f8d9013e57b2792038d1d10ab2b85a5058d07f8
|
2bc18a13c4a65b4005741b979f2cb0193c1e1a01
|
/test/suite/out/W39.py
|
554814c47839f32df7413729bd66ffe72cb950df
|
[
"MIT"
] |
permissive
|
hhatto/autopep8
|
b0b9daf78050d981c4355f096418b9283fc20a0f
|
4e869ad63a11575267450bfefdf022bb6128ab93
|
refs/heads/main
| 2023-09-01T05:14:18.553939
| 2023-08-27T14:12:45
| 2023-08-27T14:12:45
| 1,206,729
| 3,966
| 329
|
MIT
| 2023-08-27T14:12:46
| 2010-12-29T20:08:51
|
Python
|
UTF-8
|
Python
| false
| false
| 144
|
py
|
W39.py
|
#: W391
# The next line is blank

#: Okay
'''there is nothing wrong
with a multiline string at EOF
that happens to have a blank line in it
'''
|
8e4a27aca6401adddca686c97d5a5ff2d8500194
|
217a3f7fd7ae0d80ab8e04c0e4dce0c16a7cee3f
|
/tools/train_source.py
|
c9a967eec48b8f4c1d17493ab2ead0f1aab2e73e
|
[
"MIT"
] |
permissive
|
ZJULearning/MaxSquareLoss
|
5e1d29dee83158e78f7a294479be734a53cd78dc
|
2fbc18fc4ee0a618bfb6a9c0e1318261c80541db
|
refs/heads/master
| 2022-11-30T16:25:36.343504
| 2021-12-31T10:41:54
| 2021-12-31T10:41:54
| 181,312,374
| 113
| 28
|
MIT
| 2022-11-22T08:49:43
| 2019-04-14T13:23:27
|
Python
|
UTF-8
|
Python
| false
| false
| 31,659
|
py
|
train_source.py
|
import os
import random
import logging
import argparse
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.nn.functional as F
from tqdm import tqdm
from math import ceil
import numpy as np
from distutils.version import LooseVersion
from tensorboardX import SummaryWriter
from torchvision.utils import make_grid
import sys
sys.path.append(os.path.abspath('.'))
from utils.eval import Eval
from utils.train_helper import get_model
from datasets.cityscapes_Dataset import City_Dataset, City_DataLoader, inv_preprocess, decode_labels
from datasets.gta5_Dataset import GTA5_DataLoader
from datasets.synthia_Dataset import SYNTHIA_DataLoader
datasets_path={
'cityscapes': {'data_root_path': './datasets/Cityscapes', 'list_path': './datasets/city_list',
'image_path':'./datasets/Cityscapes/leftImg8bit',
'gt_path': './datasets/Cityscapes/gtFine'},
'gta5': {'data_root_path': './datasets/GTA5', 'list_path': './datasets/GTA5/list',
'image_path':'./datasets/GTA5/images',
'gt_path': './datasets/GTA5/labels'},
'synthia': {'data_root_path': './datasets/SYNTHIA', 'list_path': './datasets/SYNTHIA/list',
'image_path':'./datasets/SYNTHIA/RGB',
'gt_path': './datasets/SYNTHIA/GT/LABELS'},
'NTHU': {'data_root_path': './datasets/NTHU_Datasets', 'list_path': './datasets/NTHU_list'}
}
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
ITER_MAX = 5000
class Trainer():
def __init__(self, args, cuda=None, train_id="None", logger=None):
self.args = args
os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu
self.cuda = cuda and torch.cuda.is_available()
self.device = torch.device('cuda' if self.cuda else 'cpu')
self.train_id = train_id
self.logger = logger
self.current_MIoU = 0
self.best_MIou = 0
self.best_source_MIou = 0
self.current_epoch = 0
self.current_iter = 0
self.second_best_MIou = 0
# set TensorboardX
self.writer = SummaryWriter(self.args.checkpoint_dir)
# Metric definition
self.Eval = Eval(self.args.num_classes)
# loss definition
self.loss = nn.CrossEntropyLoss(weight=None, ignore_index= -1)
self.loss.to(self.device)
# model
self.model, params = get_model(self.args)
self.model = nn.DataParallel(self.model, device_ids=[0])
self.model.to(self.device)
if self.args.optim == "SGD":
self.optimizer = torch.optim.SGD(
params=params,
momentum=self.args.momentum,
weight_decay=self.args.weight_decay
)
elif self.args.optim == "Adam":
self.optimizer = torch.optim.Adam(params, betas=(0.9, 0.99), weight_decay=self.args.weight_decay)
# dataloader
if self.args.dataset=="cityscapes":
self.dataloader = City_DataLoader(self.args)
elif self.args.dataset=="gta5":
self.dataloader = GTA5_DataLoader(self.args)
else:
self.dataloader = SYNTHIA_DataLoader(self.args)
self.dataloader.num_iterations = min(self.dataloader.num_iterations, ITER_MAX)
print(self.args.iter_max, self.dataloader.num_iterations)
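        # The number of epochs is derived from the iteration budget: iter_max
        # (or iter_stop, when early stopping is requested) divided by the
        # number of iterations per epoch.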
self.epoch_num = ceil(self.args.iter_max / self.dataloader.num_iterations) if self.args.iter_stop is None else \
ceil(self.args.iter_stop / self.dataloader.num_iterations)
def main(self):
# display args details
self.logger.info("Global configuration as follows:")
for key, val in vars(self.args).items():
self.logger.info("{:16} {}".format(key, val))
# choose cuda
if self.cuda:
current_device = torch.cuda.current_device()
self.logger.info("This model will run on {}".format(torch.cuda.get_device_name(current_device)))
else:
self.logger.info("This model will run on CPU")
# load pretrained checkpoint
if self.args.pretrained_ckpt_file is not None:
if os.path.isdir(self.args.pretrained_ckpt_file):
self.args.pretrained_ckpt_file = os.path.join(self.args.checkpoint_dir, self.train_id + 'best.pth')
self.load_checkpoint(self.args.pretrained_ckpt_file)
if self.args.continue_training:
self.load_checkpoint(os.path.join(self.args.checkpoint_dir, self.train_id + 'best.pth'))
self.best_iter = self.current_iter
self.best_source_iter = self.current_iter
else:
self.current_epoch = 0
# train
self.train()
self.writer.close()
def train(self):
# self.validate() # check image summary
for epoch in tqdm(range(self.current_epoch, self.epoch_num),
desc="Total {} epochs".format(self.epoch_num)):
self.train_one_epoch()
# validate
PA, MPA, MIoU, FWIoU = self.validate()
self.writer.add_scalar('PA', PA, self.current_epoch)
self.writer.add_scalar('MPA', MPA, self.current_epoch)
self.writer.add_scalar('MIoU', MIoU, self.current_epoch)
self.writer.add_scalar('FWIoU', FWIoU, self.current_epoch)
self.current_MIoU = MIoU
is_best = MIoU > self.best_MIou
if is_best:
self.best_MIou = MIoU
self.best_iter = self.current_iter
self.logger.info("=>saving a new best checkpoint...")
self.save_checkpoint(self.train_id+'best.pth')
else:
self.logger.info("=> The MIoU of val does't improve.")
self.logger.info("=> The best MIoU of val is {} at {}".format(self.best_MIou, self.best_iter))
self.current_epoch += 1
state = {
'epoch': self.current_epoch + 1,
'iteration': self.current_iter,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_MIou': self.current_MIoU
}
self.logger.info("=>best_MIou {} at {}".format(self.best_MIou, self.best_iter))
self.logger.info("=>saving the final checkpoint to " + os.path.join(self.args.checkpoint_dir, self.train_id+'final.pth'))
self.save_checkpoint(self.train_id+'final.pth')
def train_one_epoch(self):
tqdm_epoch = tqdm(self.dataloader.data_loader, total=self.dataloader.num_iterations,
desc="Train Epoch-{}-total-{}".format(self.current_epoch+1, self.epoch_num))
self.logger.info("Training one epoch...")
self.Eval.reset()
train_loss = []
loss_seg_value_2 = 0
iter_num = self.dataloader.num_iterations
if self.args.freeze_bn:
self.model.eval()
self.logger.info("freeze bacth normalization successfully!")
else:
self.model.train()
# Initialize your average meters
batch_idx = 0
for x, y, _ in tqdm_epoch:
self.poly_lr_scheduler(
optimizer=self.optimizer,
init_lr=self.args.lr,
iter=self.current_iter,
max_iter=self.args.iter_max,
power=self.args.poly_power,
)
if self.args.iter_stop is not None and self.current_iter >= self.args.iter_stop:
self.logger.info("iteration arrive {}(early stop)/{}(total step)!".format(self.args.iter_stop, self.args.iter_max))
break
if self.current_iter >= self.args.iter_max:
self.logger.info("iteration arrive {}!".format(self.args.iter_max))
break
self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]["lr"], self.current_iter)
if self.cuda:
x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
y = torch.squeeze(y, 1)
self.optimizer.zero_grad()
# model
pred = self.model(x)
if isinstance(pred, tuple):
pred_2 = pred[1]
pred = pred[0]
# loss
cur_loss = self.loss(pred, y)
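            # With --multi enabled, the auxiliary (middle-level) prediction adds
            # a lambda_seg-weighted cross-entropy term to the total loss.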
if self.args.multi:
loss_2 = self.args.lambda_seg * self.loss(pred_2, y)
cur_loss += loss_2
loss_seg_value_2 += loss_2.cpu().item() / iter_num
# optimizer
cur_loss.backward()
self.optimizer.step()
train_loss.append(cur_loss.item())
if batch_idx % 1000 == 0:
if self.args.multi:
self.logger.info("The train loss of epoch{}-batch-{}:{};{}".format(self.current_epoch,
batch_idx, cur_loss.item(), loss_2.item()))
else:
self.logger.info("The train loss of epoch{}-batch-{}:{}".format(self.current_epoch,
batch_idx, cur_loss.item()))
batch_idx += 1
self.current_iter += 1
if np.isnan(float(cur_loss.item())):
raise ValueError('Loss is nan during training...')
pred = pred.data.cpu().numpy()
label = y.cpu().numpy()
argpred = np.argmax(pred, axis=1)
self.Eval.add_batch(label, argpred)
if batch_idx==self.dataloader.num_iterations:
break
self.log_one_train_epoch(x, label, argpred, train_loss)
tqdm_epoch.close()
def log_one_train_epoch(self, x, label, argpred, train_loss):
#show train image on tensorboard
images_inv = inv_preprocess(x.clone().cpu(), self.args.show_num_images, numpy_transform=self.args.numpy_transform)
labels_colors = decode_labels(label, self.args.show_num_images)
preds_colors = decode_labels(argpred, self.args.show_num_images)
for index, (img, lab, color_pred) in enumerate(zip(images_inv, labels_colors, preds_colors)):
self.writer.add_image('train/'+ str(index)+'/Images', img, self.current_epoch)
self.writer.add_image('train/'+ str(index)+'/Labels', lab, self.current_epoch)
self.writer.add_image('train/'+ str(index)+'/preds', color_pred, self.current_epoch)
if self.args.class_16:
PA = self.Eval.Pixel_Accuracy()
MPA_16, MPA = self.Eval.Mean_Pixel_Accuracy()
MIoU_16, MIoU = self.Eval.Mean_Intersection_over_Union()
FWIoU_16, FWIoU = self.Eval.Frequency_Weighted_Intersection_over_Union()
else:
PA = self.Eval.Pixel_Accuracy()
MPA = self.Eval.Mean_Pixel_Accuracy()
MIoU = self.Eval.Mean_Intersection_over_Union()
FWIoU = self.Eval.Frequency_Weighted_Intersection_over_Union()
self.logger.info('\nEpoch:{}, train PA1:{}, MPA1:{}, MIoU1:{}, FWIoU1:{}'.format(self.current_epoch, PA, MPA,
MIoU, FWIoU))
self.writer.add_scalar('train_PA', PA, self.current_epoch)
self.writer.add_scalar('train_MPA', MPA, self.current_epoch)
self.writer.add_scalar('train_MIoU', MIoU, self.current_epoch)
self.writer.add_scalar('train_FWIoU', FWIoU, self.current_epoch)
tr_loss = sum(train_loss)/len(train_loss) if isinstance(train_loss, list) else train_loss
self.writer.add_scalar('train_loss', tr_loss, self.current_epoch)
tqdm.write("The average loss of train epoch-{}-:{}".format(self.current_epoch, tr_loss))
def validate(self, mode='val'):
self.logger.info('\nvalidating one epoch...')
self.Eval.reset()
with torch.no_grad():
tqdm_batch = tqdm(self.dataloader.val_loader, total=self.dataloader.valid_iterations,
desc="Val Epoch-{}-".format(self.current_epoch + 1))
if mode == 'val':
self.model.eval()
i = 0
for x, y, id in tqdm_batch:
if self.cuda:
x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
# model
pred = self.model(x)
if isinstance(pred, tuple):
pred_2 = pred[1]
pred = pred[0]
pred_P = F.softmax(pred, dim=1)
pred_P_2 = F.softmax(pred_2, dim=1)
y = torch.squeeze(y, 1)
pred = pred.data.cpu().numpy()
label = y.cpu().numpy()
argpred = np.argmax(pred, axis=1)
self.Eval.add_batch(label, argpred)
#show val result on tensorboard
images_inv = inv_preprocess(x.clone().cpu(), self.args.show_num_images, numpy_transform=self.args.numpy_transform)
labels_colors = decode_labels(label, self.args.show_num_images)
preds_colors = decode_labels(argpred, self.args.show_num_images)
for index, (img, lab, color_pred) in enumerate(zip(images_inv, labels_colors, preds_colors)):
self.writer.add_image(str(index)+'/Images', img, self.current_epoch)
self.writer.add_image(str(index)+'/Labels', lab, self.current_epoch)
self.writer.add_image(str(index)+'/preds', color_pred, self.current_epoch)
if self.args.class_16:
def val_info(Eval, name):
PA = Eval.Pixel_Accuracy()
MPA_16, MPA_13 = Eval.Mean_Pixel_Accuracy()
MIoU_16, MIoU_13 = Eval.Mean_Intersection_over_Union()
FWIoU_16, FWIoU_13 = Eval.Frequency_Weighted_Intersection_over_Union()
PC_16, PC_13 = Eval.Mean_Precision()
print("########## Eval{} ############".format(name))
self.logger.info('\nEpoch:{:.3f}, {} PA:{:.3f}, MPA_16:{:.3f}, MIoU_16:{:.3f}, FWIoU_16:{:.3f}, PC_16:{:.3f}'.format(self.current_epoch, name, PA, MPA_16,
MIoU_16, FWIoU_16, PC_16))
self.logger.info('\nEpoch:{:.3f}, {} PA:{:.3f}, MPA_13:{:.3f}, MIoU_13:{:.3f}, FWIoU_13:{:.3f}, PC_13:{:.3f}'.format(self.current_epoch, name, PA, MPA_13,
MIoU_13, FWIoU_13, PC_13))
self.writer.add_scalar('PA'+name, PA, self.current_epoch)
self.writer.add_scalar('MPA_16'+name, MPA_16, self.current_epoch)
self.writer.add_scalar('MIoU_16'+name, MIoU_16, self.current_epoch)
self.writer.add_scalar('FWIoU_16'+name, FWIoU_16, self.current_epoch)
self.writer.add_scalar('MPA_13'+name, MPA_13, self.current_epoch)
self.writer.add_scalar('MIoU_13'+name, MIoU_13, self.current_epoch)
self.writer.add_scalar('FWIoU_13'+name, FWIoU_13, self.current_epoch)
return PA, MPA_13, MIoU_13, FWIoU_13
else:
def val_info(Eval, name):
PA = Eval.Pixel_Accuracy()
MPA = Eval.Mean_Pixel_Accuracy()
MIoU = Eval.Mean_Intersection_over_Union()
FWIoU = Eval.Frequency_Weighted_Intersection_over_Union()
PC = Eval.Mean_Precision()
print("########## Eval{} ############".format(name))
self.logger.info('\nEpoch:{:.3f}, {} PA1:{:.3f}, MPA1:{:.3f}, MIoU1:{:.3f}, FWIoU1:{:.3f}, PC:{:.3f}'.format(self.current_epoch, name, PA, MPA,
MIoU, FWIoU, PC))
self.writer.add_scalar('PA'+name, PA, self.current_epoch)
self.writer.add_scalar('MPA'+name, MPA, self.current_epoch)
self.writer.add_scalar('MIoU'+name, MIoU, self.current_epoch)
self.writer.add_scalar('FWIoU'+name, FWIoU, self.current_epoch)
return PA, MPA, MIoU, FWIoU
PA, MPA, MIoU, FWIoU = val_info(self.Eval, "")
tqdm_batch.close()
return PA, MPA, MIoU, FWIoU
def validate_source(self):
self.logger.info('\nvalidating source domain...')
self.Eval.reset()
with torch.no_grad():
tqdm_batch = tqdm(self.source_val_dataloader, total=self.dataloader.valid_iterations,
desc="Source Val Epoch-{}-".format(self.current_epoch + 1))
self.model.eval()
i = 0
for x, y, id in tqdm_batch:
# y.to(torch.long)
if self.cuda:
x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
# model
pred = self.model(x)
if isinstance(pred, tuple):
pred_2 = pred[1]
pred = pred[0]
pred_P = F.softmax(pred, dim=1)
pred_P_2 = F.softmax(pred_2, dim=1)
y = torch.squeeze(y, 1)
pred = pred.data.cpu().numpy()
label = y.cpu().numpy()
argpred = np.argmax(pred, axis=1)
self.Eval.add_batch(label, argpred)
i += 1
if i == self.dataloader.valid_iterations:
break
#show val result on tensorboard
images_inv = inv_preprocess(x.clone().cpu(), self.args.show_num_images, numpy_transform=self.args.numpy_transform)
labels_colors = decode_labels(label, self.args.show_num_images)
preds_colors = decode_labels(argpred, self.args.show_num_images)
for index, (img, lab, color_pred) in enumerate(zip(images_inv, labels_colors, preds_colors)):
self.writer.add_image('source_eval/'+str(index)+'/Images', img, self.current_epoch)
self.writer.add_image('source_eval/'+str(index)+'/Labels', lab, self.current_epoch)
self.writer.add_image('source_eval/'+str(index)+'/preds', color_pred, self.current_epoch)
if self.args.class_16:
def source_val_info(Eval, name):
PA = Eval.Pixel_Accuracy()
MPA_16, MPA_13 = Eval.Mean_Pixel_Accuracy()
MIoU_16, MIoU_13 = Eval.Mean_Intersection_over_Union()
FWIoU_16, FWIoU_13 = Eval.Frequency_Weighted_Intersection_over_Union()
PC_16, PC_13 = Eval.Mean_Precision()
print("########## Source Eval{} ############".format(name))
self.logger.info('\nEpoch:{:.3f}, source {} PA:{:.3f}, MPA_16:{:.3f}, MIoU_16:{:.3f}, FWIoU_16:{:.3f}, PC_16:{:.3f}'.format(self.current_epoch, name, PA, MPA_16,
MIoU_16, FWIoU_16, PC_16))
self.logger.info('\nEpoch:{:.3f}, source {} PA:{:.3f}, MPA_13:{:.3f}, MIoU_13:{:.3f}, FWIoU_13:{:.3f}, PC_13:{:.3f}'.format(self.current_epoch, name, PA, MPA_13,
MIoU_13, FWIoU_13, PC_13))
self.writer.add_scalar('source_PA'+name, PA, self.current_epoch)
self.writer.add_scalar('source_MPA_16'+name, MPA_16, self.current_epoch)
self.writer.add_scalar('source_MIoU_16'+name, MIoU_16, self.current_epoch)
self.writer.add_scalar('source_FWIoU_16'+name, FWIoU_16, self.current_epoch)
self.writer.add_scalar('source_MPA_13'+name, MPA_13, self.current_epoch)
self.writer.add_scalar('source_MIoU_13'+name, MIoU_13, self.current_epoch)
self.writer.add_scalar('source_FWIoU_13'+name, FWIoU_13, self.current_epoch)
return PA, MPA_13, MIoU_13, FWIoU_13
else:
def source_val_info(Eval, name):
PA = Eval.Pixel_Accuracy()
MPA = Eval.Mean_Pixel_Accuracy()
MIoU = Eval.Mean_Intersection_over_Union()
FWIoU = Eval.Frequency_Weighted_Intersection_over_Union()
PC = Eval.Mean_Precision()
self.writer.add_scalar('source_PA'+name, PA, self.current_epoch)
self.writer.add_scalar('source_MPA'+name, MPA, self.current_epoch)
self.writer.add_scalar('source_MIoU'+name, MIoU, self.current_epoch)
self.writer.add_scalar('source_FWIoU'+name, FWIoU, self.current_epoch)
print("########## Source Eval{} ############".format(name))
self.logger.info('\nEpoch:{:.3f}, source {} PA1:{:.3f}, MPA1:{:.3f}, MIoU1:{:.3f}, FWIoU1:{:.3f}, PC:{:.3f}'.format(self.current_epoch, name, PA, MPA,
MIoU, FWIoU, PC))
return PA, MPA, MIoU, FWIoU
PA, MPA, MIoU, FWIoU = source_val_info(self.Eval, "")
tqdm_batch.close()
is_best = MIoU > self.best_source_MIou
if is_best:
self.best_source_MIou = MIoU
self.best_source_iter = self.current_iter
self.logger.info("=>saving a new best source checkpoint...")
self.save_checkpoint(self.train_id+'source_best.pth')
else:
self.logger.info("=> The source MIoU of val does't improve.")
self.logger.info("=> The best source MIoU of val is {} at {}".format(self.best_source_MIou, self.best_source_iter))
return PA, MPA, MIoU, FWIoU
def save_checkpoint(self, filename=None):
"""
Save checkpoint if a new best is achieved
:param state:
:param is_best:
:param filepath:
:return:
"""
filename = os.path.join(self.args.checkpoint_dir, filename)
state = {
'epoch': self.current_epoch + 1,
'iteration': self.current_iter,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_MIou':self.best_MIou
}
torch.save(state, filename)
def load_checkpoint(self, filename):
try:
self.logger.info("Loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
if 'state_dict' in checkpoint:
self.model.load_state_dict(checkpoint['state_dict'])
else:
self.model.module.load_state_dict(checkpoint)
self.logger.info("Checkpoint loaded successfully from "+filename)
except OSError as e:
self.logger.info("No checkpoint exists from '{}'. Skipping...".format(self.args.checkpoint_dir))
self.logger.info("**First time to train**")
def poly_lr_scheduler(self, optimizer, init_lr=None, iter=None,
max_iter=None, power=None):
init_lr = self.args.lr if init_lr is None else init_lr
iter = self.current_iter if iter is None else iter
max_iter = self.args.iter_max if max_iter is None else max_iter
power = self.args.poly_power if power is None else power
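        # Polynomial decay of the learning rate:
        #   new_lr = init_lr * (1 - iter / max_iter) ** power
        # If the optimizer has a second parameter group (e.g. the classifier
        # head returned by get_model), it is kept at 10x the base rate.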
new_lr = init_lr * (1 - float(iter) / max_iter) ** power
optimizer.param_groups[0]["lr"] = new_lr
if len(optimizer.param_groups) == 2:
optimizer.param_groups[1]["lr"] = 10 * new_lr
def add_train_args(arg_parser):
# Path related arguments
arg_parser.add_argument('--data_root_path', type=str, default=None,
help="the root path of dataset")
arg_parser.add_argument('--list_path', type=str, default=None,
help="the root path of dataset")
arg_parser.add_argument('--checkpoint_dir', default="./log/train",
help="the path of ckpt file")
# Model related arguments
arg_parser.add_argument('--backbone', default='deeplabv2_multi',
help="backbone of encoder")
arg_parser.add_argument('--bn_momentum', type=float, default=0.1,
help="batch normalization momentum")
arg_parser.add_argument('--imagenet_pretrained', type=str2bool, default=True,
help="whether apply imagenet pretrained weights")
arg_parser.add_argument('--pretrained_ckpt_file', type=str, default=None,
help="whether apply pretrained checkpoint")
arg_parser.add_argument('--continue_training', type=str2bool, default=False,
help="whether to continue training ")
arg_parser.add_argument('--show_num_images', type=int, default=2,
help="show how many images during validate")
# train related arguments
arg_parser.add_argument('--seed', default=12345, type=int,
help='random seed')
    arg_parser.add_argument('--gpu', type=str, default="0",
                            help='comma-separated GPU id(s) to use, e.g. "0" or "0,1"')
arg_parser.add_argument('--batch_size_per_gpu', default=1, type=int,
help='input batch size')
# dataset related arguments
arg_parser.add_argument('--dataset', default='cityscapes', type=str,
help='dataset choice')
    arg_parser.add_argument('--base_size', default="1280,720", type=str,
                            help='base size of image')
    arg_parser.add_argument('--crop_size', default="1280,720", type=str,
                            help='crop size of image')
    arg_parser.add_argument('--target_base_size', default="1024,512", type=str,
                            help='base size of target image')
    arg_parser.add_argument('--target_crop_size', default="1024,512", type=str,
                            help='crop size of target image')
arg_parser.add_argument('--num_classes', default=19, type=int,
help='num class of mask')
arg_parser.add_argument('--data_loader_workers', default=16, type=int,
help='num_workers of Dataloader')
arg_parser.add_argument('--pin_memory', default=2, type=int,
help='pin_memory of Dataloader')
arg_parser.add_argument('--split', type=str, default='train',
help="choose from train/val/test/trainval/all")
arg_parser.add_argument('--random_mirror', default=True, type=str2bool,
help='add random_mirror')
arg_parser.add_argument('--random_crop', default=False, type=str2bool,
help='add random_crop')
arg_parser.add_argument('--resize', default=True, type=str2bool,
help='resize')
arg_parser.add_argument('--gaussian_blur', default=True, type=str2bool,
help='add gaussian_blur')
arg_parser.add_argument('--numpy_transform', default=True, type=str2bool,
help='image transform with numpy style')
# optimization related arguments
arg_parser.add_argument('--freeze_bn', type=str2bool, default=False,
help="whether freeze BatchNormalization")
arg_parser.add_argument('--optim', default="SGD", type=str,
help='optimizer')
arg_parser.add_argument('--momentum', type=float, default=0.9)
arg_parser.add_argument('--weight_decay', type=float, default=5e-4)
arg_parser.add_argument('--lr', type=float, default=2.5e-4,
help="init learning rate ")
arg_parser.add_argument('--iter_max', type=int, default=250000,
help="the maxinum of iteration")
arg_parser.add_argument('--iter_stop', type=int, default=None,
help="the early stop step")
arg_parser.add_argument('--poly_power', type=float, default=0.9,
help="poly_power")
# multi-level output
arg_parser.add_argument('--multi', default=False, type=str2bool,
help='output model middle feature')
arg_parser.add_argument('--lambda_seg', type=float, default=0.1,
help="lambda_seg of middle output")
return arg_parser
def init_args(args):
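    # args.gpu is a comma-separated device string such as "0" or "0,1", so
    # ceil(len(args.gpu) / 2) is the number of GPU ids listed in it.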
args.batch_size = args.batch_size_per_gpu * ceil(len(args.gpu) / 2)
print("batch size: ", args.batch_size)
train_id = str(args.dataset)
crop_size = args.crop_size.split(',')
base_size = args.base_size.split(',')
if len(crop_size)==1:
args.crop_size = int(crop_size[0])
args.base_size = int(base_size[0])
else:
args.crop_size = (int(crop_size[0]), int(crop_size[1]))
args.base_size = (int(base_size[0]), int(base_size[1]))
target_crop_size = args.target_crop_size.split(',')
target_base_size = args.target_base_size.split(',')
if len(target_crop_size)==1:
args.target_crop_size = int(target_crop_size[0])
args.target_base_size = int(target_base_size[0])
else:
args.target_crop_size = (int(target_crop_size[0]), int(target_crop_size[1]))
args.target_base_size = (int(target_base_size[0]), int(target_base_size[1]))
if not args.continue_training:
if os.path.exists(args.checkpoint_dir):
print("checkpoint dir exists, which will be removed")
import shutil
shutil.rmtree(args.checkpoint_dir, ignore_errors=True)
os.mkdir(args.checkpoint_dir)
if args.data_root_path is None:
args.data_root_path = datasets_path[args.dataset]['data_root_path']
args.list_path = datasets_path[args.dataset]['list_path']
args.image_filepath = datasets_path[args.dataset]['image_path']
args.gt_filepath = datasets_path[args.dataset]['gt_path']
args.class_16 = True if args.num_classes == 16 else False
args.class_13 = True if args.num_classes == 13 else False
# logger configure
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fh = logging.FileHandler(os.path.join(args.checkpoint_dir, 'train_log.txt'))
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
#set seed
random.seed(args.seed)
np.random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.backends.cudnn.benchmark=True
return args, train_id, logger
if __name__ == '__main__':
assert LooseVersion(torch.__version__) >= LooseVersion('1.0.0'), 'PyTorch>=1.0.0 is required'
arg_parser = argparse.ArgumentParser()
arg_parser = add_train_args(arg_parser)
args = arg_parser.parse_args()
args, train_id, logger = init_args(args)
agent = Trainer(args=args, cuda=True, train_id=train_id, logger=logger)
agent.main()
|
61d048fc281accde5be37d930b995f11593e5ea6
|
ddddaa700e4642f46a2c1e1e0271a7c8ea62ba0f
|
/harness/determined/constants.py
|
57887b7c582edf87bad5b37e3522a7b8330fda2b
|
[
"Apache-2.0"
] |
permissive
|
determined-ai/determined
|
9d563cb5ffd074c88ee5edc9bf22ab9c3cb78c7e
|
8239b1993f4f44390f4e88901ffaf3b12429b83c
|
refs/heads/main
| 2023-08-21T12:13:36.651298
| 2023-08-21T08:34:16
| 2023-08-21T08:34:16
| 253,846,879
| 2,531
| 330
|
Apache-2.0
| 2023-09-14T21:54:17
| 2020-04-07T16:12:29
|
Go
|
UTF-8
|
Python
| false
| false
| 2,417
|
py
|
constants.py
|
import os
MAX_SLOTS_PER_AGENT = 16
# The default configs to use when running test experiments.
#
# TODO: Unify the defaults used here with the defaults used in master.
DEFAULT_SEARCHER_CFG = {"name": "single", "max_length": {"batches": 100}}
DEFAULT_RESOURCES_CFG = {"slots_per_trial": 1, "native_parallel": False}
DEFAULT_SCHEDULING_UNIT = 100
DEFAULT_OPTIMIZATIONS = {
"aggregation_frequency": 1,
"average_aggregated_gradients": True,
"average_training_metrics": True,
"gradient_compression": False,
"mixed_precision": "O0",
}
DEFAULT_EXP_CFG = {
"searcher": DEFAULT_SEARCHER_CFG,
"scheduling_unit": DEFAULT_SCHEDULING_UNIT,
"resources": DEFAULT_RESOURCES_CFG,
"optimizations": DEFAULT_OPTIMIZATIONS,
}
# Until we implement a more automatic solution, expose a temporary workaround of
# allowing ports to be changed using environment variables for the rare case that
# the default ports are already in use by other processes.
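# For example (hypothetical values), launching with DTRAIN_SSH_PORT=13350 in the
# environment would move the dtrain SSH server off the default port 12350.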
DTRAIN_SSH_PORT = int(str(os.getenv("DTRAIN_SSH_PORT", "12350")))
# GLOO port used by Horovod for the Gloo controller.
HOROVOD_GLOO_RENDEZVOUS_PORT = int(str(os.getenv("HOROVOD_GLOO_RENDEZVOUS_PORT", "12355")))
# Port for communicating between training processes. Used for reducing
# validation metrics.
INTER_TRAIN_PROCESS_COMM_PORT_1 = int(str(os.getenv("INTER_TRAIN_PROCESS_COMM_PORT_1", "12360")))
INTER_TRAIN_PROCESS_COMM_PORT_2 = int(
str(
os.getenv(
"INTER_TRAIN_PROCESS_COMM_PORT_2", INTER_TRAIN_PROCESS_COMM_PORT_1 + MAX_SLOTS_PER_AGENT
)
)
)
# both of the above ports will be offset
# (value that we get from the port offset registry) in distributed context.
# How many seconds horovod waits for startup to complete before failing.
HOROVOD_STARTUP_TIMEOUT_SECONDS = 1200
# Path for file that stores output of horovod auto-tuning. Only created when
# horovod auto-tuning is enabled.
HOROVOD_AUTOTUNE_LOG_FILEPATH = "/tmp/autotune_log.csv"
# How many seconds GLOO waits for all tasks to connect before failing.
# Increasing this from a default of 30 is necessary when there is a
# large number of machines.
HOROVOD_GLOO_TIMEOUT_SECONDS = 240
# The well-known locations of the executing container's STDOUT and STDERR.
CONTAINER_STDOUT = "/run/determined/train/logs/stdout.log"
CONTAINER_STDERR = "/run/determined/train/logs/stderr.log"
MANAGED_TRAINING_MODEL_COPY = "/run/determined/train/model"
|
122cf87df135a600099ddde86b57a92018a2e24a
|
3ca67d69abd4e74b7145b340cdda65532f90053b
|
/BOJ/1245.농장관리/sAp00n.py
|
58fcb04e7263d3cf6ebf22e66ef8f001ab85f8de
|
[] |
no_license
|
DKU-STUDY/Algorithm
|
19549516984b52a1c5cd73e1ed1e58f774d6d30e
|
6f78efdbefd8eedab24e43d74c7dae7f95c2893b
|
refs/heads/master
| 2023-02-18T06:48:39.309641
| 2023-02-09T07:16:14
| 2023-02-09T07:16:14
| 258,455,710
| 175
| 49
| null | 2023-02-09T07:16:16
| 2020-04-24T08:42:27
|
Python
|
UTF-8
|
Python
| false
| false
| 5,675
|
py
|
sAp00n.py
|
from sys import stdin
from collections import deque
def near_check(mat, node):
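    # Collect the 8-connected neighbours of `node` that lie inside the grid and
    # whose height is <= the height at `node`; the caller's BFS floods through
    # these cells to erase one "mountain" at a time.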
global N_Column
global M_Row
i, j = node
node_val = mat[j][i]
return_list = []
if i == 0:
if j == 0:
search_list = [[i + 1, j], [i, j + 1], [i + 1, j + 1]]
for near_node in search_list:
near_i, near_j = near_node
if near_i <= M_Row - 1 and near_j <= N_Column - 1:
if mat[near_j][near_i] <= node_val:
return_list.append([near_i, near_j])
elif j == N_Column - 1:
search_list = [[i + 1, j], [i, j - 1], [i + 1, j - 1]]
for near_node in search_list:
near_i, near_j = near_node
if near_i <= M_Row -1 and near_j <= N_Column - 1:
if mat[near_j][near_i] <= node_val:
return_list.append([near_i, near_j])
else:
search_list = [[i, j - 1], [i + 1, j - 1], [i + 1, j], [i + 1, j + 1], [i, j + 1]]
for near_node in search_list:
near_i, near_j = near_node
if near_i <= M_Row - 1 and near_j <= N_Column -1:
if mat[near_j][near_i] <= node_val:
return_list.append([near_i, near_j])
elif i == M_Row - 1:
if j == 0:
search_list = [[i - 1, j], [i - 1, j + 1], [i, j + 1]]
for near_node in search_list:
near_i, near_j = near_node
if near_i <= M_Row - 1 and near_j <= N_Column - 1:
if mat[near_j][near_i] <= node_val:
return_list.append([near_i, near_j])
elif j == N_Column - 1:
search_list = [[i - 1, j], [i - 1, j - 1], [i, j - 1]]
for near_node in search_list:
near_i, near_j = near_node
if near_i <= M_Row - 1 and near_j <= N_Column - 1:
if mat[near_j][near_i] <= node_val:
return_list.append([near_i, near_j])
else:
search_list = [[i, j - 1], [i - 1, j - 1], [i - 1, j], [i - 1, j + 1], [i, j + 1]]
for near_node in search_list:
near_i, near_j = near_node
if near_i <= M_Row - 1 and near_j <= N_Column - 1:
if mat[near_j][near_i] <= node_val:
return_list.append([near_i, near_j])
else:
if j == 0:
search_list = [[i - 1, j], [i - 1, j + 1], [i, j + 1], [i + 1, j + 1], [i + 1, j]]
for near_node in search_list:
near_i, near_j = near_node
if near_i <= M_Row - 1 and near_j <= N_Column -1:
if mat[near_j][near_i] <= node_val:
return_list.append([near_i, near_j])
elif j == N_Column - 1:
search_list = [[i - 1, j], [i - 1, j - 1], [i, j - 1], [i + 1, j - 1], [i + 1, j]]
for near_node in search_list:
near_i, near_j = near_node
if near_i <= M_Row - 1 and near_j <= N_Column - 1:
if mat[near_j][near_i] <= node_val:
return_list.append([near_i, near_j])
else:
search_list = [[i - 1, j], [i - 1, j - 1], [i, j - 1], [i + 1, j - 1], [i + 1, j], [i + 1, j + 1],
[i, j + 1], [i - 1, j + 1]]
for near_node in search_list:
near_i, near_j = near_node
if near_i <= M_Row - 1 and near_j <= N_Column - 1:
if mat[near_j][near_i] <= node_val:
return_list.append([near_i, near_j])
return return_list
N_Column, M_Row = list(map(int, stdin.readline().split()))
if N_Column == 1 and M_Row == 1:
if int(stdin.readline()) != 0:
print(1)
else:
print(0)
else:
mat = [None] * N_Column
num_of_Mount = 0
for j in range(N_Column):
mat[j] = list(map(int, stdin.readline().split()))
graph = {}
max_val = 0
for j in range(N_Column):
for i in range(M_Row):
ele = mat[j][i]
if ele not in graph:
graph[ele] = []
graph[ele] += [[i, j]]
max_val = max(max_val, ele)
for value in range(max_val, 0, -1):
same_value_node_list = graph.get(value)
if same_value_node_list is None:
continue
for searching_node in same_value_node_list:
searching_i, searching_j = searching_node
if mat[searching_j][searching_i] == 0:
continue
stack = deque()
stack.append([searching_i, searching_j])
while len(stack) > 0:
current_node = stack.popleft()
#print(f'stack: {stack}')
current_i, current_j = current_node
#print(f'current : ({current_i}, {current_j}) = {mat[current_j][current_i]}')
if mat[current_j][current_i] == 0:
continue
near_node_list = near_check(mat, current_node)
#print(f'near_node : {near_node_list}')
for node in near_node_list:
node_i, node_j = node
if mat[node_j][node_i] == 0:
continue
if mat[current_j][current_i] >= mat[node_j][node_i]:
stack.append(node)
#print(f'append node : ({node_i}, {node_j})')
mat[current_j][current_i] = 0
num_of_Mount += 1
'''for i in mat:
print(i)
print('\n')'''
print(num_of_Mount)
|
d4321b9d3679ecc77b78f4e72dff7dd5100f92c6
|
9734c93c86c982b1ce046340bac9e53645b261b8
|
/tests/parsers/systemd_journal.py
|
5f378d3d4041cef535987a02edcda75af03d51d6
|
[
"Apache-2.0"
] |
permissive
|
log2timeline/plaso
|
cd72dd407d6c5627506c14f58cb8f6a6926aa808
|
d6022f8cfebfddf2d08ab2d300a41b61f3349933
|
refs/heads/main
| 2023-09-02T08:43:48.241198
| 2023-08-19T07:28:12
| 2023-08-19T07:28:12
| 23,812,315
| 1,506
| 421
|
Apache-2.0
| 2023-09-04T08:24:53
| 2014-09-08T23:29:28
|
Python
|
UTF-8
|
Python
| false
| false
| 5,652
|
py
|
systemd_journal.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Systemd Journal parser."""
import unittest
from plaso.containers import warnings
from plaso.parsers import systemd_journal
from tests.parsers import test_lib
class SystemdJournalParserTest(test_lib.ParserTestCase):
"""Tests for the Systemd Journal parser."""
def testParse(self):
"""Tests the Parse function."""
parser = systemd_journal.SystemdJournalParser()
storage_writer = self._ParseFile([
'systemd', 'journal', 'system.journal'], parser)
number_of_event_data = storage_writer.GetNumberOfAttributeContainers(
'event_data')
self.assertEqual(number_of_event_data, 2101)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
expected_event_values = {
'body': 'Started User Manager for UID 1000.',
'data_type': 'systemd:journal',
'hostname': 'test-VirtualBox',
'pid': '1',
'reporter': 'systemd',
'written_time': '2017-01-27T09:40:55.913258+00:00'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 0)
self.CheckEventData(event_data, expected_event_values)
    # Test an XZ compressed data log entry.
expected_event_values = {
'body': 'a' * 692,
'data_type': 'systemd:journal',
'hostname': 'test-VirtualBox',
'pid': '22921',
'reporter': 'root',
'written_time': '2017-02-06T16:24:32.564585+00:00'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 2098)
self.CheckEventData(event_data, expected_event_values)
def testParseLZ4(self):
"""Tests the Parse function on a journal with LZ4 compressed events."""
parser = systemd_journal.SystemdJournalParser()
storage_writer = self._ParseFile([
'systemd', 'journal', 'system.journal.lz4'], parser)
number_of_event_data = storage_writer.GetNumberOfAttributeContainers(
'event_data')
self.assertEqual(number_of_event_data, 85)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
expected_event_values = {
'body': 'Reached target Paths.',
'data_type': 'systemd:journal',
'hostname': 'testlol',
'pid': '822',
'reporter': 'systemd',
'written_time': '2018-07-03T15:00:16.682340+00:00'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 0)
self.CheckEventData(event_data, expected_event_values)
    # Test an LZ4 compressed data log entry.
# The text used in the test message was triplicated to make it long enough
# to trigger the LZ4 compression.
# Source: https://github.com/systemd/systemd/issues/6237
expected_body_parts = [' textual user names.']
expected_body_parts.extend(
(' Yes, as you found out 0day is not a valid username. I wonder which '
'tool permitted you to create it in the first place. Note that not '
'permitting numeric first characters is done on purpose: to avoid '
'ambiguities between numeric UID and textual user names.') * 3)
expected_body = ''.join(expected_body_parts)
expected_event_values = {
'body': expected_body,
'data_type': 'systemd:journal',
'hostname': 'testlol',
'pid': '34757',
'reporter': 'test',
'written_time': '2018-07-03T15:19:04.667807+00:00'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 84)
self.CheckEventData(event_data, expected_event_values)
def testParseDirty(self):
"""Tests the Parse function on a 'dirty' journal file."""
parser = systemd_journal.SystemdJournalParser()
storage_writer = self._ParseFile([
'systemd', 'journal',
'system@00053f9c9a4c1e0e-2e18a70e8b327fed.journalTILDE'], parser)
number_of_event_data = storage_writer.GetNumberOfAttributeContainers(
'event_data')
self.assertEqual(number_of_event_data, 2211)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 1)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
expected_event_values = {
'body': ('Runtime journal (/run/log/journal/) is 1.2M, max 9.9M, 8.6M '
'free.'),
'data_type': 'systemd:journal',
'hostname': 'test-VirtualBox',
'pid': '569',
'reporter': 'systemd-journald',
'written_time': '2016-10-24T13:20:01.063423+00:00'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 0)
self.CheckEventData(event_data, expected_event_values)
generator = storage_writer.GetAttributeContainers(
warnings.ExtractionWarning.CONTAINER_TYPE)
test_warnings = list(generator)
test_warning = test_warnings[0]
self.assertIsNotNone(test_warning)
expected_message = (
'Unable to parse journal entry at offset: 0x0041bfb0 with error: '
'object offset should be after hash tables (0 < 2527472)')
self.assertEqual(test_warning.message, expected_message)
if __name__ == '__main__':
unittest.main()
|
0314bd661b49af8ecf559d27c57e29d5439cf7c2
|
df2dbd755cc34689d735db2dcc635378307afc19
|
/日常自动化工具/win32api/key_press.py
|
f35d263eb26d4365cf7e27156eeb899091c4bd03
|
[] |
no_license
|
ywz978020607/History
|
0c0c72fccba2189492f0137e3ea2fe83af6b4417
|
6979784ef14dfa97d9dfc4e19b3376ec05e4e1e4
|
refs/heads/master
| 2023-05-25T17:31:28.875449
| 2023-05-18T08:57:15
| 2023-05-18T08:57:15
| 262,790,307
| 126
| 61
| null | 2020-11-13T16:37:09
| 2020-05-10T13:13:26
|
Python
|
UTF-8
|
Python
| false
| false
| 188
|
py
|
key_press.py
|
import win32api
import win32con
import time
time.sleep(5)
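# win32api.keybd_event(virtual_key, scan_code, flags, extra_info): flags=0 sends
# a key-down event and win32con.KEYEVENTF_KEYUP sends the matching key-up.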
win32api.keybd_event(0x41,0,0,0)  # press the 'A' key (0x41 is the virtual-key code for 'A', not Enter)
win32api.keybd_event(0x41,0,win32con.KEYEVENTF_KEYUP,0)  # release the key
print("ok")
|
862055a16f5c4eaa61c1a97fe3955617feb52152
|
3f7028cc89a79582266a19acbde0d6b066a568de
|
/test/extensions/filters/network/thrift_proxy/driver/server.py
|
5741a98bbf0c8b9e3392cd9e12da227654c61c11
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
envoyproxy/envoy
|
882d3c7f316bf755889fb628bee514bb2f6f66f0
|
72f129d273fa32f49581db3abbaf4b62e3e3703c
|
refs/heads/main
| 2023-08-31T09:20:01.278000
| 2023-08-31T08:58:36
| 2023-08-31T08:58:36
| 65,214,191
| 21,404
| 4,756
|
Apache-2.0
| 2023-09-14T21:56:37
| 2016-08-08T15:07:24
|
C++
|
UTF-8
|
Python
| false
| false
| 7,086
|
py
|
server.py
|
#!/usr/bin/env python
import argparse
import logging
import sys
from generated.example import Example
from generated.example.ttypes import (Result, TheWorks, AppException)
from thrift import Thrift, TMultiplexedProcessor
from thrift.protocol import TBinaryProtocol, TCompactProtocol, TJSONProtocol
from thrift.server import TServer
from thrift.transport import TSocket
from thrift.transport import TTransport
from fbthrift import THeaderTransport
# On Windows we run this test on Python3
if sys.version_info[0] != 2:
sys.stdin.reconfigure(encoding='utf-8')
sys.stdout.reconfigure(encoding='utf-8')
class SuccessHandler:
def ping(self):
print("server: ping()")
def poke(self):
print("server: poke()")
def add(self, a, b):
result = a + b
print("server: add({0}, {1}) = {2}".format(a, b, result))
return result
def execute(self, param):
print("server: execute({0})".format(param))
if "all" in param.return_fields:
return Result(param.the_works)
elif "none" in param.return_fields:
return Result(TheWorks())
the_works = TheWorks()
for field, value in vars(param.the_works).items():
if field in param.return_fields:
setattr(the_works, field, value)
return Result(the_works)
class IDLExceptionHandler:
def ping(self):
print("server: ping()")
def poke(self):
print("server: poke()")
def add(self, a, b):
result = a + b
print("server: add({0}, {1}) = {2}".format(a, b, result))
return result
def execute(self, param):
print("server: app error: execute failed")
raise AppException("execute failed")
class ExceptionHandler:
def ping(self):
print("server: ping failure")
raise Thrift.TApplicationException(
type=Thrift.TApplicationException.INTERNAL_ERROR,
message="for ping",
)
def poke(self):
print("server: poke failure")
raise Thrift.TApplicationException(
type=Thrift.TApplicationException.INTERNAL_ERROR,
message="for poke",
)
def add(self, a, b):
print("server: add failure")
raise Thrift.TApplicationException(
type=Thrift.TApplicationException.INTERNAL_ERROR,
message="for add",
)
def execute(self, param):
print("server: execute failure")
raise Thrift.TApplicationException(
type=Thrift.TApplicationException.INTERNAL_ERROR,
message="for execute",
)
def main(cfg):
if cfg.unix:
if cfg.addr == "":
sys.exit("invalid listener unix domain socket: {}".format(cfg.addr))
else:
try:
(host, port) = cfg.addr.rsplit(":", 1)
port = int(port)
except ValueError:
sys.exit("invalid listener address: {}".format(cfg.addr))
if cfg.response == "success":
handler = SuccessHandler()
elif cfg.response == "idl-exception":
handler = IDLExceptionHandler()
elif cfg.response == "exception":
# squelch traceback for the exception we throw
logging.getLogger().setLevel(logging.CRITICAL)
handler = ExceptionHandler()
else:
sys.exit("unknown server response mode {0}".format(cfg.response))
processor = Example.Processor(handler)
if cfg.service is not None:
# wrap processor with multiplexor
multi = TMultiplexedProcessor.TMultiplexedProcessor()
multi.registerProcessor(cfg.service, processor)
processor = multi
if cfg.unix:
transport = TSocket.TServerSocket(unix_socket=cfg.addr)
else:
transport = TSocket.TServerSocket(host=host, port=port)
if cfg.transport == "framed":
transport_factory = TTransport.TFramedTransportFactory()
elif cfg.transport == "unframed":
transport_factory = TTransport.TBufferedTransportFactory()
elif cfg.transport == "header":
if cfg.protocol == "binary":
transport_factory = THeaderTransport.THeaderTransportFactory(
THeaderTransport.T_BINARY_PROTOCOL)
elif cfg.protocol == "compact":
transport_factory = THeaderTransport.THeaderTransportFactory(
THeaderTransport.T_COMPACT_PROTOCOL)
else:
sys.exit("header transport cannot be used with protocol {0}".format(cfg.protocol))
else:
sys.exit("unknown transport {0}".format(cfg.transport))
if cfg.protocol == "binary":
protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
elif cfg.protocol == "compact":
protocol_factory = TCompactProtocol.TCompactProtocolFactory()
elif cfg.protocol == "json":
protocol_factory = TJSONProtocol.TJSONProtocolFactory()
else:
sys.exit("unknown protocol {0}".format(cfg.protocol))
print(
"Thrift Server listening on {0} for {1} {2} requests".format(
cfg.addr, cfg.transport, cfg.protocol))
if cfg.service is not None:
print("Thrift Server service name {0}".format(cfg.service))
if cfg.response == "idl-exception":
print("Thrift Server will throw IDL exceptions when defined")
elif cfg.response == "exception":
print("Thrift Server will throw Thrift exceptions for all messages")
server = TServer.TThreadedServer(processor, transport, transport_factory, protocol_factory)
try:
server.serve()
except KeyboardInterrupt:
        print()
if __name__ == "__main__":
logging.basicConfig()
parser = argparse.ArgumentParser(description="Thrift server to match client.py.")
parser.add_argument(
"-a",
"--addr",
metavar="ADDR",
dest="addr",
default=":0",
help="Listener address for server in the form host:port. The host is optional. If --unix"
+ " is set, the address is the socket name.",
)
parser.add_argument(
"-m",
"--multiplex",
metavar="SERVICE",
dest="service",
help="Enable service multiplexing and set the service name.",
)
parser.add_argument(
"-p",
"--protocol",
help="Selects a protocol.",
dest="protocol",
default="binary",
choices=["binary", "compact", "json"],
)
parser.add_argument(
"-r",
"--response",
dest="response",
default="success",
choices=["success", "idl-exception", "exception"],
help="Controls how the server responds to requests",
)
parser.add_argument(
"-t",
"--transport",
help="Selects a transport.",
dest="transport",
default="framed",
choices=["framed", "unframed", "header"],
)
parser.add_argument(
"-u",
"--unix",
dest="unix",
action="store_true",
)
cfg = parser.parse_args()
try:
main(cfg)
except Thrift.TException as tx:
sys.exit("Thrift exception: {0}".format(tx.message))
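# Example invocations (a hedged sketch; the file name and ports are illustrative,
# the flags are the ones defined by the argparse setup above):
#   python server.py --addr localhost:9090 --transport framed --protocol binary
#   python server.py --addr /tmp/thrift.sock --unix --response idl-exception
#   python server.py --addr :9090 --multiplex ExampleService --transport header --protocol compact
# The matching client must be started with the same --transport/--protocol (and
# the same --multiplex service name, if used).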
|
a216a7f4faaf25409c9d57d6938bdf69d91bfd05
|
140e90b1cdc47650cc88637d79e122d0695a8105
|
/pyrender/font.py
|
5ac530d7b949f50314a0d9cf5d744bedcace0571
|
[
"MIT"
] |
permissive
|
mmatl/pyrender
|
2d7a92cab4e134a65f98990549d98ef3c63aa668
|
a59963ef890891656fd17c90e12d663233dcaa99
|
refs/heads/master
| 2023-07-10T17:31:50.861253
| 2022-04-30T19:40:43
| 2022-04-30T19:40:43
| 166,606,982
| 1,166
| 224
|
MIT
| 2023-04-06T08:09:08
| 2019-01-20T00:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 8,225
|
py
|
font.py
|
"""Font texture loader and processor.
Author: Matthew Matl
"""
import freetype
import numpy as np
import ctypes
import os
import OpenGL
from OpenGL.GL import *
from .constants import TextAlign, FLOAT_SZ
from .texture import Texture
from .sampler import Sampler
class FontCache(object):
"""A cache for fonts.
"""
def __init__(self, font_dir=None):
self._font_cache = {}
self.font_dir = font_dir
if self.font_dir is None:
base_dir, _ = os.path.split(os.path.realpath(__file__))
self.font_dir = os.path.join(base_dir, 'fonts')
def get_font(self, font_name, font_pt):
# If it's a file, load it directly, else, try to load from font dir.
if os.path.isfile(font_name):
font_filename = font_name
_, font_name = os.path.split(font_name)
            font_name, _ = os.path.splitext(font_name)
else:
font_filename = os.path.join(self.font_dir, font_name) + '.ttf'
cid = OpenGL.contextdata.getContext()
key = (cid, font_name, int(font_pt))
if key not in self._font_cache:
self._font_cache[key] = Font(font_filename, font_pt)
return self._font_cache[key]
def clear(self):
for key in self._font_cache:
self._font_cache[key].delete()
self._font_cache = {}
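# A minimal usage sketch for FontCache (hedged: 'OpenSans-Regular' is assumed to
# ship in the package's fonts/ directory, a valid OpenGL context must be current
# since the cache key includes the context id, and render_string additionally
# assumes a text shader program with its uniforms is already bound):
#
#   cache = FontCache()
#   font = cache.get_font('OpenSans-Regular', font_pt=40)
#   font._add_to_context()
#   font.render_string('hello', x=10, y=10)
#   cache.clear()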
class Character(object):
"""A single character, with its texture and attributes.
"""
def __init__(self, texture, size, bearing, advance):
self.texture = texture
self.size = size
self.bearing = bearing
self.advance = advance
class Font(object):
"""A font object.
Parameters
----------
font_file : str
The file to load the font from.
font_pt : int
The height of the font in pixels.
"""
def __init__(self, font_file, font_pt=40):
self.font_file = font_file
self.font_pt = int(font_pt)
self._face = freetype.Face(font_file)
self._face.set_pixel_sizes(0, font_pt)
self._character_map = {}
for i in range(0, 128):
# Generate texture
face = self._face
face.load_char(chr(i))
buf = face.glyph.bitmap.buffer
src = (np.array(buf) / 255.0).astype(np.float32)
src = src.reshape((face.glyph.bitmap.rows,
face.glyph.bitmap.width))
tex = Texture(
sampler=Sampler(
magFilter=GL_LINEAR,
minFilter=GL_LINEAR,
wrapS=GL_CLAMP_TO_EDGE,
wrapT=GL_CLAMP_TO_EDGE
),
source=src,
source_channels='R',
)
character = Character(
texture=tex,
size=np.array([face.glyph.bitmap.width,
face.glyph.bitmap.rows]),
bearing=np.array([face.glyph.bitmap_left,
face.glyph.bitmap_top]),
advance=face.glyph.advance.x
)
self._character_map[chr(i)] = character
self._vbo = None
self._vao = None
@property
def font_file(self):
"""str : The file the font was loaded from.
"""
return self._font_file
@font_file.setter
def font_file(self, value):
self._font_file = value
@property
def font_pt(self):
"""int : The height of the font in pixels.
"""
return self._font_pt
@font_pt.setter
def font_pt(self, value):
self._font_pt = int(value)
def _add_to_context(self):
self._vao = glGenVertexArrays(1)
glBindVertexArray(self._vao)
self._vbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self._vbo)
glBufferData(GL_ARRAY_BUFFER, FLOAT_SZ * 6 * 4, None, GL_DYNAMIC_DRAW)
glEnableVertexAttribArray(0)
glVertexAttribPointer(
0, 4, GL_FLOAT, GL_FALSE, 4 * FLOAT_SZ, ctypes.c_void_p(0)
)
glBindVertexArray(0)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
for c in self._character_map:
ch = self._character_map[c]
if not ch.texture._in_context():
ch.texture._add_to_context()
def _remove_from_context(self):
for c in self._character_map:
ch = self._character_map[c]
ch.texture.delete()
if self._vao is not None:
glDeleteVertexArrays(1, [self._vao])
glDeleteBuffers(1, [self._vbo])
self._vao = None
self._vbo = None
def _in_context(self):
return self._vao is not None
def _bind(self):
glBindVertexArray(self._vao)
def _unbind(self):
glBindVertexArray(0)
def delete(self):
self._unbind()
self._remove_from_context()
def render_string(self, text, x, y, scale=1.0,
align=TextAlign.BOTTOM_LEFT):
"""Render a string to the current view buffer.
Note
----
Assumes correct shader program already bound w/ uniforms set.
Parameters
----------
text : str
The text to render.
x : int
Horizontal pixel location of text.
y : int
Vertical pixel location of text.
        scale : float
Scaling factor for text.
align : int
One of the TextAlign options which specifies where the ``x``
and ``y`` parameters lie on the text. For example,
:attr:`.TextAlign.BOTTOM_LEFT` means that ``x`` and ``y`` indicate
the position of the bottom-left corner of the textbox.
"""
glActiveTexture(GL_TEXTURE0)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glDisable(GL_DEPTH_TEST)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
self._bind()
# Determine width and height of text relative to x, y
width = 0.0
height = 0.0
for c in text:
ch = self._character_map[c]
height = max(height, ch.bearing[1] * scale)
width += (ch.advance >> 6) * scale
# Determine offsets based on alignments
xoff = 0
yoff = 0
if align == TextAlign.BOTTOM_RIGHT:
xoff = -width
elif align == TextAlign.BOTTOM_CENTER:
xoff = -width / 2.0
elif align == TextAlign.TOP_LEFT:
yoff = -height
elif align == TextAlign.TOP_RIGHT:
yoff = -height
xoff = -width
elif align == TextAlign.TOP_CENTER:
yoff = -height
xoff = -width / 2.0
elif align == TextAlign.CENTER:
xoff = -width / 2.0
yoff = -height / 2.0
elif align == TextAlign.CENTER_LEFT:
yoff = -height / 2.0
elif align == TextAlign.CENTER_RIGHT:
xoff = -width
yoff = -height / 2.0
x += xoff
y += yoff
ch = None
for c in text:
ch = self._character_map[c]
xpos = x + ch.bearing[0] * scale
ypos = y - (ch.size[1] - ch.bearing[1]) * scale
w = ch.size[0] * scale
h = ch.size[1] * scale
vertices = np.array([
[xpos, ypos, 0.0, 0.0],
[xpos + w, ypos, 1.0, 0.0],
[xpos + w, ypos + h, 1.0, 1.0],
[xpos + w, ypos + h, 1.0, 1.0],
[xpos, ypos + h, 0.0, 1.0],
[xpos, ypos, 0.0, 0.0],
], dtype=np.float32)
ch.texture._bind()
glBindBuffer(GL_ARRAY_BUFFER, self._vbo)
glBufferData(
GL_ARRAY_BUFFER, FLOAT_SZ * 6 * 4, vertices, GL_DYNAMIC_DRAW
)
            # TODO MAKE THIS MORE EFFICIENT, glBufferSubData is broken
# glBufferSubData(
# GL_ARRAY_BUFFER, 0, 6 * 4 * FLOAT_SZ,
# np.ascontiguousarray(vertices.flatten)
# )
glDrawArrays(GL_TRIANGLES, 0, 6)
x += (ch.advance >> 6) * scale
self._unbind()
if ch:
ch.texture._unbind()
|
264169d936e993af74b52d178786c3046a64c1ca
|
86092e7c45eeb677576edab301cebb592a8baa31
|
/analyze-dns-nsid.py
|
d46e5db122a06dccbb3fd3df1a776cc5f1d9594a
|
[] |
no_license
|
RIPE-Atlas-Community/ripe-atlas-community-contrib
|
73b745aec4845452ba7eaa30932e7bee934171e8
|
2fb7d7e8143ee3884d0ad556612ba759ee414955
|
refs/heads/master
| 2023-06-08T18:13:14.845089
| 2023-06-06T14:08:08
| 2023-06-06T14:08:08
| 8,092,696
| 144
| 31
| null | 2021-11-25T14:15:35
| 2013-02-08T11:52:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,674
|
py
|
analyze-dns-nsid.py
|
#!/usr/bin/env python
""" Python code to analyze RIPE Atlas UDM (User-Defined Measurements)
results. This one is for DNS NSID queries (find the identity of a name
server). It uses a RIPE Atlas JSON file as input or a reference to a
public measurement.
Stephane Bortzmeyer <bortzmeyer@nic.fr>
"""
import json
import sys
import base64
import collections
import urllib
import getopt
# DNS Python http://www.dnspython.org/
import dns.message
class Items:
def __init__(self):
self.num = 0
def by_num(l, r):
return -cmp(ns_names[l].num, ns_names[r].num)
def usage(msg=None):
print >>sys.stderr, "Usage: %s [-f file] [-d measurement]" % sys.argv[0]
if msg is not None:
print >>sys.stderr, msg
# Try measurement #1008591 for instance
results = None
try:
optlist, args = getopt.getopt (sys.argv[1:], "f:d:", ["file=","data-api="])
for option, value in optlist:
if option == "--file" or option == "-f":
results = json.loads(open(value).read())
elif option == "--data-api" or option == "-d":
url = "https://atlas.ripe.net/api/v1/measurement/" + value + "/result/"
results = json.load(urllib.urlopen(url))
else:
usage("Unknown option %s" % option)
sys.exit(1)
except getopt.error, reason:
usage(reason)
sys.exit(1)
if results is None:
if len(args) == 1:
results = json.loads(open(args[0]).read())
else:
usage("One of -f or -d is mandatory")
sys.exit(1)
net_failures = 0
nsid_failures = 0
successes = 0
probes = collections.defaultdict(Items)
ns_names = collections.defaultdict(Items)
for result in results:
probes[result['prb_id']].num += 1
if not result.has_key('result'):
net_failures += 1
else:
answer = result['result']['abuf'] + "=="
content = base64.b64decode(answer)
msg = dns.message.from_wire(content)
ns_name = None
for opt in msg.options:
if opt.otype == dns.edns.NSID:
ns_name = opt.data
ns_names[ns_name].num += 1
if ns_name is not None:
successes += 1
else:
nsid_failures += 1
if len(results) != len(probes):
print >>sys.stderr, "%i results but %i probes" % (len(results), len(probes))
sys.exit(1)
print "%i probes, %i successes, %i network failures, %i NSID failures" % (len(probes),
successes, net_failures, nsid_failures)
names = ns_names.keys()
names.sort(by_num)
for ns in names:
print "%s: %i (%.0f %%)" % (ns, ns_names[ns].num, ns_names[ns].num*100.0/successes)
|
3b07765f8e6ddf1333145a850f391f6145cf704b
|
e31f84c20af7be8646f03faf22ac55ad041444a3
|
/feature_engine/selection/single_feature_performance.py
|
35e95336fb184a8700afeaf77f4863047a181b9b
|
[
"BSD-3-Clause"
] |
permissive
|
feature-engine/feature_engine
|
564aa2f298bb1beb0606bd5d51261b4d1085a8df
|
3343305a01d1acfeff846b65d33a5686c6e8c84f
|
refs/heads/main
| 2023-08-07T09:19:24.315277
| 2023-06-08T06:27:45
| 2023-06-08T06:27:45
| 163,630,824
| 874
| 105
|
BSD-3-Clause
| 2023-09-13T14:02:23
| 2018-12-31T01:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 8,211
|
py
|
single_feature_performance.py
|
import warnings
from typing import List, Union
import pandas as pd
from sklearn.model_selection import cross_validate
from feature_engine._docstrings.fit_attributes import (
_feature_names_in_docstring,
_n_features_in_docstring,
)
from feature_engine._docstrings.init_parameters.selection import (
_confirm_variables_docstring,
)
from feature_engine._docstrings.methods import _fit_transform_docstring
from feature_engine._docstrings.substitute import Substitution
from feature_engine.dataframe_checks import check_X_y
from feature_engine._docstrings.selection._docstring import (
_cv_docstring,
_estimator_docstring,
_features_to_drop_docstring,
_fit_docstring,
_get_support_docstring,
_initial_model_performance_docstring,
_scoring_docstring,
_threshold_docstring,
_transform_docstring,
_variables_attribute_docstring,
_variables_numerical_docstring,
)
from feature_engine.selection.base_selector import BaseSelector
from feature_engine.tags import _return_tags
from feature_engine.variable_handling._init_parameter_checks import (
_check_init_parameter_variables,
)
from feature_engine.variable_handling.variable_type_selection import (
find_or_check_numerical_variables,
)
Variables = Union[None, int, str, List[Union[str, int]]]
@Substitution(
estimator=_estimator_docstring,
scoring=_scoring_docstring,
threshold=_threshold_docstring,
cv=_cv_docstring,
variables=_variables_numerical_docstring,
confirm_variables=_confirm_variables_docstring,
initial_model_performance_=_initial_model_performance_docstring,
features_to_drop_=_features_to_drop_docstring,
variables_=_variables_attribute_docstring,
feature_names_in_=_feature_names_in_docstring,
n_features_in_=_n_features_in_docstring,
fit=_fit_docstring,
transform=_transform_docstring,
fit_transform=_fit_transform_docstring,
get_support=_get_support_docstring,
)
class SelectBySingleFeaturePerformance(BaseSelector):
"""
SelectBySingleFeaturePerformance() selects features based on the performance
of a machine learning model trained utilising a single feature. In other
words, it trains a machine learning model for every single feature, then determines
each model's performance. If the performance of the model is greater than a user
specified threshold, then the feature is retained, otherwise removed.
    The models are trained on each individual feature using cross-validation.
The performance metric to evaluate and the machine learning model to train are
specified by the user.
More details in the :ref:`User Guide <single_feat_performance>`.
Parameters
----------
{estimator}
{variables}
{scoring}
{threshold}
{cv}
{confirm_variables}
Attributes
----------
{features_to_drop_}
feature_performance_:
Dictionary with the single feature model performance per feature.
{variables_}
{feature_names_in_}
{n_features_in_}
Methods
-------
{fit}
{fit_transform}
{get_support}
{transform}
References
----------
Selection based on single feature performance was used in Credit Risk modelling as
discussed in the following talk at PyData London 2017:
.. [1] Galli S. "Machine Learning in Financial Risk Assessment".
https://www.youtube.com/watch?v=KHGGlozsRtA
Examples
--------
>>> import pandas as pd
>>> from sklearn.ensemble import RandomForestClassifier
>>> from feature_engine.selection import SelectBySingleFeaturePerformance
>>> X = pd.DataFrame(dict(x1 = [1000,2000,1000,1000,2000,3000],
>>> x2 = [2,4,3,1,2,2],
>>> x3 = [1,1,1,0,0,0],
>>> x4 = [1,2,1,1,0,1],
>>> x5 = [1,1,1,1,1,1]))
>>> y = pd.Series([1,0,0,1,1,0])
>>> sfp = SelectBySingleFeaturePerformance(
>>> RandomForestClassifier(random_state=42),
>>> cv=2)
>>> sfp.fit_transform(X, y)
x2 x3
0 2 1
1 4 1
2 3 1
3 1 0
4 2 0
5 2 0
"""
def __init__(
self,
estimator,
scoring: str = "roc_auc",
cv=3,
threshold: Union[int, float, None] = None,
variables: Variables = None,
confirm_variables: bool = False,
):
if threshold:
if not isinstance(threshold, (int, float)):
raise ValueError("threshold can only be integer, float or None")
if scoring == "roc_auc" and (threshold < 0.5 or threshold > 1):
raise ValueError(
"roc-auc score should vary between 0.5 and 1. Pick a "
"threshold within this interval."
)
if scoring == "r2" and (threshold < 0 or threshold > 1):
raise ValueError(
"r2 takes values between -1 and 1. To select features the "
"transformer considers the absolute value. Pick a threshold within "
"0 and 1."
)
super().__init__(confirm_variables)
self.variables = _check_init_parameter_variables(variables)
self.estimator = estimator
self.scoring = scoring
self.threshold = threshold
self.cv = cv
def fit(self, X: pd.DataFrame, y: pd.Series):
"""
Select features.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The input dataframe
y: array-like of shape (n_samples)
Target variable. Required to train the estimator.
"""
# check input dataframe
X, y = check_X_y(X, y)
# If required exclude variables that are not in the input dataframe
self._confirm_variables(X)
# find numerical variables or check variables entered by user
self.variables_ = find_or_check_numerical_variables(X, self.variables_)
if len(self.variables_) == 1 and self.threshold is None:
raise ValueError(
"When evaluating a single feature you need to manually set a value "
"for the threshold. "
f"The transformer is evaluating the performance of {self.variables_} "
f"and the threshold was left to {self.threshold} when initializing "
f"the transformer."
)
self.feature_performance_ = {}
# train a model for every feature and store the performance
for feature in self.variables_:
model = cross_validate(
self.estimator,
X[feature].to_frame(),
y,
cv=self.cv,
return_estimator=False,
scoring=self.scoring,
)
self.feature_performance_[feature] = model["test_score"].mean()
# select features
if not self.threshold:
threshold = pd.Series(self.feature_performance_).mean()
else:
threshold = self.threshold
self.features_to_drop_ = [
f
for f in self.feature_performance_.keys()
if self.feature_performance_[f] < threshold
]
# check we are not dropping all the columns in the df
if len(self.features_to_drop_) == len(X.columns):
warnings.warn("All features will be dropped, try changing the threshold.")
# save input features
self._get_feature_names_in(X)
return self
def _more_tags(self):
tags_dict = _return_tags()
tags_dict["variables"] = "numerical"
tags_dict["requires_y"] = True
# add additional test that fails
tags_dict["_xfail_checks"][
"check_parameters_default_constructible"
] = "transformer has 1 mandatory parameter"
tags_dict["_xfail_checks"]["check_estimators_nan_inf"] = "transformer allows NA"
msg = "transformers need more than 1 feature to work"
tags_dict["_xfail_checks"]["check_fit2d_1feature"] = msg
return tags_dict
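# A short follow-up sketch to the docstring example above (values are
# illustrative; the attributes are those set by fit()):
#
#   sfp.feature_performance_   # e.g. {'x1': 0.5, 'x2': 0.83, 'x3': 1.0, ...}
#   sfp.features_to_drop_      # features scoring below the (mean) threshold
#   sfp.get_support()          # boolean mask over the original columns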
|
253ea4fc6e865174cfbc94a53897191fdf458b92
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/ai/mmdetection/configs/fcos/fcos_r50-caffe_fpn_gn-head-center_1x_coco.py
|
9e4eb1d5981761fab8fe0bb876ff7ef243ac31f9
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
fcos_r50-caffe_fpn_gn-head-center_1x_coco.py
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5))
|
dfecf2a4e9a12348976b9fcf08c7b39dde67ece7
|
5a5bde743ddbcfa28dbd71dbd8fe1835010763df
|
/functest/func_obj_loader_consistency.py
|
468a4bc25c90e48729084dbd0a6c1a523527b934
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lightmetrica/lightmetrica-v3
|
fb85230c92dac12e754ca570cd54d45b5b6e457c
|
70601dbef13a513df032911d47f790791671a8e0
|
refs/heads/master
| 2021-10-29T08:13:40.140577
| 2021-10-22T10:50:39
| 2021-10-22T10:50:39
| 189,633,321
| 105
| 14
|
NOASSERTION
| 2021-10-20T14:34:34
| 2019-05-31T17:27:55
|
C++
|
UTF-8
|
Python
| false
| false
| 2,674
|
py
|
func_obj_loader_consistency.py
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Checking consistency of OBJ loader
# %load_ext autoreload
# %autoreload 2
import lmenv
env = lmenv.load('.lmenv')
import os
import imageio
import pandas as pd
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import lmscene
import lightmetrica as lm
# %load_ext lightmetrica_jupyter
lm.init()
lm.log.init('jupyter')
lm.progress.init('jupyter')
lm.info()
lm.comp.load_plugin(os.path.join(env.bin_path, 'accel_embree'))
lm.comp.load_plugin(os.path.join(env.bin_path, 'objloader_tinyobjloader'))
def build_and_render(scene_name):
lm.reset()
accel = lm.load_accel('accel', 'embree')
scene = lm.load_scene('scene', 'default', accel=accel)
lmscene.load(scene, env.scene_path, scene_name)
scene.build()
film = lm.load_film('film_output', 'bitmap', w=1920, h=1080)
renderer = lm.load_renderer('renderer', 'raycast', scene=scene, output=film)
renderer.render()
return np.copy(film.buffer())
objloaders = ['tinyobjloader']
scene_names = lmscene.scenes_small()
def rmse_pixelwised(img1, img2):
return np.sqrt(np.sum((img1 - img2) ** 2, axis=2) / 3)
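# rmse_pixelwised returns an HxW map of per-pixel root-mean-square error,
# averaging the squared difference over the three colour channels; e.g. for two
# (H, W, 3) float images img1 and img2, rmse_pixelwised(img1, img2).shape == (H, W).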
for scene_name in scene_names:
# Reference
lm.objloader.init('simple')
ref = build_and_render(scene_name)
# Visualize reference
f = plt.figure(figsize=(15,15))
ax = f.add_subplot(111)
ax.imshow(np.clip(np.power(ref,1/2.2),0,1), origin='lower')
ax.set_title('{}, simple'.format(scene_name))
plt.show()
# Check consistency with other loaders
for objloader in objloaders:
# Render
lm.objloader.init(objloader, {})
img = build_and_render(scene_name)
diff = rmse_pixelwised(ref, img)
# Visualize
f = plt.figure(figsize=(15,15))
ax = f.add_subplot(111)
ax.imshow(np.clip(np.power(img,1/2.2),0,1), origin='lower')
ax.set_title('{}, {}'.format(scene_name, objloader))
plt.show()
# Visualize the difference image
f = plt.figure(figsize=(15,15))
ax = f.add_subplot(111)
im = ax.imshow(diff, origin='lower')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
ax.set_title('{}, simple vs. {}'.format(scene_name, objloader))
plt.show()
|
b7e191d2a4dd7d36e4d4d5aa675f8a93a28846e4
|
2212a32833776a5d5d2164d8efd11bd18bd3f768
|
/tf_agents/train/ppo_learner.py
|
2dd1a5ba14e954246b2bf86fcc009899db8b1ead
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/agents
|
f39805fb98ef9af712dcaff3ba49e1ac6d42804b
|
eca1093d3a047e538f17f6ab92ab4d8144284f23
|
refs/heads/master
| 2023-08-14T04:56:30.774797
| 2023-08-02T17:43:44
| 2023-08-02T17:44:09
| 157,936,206
| 2,755
| 848
|
Apache-2.0
| 2023-07-26T02:35:32
| 2018-11-17T00:29:12
|
Python
|
UTF-8
|
Python
| false
| false
| 14,808
|
py
|
ppo_learner.py
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO Learner implementation."""
from typing import Callable, Optional, Text
import gin
import tensorflow as tf
from tf_agents.agents import tf_agent
from tf_agents.agents.ppo import ppo_agent
from tf_agents.networks import utils
from tf_agents.specs import tensor_spec
from tf_agents.train import learner
from tf_agents.typing import types
from tf_agents.utils import common
from tf_agents.utils import nest_utils
@gin.configurable
class PPOLearner(object):
"""Manages all the learning details needed when training an PPO.
These include:
* Using distribution strategies correctly
* Summaries
* Checkpoints
* Minimizing entering/exiting TF context:
      Especially in the case of TPUs, scheduling a single TPU program to
      perform multiple train steps is critical for performance.
* Generalizes the train call to be done correctly across CPU, GPU, or TPU
executions managed by DistributionStrategies. This uses `strategy.run` and
then makes sure to do a reduce operation over the `LossInfo` returned by
the agent.
"""
def __init__(
self,
root_dir: Text,
train_step: tf.Variable,
agent: ppo_agent.PPOAgent,
experience_dataset_fn: Callable[..., tf.data.Dataset],
normalization_dataset_fn: Callable[..., tf.data.Dataset],
num_samples: int,
num_epochs: int = 1,
minibatch_size: Optional[int] = None,
shuffle_buffer_size: Optional[int] = None,
after_train_strategy_step_fn: Optional[
Callable[[types.NestedTensor, tf_agent.LossInfo], None]
] = None,
triggers: Optional[Callable[..., None]] = None,
checkpoint_interval: int = 100000,
summary_interval: int = 1000,
use_kwargs_in_agent_train: bool = False,
strategy: Optional[tf.distribute.Strategy] = None,
):
"""Initializes a PPOLearner instance.
```python
agent = ppo_agent.PPOAgent(...,
compute_value_and_advantage_in_train=False,
# Skips updating normalizers in the agent, as it's handled in the learner.
update_normalizers_in_train=False)
# train_replay_buffer and normalization_replay_buffer point to two Reverb
# tables that are synchronized. Sampling is done in a FIFO fashion.
def experience_dataset_fn():
return train_replay_buffer.as_dataset(sample_batch_size,
sequence_preprocess_fn=agent.preprocess_sequence)
def normalization_dataset_fn():
return normalization_replay_buffer.as_dataset(sample_batch_size,
sequence_preprocess_fn=agent.preprocess_sequence)
learner = PPOLearner(..., agent, experience_dataset_fn,
normalization_dataset_fn)
learner.run()
```
Args:
root_dir: Main directory path where checkpoints, saved_models, and
summaries will be written to.
train_step: a scalar tf.int64 `tf.Variable` which will keep track of the
number of train steps. This is used for artifacts created like
summaries, or outputs in the root_dir.
agent: `ppo_agent.PPOAgent` instance to train with. Note that
update_normalizers_in_train should be set to `False`, otherwise a
ValueError will be raised. We do not update normalizers in the agent
again because we already update it in the learner. When mini batching is
enabled, compute_value_and_advantage_in_train should be set to False,
        and preprocessing should be done as part of the data pipeline passed to
        `replay_buffer.as_dataset`.
experience_dataset_fn: a function that will create an instance of a
tf.data.Dataset used to sample experience for training. Each element in
the dataset is a (Trajectory, SampleInfo) pair. Note that each
Trajectory in itself might represent an episode (variable length) or a
collection of transitions depending on how the replay buffer and hence
the `experience_dataset_fn` was set up.
normalization_dataset_fn: a function that will create an instance of a
tf.data.Dataset used for normalization. This dataset is often from a
separate reverb table that is synchronized with the table used in
experience_dataset_fn. Each element in the dataset is a (Trajectory,
SampleInfo) pair. Note that each Trajectory in itself might represent an
episode (variable length) or a collection of transitions depending on
how the replay buffer and hence the `experience_dataset_fn` was set up.
num_samples: The number of samples used for training and normalization. A
sample here means one reading of the experience_dataset_fn which in turn
might have more than 1 Trajectories (For ex. if this is coming from a
`reverb_replay_buffer` with `sample_batch_size` = 2, then one sample
means 2 trajectories, and the trainer will pull in num_samples * 2
trajectories for one iteration of training). If fewer than this amount
of batches exists in the dataset, the learner will wait for more data to
be added, or until the reverb timeout is reached.
num_epochs: The number of iterations to go through the same sequences.
minibatch_size: The minibatch size. If set, the input data set will be
flattened, and the dataset used for training is shaped `[minibatch_size,
1, ...]`, where each element in the dataset represents a single step. If
None, full sequences will be fed into the agent. Please set this
parameter to None for RNN networks which requires full sequences.
shuffle_buffer_size: The buffer size for shuffling the trajectories before
splitting them into mini batches. Only required when mini batch learning
is enabled (minibatch_size is set). Otherwise it is ignored. Commonly
set to a number 1-3x the episode length of your environment.
after_train_strategy_step_fn: (Optional) callable of the form `fn(sample,
loss)` which can be used for example to update priorities in a replay
buffer where sample is pulled from the `experience_iterator` and loss is
a `LossInfo` named tuple returned from the agent. This is called after
every train step. It runs using `strategy.run(...)`.
triggers: List of callables of the form `trigger(train_step)`. After every
`run` call every trigger is called with the current `train_step` value
as an np scalar.
checkpoint_interval: Number of train steps in between checkpoints. Note
these are placed into triggers and so a check to generate a checkpoint
only occurs after every `run` call. Set to -1 to disable (this is not
recommended, because it means that if the pipeline gets preempted, all
previous progress is lost). This only takes care of the checkpointing
the training process. Policies must be explicitly exported through
triggers.
summary_interval: Number of train steps in between summaries. Note these
are placed into triggers and so a check to generate a checkpoint only
occurs after every `run` call.
use_kwargs_in_agent_train: If True the experience from the replay buffer
is passed into the agent as kwargs. This requires samples from the RB to
be of the form `dict(experience=experience, kwarg1=kwarg1, ...)`. This
is useful if you have an agent with a custom argspec.
strategy: (Optional) `tf.distribute.Strategy` to use during training.
Raises:
      ValueError: mini batching is enabled, but shuffle_buffer_size isn't
        provided.
      ValueError: minibatch_size is passed in for RNN networks. RNNs require
        full sequences.
      ValueError: mini batching is enabled, but
        agent._compute_value_and_advantage_in_train is set to `True`.
      ValueError: agent.update_normalizers_in_train is set to `True`. The
        learner already updates the normalizers, so no need to update again in
        the agent.
"""
if minibatch_size and shuffle_buffer_size is None:
raise ValueError(
'shuffle_buffer_size must be provided if minibatch_size is not None.'
)
if minibatch_size and (
agent._actor_net.state_spec or agent._value_net.state_spec
):
raise ValueError('minibatch_size must be set to None for RNN networks.')
if minibatch_size and agent._compute_value_and_advantage_in_train:
raise ValueError(
'agent.compute_value_and_advantage_in_train should be set to False '
'when mini batching is used.'
)
if agent.update_normalizers_in_train:
raise ValueError(
'agent.update_normalizers_in_train should be set to False when '
'PPOLearner is used.'
)
strategy = strategy or tf.distribute.get_strategy()
self._agent = agent
self._minibatch_size = minibatch_size
self._shuffle_buffer_size = shuffle_buffer_size
self._num_epochs = num_epochs
self._experience_dataset_fn = experience_dataset_fn
self._normalization_dataset_fn = normalization_dataset_fn
self._num_samples = num_samples
self._generic_learner = learner.Learner(
root_dir,
train_step,
agent,
experience_dataset_fn=None,
after_train_strategy_step_fn=after_train_strategy_step_fn,
triggers=triggers,
checkpoint_interval=checkpoint_interval,
summary_interval=summary_interval,
use_kwargs_in_agent_train=use_kwargs_in_agent_train,
strategy=strategy,
)
self.num_replicas = strategy.num_replicas_in_sync
self._create_datasets(strategy)
self.num_frames_for_training = tf.Variable(0, dtype=tf.int32)
def _create_datasets(self, strategy):
"""Create the training dataset and iterator."""
def _make_dataset(_):
train_dataset = self._experience_dataset_fn().take(self._num_samples)
# We take the current batches, repeat for `num_epochs` times and exhaust
# this data in the current learner run. The next time learner runs, new
# batches of data will be sampled, cached and repeated.
# This is enabled by the `Counter().flat_map()` trick below.
train_dataset = train_dataset.cache().repeat(self._num_epochs)
if self._minibatch_size:
def squash_dataset_element(sequence, info):
return tf.nest.map_structure(
utils.BatchSquash(2).flatten, (sequence, info)
)
# We unbatch the dataset shaped [B, T, ...] to a new dataset that
# contains individual elements.
# Note that we unbatch across the time dimension, which could result
# in mini batches that contain subsets from more than one sequences.
# PPO agent can handle mini batches across episode boundaries.
train_dataset = train_dataset.map(squash_dataset_element).unbatch()
train_dataset = train_dataset.shuffle(self._shuffle_buffer_size)
train_dataset = train_dataset.batch(1, drop_remainder=True)
train_dataset = train_dataset.batch(
self._minibatch_size, drop_remainder=True
)
return train_dataset
def make_dataset(_):
return tf.data.experimental.Counter().flat_map(_make_dataset)
def _make_normalization_dataset(_):
return self._normalization_dataset_fn()
self._normalization_dataset = tf.data.experimental.Counter().flat_map(
_make_normalization_dataset
)
self._normalization_iterator = iter(self._normalization_dataset)
with strategy.scope():
if strategy.num_replicas_in_sync > 1:
self._train_dataset = strategy.distribute_datasets_from_function(
make_dataset
)
else:
self._train_dataset = make_dataset(0)
self._train_iterator = iter(self._train_dataset)
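  # A standalone sketch of the Counter().flat_map() trick used above (contents
  # are illustrative): every element of the infinite counter re-invokes the
  # mapped function, so each learner run pulls, caches and repeats a fresh set
  # of batches instead of reusing the first cached sample forever.
  #
  #   ds = tf.data.experimental.Counter().flat_map(
  #       lambda _: tf.data.Dataset.range(3).cache().repeat(2))
  #   it = iter(ds)
  #   [int(next(it)) for _ in range(8)]  # -> [0, 1, 2, 0, 1, 2, 0, 1]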
def run(self, parallel_iterations=10):
"""Train `num_samples` batches repeating for `num_epochs` of iterations.
Args:
parallel_iterations: Maximum number of train iterations to allow running
in parallel. This value is forwarded directly to the training tf.while
loop.
Returns:
The total loss computed before running the final step.
"""
num_frames = self._update_normalizers(self._normalization_iterator)
self.num_frames_for_training.assign(num_frames)
if self._minibatch_size:
num_total_batches = (
int(self.num_frames_for_training.numpy() / self._minibatch_size)
* self._num_epochs
)
else:
num_total_batches = self._num_samples * self._num_epochs
iterations = int(num_total_batches / self.num_replicas)
if iterations == 0:
raise ValueError(
'Cannot distribute {} batches across {} replicas. '
          'Please increase PPOLearner.num_samples. See PPOLearner.'
'num_samples documentation for more details.'.format(
num_total_batches, self.num_replicas
)
)
loss_info = self._generic_learner.run(
iterations,
self._train_iterator,
parallel_iterations=parallel_iterations,
)
return loss_info
@common.function(autograph=True)
def _update_normalizers(self, iterator):
"""Update the normalizers and count the total number of frames."""
reward_spec = tensor_spec.TensorSpec(shape=[], dtype=tf.float32)
def _update(traj):
self._agent.update_observation_normalizer(traj.observation)
self._agent.update_reward_normalizer(traj.reward)
if traj.reward.shape:
outer_shape = nest_utils.get_outer_shape(traj.reward, reward_spec)
batch_size = outer_shape[0]
if len(outer_shape) > 1:
batch_size *= outer_shape[1]
else:
batch_size = 1
return batch_size
num_frames = 0
traj, _ = next(iterator)
num_frames += _update(traj)
for _ in tf.range(1, self._num_samples):
traj, _ = next(iterator)
num_frames += _update(traj)
return num_frames
@property
def train_step_numpy(self):
"""The current train_step.
Returns:
The current `train_step`. Note this will return a scalar numpy array which
holds the `train_step` value when this was called.
"""
return self._generic_learner.train_step_numpy
|
88c35e0742afa10300280d070fd5754cebb38f24
|
2d05050d0ada29f7680b4df20c10bb85b0530e45
|
/tests/python/relay/aot/test_aot_create_function_metadata.py
|
80137bd23f0c39de4ad5dc1d297ecf513ace516a
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
apache/tvm
|
87cb617f9a131fa44e1693303aaddf70e7a4c403
|
d75083cd97ede706338ab413dbc964009456d01b
|
refs/heads/main
| 2023-09-04T11:24:26.263032
| 2023-09-04T07:26:00
| 2023-09-04T07:26:00
| 70,746,484
| 4,575
| 1,903
|
Apache-2.0
| 2023-09-14T19:06:33
| 2016-10-12T22:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 13,086
|
py
|
test_aot_create_function_metadata.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long,missing-class-docstring,missing-module-docstring,missing-function-docstring,no-self-argument,unused-argument,invalid-name
import numpy as np
import tvm
import tvm.testing
from tvm.script import tir as T
from tvm.runtime.ndarray import array
from tvm.relay.backend.aot import CreateFunctionMetadata
from tvm.ir.memory_pools import AllocatedPoolInfo, ConstantPoolInfo, WorkspacePoolInfo, ConstantInfo
def _check_function_metadata(function_metadata, expected_infos):
for symbol, expected_info in expected_infos.items():
func_info = function_metadata[symbol]
# Check workspace_sizes
key, value = func_info.workspace_sizes.items()[0]
assert str(key) == expected_info["target"]
assert value == expected_info["workspace_sizes"]
# Check io_sizes
key, value = func_info.io_sizes.items()[0]
assert str(key) == expected_info["target"]
assert value == expected_info["io_sizes"]
# Check constant_sizes
key, value = func_info.constant_sizes.items()[0]
assert str(key) == expected_info["target"]
assert value == expected_info["constant_sizes"]
# Check tir_primfuncs
key, value = func_info.tir_primfuncs.items()[0]
assert str(key) == expected_info["target"]
tvm.ir.assert_structural_equal(value, expected_info["tir_primfuncs"])
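# Note: `expected_infos` is keyed by function symbol; each entry records the
# target string, the expected workspace/io/constant sizes in bytes, and the
# PrimFunc whose structure the metadata must match (see the test cases below).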
def test_create_function_metadata_workspace_allocate_only():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]})})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
sid_1 = T.allocate([140], "int8", "global.workspace")
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, sid_2, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 432,
"io_sizes": 280,
"constant_sizes": 0,
"tir_primfuncs": Module["__tvm_main__"],
}
}
function_metadata = CreateFunctionMetadata(Module, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_constant_allocate_only():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, constant_0, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 0,
"io_sizes": 280,
"constant_sizes": 140,
"tir_primfuncs": Module["__tvm_main__"],
}
}
function_metadata = CreateFunctionMetadata(Module, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_constant_pool_only():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 0,
"io_sizes": 280,
"constant_sizes": 256,
"tir_primfuncs": Module["__tvm_main__"],
}
}
target = Module["__tvm_main__"].attrs["target"]
mod = Module.with_attr(
"pool_args",
[
AllocatedPoolInfo(
ConstantPoolInfo(
"flash",
[target],
[ConstantInfo("a", 0, array(np.array([0])))],
),
256,
),
],
)
function_metadata = CreateFunctionMetadata(mod, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_workspace_pool_only():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 256,
"io_sizes": 280,
"constant_sizes": 0,
"tir_primfuncs": Module["__tvm_main__"],
}
}
target = Module["__tvm_main__"].attrs["target"]
mod = Module.with_attr(
"pool_args",
[
AllocatedPoolInfo(
WorkspacePoolInfo("sram", [target]),
256,
),
],
)
function_metadata = CreateFunctionMetadata(mod, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_all_single_func():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]})})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
sid_1 = T.allocate([140], "int8", "global.workspace")
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, constant_0, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 688,
"io_sizes": 280,
"constant_sizes": 652,
"tir_primfuncs": Module["__tvm_main__"],
}
}
target = Module["__tvm_main__"].attrs["target"]
mod = Module.with_attr(
"pool_args",
[
AllocatedPoolInfo(
ConstantPoolInfo(
"flash",
[target],
[ConstantInfo("a", 0, array(np.array([0])))],
),
512,
),
AllocatedPoolInfo(
WorkspacePoolInfo("sram", [target]),
256,
),
],
)
function_metadata = CreateFunctionMetadata(mod, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_workspace_multi_funcs():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
@T.prim_func
def test_fused_add(a: T.handle, b: T.handle, output: T.handle, device_context_unused: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod_test_fused_add", "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]})})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
b_buffer = T.match_buffer(b, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
sid_0 = T.allocate([140], "int8", "global.workspace")
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("magic", a_buffer.data, b_buffer.data, sid_0, constant_0, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 0,
"io_sizes": 280,
"constant_sizes": 0,
"tir_primfuncs": Module["__tvm_main__"],
},
"test_fused_add": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 144,
"io_sizes": 420,
"constant_sizes": 140,
"tir_primfuncs": Module["test_fused_add"],
},
}
function_metadata = CreateFunctionMetadata(Module, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
if __name__ == "__main__":
tvm.testing.main()
|
4a11b6496ff58f2370bccd8605b97ba5f806ad91
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/olap/ByConity/tests/testflows/rbac/tests/syntax/revoke_role.py
|
0642dd6b0d4e44772e7e66c0fa0b610f7f3fa7cc
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 10,114
|
py
|
revoke_role.py
|
from contextlib import contextmanager
from testflows.core import *
import rbac.helper.errors as errors
from rbac.requirements import *
@TestFeature
@Name("revoke role")
@Args(format_description=False)
def feature(self, node="clickhouse1"):
"""Check revoke query syntax.
```sql
REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR]
role [,...] FROM {user | role | CURRENT_USER} [,...]
| ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...]
```
"""
node = self.context.cluster.node(node)
@contextmanager
def setup(users=2,roles=2):
try:
with Given("I have some users"):
for i in range(users):
node.query(f"CREATE USER OR REPLACE user{i}")
with And("I have some roles"):
for i in range(roles):
node.query(f"CREATE ROLE OR REPLACE role{i}")
yield
finally:
with Finally("I drop the users"):
for i in range(users):
node.query(f"DROP USER IF EXISTS user{i}")
with And("I drop the roles"):
for i in range(roles):
node.query(f"DROP ROLE IF EXISTS role{i}")
with Scenario("I revoke a role from a user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup():
with When("I revoke a role"):
node.query("REVOKE role0 FROM user0")
with Scenario("I revoke a nonexistent role from user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup(1,0):
with When("I revoke nonexistent role from a user"):
exitcode, message = errors.role_not_found_in_disk(name="role0")
node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message)
# with nonexistent object name, REVOKE assumes type role (treats user0 as role)
with Scenario("I revoke a role from a nonexistent user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup(0,1):
with When("I revoke role from a nonexistent user"):
exitcode, message = errors.role_not_found_in_disk(name="user0")
node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message)
# with nonexistent object name, REVOKE assumes type role (treats user0 as role)
with Scenario("I revoke a role from ALL EXCEPT nonexistent user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup(0,1):
with When("I revoke role from a nonexistent user"):
exitcode, message = errors.role_not_found_in_disk(name="user0")
node.query("REVOKE role0 FROM ALL EXCEPT user0", exitcode=exitcode, message=message)
with Scenario("I revoke a nonexistent role from a nonexistent user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup(0,0):
with When("I revoke nonexistent role from a nonexistent user"):
exitcode, message = errors.role_not_found_in_disk(name="role0")
node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message)
with Scenario("I revoke a role from multiple users", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup():
with When("I revoke a role from multiple users"):
node.query("REVOKE role0 FROM user0, user1")
with Scenario("I revoke multiple roles from multiple users", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup():
node.query("REVOKE role0, role1 FROM user0, user1")
#user is default, expect exception
with Scenario("I revoke a role from default user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke a role from default user"):
exitcode, message = errors.cannot_update_default()
node.query("REVOKE role0 FROM CURRENT_USER", exitcode=exitcode, message=message)
#user is user0
with Scenario("I revoke a role from current user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke a role from current user"):
node.query("REVOKE role0 FROM CURRENT_USER", settings = [("user","user0")])
#user is default, expect exception
with Scenario("I revoke a role from all", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke a role from all"):
exitcode, message = errors.cannot_update_default()
node.query("REVOKE role0 FROM ALL", exitcode=exitcode, message=message)
#user is default, expect exception
with Scenario("I revoke multiple roles from all", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke multiple roles from all"):
exitcode, message = errors.cannot_update_default()
node.query("REVOKE role0, role1 FROM ALL", exitcode=exitcode, message=message)
with Scenario("I revoke a role from all but current user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke a role from all except current"):
node.query("REVOKE role0 FROM ALL EXCEPT CURRENT_USER")
with Scenario("I revoke a role from all but default user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke a role from all except default"):
node.query("REVOKE role0 FROM ALL EXCEPT default",
settings = [("user","user0")])
with Scenario("I revoke multiple roles from all but default user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke multiple roles from all except default"):
node.query("REVOKE role0, role1 FROM ALL EXCEPT default", settings = [("user","user0")])
with Scenario("I revoke a role from a role", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup():
with When("I revoke a role from a role"):
node.query("REVOKE role0 FROM role1")
with Scenario("I revoke a role from a role and a user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup():
with When("I revoke a role from multiple roles"):
node.query("REVOKE role0 FROM role1, user0")
with Scenario("I revoke a role from a user on cluster", requirements=[
RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]):
with Given("I have a role and a user on a cluster"):
node.query("CREATE USER OR REPLACE user0 ON CLUSTER sharded_cluster")
node.query("CREATE ROLE OR REPLACE role0 ON CLUSTER sharded_cluster")
with When("I revoke a role from user on a cluster"):
node.query("REVOKE ON CLUSTER sharded_cluster role0 FROM user0")
with Finally("I drop the user and role"):
node.query("DROP USER IF EXISTS user0 ON CLUSTER sharded_cluster")
node.query("DROP ROLE IF EXISTS role0 ON CLUSTER sharded_cluster")
with Scenario("I revoke a role on fake cluster, throws exception", requirements=[
RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]):
with Given("I have a role and a user on a cluster"):
node.query("CREATE USER OR REPLACE user0")
node.query("CREATE ROLE OR REPLACE role0")
with When("I revoke a role from user on a cluster"):
exitcode, message = errors.cluster_not_found("fake_cluster")
node.query("REVOKE ON CLUSTER fake_cluster role0 FROM user0", exitcode=exitcode, message=message)
with Finally("I drop the user and role"):
node.query("DROP USER IF EXISTS user0")
node.query("DROP ROLE IF EXISTS role0")
with Scenario("I revoke multiple roles from multiple users on cluster", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]):
with Given("I have multiple roles and multiple users on a cluster"):
for i in range(2):
node.query(f"CREATE USER OR REPLACE user{i} ON CLUSTER sharded_cluster")
node.query(f"CREATE ROLE OR REPLACE role{i} ON CLUSTER sharded_cluster")
with When("I revoke multiple roles from multiple users on cluster"):
node.query("REVOKE ON CLUSTER sharded_cluster role0, role1 FROM user0, user1")
with Finally("I drop the roles and users"):
for i in range(2):
node.query(f"DROP USER IF EXISTS user{i} ON CLUSTER sharded_cluster")
node.query(f"DROP ROLE IF EXISTS role{i} ON CLUSTER sharded_cluster")
with Scenario("I revoke admin option for role from a user", requirements=[
RQ_SRS_006_RBAC_Revoke_AdminOption("1.0")]):
with setup():
with When("I revoke admin option for role from a user"):
node.query("REVOKE ADMIN OPTION FOR role0 FROM user0")
with Scenario("I revoke admin option for multiple roles from multiple users", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_AdminOption("1.0")]):
with setup():
with When("I revoke admin option for multiple roles from multiple users"):
node.query("REVOKE ADMIN OPTION FOR role0, role1 FROM user0, user1")
|
6f2112ec15539460a1bf1490e719f3ef54457d1c
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/sms/migrations/0018_check_for_phone_number_migration.py
|
b62e6f3906bfb326c08d29c28a9c610d3276cd5c
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 396
|
py
|
0018_check_for_phone_number_migration.py
|
from django.db import migrations
from corehq.apps.sms.migration_status import (
assert_phone_number_migration_complete,
)
def noop(*args, **kwargs):
pass
class Migration(migrations.Migration):
dependencies = [
('sms', '0017_update_phoneblacklist'),
]
    operations = [
        migrations.RunPython(assert_phone_number_migration_complete, reverse_code=noop),
    ]
|
2189589f1cb1e2fea85c1540bd69de59979f4d62
|
a7a9b9341aeb04e3c01981f6d451f52f891a4d3e
|
/models/resnet_small_V3.py
|
0a88c64216db3e6a2c8142c7e5a82a9e9137bd29
|
[] |
no_license
|
he-y/filter-pruning-geometric-median
|
500ee7485adaddd9e356f6f67cd9ea27fcd1e5da
|
44fbab47b246e8b11bc8008a7bebfe06c0e391b8
|
refs/heads/master
| 2023-08-08T16:33:18.136385
| 2023-08-05T03:15:18
| 2023-08-05T03:15:18
| 177,763,869
| 547
| 123
| null | 2023-08-31T06:50:25
| 2019-03-26T10:17:15
|
Python
|
UTF-8
|
Python
| false
| false
| 5,825
|
py
|
resnet_small_V3.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
#from .res_utils import DownsampleA, DownsampleC, DownsampleD
import math
class DownsampleA(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleA, self).__init__()
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, x):
x = self.avg(x)
return torch.cat((x, x.mul(0)), 1)
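# Note: DownsampleA is the parameter-free "option A" shortcut from the ResNet
# paper: average pooling reduces the spatial size by `stride`, and concatenating
# a zeroed copy of the activation doubles the channel count so the residual can
# be added without introducing learnable weights.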
class ResNetBasicblock(nn.Module):
expansion = 1
"""
  ResNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)
"""
def __init__(self, inplanes, planes, index, stride=1, downsample=None):
super(ResNetBasicblock, self).__init__()
self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn_a = nn.BatchNorm2d(planes)
self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_b = nn.BatchNorm2d(planes)
self.downsample = downsample
self.inplanes = inplanes
self.index = index
def forward(self, x):
residual = x
basicblock = self.conv_a(x)
basicblock = self.bn_a(basicblock)
basicblock = F.relu(basicblock, inplace=True)
basicblock = self.conv_b(basicblock)
basicblock = self.bn_b(basicblock)
if self.downsample is not None:
residual = self.downsample(x)
# out = self.out.cuda()
# out.zero_()
# out = torch.FloatTensor(self.inplanes, basicblock.size()[1], basicblock.size()[2]).zero_()
# out.index_add_(0, self.index[0], residual.data)
# out.index_add_(0, self.index[1], basicblock.data)
out = torch.rand(self.inplanes, basicblock.size()[1], basicblock.size()[2])
return F.relu(out, inplace=True)
class CifarResNet(nn.Module):
"""
ResNet optimized for the Cifar dataset, as specified in
https://arxiv.org/abs/1512.03385.pdf
"""
def __init__(self, block, depth, num_classes, index, rate=[16, 16, 32, 64, 16, 32, 64]):
""" Constructor
Args:
depth: number of layers.
num_classes: number of classes
index: channel index lists handed to each residual block
rate: channel widths for the stem and the three stages
"""
super(CifarResNet, self).__init__()
#Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
layer_blocks = (depth - 2) // 6
self.stage_num = (depth - 2) // 3
print ('CifarResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks))
print(len(index))
self.num_classes = num_classes
self.rate = rate
self.index = index
self.conv_1_3x3 = nn.Conv2d(3, rate[0], kernel_size=3, stride=1, padding=1, bias=False)
self.bn_1 = nn.BatchNorm2d(rate[0])
print(len(index[1 : self.stage_num + 1]))
self.inplanes = rate[0]
self.stage_1 = self._make_layer(block, rate[4], rate[1], index[1 : self.stage_num + 1], layer_blocks, 1)
self.stage_2 = self._make_layer(block, rate[5], rate[2], index[self.stage_num + 1 : self.stage_num * 2 + 1], layer_blocks, 2)
self.stage_3 = self._make_layer(block, rate[6], rate[3], index[self.stage_num * 2 + 1 : self.stage_num * 3 + 1], layer_blocks, 2)
self.avgpool = nn.AvgPool2d(8)
self.classifier = nn.Linear(64*block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
#m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal(m.weight)
m.bias.data.zero_()
def _make_layer(self, block, inplanes, planes, index, blocks, stride=1):
downsample = None
if stride != 1 :
downsample = DownsampleA(self.inplanes, planes * block.expansion, stride)
# print(self.inplanes)
layers = []
i=0
j=2
layers.append(block(self.inplanes, planes, index[i:j], stride, downsample))
# self.inplanes = planes * block.expansion
i += 2
j += 2
self.inplanes = inplanes
print(inplanes)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes,index[i:j]))
i += 2
j += 2
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.classifier(x)
def resnet20_small(index, rate,num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 20, num_classes, index, rate)
return model
def resnet32_small(index, rate,num_classes=10):
"""Constructs a ResNet-32 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 32, num_classes,index, rate)
return model
def resnet44_small(index, rate,num_classes=10):
"""Constructs a ResNet-44 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 44, num_classes,index, rate)
return model
def resnet56_small(index, rate,num_classes=10):
"""Constructs a ResNet-56 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 56, num_classes,index, rate)
return model
def resnet110_small(index, rate,num_classes=10):
"""Constructs a ResNet-110 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 110, num_classes,index, rate)
return model
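# Construction-only usage sketch (an assumption, not from the repository): the
# shapes of index and rate are inferred from the slicing in CifarResNet.__init__,
# and it presumes a PyTorch version from the repository's era where
# init.kaiming_normal is still available. Note that the block forward above
# returns a random tensor (experimental pruning code), so a full forward pass
# is not meaningful here.
import torch

# depth 20 gives stage_num = 6, so index entries up to index[18] are sliced;
# plain placeholder index lists are used purely for illustration.
index = [torch.arange(16) for _ in range(19)]
model = resnet20_small(index, rate=[16, 16, 32, 64, 16, 32, 64], num_classes=10)
print(model)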
|
d9341243e0c7abaa1c70d22d7815c7ab4c365abe
|
8212e68f7c960ff43336afcb142e6a8f5a4dae0a
|
/libforget/json.py
|
488581e73131dbdbb30ae29dac68559360dcad6a
|
[
"ISC"
] |
permissive
|
codl/forget
|
7997da98897c9fe949d6266cd74b7826b0351d05
|
50ba3dfaa0082b49bfba726d8f13cbb396fd44f8
|
refs/heads/master
| 2022-12-09T18:39:03.936811
| 2022-09-16T19:48:05
| 2022-09-16T19:48:05
| 99,214,821
| 166
| 17
|
ISC
| 2022-11-22T10:10:35
| 2017-08-03T09:21:08
|
Python
|
UTF-8
|
Python
| false
| false
| 735
|
py
|
json.py
|
from json import dumps
def account(acc):
last_delete = None
next_delete = None
if acc.last_delete:
last_delete = acc.last_delete.isoformat()
if acc.next_delete:
next_delete = acc.next_delete.isoformat()
return dumps(dict(
post_count=acc.post_count(),
eligible_for_delete_estimate=acc.estimate_eligible_for_delete(),
display_name=acc.display_name,
screen_name=acc.screen_name,
avatar_url=acc.get_avatar(),
avatar_url_orig=acc.avatar_url,
id=acc.id,
service=acc.service,
policy_enabled=acc.policy_enabled,
next_delete=next_delete,
last_delete=last_delete,
))
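# Illustrative stand-in only: the real Account model lives elsewhere in the
# forget codebase; this hypothetical object merely shows the attributes and
# methods that account() reads.
class _FakeAccount:
    id = 1
    service = 'mastodon'
    display_name = 'Example'
    screen_name = 'example'
    avatar_url = 'https://example.com/avatar.png'
    policy_enabled = True
    last_delete = None
    next_delete = None
    def post_count(self):
        return 42
    def estimate_eligible_for_delete(self):
        return 7
    def get_avatar(self):
        return self.avatar_url

print(account(_FakeAccount()))  # JSON string containing the fields listed above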
|
91e5b18a00232d6188b11216ea7ebda624643969
|
6c1fb34e6220673f38c4bedd752caf13e1323aab
|
/Code/novelty-dates.py
|
b674dd8f4a235974853c8f8adc4189784a686332
|
[] |
no_license
|
rozim/ChessData
|
9b7112b519bbf88540ed221b516222284b8938f9
|
0b6da41b8ae357c5c72c93ad8e20b873cb556571
|
refs/heads/master
| 2023-06-11T03:15:13.799114
| 2023-05-30T05:52:10
| 2023-05-30T05:52:10
| 7,515,413
| 153
| 31
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,633
|
py
|
novelty-dates.py
|
# Generate the first date each position was seen.
import sys
import os
import chess
import chess.pgn
from pprint import pprint
import time
import collections
from util import *
import resource
import sqlitedict
import json
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('max_ply', 60, 'Ending ply')
flags.DEFINE_string('first_key', '', 'For recovery')
# 2014.11.17
n_improve = 0
n_not_improve = 0
n_add = 0
DBG = '3rrqk1/1p3ppp/p1bb1n2/3p4/7Q/1PN1PN2/PB3PPP/2RR2K1 b - -'
class NoveltyDb:
def __init__(self):
self.d1 = sqlitedict.open('novelty.sqlite',
flag='c',
timeout=60,
encode=json.dumps,
decode=json.loads)
self.d2 = {}
def __getitem__(self, key):
try:
return self.d2[key]
except KeyError:
return self.d1[key]
def __setitem__(self, key, value, /):
self.d2[key] = value
# only set self.d1 on flush()
def flush(self):
for k, v in self.d2.items():
self.d1[k] = v
self.d2 = {}
self.d1.sync()
def munch_game(game, cur_date, novelty_db):
global n_improve, n_not_improve, n_add
h = game.headers
if 'FEN' in h or 'SetUp' in h:
return None
board = chess.Board()
for ply, move in enumerate(game.mainline_moves()):
board.push(move)
if ply > FLAGS.max_ply:
break
sfen = simplify_fen(board)
try:
db_date = novelty_db[sfen]
if cur_date < db_date:
if sfen == DBG:
print('IMPROVE ', db_date, cur_date)
n_improve += 1
assert False, 'should be impossible now'
novelty_db[sfen] = cur_date
else:
n_not_improve += 1
except KeyError:
if sfen == DBG:
print('ADD ', cur_date)
novelty_db[sfen] = cur_date
n_add += 1
def main(argv):
global n_add, n_improve, n_not_improve
assert len(argv[1:]) == 1
t0 = time.time()
dates_db = sqlitedict.open('novelty-prep.sqlite',
flag='r',
timeout=60,
encode=json.dumps,
decode=json.loads)
novelty_db = NoveltyDb()
pgn_fn = argv[1:][0]
pgn_f = open(pgn_fn, 'r', encoding='utf-8', errors='replace')
ng = 0
dates = sorted(dates_db.keys())
for di, date in enumerate(dates):
if FLAGS.first_key and date < FLAGS.first_key:
print(f'SKIP {date}')
continue
if len(date) != 10:
print('BAD LEN: ', date)
continue
positions = dates_db[date]
pcd = 100.0 * di / len(dates)
if di % 10 == 0:
print(f'{di:6d} {date:10s} {pcd:3.1f}% {time.time() - t0:4.1f}s np={len(positions)} ng={ng} add={n_add} imp={n_improve} ~imp={n_not_improve}')
for pos in dates_db[date]:
pgn_f.seek(pos, 0)
game = chess.pgn.read_game(pgn_f)
munch_game(game, date, novelty_db)
fens = gen_fens(game)
ng += 1
if ng % 10000 == 0:
print('\t', 'ng=', ng, f'{time.time() - t0:.1f}s #fens={len(list(fens))}')
if ng % 100000 == 0:
t1 = time.time()
print('FLUSH')
novelty_db.flush()
dt = time.time() - t1
print(f'FLUSHED {dt:.1f}s')
maxrss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss // 1024 // 1024
print('RSS: ', maxrss)
novelty_db.flush()
print('d1 : ', len(novelty_db.d1))
print('d2 : ', len(novelty_db.d2))
print('Improve : ', n_improve)
print('Not Improve : ', n_not_improve)
print('Add : ', n_add)
if __name__ == "__main__":
app.run(main)
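# Inferred invocation, not documented in the excerpt above; games.pgn is a
# placeholder filename, and it assumes novelty-prep.sqlite was produced by an
# earlier preparation step and that the local util module provides
# simplify_fen() and gen_fens():
#
#   python novelty-dates.py games.pgn --max_ply=60
#
# The script reads per-date game offsets from novelty-prep.sqlite, replays each
# game up to --max_ply plies, and records the first date each position was seen
# in novelty.sqlite in the working directory.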
|
a7ca4ed536c44f433051e7894bdeb9c61ddd1974
|
e7fdf97207866226b2a85bf23da2f8553e9a57cf
|
/setup.py
|
d5a033d39c07c11a5c3e4fdf45222e8f2fe93134
|
[] |
no_license
|
avibrazil/iOSbackup
|
3f31cc6fd2d4df11efccc98b9247e039c3e728ed
|
76666902d69c1758a673a9142e45cf8d6ed40210
|
refs/heads/master
| 2023-07-16T09:46:31.093550
| 2022-10-16T01:03:54
| 2022-10-16T01:03:54
| 231,742,810
| 169
| 29
| null | 2023-06-27T01:21:43
| 2020-01-04T10:07:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,683
|
py
|
setup.py
|
import setuptools
from iOSbackup import __version__
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="iOSbackup",
version=__version__,
author="Avi Alkalay",
author_email="avibrazil@gmail.com",
description="Reads and extracts files from password-encrypted iOS backups",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/avibrazil/iOSbackup",
install_requires=['NSKeyedUnArchiver','pycryptodome'],
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Development Status :: 4 - Beta",
"Environment :: MacOS X",
"Environment :: Console",
"Environment :: Win32 (MS Windows)",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Legal Industry",
"Intended Audience :: Telecommunications Industry",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: MacOS",
"Operating System :: iOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Topic :: Database",
"Topic :: Security :: Cryptography",
"Topic :: Software Development :: Embedded Systems",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Archiving :: Backup",
"Topic :: System :: Recovery Tools"
],
python_requires='>=3.7',
)
|
a2bf331c2ae0a8db4777bef72f56eab1a6515e32
|
095e5e86c931af6553996b0a128c07d94b38cbca
|
/test/test_number.py
|
2b92924fb839f7206c2f55a406850b3efb967e50
|
[
"MIT"
] |
permissive
|
hpyproject/hpy
|
1dc9e5e855fa006b1728703c5925addbb43cf792
|
8310a762d78e3412464b1869959a77da013e6307
|
refs/heads/master
| 2023-09-03T21:18:17.273371
| 2023-07-24T07:26:14
| 2023-07-24T07:26:14
| 196,559,763
| 681
| 41
|
MIT
| 2023-07-24T07:26:16
| 2019-07-12T10:27:56
|
Python
|
UTF-8
|
Python
| false
| false
| 8,440
|
py
|
test_number.py
|
from .support import HPyTest
class TestNumber(HPyTest):
def test_bool_from_bool_and_long(self):
import pytest
mod = self.make_module("""
HPyDef_METH(from_bool, "from_bool", HPyFunc_O)
static HPy from_bool_impl(HPyContext *ctx, HPy self, HPy arg)
{
int32_t x = HPyLong_AsInt32_t(ctx, arg);
if (x == -1 && HPyErr_Occurred(ctx))
return HPy_NULL;
if (x != 0 && x != 1) {
HPyErr_SetString(ctx, ctx->h_ValueError,
"value must be 0 or 1");
return HPy_NULL;
}
return HPyBool_FromBool(ctx, (x ? true : false));
}
HPyDef_METH(from_long, "from_long", HPyFunc_O)
static HPy from_long_impl(HPyContext *ctx, HPy self, HPy arg)
{
long x = HPyLong_AsLong(ctx, arg);
if (x == -1 && HPyErr_Occurred(ctx))
return HPy_NULL;
return HPyBool_FromLong(ctx, x);
}
@EXPORT(from_bool)
@EXPORT(from_long)
@INIT
""")
assert mod.from_bool(0) is False
assert mod.from_bool(1) is True
with pytest.raises(ValueError):
mod.from_bool(2)
assert mod.from_long(0) is False
assert mod.from_long(42) is True
def test_unary(self):
import pytest
import operator
for c_name, op in [
('Negative', operator.neg),
('Positive', operator.pos),
('Absolute', abs),
('Invert', operator.invert),
('Index', operator.index),
('Long', int),
('Float', float),
]:
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
return HPy_%s(ctx, arg);
}
@EXPORT(f)
@INIT
""" % (c_name,), name='number_'+c_name)
assert mod.f(-5) == op(-5)
assert mod.f(6) == op(6)
try:
res = op(4.75)
except Exception as e:
with pytest.raises(e.__class__):
mod.f(4.75)
else:
assert mod.f(4.75) == res
def test_binary(self):
import operator
for c_name, op in [
('Add', operator.add),
('Subtract', operator.sub),
('Multiply', operator.mul),
('FloorDivide', operator.floordiv),
('TrueDivide', operator.truediv),
('Remainder', operator.mod),
('Divmod', divmod),
('Lshift', operator.lshift),
('Rshift', operator.rshift),
('And', operator.and_),
('Xor', operator.xor),
('Or', operator.or_),
]:
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_VARARGS)
static HPy f_impl(HPyContext *ctx, HPy self,
const HPy *args, size_t nargs)
{
HPy a, b;
if (!HPyArg_Parse(ctx, NULL, args, nargs, "OO", &a, &b))
return HPy_NULL;
return HPy_%s(ctx, a, b);
}
@EXPORT(f)
@INIT
""" % (c_name,), name='number_'+c_name)
assert mod.f(5, 4) == op(5, 4)
assert mod.f(6, 3) == op(6, 3)
def test_power(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_VARARGS)
static HPy f_impl(HPyContext *ctx, HPy self,
const HPy *args, size_t nargs)
{
HPy a, b, c;
if (!HPyArg_Parse(ctx, NULL, args, nargs, "OOO", &a, &b, &c))
return HPy_NULL;
return HPy_Power(ctx, a, b, c);
}
@EXPORT(f)
@INIT
""")
assert mod.f(4, 5, None) == 4 ** 5
assert mod.f(4, 5, 7) == pow(4, 5, 7)
def test_matmul(self):
class Mat:
def __matmul__(self, other):
return ('matmul', self, other)
m1 = Mat()
m2 = Mat()
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_VARARGS)
static HPy f_impl(HPyContext *ctx, HPy self,
const HPy *args, size_t nargs)
{
HPy a, b;
if (!HPyArg_Parse(ctx, NULL, args, nargs, "OO", &a, &b))
return HPy_NULL;
return HPy_MatrixMultiply(ctx, a, b);
}
@EXPORT(f)
@INIT
""")
assert mod.f(m1, m2) == m1.__matmul__(m2)
def test_inplace_binary(self):
import operator
for c_name, py_name in [
('Add', '__iadd__'),
('Subtract', '__isub__'),
('Multiply', '__imul__'),
('FloorDivide', '__ifloordiv__'),
('TrueDivide', '__itruediv__'),
('Remainder', '__imod__'),
('Lshift', '__ilshift__'),
('Rshift', '__irshift__'),
('And', '__iand__'),
('Xor', '__ixor__'),
('Or', '__ior__'),
]:
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_VARARGS)
static HPy f_impl(HPyContext *ctx, HPy self,
const HPy *args, size_t nargs)
{
HPy a, b;
if (!HPyArg_Parse(ctx, NULL, args, nargs, "OO", &a, &b))
return HPy_NULL;
return HPy_InPlace%s(ctx, a, b);
}
@EXPORT(f)
@INIT
""" % (c_name,), name='number_'+c_name)
class A:
def mymethod(self, b):
return (py_name, b)
setattr(A, py_name, A.mymethod)
assert mod.f(A(), 12.34) == A().mymethod(12.34)
def test_inplace_power(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_VARARGS)
static HPy f_impl(HPyContext *ctx, HPy self,
const HPy *args, size_t nargs)
{
HPy a, b, c;
if (!HPyArg_Parse(ctx, NULL, args, nargs, "OOO", &a, &b, &c))
return HPy_NULL;
return HPy_InPlacePower(ctx, a, b, c);
}
@EXPORT(f)
@INIT
""")
class A:
def __ipow__(self, b):
return ('ipow', b)
# the behavior of PyNumber_InPlacePower is weird: if __ipow__ is
# defined, the 3rd arg is always ignored, even if the docs say the
# opposite
assert mod.f(A(), 5, None) == A().__ipow__(5)
assert mod.f(A(), 7, 'hello') == A().__ipow__(7)
assert mod.f(4, 5, 7) == pow(4, 5, 7)
def test_inplace_matmul(self):
class Mat:
def __imatmul__(self, other):
return ('imatmul', self, other)
m1 = Mat()
m2 = Mat()
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_VARARGS)
static HPy f_impl(HPyContext *ctx, HPy self,
const HPy *args, size_t nargs)
{
HPy a, b;
if (!HPyArg_Parse(ctx, NULL, args, nargs, "OO", &a, &b))
return HPy_NULL;
return HPy_InPlaceMatrixMultiply(ctx, a, b);
}
@EXPORT(f)
@INIT
""")
assert mod.f(m1, m2) == m1.__imatmul__(m2)
def test_number_check(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
int cond = HPyNumber_Check(ctx, arg);
return HPyLong_FromLong(ctx, cond);
}
@EXPORT(f)
@INIT
""")
assert mod.f("foo") == 0
assert mod.f(42) == 1
assert mod.f(42.1) == 1
|
d1a0e81c16d7b8a0e0f583d152b4c3787e1d2f48
|
fdb9bdc6c4ab2f14ba71e544493706d5e275899f
|
/fhir/resources/STU3/implementationguide.py
|
921b56c28868b3e7fbbfcc7a6a0dcce50cb62dd5
|
[
"BSD-3-Clause"
] |
permissive
|
nazrulworld/fhir.resources
|
6ae8aea8180c611b0c5050759c6dcdf63e4cb061
|
1fd6ea476b27b3fcb8c4ef8f23bc51cf161e69e3
|
refs/heads/main
| 2023-08-30T18:27:27.277249
| 2023-07-03T19:57:06
| 2023-07-03T19:57:06
| 165,297,877
| 256
| 83
|
NOASSERTION
| 2023-08-24T15:34:05
| 2019-01-11T19:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 46,599
|
py
|
implementationguide.py
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/ImplementationGuide
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import backboneelement, domainresource, fhirtypes
class ImplementationGuide(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A set of rules about how FHIR is used.
A set of rules of how FHIR is used to solve a particular problem. This
resource is used to gather all the parts of an implementation guide into a
logical whole and to publish a computable definition of all the parts.
"""
resource_type = Field("ImplementationGuide", const=True)
binary: typing.List[fhirtypes.Uri] = Field(
None,
alias="binary",
title="Image, css, script, etc.",
description=(
"A binary file that is included in the implementation guide when it is"
" published."
),
# if property is element of this resource.
element_property=True,
)
binary__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(None, alias="_binary", title="Extension field for ``binary``.")
contact: typing.List[fhirtypes.ContactDetailType] = Field(
None,
alias="contact",
title="Contact details for the publisher",
description=(
"Contact details to assist a user in finding and communicating with the"
" publisher."
),
# if property is element of this resource.
element_property=True,
)
copyright: fhirtypes.Markdown = Field(
None,
alias="copyright",
title="Use and/or publishing restrictions",
description=(
"A copyright statement relating to the implementation guide and/or its "
"contents. Copyright statements are generally legal restrictions on the"
" use and publishing of the implementation guide."
),
# if property is element of this resource.
element_property=True,
)
copyright__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_copyright", title="Extension field for ``copyright``."
)
date: fhirtypes.DateTime = Field(
None,
alias="date",
title="Date this was last changed",
description=(
"The date (and optionally time) when the implementation guide was "
"published. The date must change if and when the business version "
"changes and it must change if the status code changes. In addition, it"
" should change when the substantive content of the implementation "
"guide changes."
),
# if property is element of this resource.
element_property=True,
)
date__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_date", title="Extension field for ``date``."
)
dependency: typing.List[fhirtypes.ImplementationGuideDependencyType] = Field(
None,
alias="dependency",
title="Another Implementation guide this depends on",
description=(
"Another implementation guide that this implementation depends on. "
"Typically, an implementation guide uses value sets, profiles "
"etc.defined in other implementation guides."
),
# if property is element of this resource.
element_property=True,
)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Natural language description of the implementation guide",
description=(
"A free text natural language description of the implementation guide "
"from a consumer's perspective."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
experimental: bool = Field(
None,
alias="experimental",
title="For testing purposes, not real usage",
description=(
"A boolean value to indicate that this implementation guide is authored"
" for testing purposes (or education/evaluation/marketing), and is not "
"intended to be used for genuine usage."
),
# if property is element of this resource.
element_property=True,
)
experimental__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_experimental", title="Extension field for ``experimental``."
)
fhirVersion: fhirtypes.Id = Field(
None,
alias="fhirVersion",
title="FHIR Version this Implementation Guide targets",
description=(
"The version of the FHIR specification on which this "
"ImplementationGuide is based - this is the formal version of the "
"specification, without the revision number, e.g. "
"[publication].[major].[minor], which is 3.0.2 for this version."
),
# if property is element of this resource.
element_property=True,
)
fhirVersion__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_fhirVersion", title="Extension field for ``fhirVersion``."
)
global_fhir: typing.List[fhirtypes.ImplementationGuideGlobalType] = Field(
None,
alias="global",
title="Profiles that apply globally",
description=(
"A set of profiles that all resources covered by this implementation "
"guide must conform to."
),
# if property is element of this resource.
element_property=True,
)
jurisdiction: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="jurisdiction",
title="Intended jurisdiction for implementation guide (if applicable)",
description=(
"A legal or geographic region in which the implementation guide is "
"intended to be used."
),
# if property is element of this resource.
element_property=True,
)
name: fhirtypes.String = Field(
None,
alias="name",
title="Name for this implementation guide (computer friendly)",
description=(
"A natural language name identifying the implementation guide. This "
"name should be usable as an identifier for the module by machine "
"processing applications such as code generation."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
package: typing.List[fhirtypes.ImplementationGuidePackageType] = Field(
None,
alias="package",
title="Group of resources as used in .page.package",
description=(
"A logical group of resources. Logical groups can be used when building"
" pages."
),
# if property is element of this resource.
element_property=True,
)
page: fhirtypes.ImplementationGuidePageType = Field(
None,
alias="page",
title="Page/Section in the Guide",
description=(
"A page / section in the implementation guide. The root page is the "
"implementation guide home page."
),
# if property is element of this resource.
element_property=True,
)
publisher: fhirtypes.String = Field(
None,
alias="publisher",
title="Name of the publisher (organization or individual)",
description=(
"The name of the individual or organization that published the "
"implementation guide."
),
# if property is element of this resource.
element_property=True,
)
publisher__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_publisher", title="Extension field for ``publisher``."
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="draft | active | retired | unknown",
description=(
"The status of this implementation guide. Enables tracking the life-"
"cycle of the content."
),
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use at your own responsibility; read the official FHIR documentation.
enum_values=["draft", "active", "retired", "unknown"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
url: fhirtypes.Uri = Field(
None,
alias="url",
title="Logical URI to reference this implementation guide (globally unique)",
description=(
"An absolute URI that is used to identify this implementation guide "
"when it is referenced in a specification, model, design or an "
"instance. This SHALL be a URL, SHOULD be globally unique, and SHOULD "
"be an address at which this implementation guide is (or will be) "
"published. The URL SHOULD include the major version of the "
"implementation guide. For more information see [Technical and Business"
" Versions](resource.html#versions)."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
url__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_url", title="Extension field for ``url``."
)
useContext: typing.List[fhirtypes.UsageContextType] = Field(
None,
alias="useContext",
title="Context the content is intended to support",
description=(
"The content was developed with a focus and intent of supporting the "
"contexts that are listed. These terms may be used to assist with "
"indexing and searching for appropriate implementation guide instances."
),
# if property is element of this resource.
element_property=True,
)
version: fhirtypes.String = Field(
None,
alias="version",
title="Business version of the implementation guide",
description=(
"The identifier that is used to identify this version of the "
"implementation guide when it is referenced in a specification, model, "
"design or instance. This is an arbitrary value managed by the "
"implementation guide author and is not expected to be globally unique."
" For example, it might be a timestamp (e.g. yyyymmdd) if a managed "
"version is not available. There is also no expectation that versions "
"can be placed in a lexicographical sequence."
),
# if property is element of this resource.
element_property=True,
)
version__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_version", title="Extension field for ``version``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ImplementationGuide`` according specification,
with preserving original sequence order.
"""
return [
"id",
"meta",
"implicitRules",
"language",
"text",
"contained",
"extension",
"modifierExtension",
"url",
"version",
"name",
"status",
"experimental",
"date",
"publisher",
"contact",
"description",
"useContext",
"jurisdiction",
"copyright",
"fhirVersion",
"dependency",
"package",
"global",
"binary",
"page",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2146(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [
("name", "name__ext"),
("status", "status__ext"),
("url", "url__ext"),
]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class ImplementationGuideDependency(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Another Implementation guide this depends on.
Another implementation guide that this implementation depends on.
Typically, an implementation guide uses value sets, profiles etc.defined in
other implementation guides.
"""
resource_type = Field("ImplementationGuideDependency", const=True)
type: fhirtypes.Code = Field(
None,
alias="type",
title="reference | inclusion",
description="How the dependency is represented when the guide is published.",
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use at your own responsibility; read the official FHIR documentation.
enum_values=["reference", "inclusion"],
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
uri: fhirtypes.Uri = Field(
None,
alias="uri",
title="Where to find dependency",
description="Where the dependency is located.",
# if property is element of this resource.
element_property=True,
element_required=True,
)
uri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_uri", title="Extension field for ``uri``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ImplementationGuideDependency`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "type", "uri"]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_3189(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("type", "type__ext"), ("uri", "uri__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class ImplementationGuideGlobal(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Profiles that apply globally.
A set of profiles that all resources covered by this implementation guide
must conform to.
"""
resource_type = Field("ImplementationGuideGlobal", const=True)
profile: fhirtypes.ReferenceType = Field(
...,
alias="profile",
title="Profile that all resources must conform to",
description="A reference to the profile that all instances must conform to.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["StructureDefinition"],
)
type: fhirtypes.Code = Field(
None,
alias="type",
title="Type this profiles applies to",
description="The type of resource that all instances must conform to.",
# if property is element of this resource.
element_property=True,
element_required=True,
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ImplementationGuideGlobal`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "type", "profile"]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2746(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("type", "type__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class ImplementationGuidePackage(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Group of resources as used in .page.package.
A logical group of resources. Logical groups can be used when building
pages.
"""
resource_type = Field("ImplementationGuidePackage", const=True)
description: fhirtypes.String = Field(
None,
alias="description",
title="Human readable text describing the package",
description=None,
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
name: fhirtypes.String = Field(
None,
alias="name",
title="Name used .page.package",
description="The name for the group, as used in page.package.",
# if property is element of this resource.
element_property=True,
element_required=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
resource: typing.List[fhirtypes.ImplementationGuidePackageResourceType] = Field(
...,
alias="resource",
title="Resource in the implementation guide",
description=(
"A resource that is part of the implementation guide. Conformance "
"resources (value set, structure definition, capability statements "
"etc.) are obvious candidates for inclusion, but any kind of resource "
"can be included as an example resource."
),
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ImplementationGuidePackage`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"name",
"description",
"resource",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2830(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("name", "name__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class ImplementationGuidePackageResource(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Resource in the implementation guide.
A resource that is part of the implementation guide. Conformance resources
(value set, structure definition, capability statements etc.) are obvious
candidates for inclusion, but any kind of resource can be included as an
example resource.
"""
resource_type = Field("ImplementationGuidePackageResource", const=True)
acronym: fhirtypes.String = Field(
None,
alias="acronym",
title="Short code to identify the resource",
description=(
"A short code that may be used to identify the resource throughout the "
"implementation guide."
),
# if property is element of this resource.
element_property=True,
)
acronym__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_acronym", title="Extension field for ``acronym``."
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Reason why included in guide",
description=(
"A description of the reason that a resource has been included in the "
"implementation guide."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
example: bool = Field(
None,
alias="example",
title="If not an example, has its normal meaning",
description=(
"Whether a resource is included in the guide as part of the rules "
"defined by the guide, or just as an example of a resource that "
"conforms to the rules and/or help implementers understand the intent "
"of the guide."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
example__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_example", title="Extension field for ``example``."
)
exampleFor: fhirtypes.ReferenceType = Field(
None,
alias="exampleFor",
title="Resource this is an example of (if applicable)",
description=(
"Another resource that this resource is an example for. This is mostly "
"used for resources that are included as examples of "
"StructureDefinitions."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["StructureDefinition"],
)
name: fhirtypes.String = Field(
None,
alias="name",
title="Human Name for the resource",
description=(
"A human assigned name for the resource. All resources SHOULD have a "
"name, but the name may be extracted from the resource (e.g. "
"ValueSet.name)."
),
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
sourceReference: fhirtypes.ReferenceType = Field(
None,
alias="sourceReference",
title="Location of the resource",
description="Where this resource is found.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e source[x]
one_of_many="source",
one_of_many_required=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Resource"],
)
sourceUri: fhirtypes.Uri = Field(
None,
alias="sourceUri",
title="Location of the resource",
description="Where this resource is found.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e source[x]
one_of_many="source",
one_of_many_required=True,
)
sourceUri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_sourceUri", title="Extension field for ``sourceUri``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ImplementationGuidePackageResource`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"example",
"name",
"description",
"acronym",
"sourceUri",
"sourceReference",
"exampleFor",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_3670(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("example", "example__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_3670(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {"source": ["sourceReference", "sourceUri"]}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
class ImplementationGuidePage(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Page/Section in the Guide.
A page / section in the implementation guide. The root page is the
implementation guide home page.
"""
resource_type = Field("ImplementationGuidePage", const=True)
format: fhirtypes.Code = Field(
None,
alias="format",
title="Format of the page (e.g. html, markdown, etc.)",
description="The format of the page.",
# if property is element of this resource.
element_property=True,
)
format__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_format", title="Extension field for ``format``."
)
kind: fhirtypes.Code = Field(
None,
alias="kind",
title=(
"page | example | list | include | directory | dictionary | toc | "
"resource"
),
description=(
"The kind of page that this is. Some pages are autogenerated (list, "
"example), and other kinds are of interest so that tools can navigate "
"the user to the page of interest."
),
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use at your own responsibility; read the official FHIR documentation.
enum_values=[
"page",
"example",
"list",
"include",
"directory",
"dictionary",
"toc",
"resource",
],
)
kind__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_kind", title="Extension field for ``kind``."
)
package: typing.List[typing.Optional[fhirtypes.String]] = Field(
None,
alias="package",
title="Name of package to include",
description=(
"For constructed pages, a list of packages to include in the page (or "
"else empty for everything)."
),
# if property is element of this resource.
element_property=True,
)
package__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(None, alias="_package", title="Extension field for ``package``.")
page: typing.List[fhirtypes.ImplementationGuidePageType] = Field(
None,
alias="page",
title="Nested Pages / Sections",
description="Nested Pages/Sections under this page.",
# if property is element of this resource.
element_property=True,
)
source: fhirtypes.Uri = Field(
None,
alias="source",
title="Where to find that page",
description="The source address for the page.",
# if property is element of this resource.
element_property=True,
element_required=True,
)
source__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_source", title="Extension field for ``source``."
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Short title shown for navigational assistance",
description=(
"A short title used to represent this page in navigational structures "
"such as table of contents, bread crumbs, etc."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
type: typing.List[fhirtypes.Code] = Field(
None,
alias="type",
title="Kind of resource to include in the list",
description="For constructed pages, what kind of resources to include in the list.",
# if property is element of this resource.
element_property=True,
)
type__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(None, alias="_type", title="Extension field for ``type``.")
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ImplementationGuidePage`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"source",
"title",
"kind",
"type",
"package",
"format",
"page",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2527(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [
("kind", "kind__ext"),
("source", "source__ext"),
("title", "title__ext"),
]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
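# Minimal construction sketch (illustrative; the URL and name below are
# placeholder values, and the import path follows this file's location in the
# package). The root validator above requires url, name and status, so those
# are supplied; all other fields are optional.
from fhir.resources.STU3.implementationguide import ImplementationGuide

ig = ImplementationGuide(
    url="http://example.org/fhir/ImplementationGuide/example",  # placeholder
    name="ExampleGuide",
    status="draft",
)
print(ig.json())  # pydantic serialisation of the populated fields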
|
9f1417770b538b6d7f3b4085cf1093ae0dd6d5a4
|
c268dcf432f3b7171be6eb307aafbe1bd173285a
|
/reddit2telegram/channels/~inactive/r_linux/app.py
|
aa800ceee1d2fcc8a5f65f2c735cfd3f68280292
|
[
"MIT"
] |
permissive
|
Fillll/reddit2telegram
|
a7162da2cc08c81bcc8078ea4160d4ee07461fee
|
5d8ee3097e716734d55a72f5a16ce3d7467e2ed7
|
refs/heads/master
| 2023-08-09T10:34:16.163262
| 2023-07-30T18:36:19
| 2023-07-30T18:36:19
| 67,726,018
| 258
| 205
|
MIT
| 2023-09-07T02:36:36
| 2016-09-08T17:39:46
|
Python
|
UTF-8
|
Python
| false
| false
| 133
|
py
|
app.py
|
#encoding:utf-8
subreddit = 'linux'
t_channel = '@r_linux'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
b5f970f4dddce31f24458b797e8f3b67a486e828
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/wpt_tools/wpt/tools/third_party/html5lib/html5lib/treewalkers/__init__.py
|
b2d3aac3137f5d374ec35dd4bbfdb5e732fc51f0
|
[
"BSD-3-Clause",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 5,719
|
py
|
__init__.py
|
"""A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method which takes a tree as sole argument and
returns an iterator which generates tokens.
"""
from __future__ import absolute_import, division, unicode_literals
from .. import constants
from .._utils import default_etree
__all__ = ["getTreeWalker", "pprint"]
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
:arg str treeType: the name of the tree type required (case-insensitive).
Supported values are:
* "dom": The xml.dom.minidom DOM implementation
* "etree": A generic walker for tree implementations exposing an
elementtree-like interface (known to work with ElementTree,
cElementTree and lxml.etree).
* "lxml": Optimized walker for lxml.etree
* "genshi": a Genshi stream
:arg implementation: A module implementing the tree type e.g.
xml.etree.ElementTree or cElementTree (Currently applies to the "etree"
tree type only).
:arg kwargs: keyword arguments passed to the etree walker--for other
walkers, this has no effect
:returns: a TreeWalker class
"""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType == "dom":
from . import dom
treeWalkerCache[treeType] = dom.TreeWalker
elif treeType == "genshi":
from . import genshi
treeWalkerCache[treeType] = genshi.TreeWalker
elif treeType == "lxml":
from . import etree_lxml
treeWalkerCache[treeType] = etree_lxml.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
def concatenateCharacterTokens(tokens):
pendingCharacters = []
for token in tokens:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
pendingCharacters.append(token["data"])
else:
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
pendingCharacters = []
yield token
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
def pprint(walker):
"""Pretty printer for tree walkers
Takes a TreeWalker instance and pretty prints the output of walking the tree.
:arg walker: a TreeWalker instance
"""
output = []
indent = 0
for token in concatenateCharacterTokens(walker):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
# tag name
if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
if token["namespace"] in constants.prefixes:
ns = constants.prefixes[token["namespace"]]
else:
ns = token["namespace"]
name = "%s %s" % (ns, token["name"])
else:
name = token["name"]
output.append("%s<%s>" % (" " * indent, name))
indent += 2
# attributes (sorted for consistent ordering)
attrs = token["data"]
for (namespace, localname), value in sorted(attrs.items()):
if namespace:
if namespace in constants.prefixes:
ns = constants.prefixes[namespace]
else:
ns = namespace
name = "%s %s" % (ns, localname)
else:
name = localname
output.append("%s%s=\"%s\"" % (" " * indent, name, value))
# self-closing
if type == "EmptyTag":
indent -= 2
elif type == "EndTag":
indent -= 2
elif type == "Comment":
output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
elif type == "Doctype":
if token["name"]:
if token["publicId"]:
output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
(" " * indent,
token["name"],
token["publicId"],
token["systemId"] if token["systemId"] else ""))
elif token["systemId"]:
output.append("""%s<!DOCTYPE %s "" "%s">""" %
(" " * indent,
token["name"],
token["systemId"]))
else:
output.append("%s<!DOCTYPE %s>" % (" " * indent,
token["name"]))
else:
output.append("%s<!DOCTYPE >" % (" " * indent,))
elif type == "Characters":
output.append("%s\"%s\"" % (" " * indent, token["data"]))
elif type == "SpaceCharacters":
assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
else:
raise ValueError("Unknown token type, %s" % type)
return "\n".join(output)
|
a5e73235197eb021e62f2343a9669f508ad24ac7
|
8fb93fda613102df17c6c3788e9214713b94c4fd
|
/demos/demo_mustache.py
|
0177c796c028d756f800fa7b2c90bb9faaa67f7c
|
[] |
no_license
|
j2labs/brubeck
|
b2b314dd1f973399966b0cb602d21d0b35c258ea
|
0e42200126155973b1403a2c073768a3345329a6
|
refs/heads/master
| 2023-09-05T17:34:04.684344
| 2016-06-01T02:46:31
| 2016-06-01T02:46:31
| 1,339,333
| 320
| 54
| null | 2020-03-18T13:18:21
| 2011-02-07T20:39:12
|
Python
|
UTF-8
|
Python
| false
| false
| 643
|
py
|
demo_mustache.py
|
#!/usr/bin/env python
from brubeck.request_handling import Brubeck
from brubeck.templating import MustacheRendering, load_mustache_env
from brubeck.connections import Mongrel2Connection
class DemoHandler(MustacheRendering):
def get(self):
name = self.get_argument('name', 'dude')
context = {
'name': name,
}
return self.render_template('success', **context)
app = Brubeck(msg_conn=Mongrel2Connection('tcp://127.0.0.1:9999', 'tcp://127.0.0.1:9998'),
handler_tuples=[(r'^/brubeck', DemoHandler)],
template_loader=load_mustache_env('./templates/mustache'))
app.run()
|
53020e4aa9c8cc1c278e8277df4f6ce85784141a
|
894b88c702e1b3cb4fe394081ca2216051ebeb3e
|
/benchmark/pyfastx_fasta_sequence_iterate_without_index.py
|
f68e5a3c5773afc6cb33770e66d35920f3c9371c
|
[
"MIT"
] |
permissive
|
lmdu/pyfastx
|
3cdb7c60fc26a639918496a643ac6a71bf390f69
|
b6f7b9aba6869df899135944922d90635c78f669
|
refs/heads/master
| 2023-08-11T18:28:17.746734
| 2023-04-18T15:43:49
| 2023-04-18T15:43:49
| 176,523,301
| 203
| 20
|
MIT
| 2023-08-21T14:39:47
| 2019-03-19T13:55:46
|
C
|
UTF-8
|
Python
| false
| false
| 97
|
py
|
pyfastx_fasta_sequence_iterate_without_index.py
|
import sys
import pyfastx
for name, seq in pyfastx.Fasta(sys.argv[1], build_index=False):
pass
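# Example invocation (illustrative; 'genome.fa' is a placeholder path):
# python pyfastx_fasta_sequence_iterate_without_index.py genome.fa
# Typically wrapped with /usr/bin/time or a similar tool when benchmarking.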
|
10ce29ef164d38160c224c1b7a08ee4daec5b6d4
|
15eb68a30bd1bcd8c153ce3c8774e09ef3f4135d
|
/TrainingExtensions/tensorflow/src/python/aimet_tensorflow/keras/adaround/adaround_wrapper.py
|
e390150d6e66509fa628d06221c1240d2caf799f
|
[
"BSD-3-Clause"
] |
permissive
|
quic/aimet
|
77a984af68fc3c46d98c707d18a14c95a3efdacf
|
5a406e657082b6a4f6e4bf48f0e46e085cb1e351
|
refs/heads/develop
| 2023-08-21T12:51:10.500286
| 2023-08-18T18:35:39
| 2023-08-18T18:35:39
| 257,688,216
| 1,676
| 339
|
NOASSERTION
| 2023-09-08T06:59:39
| 2020-04-21T18:57:10
|
Python
|
UTF-8
|
Python
| false
| false
| 12,317
|
py
|
adaround_wrapper.py
|
# /usr/bin/env python3.6
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Adaround wrapper """
import typing
from typing import Dict, Union, List, Tuple
import numpy as np
import tensorflow as tf
from tensorflow import keras
# Import AIMET specific modules
import aimet_common.libpymo as libpymo
from aimet_common.defs import QuantScheme
from aimet_tensorflow.adaround.adaround_wrapper import AdaroundWrapper as TfAdaroundWrapper
BATCH_SIZE = 32
ConvType = typing.Union[tf.keras.layers.Conv2D,
tf.keras.layers.Conv2DTranspose,
tf.keras.layers.DepthwiseConv2D]
_supported_convs = ConvType.__args__
class AdaroundWrapper(keras.layers.Layer):
"""
Adaround Wrapper base class
"""
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
def __init__(self, layer: tf.keras.layers.Layer, param_bw: int, quant_scheme: QuantScheme, is_symmetric: bool,
strict_symmetric: bool, unsigned_symmetric: bool, per_channel_enabled: bool,
output_height: Union[int, None], output_width: Union[int, None], output_channels: Union[int, None]):
"""
:param layer: Tf keras layer.
:param param_bw: Bitwidth for weight quantization
:param quant_scheme: Quantization scheme
:param is_symmetric: Symmetric vs Asymmetric encodings
:param strict_symmetric: Strict symmetric flag
:param unsigned_symmetric: Unsigned symmetric flag
:param per_channel_enabled: Flag set for per channel encodings
"""
super(AdaroundWrapper, self).__init__()
self._layer = layer
self.per_channel_enabled = per_channel_enabled
self._orig_weight_tensor_shape, self._weight_tensor, self._bias_tensor = \
self._get_weight_and_bias_tensors(layer)
self.use_soft_rounding = self.add_weight(layer.name + '_use_soft_rounding', dtype=tf.bool,
initializer=tf.constant_initializer(True), trainable=False)
self.ch_axis = self._get_channel_axis(self._layer, self._weight_tensor.shape)
self.encoding = self.compute_encodings(self._weight_tensor, param_bw, quant_scheme, is_symmetric,
strict_symmetric=strict_symmetric, unsigned_symmetric=unsigned_symmetric,
per_channel_enabled=self.per_channel_enabled, ch_axis=self.ch_axis)
alpha = self._calculate_alpha(self._weight_tensor, self.encoding, self.per_channel_enabled, self.ch_axis)
self.alpha = self.add_weight(self._layer.name + '_alpha', trainable=True, shape=alpha.shape)
self.alpha.assign(alpha)
self._output_height = output_height
self._output_width = output_width
self._output_channels = output_channels
@staticmethod
def _get_channel_axis(layer: tf.keras.layers.Layer, shape: Tuple) -> int:
"""
Get channel axis corresponding to the Keras layer
:param layer: Keras layer to get channel axis for
:param shape: Shape of the weight tensor of the layer
:return: Channel axis for the Keras layer
"""
ch_axis = len(shape) - 1
if isinstance(layer, tf.keras.layers.Conv2DTranspose):
ch_axis = 2
return ch_axis
def adaround_weights(self) -> tf.Tensor:
"""
        Adaround the weight tensor, with an extra post-processing (reshape) step if the layer is a DepthwiseConv2D
:return: AdaRounded weight tensor
"""
adaround_tensor = TfAdaroundWrapper.get_adarounded_weight(self.alpha, self._weight_tensor, self.encoding,
self.use_soft_rounding,
enable_per_channel=self.per_channel_enabled,
ch_axis=self.ch_axis)
if self.per_channel_enabled and isinstance(self._layer, tf.keras.layers.DepthwiseConv2D):
return tf.reshape(adaround_tensor, self._orig_weight_tensor_shape)
return adaround_tensor
def _compute_output_with_adarounded_weights(self, inp_tensor: tf.Tensor, adaround_weight_tensor: tf.Tensor) -> \
tf.Tensor:
"""
Compute output of AdaroundSupportedModules with adarounded weights
:param inp_tensor: The input tensor to be used for computing the output
:param adaround_weight_tensor: The adarounded weight
:return: output of the op computed with AdaRounded weights
"""
if isinstance(self._layer, _supported_convs):
kwargs = self._get_conv_args(self._layer)
if isinstance(self._layer, tf.keras.layers.DepthwiseConv2D):
adaround_out_tensor = tf.nn.depthwise_conv2d(inp_tensor, adaround_weight_tensor, **kwargs)
elif isinstance(self._layer, tf.keras.layers.Conv2DTranspose):
adaround_out_tensor = TfAdaroundWrapper.compute_output_with_adaround_weights_conv2d_transpose_helper(
self._output_height,
self._output_width,
self._output_channels,
inp_tensor,
adaround_weight_tensor,
**kwargs)
else:
adaround_out_tensor = tf.nn.conv2d(inp_tensor, adaround_weight_tensor, **kwargs)
elif isinstance(self._layer, tf.keras.layers.Dense):
adaround_out_tensor = tf.matmul(inp_tensor, adaround_weight_tensor)
else:
raise ValueError('Keras Layer: {} not supported'.format(self._layer))
return adaround_out_tensor
# Different 'call' method signatures between TF 2.4 and TF 2.10
# pylint: disable=arguments-differ
def call(self, inputs, *args, **kwargs): # pylint: disable=unused-argument
"""
:param inputs: Input tensor
:param kwargs: Additional keyword arguments
:return: Adarounded output tensor
"""
adaround_weight_tensor = self.adaround_weights()
adaround_out_tensor = self._compute_output_with_adarounded_weights(inputs, adaround_weight_tensor)
if self._bias_tensor is not None:
adaround_out_tensor = adaround_out_tensor + self._bias_tensor
return adaround_out_tensor
@staticmethod
def _calculate_alpha(weight, encoding: Union[libpymo.TfEncoding, List[libpymo.TfEncoding]],
per_channel_enabled: bool, ch_axis: int) -> tf.Tensor:
"""
Calculate alpha parameter for either per tensor or per channel
:param weight: The weight tensor to be ada rounded
:param encoding: Encoding(s) for the tensor
        :param per_channel_enabled: Flag for per channel; used to broadcast the tensor
        :param ch_axis: Channel axis to broadcast if per channel is enabled
        :return: Alpha tensor used for adaptive rounding
"""
return TfAdaroundWrapper.calculate_alpha(weight, encoding, per_channel_enabled, ch_axis)
def _get_weight_and_bias_tensors(self, layer: tf.keras.layers.Layer) -> Tuple[Tuple, np.ndarray, np.ndarray]:
"""
Function to properly grab the weight and bias tensor of a given Keras layer, as well as transform
weights if needed.
:param layer: Keras layer to have weights and bias extracted from
        :return: original weight tensor shape, weight tensor and bias tensor of the Keras layer
"""
weights = layer.get_weights()
weight_tensor = weights[0]
orig_weight_shape = weight_tensor.shape
if self.per_channel_enabled and isinstance(layer, tf.keras.layers.DepthwiseConv2D):
weight_tensor = TfAdaroundWrapper.transform_input_ndarray_for_depthwise_conv_2d(weight_tensor)
bias_tensor = None
if len(weights) > 1:
bias_tensor = weights[1]
return orig_weight_shape, weight_tensor, bias_tensor
@staticmethod
def compute_encodings(weight_data: np.ndarray, param_bw: int, quant_scheme: QuantScheme, is_symmetric: bool,
strict_symmetric: bool, unsigned_symmetric: bool, per_channel_enabled: bool,
ch_axis: int) -> libpymo.TfEncoding:
"""
:param weight_data: Weight data of Adaround supported ops
:param param_bw: bitwidth (4-31) to use for quantizing weight data
:param quant_scheme: Quantization scheme
:param is_symmetric: True if symmetric encodings is used, else asymmetric encodings.
:param strict_symmetric: If true, and if is_symmetric is true, calculate encodings exactly centered
around 0. E.g. if bw==8, then this results in quantized int values (-127:127). If this is not set, then
quantized int values would be (-128:127) to use the entire range.
:param unsigned_symmetric: If true, and if is_symmetric is true, check if the entire statistics we
have collected are for +ve numbers. If yes, use quantized int values (0:255). This is a special case,
where we have double the resolution for the computed encodings while still preserving the zero-point to
be absolute 0.
:param per_channel_enabled: Flag set to compute encodings on a per channel basis
        :param ch_axis: Channel axis used when per channel encodings are enabled
:return: Encodings (max, min, delta and offset)
"""
return TfAdaroundWrapper.compute_encodings(weight_data, param_bw, quant_scheme, is_symmetric,
strict_symmetric, unsigned_symmetric,
enable_per_channel=per_channel_enabled,
ch_axis=ch_axis)
@staticmethod
def _get_conv_args(layer: tf.keras.layers.Conv2D) -> Dict:
"""
        :param layer: Keras layer of type Conv2D, DepthwiseConv2D or Conv2DTranspose
:return: keyword arguments
"""
if layer.data_format == 'channels_last':
data_format = 'NHWC'
strides = [1, layer.strides[0], layer.strides[1], 1]
else:
data_format = 'NCHW'
strides = [1, 1, layer.strides[0], layer.strides[1]]
if layer.padding == 'valid':
padding = 'VALID'
else:
padding = 'SAME'
kwargs = {'data_format': data_format,
'strides': strides,
'padding': padding,
'dilations': layer.dilation_rate}
return kwargs
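# Illustrative usage sketch (not part of the original file; the enum member and
# parameter values below are assumptions, not AIMET defaults):
#
# wrapper = AdaroundWrapper(layer=dense_layer,            # a built keras Dense layer
#                           param_bw=4,
#                           quant_scheme=QuantScheme.post_training_tf_enhanced,
#                           is_symmetric=False,
#                           strict_symmetric=False,
#                           unsigned_symmetric=True,
#                           per_channel_enabled=False,
#                           output_height=None,           # only needed for Conv2DTranspose
#                           output_width=None,
#                           output_channels=None)
# output = wrapper(input_batch)   # forward pass using (soft-)adarounded weights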
|
409187fadb21b1a86a0fd2eb973cba33ce42d49d
|
83963c19fd120dcc7498b726cc56de7fbb900a47
|
/tests/test_photokit.py
|
7f606319b6ffa0c13ace9b569e4b77498e467ccc
|
[
"MIT",
"LicenseRef-scancode-public-domain",
"CC-BY-2.0"
] |
permissive
|
RhetTbull/osxphotos
|
55ad4f1257bcd26bb3fbadde6ce5dd59c0917354
|
2cb5a4d18a27be6ccf68f5f35abd39418d238016
|
refs/heads/main
| 2023-09-02T18:11:06.227191
| 2023-09-02T16:06:51
| 2023-09-02T16:06:51
| 192,160,985
| 1,287
| 93
|
MIT
| 2023-09-14T14:10:58
| 2019-06-16T07:07:49
|
Python
|
UTF-8
|
Python
| false
| false
| 13,434
|
py
|
test_photokit.py
|
""" test photokit.py methods """
import os
import pathlib
import tempfile
import pytest
from osxphotos.platform import is_macos
if is_macos:
from osxphotos.photokit import (
PHOTOS_VERSION_CURRENT,
PHOTOS_VERSION_ORIGINAL,
PHOTOS_VERSION_UNADJUSTED,
LivePhotoAsset,
PhotoAsset,
PhotoLibrary,
VideoAsset,
)
else:
pytest.skip(allow_module_level=True)
skip_test = "OSXPHOTOS_TEST_EXPORT" not in os.environ
pytestmark = pytest.mark.skipif(
skip_test, reason="Skip if not running with author's personal library."
)
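# Example (illustrative): enable these tests by exporting the flag before
# running pytest against the author's personal Photos library, e.g.
#   OSXPHOTOS_TEST_EXPORT=1 pytest tests/test_photokit.py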
UUID_DICT = {
"plain_photo": {
"uuid": "C6C712C5-9316-408D-A3C3-125661422DA9",
"filename": "IMG_8844.JPG",
},
"hdr": {"uuid": "DD641004-4E37-4233-AF31-CAA0896490B2", "filename": "IMG_6162.JPG"},
"selfie": {
"uuid": "C925CFDC-FF2B-4E71-AC9D-C669B6453A8B",
"filename": "IMG_1929.JPG",
},
"video": {
"uuid": "F4430659-7B17-487E-8029-8C1ABEBE23DF",
"filename": "IMG_9411.TRIM.MOV",
},
"hasadjustments": {
"uuid": "2F252D2C-C9DE-4BE1-8610-9F968C634D3D",
"filename": "IMG_2860.JPG",
"adjusted_size": 3012634,
"unadjusted_size": 2580058,
},
"slow_mo": {
"uuid": "160447F8-4EB0-4FAE-A26A-3D32EA698F75",
"filename": "IMG_4055.MOV",
},
"live_photo": {
"uuid": "8EC216A2-0032-4934-BD3F-04C6259B3304",
"filename": "IMG_3259.HEIC",
"filename_video": "IMG_3259.mov",
},
"burst": {
"uuid": "CDE4E5D9-1428-41E6-8569-EC0C45FD8E5A",
"filename": "IMG_8196.JPG",
"burst_selected": 4,
"burst_all": 5,
},
"raw+jpeg": {
"uuid": "E3DD04AF-CB65-4D9B-BB79-FF4C955533DB",
"filename": "IMG_1994.JPG",
"raw_filename": "IMG_1994.CR2",
"unadjusted_size": 16128420,
"uti_raw": "com.canon.cr2-raw-image",
"uti": "public.jpeg",
},
}
def test_fetch_uuid():
"""test fetch_uuid"""
uuid = UUID_DICT["plain_photo"]["uuid"]
filename = UUID_DICT["plain_photo"]["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert isinstance(photo, PhotoAsset)
def test_plain_photo():
"""test plain_photo"""
uuid = UUID_DICT["plain_photo"]["uuid"]
filename = UUID_DICT["plain_photo"]["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert photo.original_filename == filename
assert photo.raw_filename is None
assert photo.isphoto
assert not photo.ismovie
def test_raw_plus_jpeg():
"""test RAW+JPEG"""
uuid = UUID_DICT["raw+jpeg"]["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert photo.original_filename == UUID_DICT["raw+jpeg"]["filename"]
assert photo.raw_filename == UUID_DICT["raw+jpeg"]["raw_filename"]
assert photo.uti_raw() == UUID_DICT["raw+jpeg"]["uti_raw"]
assert photo.uti() == UUID_DICT["raw+jpeg"]["uti"]
def test_hdr():
"""test hdr"""
uuid = UUID_DICT["hdr"]["uuid"]
filename = UUID_DICT["hdr"]["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert photo.original_filename == filename
assert photo.hdr
def test_burst():
"""test burst and burstid"""
test_dict = UUID_DICT["burst"]
uuid = test_dict["uuid"]
filename = test_dict["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert photo.original_filename == filename
assert photo.burst
assert photo.burstid
# def test_selfie():
# """ test selfie """
# uuid = UUID_DICT["selfie"]["uuid"]
# filename = UUID_DICT["selfie"]["filename"]
# lib = PhotoLibrary()
# photo = lib.fetch_uuid(uuid)
# assert photo.original_filename == filename
# assert photo.selfie
def test_video():
"""test ismovie"""
uuid = UUID_DICT["video"]["uuid"]
filename = UUID_DICT["video"]["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert isinstance(photo, VideoAsset)
assert photo.original_filename == filename
assert photo.ismovie
assert not photo.isphoto
def test_slow_mo():
"""test slow_mo"""
test_dict = UUID_DICT["slow_mo"]
uuid = test_dict["uuid"]
filename = test_dict["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert isinstance(photo, VideoAsset)
assert photo.original_filename == filename
assert photo.ismovie
assert photo.slow_mo
assert not photo.isphoto
### PhotoAsset
def test_export_photo_original():
"""test PhotoAsset.export"""
test_dict = UUID_DICT["hasadjustments"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_ORIGINAL)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
assert export_path.stat().st_size == test_dict["unadjusted_size"]
def test_export_photo_unadjusted():
"""test PhotoAsset.export"""
test_dict = UUID_DICT["hasadjustments"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
assert export_path.stat().st_size == test_dict["unadjusted_size"]
def test_export_photo_current():
"""test PhotoAsset.export"""
test_dict = UUID_DICT["hasadjustments"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
assert export_path.stat().st_size == test_dict["adjusted_size"]
def test_export_photo_raw():
"""test PhotoAsset.export for raw component"""
test_dict = UUID_DICT["raw+jpeg"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, raw=True)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["raw_filename"]
assert export_path.stem == pathlib.Path(filename).stem
assert export_path.stat().st_size == test_dict["unadjusted_size"]
### VideoAsset
def test_export_video_original():
"""test VideoAsset.export"""
test_dict = UUID_DICT["video"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_ORIGINAL)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
def test_export_video_unadjusted():
"""test VideoAsset.export"""
test_dict = UUID_DICT["video"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
def test_export_video_current():
"""test VideoAsset.export"""
test_dict = UUID_DICT["video"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_CURRENT)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
### Slow-Mo VideoAsset
def test_export_slow_mo_original():
"""test VideoAsset.export for slow mo video"""
test_dict = UUID_DICT["slow_mo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_ORIGINAL)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
def test_export_slow_mo_unadjusted():
"""test VideoAsset.export for slow mo video"""
test_dict = UUID_DICT["slow_mo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
def test_export_slow_mo_current():
"""test VideoAsset.export for slow mo video"""
test_dict = UUID_DICT["slow_mo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_CURRENT)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
### LivePhotoAsset
def test_export_live_original():
"""test LivePhotoAsset.export"""
test_dict = UUID_DICT["live_photo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_ORIGINAL)
for f in export_path:
filepath = pathlib.Path(f)
assert filepath.is_file()
filename = test_dict["filename"]
assert filepath.stem == pathlib.Path(filename).stem
def test_export_live_unadjusted():
"""test LivePhotoAsset.export"""
test_dict = UUID_DICT["live_photo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED)
for file in export_path:
filepath = pathlib.Path(file)
assert filepath.is_file()
filename = test_dict["filename"]
assert filepath.stem == pathlib.Path(filename).stem
def test_export_live_current():
"""test LivePhotAsset.export"""
test_dict = UUID_DICT["live_photo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_CURRENT)
for file in export_path:
filepath = pathlib.Path(file)
assert filepath.is_file()
filename = test_dict["filename"]
assert filepath.stem == pathlib.Path(filename).stem
def test_export_live_current_just_photo():
"""test LivePhotAsset.export"""
test_dict = UUID_DICT["live_photo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, photo=True, video=False)
assert len(export_path) == 1
assert export_path[0].lower().endswith(".heic")
def test_export_live_current_just_video():
"""test LivePhotAsset.export"""
test_dict = UUID_DICT["live_photo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, photo=False, video=True)
assert len(export_path) == 1
assert export_path[0].lower().endswith(".mov")
def test_fetch_burst_uuid():
"""test fetch_burst_uuid"""
test_dict = UUID_DICT["burst"]
uuid = test_dict["uuid"]
filename = test_dict["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
bursts_selected = lib.fetch_burst_uuid(photo.burstid)
assert len(bursts_selected) == test_dict["burst_selected"]
assert isinstance(bursts_selected[0], PhotoAsset)
bursts_all = lib.fetch_burst_uuid(photo.burstid, all=True)
assert len(bursts_all) == test_dict["burst_all"]
assert isinstance(bursts_all[0], PhotoAsset)
|
822f5b3fbfe890b5369a500f0849289c239d7927
|
5eff7a36d9a9917dce9111f0c3074375fe6f7656
|
/lib/mesa/src/compiler/glsl/tests/lower_precision_test.py
|
999c6267af338a6d54a6f30679e9829bf938a023
|
[] |
no_license
|
openbsd/xenocara
|
cb392d02ebba06f6ff7d826fd8a89aa3b8401779
|
a012b5de33ea0b977095d77316a521195b26cc6b
|
refs/heads/master
| 2023-08-25T12:16:58.862008
| 2023-08-12T16:16:25
| 2023-08-12T16:16:25
| 66,967,384
| 177
| 66
| null | 2023-07-22T18:12:37
| 2016-08-30T18:36:01
|
C
|
UTF-8
|
Python
| false
| false
| 56,024
|
py
|
lower_precision_test.py
|
# encoding=utf-8
# Copyright © 2019 Google
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import subprocess
import tempfile
import re
from collections import namedtuple
Test = namedtuple("Test", "name source match_re")
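# Illustrative note (not part of the original file): each entry compiles the
# GLSL in `source` and checks the lowered IR dump against `match_re`,
# presumably via something like
#   re.search(test.match_re, ir_dump)
# so a pattern such as r'\(expression +float16_t +/' only matches when the
# division was actually lowered to 16-bit precision.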
TESTS = [
Test("f32 simple division",
"""
uniform mediump float a, b;
void main()
{
gl_FragColor.rgba = vec4(a / b);
}
""",
r'\(expression +float16_t +/'),
Test("i32 simple division",
"""
#version 300 es
precision mediump float;
precision mediump int;
uniform mediump int a, b;
out vec4 color;
void main()
{
color = vec4(a / b);
}
""",
r'\(expression +int16_t +/'),
Test("u32 simple division",
"""
#version 300 es
precision mediump float;
precision mediump int;
uniform mediump uint a, b;
out vec4 color;
void main()
{
color = vec4(a / b);
}
""",
r'\(expression +uint16_t +/'),
Test("dot",
"""
uniform mediump vec2 a, b;
void main()
{
gl_FragColor.rgba = vec4(dot(a, b));
}
""",
r'\(expression +float16_t +dot\b'),
Test("f32 array with const index",
"""
precision mediump float;
uniform float in_simple[2];
void main()
{
gl_FragColor = vec4(in_simple[0] / in_simple[1]);
}
""",
r'\(expression +float16_t +/'),
Test("i32 array with const index",
"""
#version 300 es
precision mediump float;
precision mediump int;
uniform int in_simple[2];
out vec4 color;
void main()
{
color = vec4(in_simple[0] / in_simple[1]);
}
""",
r'\(expression +int16_t +/'),
Test("u32 array with const index",
"""
#version 300 es
precision mediump float;
precision mediump int;
uniform uint in_simple[2];
out vec4 color;
void main()
{
color = vec4(in_simple[0] / in_simple[1]);
}
""",
r'\(expression +uint16_t +/'),
Test("f32 array with uniform index",
"""
precision mediump float;
uniform float in_simple[2];
uniform int i0, i1;
void main()
{
gl_FragColor = vec4(in_simple[i0] / in_simple[i1]);
}
""",
r'\(expression +float16_t +/'),
Test("i32 array with uniform index",
"""
#version 300 es
precision mediump float;
precision mediump int;
uniform int in_simple[2];
uniform int i0, i1;
out vec4 color;
void main()
{
color = vec4(in_simple[i0] / in_simple[i1]);
}
""",
r'\(expression +int16_t +/'),
Test("u32 array with uniform index",
"""
#version 300 es
precision mediump float;
precision mediump int;
uniform uint in_simple[2];
uniform int i0, i1;
out vec4 color;
void main()
{
color = vec4(in_simple[i0] / in_simple[i1]);
}
""",
r'\(expression +uint16_t +/'),
Test("f32 array-of-array with const index",
"""
#version 310 es
precision mediump float;
uniform float in_aoa[2][2];
layout(location = 0) out float out_color;
void main()
{
out_color = in_aoa[0][0] / in_aoa[1][1];
}
""",
r'\(expression +float16_t +/'),
Test("i32 array-of-array with const index",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform int in_aoa[2][2];
layout(location = 0) out highp int out_color;
void main()
{
out_color = in_aoa[0][0] / in_aoa[1][1];
}
""",
r'\(expression +int16_t +/'),
Test("u32 array-of-array with const index",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint in_aoa[2][2];
layout(location = 0) out highp uint out_color;
void main()
{
out_color = in_aoa[0][0] / in_aoa[1][1];
}
""",
r'\(expression +uint16_t +/'),
Test("f32 array-of-array with uniform index",
"""
#version 310 es
precision mediump float;
uniform float in_aoa[2][2];
uniform int i0, i1;
layout(location = 0) out float out_color;
void main()
{
out_color = in_aoa[i0][i0] / in_aoa[i1][i1];
}
""",
r'\(expression +float16_t +/'),
Test("i32 array-of-array with uniform index",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform int in_aoa[2][2];
uniform int i0, i1;
layout(location = 0) out highp int out_color;
void main()
{
out_color = in_aoa[i0][i0] / in_aoa[i1][i1];
}
""",
r'\(expression +int16_t +/'),
Test("u32 array-of-array with uniform index",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint in_aoa[2][2];
uniform int i0, i1;
layout(location = 0) out highp uint out_color;
void main()
{
out_color = in_aoa[i0][i0] / in_aoa[i1][i1];
}
""",
r'\(expression +uint16_t +/'),
Test("f32 array index",
"""
uniform mediump float a, b;
uniform mediump float values[2];
void main()
{
gl_FragColor.rgba = vec4(values[int(a / b)]);
}
""",
r'\(expression +float16_t +/'),
Test("i32 array index",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform mediump int a, b;
uniform mediump int values[2];
out highp int color;
void main()
{
color = values[a / b];
}
""",
r'\(expression +int16_t +/'),
Test("f32 function",
"""
precision mediump float;
uniform float a, b;
mediump float
get_a()
{
return a;
}
float
get_b()
{
return b;
}
void main()
{
gl_FragColor = vec4(get_a() / get_b());
}
""",
r'\(expression +float16_t +/'),
Test("i32 function",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform int a, b;
mediump int
get_a()
{
return a;
}
int
get_b()
{
return b;
}
out highp int color;
void main()
{
color = get_a() / get_b();
}
""",
r'\(expression +int16_t +/'),
Test("u32 function",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint a, b;
mediump uint
get_a()
{
return a;
}
uint
get_b()
{
return b;
}
out highp uint color;
void main()
{
color = get_a() / get_b();
}
""",
r'\(expression +uint16_t +/'),
Test("f32 function mediump args",
"""
precision mediump float;
uniform float a, b;
mediump float
do_div(float x, float y)
{
return x / y;
}
void main()
{
gl_FragColor = vec4(do_div(a, b));
}
""",
r'\(expression +float16_t +/'),
Test("i32 function mediump args",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform int a, b;
mediump int
do_div(int x, int y)
{
return x / y;
}
out highp int color;
void main()
{
color = do_div(a, b);
}
""",
r'\(expression +int16_t +/'),
Test("u32 function mediump args",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint a, b;
mediump uint
do_div(uint x, uint y)
{
return x / y;
}
out highp uint color;
void main()
{
color = do_div(a, b);
}
""",
r'\(expression +uint16_t +/'),
Test("f32 function highp args",
"""
precision mediump float;
uniform float a, b;
mediump float
do_div(highp float x, highp float y)
{
return x / y;
}
void main()
{
gl_FragColor = vec4(do_div(a, b));
}
""",
r'\(expression +float +/'),
Test("i32 function highp args",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform int a, b;
mediump int
do_div(highp int x, highp int y)
{
return x / y;
}
out highp int color;
void main()
{
color = do_div(a, b);
}
""",
r'\(expression +int +/'),
Test("u32 function highp args",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint a, b;
mediump uint
do_div(highp uint x, highp uint y)
{
return x / y;
}
out highp uint color;
void main()
{
color = do_div(a, b);
}
""",
r'\(expression +uint +/'),
Test("f32 function inout different precision highp",
"""
uniform mediump float a, b;
void
do_div(inout highp float x, highp float y)
{
x = x / y;
}
void main()
{
mediump float temp = a;
do_div(temp, b);
gl_FragColor = vec4(temp);
}
""",
r'\(expression +float +/'),
Test("i32 function inout different precision highp",
"""
#version 310 es
uniform mediump int a, b;
void
do_div(inout highp int x, highp int y)
{
x = x / y;
}
out mediump int color;
void main()
{
mediump int temp = a;
do_div(temp, b);
color = temp;
}
""",
r'\(expression +int +/'),
Test("u32 function inout different precision highp",
"""
#version 310 es
uniform mediump uint a, b;
void
do_div(inout highp uint x, highp uint y)
{
x = x / y;
}
out mediump uint color;
void main()
{
mediump uint temp = a;
do_div(temp, b);
color = temp;
}
""",
r'\(expression +uint +/'),
Test("f32 function inout different precision mediump",
"""
uniform highp float a, b;
void
do_div(inout mediump float x, mediump float y)
{
x = x / y;
}
void main()
{
highp float temp = a;
do_div(temp, b);
gl_FragColor = vec4(temp);
}
""",
r'\(expression +float16_t +/'),
Test("i32 function inout different precision mediump",
"""
#version 310 es
uniform highp int a, b;
out highp int color;
void
do_div(inout mediump int x, mediump int y)
{
x = x / y;
}
void main()
{
highp int temp = a;
do_div(temp, b);
color = temp;
}
""",
r'\(expression +int16_t +/'),
Test("u32 function inout different precision mediump",
"""
#version 310 es
uniform highp uint a, b;
out highp uint color;
void
do_div(inout mediump uint x, mediump uint y)
{
x = x / y;
}
void main()
{
highp uint temp = a;
do_div(temp, b);
color = temp;
}
""",
r'\(expression +uint16_t +/'),
Test("f32 if",
"""
precision mediump float;
uniform float a, b;
void
main()
{
if (a / b < 0.31)
gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
else
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
""",
r'\(expression +float16_t +/'),
Test("i32 if",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform int a, b;
out vec4 color;
void
main()
{
if (a / b < 10)
color = vec4(0.0, 1.0, 0.0, 1.0);
else
color = vec4(1.0, 0.0, 0.0, 1.0);
}
""",
r'\(expression +int16_t +/'),
Test("u32 if",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint a, b;
out vec4 color;
void
main()
{
if (a / b < 10u)
color = vec4(0.0, 1.0, 0.0, 1.0);
else
color = vec4(1.0, 0.0, 0.0, 1.0);
}
""",
r'\(expression +uint16_t +/'),
Test("matrix",
"""
precision mediump float;
uniform vec2 a;
uniform mat2 b;
void main()
{
gl_FragColor = vec4(b * a, 0.0, 0.0);
}
""",
r'\(expression +f16vec2 \* \(var_ref b\) \(var_ref a\)'),
Test("f32 simple struct deref",
"""
precision mediump float;
struct simple {
float a, b;
};
uniform simple in_simple;
void main()
{
gl_FragColor = vec4(in_simple.a / in_simple.b);
}
""",
r'\(expression +float16_t +/'),
Test("i32 simple struct deref",
"""
#version 310 es
precision mediump float;
precision mediump int;
struct simple {
int a, b;
};
uniform simple in_simple;
out highp int color;
void main()
{
color = in_simple.a / in_simple.b;
}
""",
r'\(expression +int16_t +/'),
Test("u32 simple struct deref",
"""
#version 310 es
precision mediump float;
precision mediump int;
struct simple {
uint a, b;
};
uniform simple in_simple;
out highp uint color;
void main()
{
color = in_simple.a / in_simple.b;
}
""",
r'\(expression +uint16_t +/'),
Test("f32 embedded struct deref",
"""
precision mediump float;
struct simple {
float a, b;
};
struct embedded {
simple a, b;
};
uniform embedded in_embedded;
void main()
{
gl_FragColor = vec4(in_embedded.a.a / in_embedded.b.b);
}
""",
r'\(expression +float16_t +/'),
Test("i32 embedded struct deref",
"""
#version 310 es
precision mediump float;
precision mediump int;
struct simple {
int a, b;
};
struct embedded {
simple a, b;
};
uniform embedded in_embedded;
out highp int color;
void main()
{
color = in_embedded.a.a / in_embedded.b.b;
}
""",
r'\(expression +int16_t +/'),
Test("u32 embedded struct deref",
"""
#version 310 es
precision mediump float;
precision mediump int;
struct simple {
uint a, b;
};
struct embedded {
simple a, b;
};
uniform embedded in_embedded;
out highp uint color;
void main()
{
color = in_embedded.a.a / in_embedded.b.b;
}
""",
r'\(expression +uint16_t +/'),
Test("f32 arrayed struct deref",
"""
precision mediump float;
struct simple {
float a, b;
};
struct arrayed {
simple a[2];
};
uniform arrayed in_arrayed;
void main()
{
gl_FragColor = vec4(in_arrayed.a[0].a / in_arrayed.a[1].b);
}
""",
r'\(expression +float16_t +/'),
Test("i32 arrayed struct deref",
"""
#version 310 es
precision mediump float;
precision mediump int;
struct simple {
int a, b;
};
struct arrayed {
simple a[2];
};
uniform arrayed in_arrayed;
out highp int color;
void main()
{
color = in_arrayed.a[0].a / in_arrayed.a[1].b;
}
""",
r'\(expression +int16_t +/'),
Test("u32 arrayed struct deref",
"""
#version 310 es
precision mediump float;
precision mediump int;
struct simple {
uint a, b;
};
struct arrayed {
simple a[2];
};
uniform arrayed in_arrayed;
out highp uint color;
void main()
{
color = in_arrayed.a[0].a / in_arrayed.a[1].b;
}
""",
r'\(expression +uint16_t +/'),
Test("f32 mixed precision not lowered",
"""
uniform mediump float a;
uniform highp float b;
void main()
{
gl_FragColor = vec4(a / b);
}
""",
r'\(expression +float +/'),
Test("i32 mixed precision not lowered",
"""
#version 310 es
uniform mediump int a;
uniform highp int b;
out mediump int color;
void main()
{
color = a / b;
}
""",
r'\(expression +int +/'),
Test("u32 mixed precision not lowered",
"""
#version 310 es
uniform mediump uint a;
uniform highp uint b;
out mediump uint color;
void main()
{
color = a / b;
}
""",
r'\(expression +uint +/'),
Test("f32 sampler array",
"""
#version 320 es
precision mediump float;
precision mediump int;
uniform sampler2D tex[2];
// highp shouldn't affect the return value of texture2D
uniform highp vec2 coord;
uniform float divisor;
uniform int index;
out highp vec4 color;
void main()
{
color = texture2D(tex[index], coord) / divisor;
}
""",
r'\(expression +f16vec4 +/.*\(tex +f16vec4 +'),
Test("f32 texture sample",
"""
precision mediump float;
uniform sampler2D tex;
// highp shouldn't affect the return value of texture2D
uniform highp vec2 coord;
uniform float divisor;
void main()
{
gl_FragColor = texture2D(tex, coord) / divisor;
}
""",
r'\(expression +f16vec4 +/.*\(tex +f16vec4 +'),
Test("i32 texture sample",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform mediump isampler2D tex;
// highp shouldn't affect the return value of texture
uniform highp vec2 coord;
uniform int divisor;
out highp ivec4 color;
void main()
{
color = texture(tex, coord) / divisor;
}
""",
r'\(expression +i16vec4 +/.*\(tex +i16vec4 +'),
Test("u32 texture sample",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform mediump usampler2D tex;
// highp shouldn't affect the return value of texture
uniform highp vec2 coord;
uniform uint divisor;
out highp uvec4 color;
void main()
{
color = texture(tex, coord) / divisor;
}
""",
r'\(expression +u16vec4 +/.*\(tex +u16vec4 +'),
Test("f32 image array",
"""
#version 320 es
precision mediump float;
layout(rgba16f) readonly uniform mediump image2D img[2];
// highp shouldn't affect the return value of imageLoad
uniform highp ivec2 coord;
uniform float divisor;
out highp vec4 color;
void main()
{
color = imageLoad(img[1], coord) / divisor;
}
""",
r'\(expression +f16vec4 +/'),
Test("f32 image load",
"""
#version 310 es
precision mediump float;
precision mediump int;
layout(rgba16f) readonly uniform mediump image2D img;
// highp shouldn't affect the return value of imageLoad
uniform highp ivec2 coord;
uniform float divisor;
out highp vec4 color;
void main()
{
color = imageLoad(img, coord) / divisor;
}
""",
r'\(expression +f16vec4 +/'),
Test("i32 image load",
"""
#version 310 es
precision mediump float;
precision mediump int;
layout(rgba16i) readonly uniform mediump iimage2D img;
// highp shouldn't affect the return value of imageLoad
uniform highp ivec2 coord;
uniform int divisor;
out highp ivec4 color;
void main()
{
color = imageLoad(img, coord) / divisor;
}
""",
r'\(expression +i16vec4 +/'),
Test("u32 image load",
"""
#version 310 es
precision mediump float;
precision mediump int;
layout(rgba16ui) readonly uniform mediump uimage2D img;
// highp shouldn't affect the return value of imageLoad
uniform highp ivec2 coord;
uniform uint divisor;
out highp uvec4 color;
void main()
{
color = imageLoad(img, coord) / divisor;
}
""",
r'\(expression +u16vec4 +/'),
Test("f32 expression in lvalue",
"""
uniform mediump float a, b;
void main()
{
gl_FragColor = vec4(1.0);
gl_FragColor[int(a / b)] = 0.5;
}
""",
r'\(expression +float16_t +/'),
Test("i32 expression in lvalue",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform mediump int a, b;
out vec4 color;
void main()
{
color = vec4(1.0);
color[a / b] = 0.5;
}
""",
r'\(expression +int16_t +/'),
Test("f32 builtin with const arg",
"""
uniform mediump float a;
void main()
{
gl_FragColor = vec4(min(a, 3.0));
}
""",
r'\(expression +float16_t min'),
Test("i32 builtin with const arg",
"""
#version 310 es
uniform mediump int a;
out highp int color;
void main()
{
color = min(a, 3);
}
""",
r'\(expression +int16_t min'),
Test("u32 builtin with const arg",
"""
#version 310 es
uniform mediump uint a;
out highp uint color;
void main()
{
color = min(a, 3u);
}
""",
r'\(expression +uint16_t min'),
Test("dFdx",
"""
#version 300 es
precision mediump float;
in vec4 var;
out vec4 color;
void main()
{
color = dFdx(var);
}
""",
r'\(expression +f16vec4 +dFdx +\(expression +f16vec4'),
Test("dFdy",
"""
#version 300 es
precision mediump float;
in vec4 var;
out vec4 color;
void main()
{
color = dFdy(var);
}
""",
r'\(expression +f16vec4 +dFdy +\(expression +f16vec4'),
Test("textureSize",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform mediump sampler2D tex;
out ivec2 color;
void main()
{
color = textureSize(tex, 0) * ivec2(2);
}
""",
r'expression ivec2 \* \(txs ivec2 \(var_ref tex'),
Test("floatBitsToInt",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform float val;
out int color;
void main()
{
color = floatBitsToInt(val + 1.0) + 1;
}
""",
r'expression int bitcast_f2i \(expression float'),
Test("floatBitsToUint",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform float val;
out uint color;
void main()
{
color = floatBitsToUint(val + 1.0) + 1u;
}
""",
r'expression uint bitcast_f2u \(expression float'),
Test("intBitsToFloat",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform int val;
out float color;
void main()
{
color = intBitsToFloat(val + 1) + 1.0;
}
""",
r'expression float bitcast_i2f \(expression int'),
Test("uintBitsToFloat",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint val;
out float color;
void main()
{
color = uintBitsToFloat(val + 1u) + 1.0;
}
""",
r'expression float bitcast_u2f \(expression uint'),
Test("bitfieldReverse",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform int val;
out int color;
void main()
{
color = bitfieldReverse(val + 1) + 1;
}
""",
r'expression int bitfield_reverse \(expression int'),
Test("frexp",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform float val;
out float color;
out int color2;
void main()
{
int y;
float x = frexp(val + 1.0, y);
color = x + 1.0;
color2 = y + 1;
}
""",
r'assign \(x\) \(var_ref x@2\) \(expression float f162f'),
Test("ldexp",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform float val;
uniform int exp;
out float color;
void main()
{
color = ldexp(val + 1.0, exp + 1) + 1.0;
}
""",
r'expression float ldexp \(expression float'),
Test("uaddCarry",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint x, y;
out uint color;
void main()
{
lowp uint carry;
color = uaddCarry(x * 2u, y * 2u, carry) * 2u;
color *= carry;
}
""",
r'expression uint \+ \(var_ref x\) \(var_ref y'),
Test("usubBorrow",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint x, y;
out uint color;
void main()
{
lowp uint borrow;
color = usubBorrow(x * 2u, y * 2u, borrow) * 2u;
color *= borrow;
}
""",
r'expression uint \- \(var_ref x\) \(var_ref y'),
Test("imulExtended",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform int x, y;
out int color;
void main()
{
int msb, lsb;
imulExtended(x + 2, y + 2, msb, lsb);
color = msb + lsb;
}
""",
r'expression int64_t \* \(expression int'),
Test("umulExtended",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint x, y;
out uint color;
void main()
{
uint msb, lsb;
umulExtended(x + 2u, y + 2u, msb, lsb);
color = msb + lsb;
}
""",
r'expression uint64_t \* \(expression uint'),
Test("unpackUnorm2x16",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint val;
out vec2 color;
void main()
{
color = unpackUnorm2x16(val + 1u) + vec2(1.0);
}
""",
r'expression vec2 unpackUnorm2x16 \(expression uint'),
Test("unpackSnorm2x16",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint val;
out vec2 color;
void main()
{
color = unpackSnorm2x16(val + 1u) + vec2(1.0);
}
""",
r'expression vec2 unpackSnorm2x16 \(expression uint'),
Test("packUnorm2x16",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform vec2 val;
out uint color;
void main()
{
color = packUnorm2x16(val + vec2(1.0)) + 1u;
}
""",
r'expression uint packUnorm2x16 \(expression vec2'),
Test("packSnorm2x16",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform vec2 val;
out uint color;
void main()
{
color = packSnorm2x16(val + vec2(1.0)) + 1u;
}
""",
r'expression uint packSnorm2x16 \(expression vec2'),
Test("packHalf2x16",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform vec2 val;
out uint color;
void main()
{
color = packHalf2x16(val + vec2(1.0)) + 1u;
}
""",
r'expression uint packHalf2x16 \(expression vec2'),
Test("packUnorm4x8",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform vec4 val;
out uint color;
void main()
{
color = packUnorm4x8(val + vec4(1.0)) + 1u;
}
""",
r'expression uint packUnorm4x8 \(expression vec4'),
Test("packSnorm4x8",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform vec4 val;
out uint color;
void main()
{
color = packSnorm4x8(val + vec4(1.0)) + 1u;
}
""",
r'expression uint packSnorm4x8 \(expression vec4'),
Test("interpolateAtCentroid",
"""
#version 320 es
precision mediump float;
precision mediump int;
in float val;
out float color;
void main()
{
color = interpolateAtCentroid(val) + 1.0;
}
""",
r'expression float16_t interpolate_at_centroid \(expression float16_t'),
Test("interpolateAtOffset",
"""
#version 320 es
precision mediump float;
precision mediump int;
uniform highp vec2 offset;
in float val;
out float color;
void main()
{
color = interpolateAtOffset(val, offset) + 1.0;
}
""",
r'expression float16_t interpolate_at_offset \(expression float16_t'),
Test("interpolateAtSample",
"""
#version 320 es
precision mediump float;
precision mediump int;
uniform highp int sample_index;
in float val;
out float color;
void main()
{
color = interpolateAtSample(val, sample_index) + 1.0;
}
""",
r'expression float16_t interpolate_at_sample \(expression float16_t'),
Test("bitfieldExtract",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform highp int offset, bits;
uniform int val;
out int color;
void main()
{
color = bitfieldExtract(val, offset, bits) + 1;
}
""",
r'expression int16_t bitfield_extract \(expression int16_t'),
Test("bitfieldInsert",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform highp int offset, bits;
uniform int val, val2;
out int color;
void main()
{
color = bitfieldInsert(val, val2, offset, bits) + 1;
}
""",
r'expression int16_t bitfield_insert \(expression int16_t'),
Test("bitCount",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform highp int val;
out int color;
void main()
{
color = bitCount(val) + 1;
}
""",
r'expression int16_t \+ \(expression int16_t i2imp \(expression int bit_count \(var_ref val'),
Test("findLSB",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform highp int val;
out int color;
void main()
{
color = findLSB(val) + 1;
}
""",
r'expression int16_t \+ \(expression int16_t i2imp \(expression int find_lsb \(var_ref val'),
Test("findMSB",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform highp int val;
out int color;
void main()
{
color = findMSB(val) + 1;
}
""",
r'expression int16_t \+ \(expression int16_t i2imp \(expression int find_msb \(var_ref val'),
Test("unpackHalf2x16",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform highp uint val;
out vec2 color;
void main()
{
color = unpackHalf2x16(val) + vec2(1.0);
}
""",
r'expression f16vec2 \+ \(expression f16vec2 f2fmp \(expression vec2 unpackHalf2x16 \(var_ref val'),
Test("unpackUnorm4x8",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform highp uint val;
out vec4 color;
void main()
{
color = unpackUnorm4x8(val) + vec4(1.0);
}
""",
r'expression f16vec4 \+ \(expression f16vec4 f2fmp \(expression vec4 unpackUnorm4x8 \(var_ref val'),
Test("unpackSnorm4x8",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform highp uint val;
out vec4 color;
void main()
{
color = unpackSnorm4x8(val) + vec4(1.0);
}
""",
r'expression f16vec4 \+ \(expression f16vec4 f2fmp \(expression vec4 unpackSnorm4x8 \(var_ref val'),
Test("f32 csel",
"""
#version 300 es
precision mediump float;
in vec4 var;
out vec4 color;
void main()
{
color = (var.x > var.y) ? var : vec4(10.0);
}
""",
r'\(constant +f16vec4 \(10'),
Test("i32 csel",
"""
#version 310 es
precision mediump int;
in flat ivec4 var;
out ivec4 color;
void main()
{
color = (var.x > var.y) ? var : ivec4(10);
}
""",
r'\(constant +i16vec4 \(10'),
Test("u32 csel",
"""
#version 310 es
precision mediump int;
in flat uvec4 var;
out uvec4 color;
void main()
{
color = (var.x > var.y) ? var : uvec4(10);
}
""",
r'\(constant +u16vec4 \(10'),
Test("f32 loop counter",
"""
#version 300 es
precision mediump float;
uniform float n, incr;
out float color;
void main()
{
color = 0.0;
for (float x = 0.0; x < n; x += incr)
color += x;
}
""",
r'\(assign \(x\) \(var_ref x\) \(expression float16_t \+ \(var_ref x\) \(var_ref incr'),
Test("i32 loop counter",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform int n, incr;
out int color;
void main()
{
color = 0;
for (int x = 0; x < n; x += incr)
color += x;
}
""",
r'\(assign \(x\) \(var_ref x\) \(expression int16_t \+ \(var_ref x\) \(expression int16_t i2imp \(var_ref incr'),
Test("u32 loop counter",
"""
#version 310 es
precision mediump float;
precision mediump int;
uniform uint n, incr;
out uint color;
void main()
{
color = 0u;
for (uint x = 0u; x < n; x += incr)
color += x;
}
""",
r'\(assign \(x\) \(var_ref x\) \(expression uint16_t \+ \(var_ref x\) \(expression uint16_t u2ump \(var_ref incr'),
Test("f32 temp array",
"""
#version 300 es
precision mediump float;
uniform float x,y;
out float color;
void main()
{
float a[2] = float[2](x, y);
if (x > 0.0)
a[1] = 3.0;
color = a[0] + a[1];
}
""",
r'\(constant float16_t \(3'),
Test("i32 temp array",
"""
#version 310 es
precision mediump int;
uniform int x,y;
out int color;
void main()
{
int a[2] = int[2](x, y);
if (x > 0)
a[1] = 3;
color = a[0] + a[1];
}
""",
r'\(constant int16_t \(3'),
Test("u32 temp array",
"""
#version 310 es
precision mediump int;
uniform uint x,y;
out uint color;
void main()
{
uint a[2] = uint[2](x, y);
if (x > 0u)
a[1] = 3u;
color = a[0] + a[1];
}
""",
r'\(constant uint16_t \(3'),
Test("f32 temp array of array",
"""
#version 310 es
precision mediump float;
uniform float x,y;
out float color;
void main()
{
float a[2][2] = float[2][2](float[2](x, y), float[2](x, y));
if (x > 0.0)
a[1][1] = 3.0;
color = a[0][0] + a[1][1];
}
""",
r'\(constant float16_t \(3'),
Test("i32 temp array of array",
"""
#version 310 es
precision mediump int;
uniform int x,y;
out int color;
void main()
{
int a[2][2] = int[2][2](int[2](x, y), int[2](x, y));
if (x > 0)
a[1][1] = 3;
color = a[0][0] + a[1][1];
}
""",
r'\(constant int16_t \(3'),
Test("u32 temp array of array",
"""
#version 310 es
precision mediump int;
uniform uint x,y;
out uint color;
void main()
{
uint a[2][2] = uint[2][2](uint[2](x, y), uint[2](x, y));
if (x > 0u)
a[1][1] = 3u;
color = a[0][0] + a[1][1];
}
""",
r'\(constant uint16_t \(3'),
Test("f32 temp array of array assigned from highp",
"""
#version 310 es
precision mediump float;
uniform float x,y;
out float color;
void main()
{
highp float b[2][2] = float[2][2](float[2](x, y), float[2](x, y));
float a[2][2];
a = b;
if (x > 0.0)
a[1][1] = 3.0;
color = a[0][0] + a[1][1];
}
""",
r'\(constant float16_t \(3'),
Test("i32 temp array of array assigned from highp",
"""
#version 310 es
precision mediump int;
uniform int x,y;
out int color;
void main()
{
highp int b[2][2] = int[2][2](int[2](x, y), int[2](x, y));
int a[2][2];
a = b;
if (x > 0)
a[1][1] = 3;
color = a[0][0] + a[1][1];
}
""",
r'\(constant int16_t \(3'),
Test("u32 temp array of array assigned from highp",
"""
#version 310 es
precision mediump int;
uniform uint x,y;
out uint color;
void main()
{
highp uint b[2][2] = uint[2][2](uint[2](x, y), uint[2](x, y));
uint a[2][2];
a = b;
if (x > 0u)
a[1][1] = 3u;
color = a[0][0] + a[1][1];
}
""",
r'\(constant uint16_t \(3'),
Test("f32 temp array of array assigned to highp",
"""
#version 310 es
precision mediump float;
uniform float x,y;
out float color;
void main()
{
float a[2][2] = float[2][2](float[2](x, y), float[2](x, y));
highp float b[2][2];
b = a;
a = b;
if (x > 0.0)
a[1][1] = 3.0;
color = a[0][0] + a[1][1];
}
""",
r'\(constant float16_t \(3'),
Test("i32 temp array of array assigned to highp",
"""
#version 310 es
precision mediump int;
uniform int x,y;
out int color;
void main()
{
int a[2][2] = int[2][2](int[2](x, y), int[2](x, y));
highp int b[2][2];
b = a;
a = b;
if (x > 0)
a[1][1] = 3;
color = a[0][0] + a[1][1];
}
""",
r'\(constant int16_t \(3'),
Test("u32 temp array of array assigned to highp",
"""
#version 310 es
precision mediump int;
uniform uint x,y;
out uint color;
void main()
{
uint a[2][2] = uint[2][2](uint[2](x, y), uint[2](x, y));
highp uint b[2][2];
b = a;
a = b;
if (x > 0u)
a[1][1] = 3u;
color = a[0][0] + a[1][1];
}
""",
r'\(constant uint16_t \(3'),
Test("f32 temp array of array returned by function",
"""
#version 310 es
precision mediump float;
uniform float x,y;
out float color;
float[2][2] f(void)
{
return float[2][2](float[2](x, y), float[2](x, y));
}
void main()
{
float a[2][2] = f();
if (x > 0.0)
a[1][1] = 3.0;
color = a[0][0] + a[1][1];
}
""",
r'\(constant float16_t \(3'),
Test("i32 temp array of array returned by function",
"""
#version 310 es
precision mediump int;
uniform int x,y;
out int color;
int[2][2] f(void)
{
return int[2][2](int[2](x, y), int[2](x, y));
}
void main()
{
int a[2][2] = f();
if (x > 0)
a[1][1] = 3;
color = a[0][0] + a[1][1];
}
""",
r'\(constant int16_t \(3'),
Test("u32 temp array of array returned by function",
"""
#version 310 es
precision mediump int;
uniform uint x,y;
out uint color;
uint[2][2] f(void)
{
return uint[2][2](uint[2](x, y), uint[2](x, y));
}
void main()
{
uint a[2][2] = f();
if (x > 0u)
a[1][1] = 3u;
color = a[0][0] + a[1][1];
}
""",
r'\(constant uint16_t \(3'),
Test("f32 temp array of array as function out",
"""
#version 310 es
precision mediump float;
uniform float x,y;
out float color;
void f(out float[2][2] v)
{
v = float[2][2](float[2](x, y), float[2](x, y));
}
void main()
{
float a[2][2];
f(a);
if (x > 0.0)
a[1][1] = 3.0;
color = a[0][0] + a[1][1];
}
""",
r'\(constant float16_t \(3'),
Test("i32 temp array of array as function out",
"""
#version 310 es
precision mediump int;
uniform int x,y;
out int color;
void f(out int[2][2] v)
{
v = int[2][2](int[2](x, y), int[2](x, y));
}
void main()
{
int a[2][2];
f(a);
if (x > 0)
a[1][1] = 3;
color = a[0][0] + a[1][1];
}
""",
r'\(constant int16_t \(3'),
Test("u32 temp array of array as function out",
"""
#version 310 es
precision mediump int;
uniform uint x,y;
out uint color;
void f(out uint[2][2] v)
{
v = uint[2][2](uint[2](x, y), uint[2](x, y));
}
void main()
{
uint a[2][2];
f(a);
if (x > 0u)
a[1][1] = 3u;
color = a[0][0] + a[1][1];
}
""",
r'\(constant uint16_t \(3'),
Test("f32 temp array of array as function in",
"""
#version 310 es
precision mediump float;
uniform float x,y;
out float color;
float[2][2] f(in float[2][2] v)
{
float t[2][2] = v;
return t;
}
void main()
{
float a[2][2];
a = f(a);
if (x > 0.0)
a[1][1] = 3.0;
color = a[0][0] + a[1][1];
}
""",
r'\(constant float16_t \(3'),
Test("i32 temp array of array as function in",
"""
#version 310 es
precision mediump int;
uniform int x,y;
out int color;
int[2][2] f(in int[2][2] v)
{
int t[2][2] = v;
return t;
}
void main()
{
int a[2][2];
a = f(a);
if (x > 0)
a[1][1] = 3;
color = a[0][0] + a[1][1];
}
""",
r'\(constant int16_t \(3'),
Test("u32 temp array of array as function in",
"""
#version 310 es
precision mediump int;
uniform uint x,y;
out uint color;
uint[2][2] f(in uint[2][2] v)
{
uint t[2][2] = v;
return t;
}
void main()
{
uint a[2][2];
a = f(a);
if (x > 0u)
a[1][1] = 3u;
color = a[0][0] + a[1][1];
}
""",
r'\(constant uint16_t \(3'),
Test("f32 temp array of array as function inout",
"""
#version 310 es
precision mediump float;
uniform float x,y;
out float color;
void f(inout float[2][2] v)
{
float t[2][2] = v;
v = t;
}
void main()
{
float a[2][2];
f(a);
if (x > 0.0)
a[1][1] = 3.0;
color = a[0][0] + a[1][1];
}
""",
r'\(constant float16_t \(3'),
Test("i32 temp array of array as function inout",
"""
#version 310 es
precision mediump int;
uniform int x,y;
out int color;
void f(inout int[2][2] v)
{
int t[2][2] = v;
v = t;
}
void main()
{
int a[2][2];
f(a);
if (x > 0)
a[1][1] = 3;
color = a[0][0] + a[1][1];
}
""",
r'\(constant int16_t \(3'),
Test("u32 temp array of array as function inout",
"""
#version 310 es
precision mediump int;
uniform uint x,y;
out uint color;
void f(inout uint[2][2] v)
{
uint t[2][2] = v;
v = t;
}
void main()
{
uint a[2][2];
f(a);
if (x > 0u)
a[1][1] = 3u;
color = a[0][0] + a[1][1];
}
""",
r'\(constant uint16_t \(3'),
Test("f32 temp struct (not lowered in the presence of control flow - TODO)",
"""
#version 300 es
precision mediump float;
uniform float x,y;
out float color;
void main()
{
struct { float x,y; } s;
s.x = x;
s.y = y;
if (x > 0.0)
s.y = 3.0;
color = s.x + s.y;
}
""",
r'\(constant float \(3'), # should be float16_t
Test("i32 temp struct (not lowered in the presence of control flow - TODO)",
"""
#version 300 es
precision mediump int;
uniform int x,y;
out int color;
void main()
{
struct { int x,y; } s;
s.x = x;
s.y = y;
if (x > 0)
s.y = 3;
color = s.x + s.y;
}
""",
r'\(constant int \(3'), # should be int16_t
Test("u32 temp struct (not lowered in the presence of control flow - TODO)",
"""
#version 300 es
precision mediump int;
uniform uint x,y;
out uint color;
void main()
{
struct { uint x,y; } s;
s.x = x;
s.y = y;
if (x > 0u)
s.y = 3u;
color = s.x + s.y;
}
""",
r'\(constant uint \(3'), # should be uint16_t
]
def compile_shader(standalone_compiler, source):
with tempfile.NamedTemporaryFile(mode='wt', suffix='.frag') as source_file:
print(source, file=source_file)
source_file.flush()
return subprocess.check_output([standalone_compiler,
'--version', '300',
'--lower-precision',
'--dump-lir',
source_file.name],
universal_newlines=True)
def run_test(standalone_compiler, test):
ir = compile_shader(standalone_compiler, test.source)
if re.search(test.match_re, ir) is None:
print(ir)
return False
return True
def main():
standalone_compiler = sys.argv[1]
passed = 0
for test in TESTS:
print('Testing {} ... '.format(test.name), end='')
result = run_test(standalone_compiler, test)
if result:
print('PASS')
passed += 1
else:
print('FAIL')
print('{}/{} tests returned correct results'.format(passed, len(TESTS)))
sys.exit(0 if passed == len(TESTS) else 1)
if __name__ == '__main__':
main()
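# Usage sketch (illustrative; the script takes a single command-line argument, the
# path to the standalone GLSL compiler binary consumed by main() above):
#
#   python this_script.py /path/to/glsl_compiler
#
# Each Test compiles its shader with --lower-precision and --dump-lir, then checks
# the dumped LIR against match_re for the expected 16-bit types.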
| repo: home-assistant/core | path: /homeassistant/components/home_connect/light.py | license: Apache-2.0 (permissive) | Python | 7,796 bytes | light.py |
"""Provides a light for Home Connect."""
import logging
from math import ceil
from typing import Any
from homeconnect.api import HomeConnectError
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ColorMode,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ENTITIES
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
import homeassistant.util.color as color_util
from .const import (
ATTR_VALUE,
BSH_AMBIENT_LIGHT_BRIGHTNESS,
BSH_AMBIENT_LIGHT_COLOR,
BSH_AMBIENT_LIGHT_COLOR_CUSTOM_COLOR,
BSH_AMBIENT_LIGHT_CUSTOM_COLOR,
BSH_AMBIENT_LIGHT_ENABLED,
COOKING_LIGHTING,
COOKING_LIGHTING_BRIGHTNESS,
DOMAIN,
)
from .entity import HomeConnectEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Home Connect light."""
def get_entities():
"""Get a list of entities."""
entities = []
hc_api = hass.data[DOMAIN][config_entry.entry_id]
for device_dict in hc_api.devices:
entity_dicts = device_dict.get(CONF_ENTITIES, {}).get("light", [])
entity_list = [HomeConnectLight(**d) for d in entity_dicts]
entities += entity_list
return entities
async_add_entities(await hass.async_add_executor_job(get_entities), True)
class HomeConnectLight(HomeConnectEntity, LightEntity):
"""Light for Home Connect."""
def __init__(self, device, desc, ambient):
"""Initialize the entity."""
super().__init__(device, desc)
self._state = None
self._brightness = None
self._hs_color = None
self._ambient = ambient
if self._ambient:
self._brightness_key = BSH_AMBIENT_LIGHT_BRIGHTNESS
self._key = BSH_AMBIENT_LIGHT_ENABLED
self._custom_color_key = BSH_AMBIENT_LIGHT_CUSTOM_COLOR
self._color_key = BSH_AMBIENT_LIGHT_COLOR
self._attr_color_mode = ColorMode.HS
self._attr_supported_color_modes = {ColorMode.HS}
else:
self._brightness_key = COOKING_LIGHTING_BRIGHTNESS
self._key = COOKING_LIGHTING
self._custom_color_key = None
self._color_key = None
self._attr_color_mode = ColorMode.BRIGHTNESS
self._attr_supported_color_modes = {ColorMode.BRIGHTNESS}
@property
def is_on(self):
"""Return true if the light is on."""
return bool(self._state)
@property
def brightness(self):
"""Return the brightness of the light."""
return self._brightness
@property
def hs_color(self):
"""Return the color property."""
return self._hs_color
async def async_turn_on(self, **kwargs: Any) -> None:
"""Switch the light on, change brightness, change color."""
if self._ambient:
_LOGGER.debug("Switching ambient light on for: %s", self.name)
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting, self._key, True
)
except HomeConnectError as err:
_LOGGER.error("Error while trying to turn on ambient light: %s", err)
return
if ATTR_BRIGHTNESS in kwargs or ATTR_HS_COLOR in kwargs:
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting,
self._color_key,
BSH_AMBIENT_LIGHT_COLOR_CUSTOM_COLOR,
)
except HomeConnectError as err:
_LOGGER.error("Error while trying selecting customcolor: %s", err)
if self._brightness is not None:
brightness = 10 + ceil(self._brightness / 255 * 90)
if ATTR_BRIGHTNESS in kwargs:
brightness = 10 + ceil(kwargs[ATTR_BRIGHTNESS] / 255 * 90)
hs_color = kwargs.get(ATTR_HS_COLOR, self._hs_color)
if hs_color is not None:
rgb = color_util.color_hsv_to_RGB(
hs_color[0], hs_color[1], brightness
)
hex_val = color_util.color_rgb_to_hex(rgb[0], rgb[1], rgb[2])
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting,
self._custom_color_key,
f"#{hex_val}",
)
except HomeConnectError as err:
_LOGGER.error(
"Error while trying setting the color: %s", err
)
elif ATTR_BRIGHTNESS in kwargs:
_LOGGER.debug("Changing brightness for: %s", self.name)
brightness = 10 + ceil(kwargs[ATTR_BRIGHTNESS] / 255 * 90)
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting, self._brightness_key, brightness
)
except HomeConnectError as err:
_LOGGER.error("Error while trying set the brightness: %s", err)
else:
_LOGGER.debug("Switching light on for: %s", self.name)
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting, self._key, True
)
except HomeConnectError as err:
_LOGGER.error("Error while trying to turn on light: %s", err)
self.async_entity_update()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Switch the light off."""
_LOGGER.debug("Switching light off for: %s", self.name)
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting, self._key, False
)
except HomeConnectError as err:
_LOGGER.error("Error while trying to turn off light: %s", err)
self.async_entity_update()
async def async_update(self) -> None:
"""Update the light's status."""
if self.device.appliance.status.get(self._key, {}).get(ATTR_VALUE) is True:
self._state = True
elif self.device.appliance.status.get(self._key, {}).get(ATTR_VALUE) is False:
self._state = False
else:
self._state = None
_LOGGER.debug("Updated, new light state: %s", self._state)
if self._ambient:
color = self.device.appliance.status.get(self._custom_color_key, {})
if not color:
self._hs_color = None
self._brightness = None
else:
colorvalue = color.get(ATTR_VALUE)[1:]
rgb = color_util.rgb_hex_to_rgb_list(colorvalue)
hsv = color_util.color_RGB_to_hsv(rgb[0], rgb[1], rgb[2])
self._hs_color = [hsv[0], hsv[1]]
self._brightness = ceil((hsv[2] - 10) * 255 / 90)
_LOGGER.debug("Updated, new brightness: %s", self._brightness)
else:
brightness = self.device.appliance.status.get(self._brightness_key, {})
if brightness is None:
self._brightness = None
else:
self._brightness = ceil((brightness.get(ATTR_VALUE) - 10) * 255 / 90)
_LOGGER.debug("Updated, new brightness: %s", self._brightness)
| repo: jart/cosmopolitan | path: /third_party/python/Lib/test/test_poplib.py | licenses: Python-2.0, GPL-1.0-or-later, LicenseRef-scancode-python-cwi, LicenseRef-scancode-free-unknown, LicenseRef-scancode-other-copyleft, ISC (permissive) | Python | 16,978 bytes | test_poplib.py |
"""Test script for poplib module."""
# Modified by Giampaolo Rodola' to give poplib.POP3 and poplib.POP3_SSL
# a real test suite
import poplib
import asyncore
import asynchat
import socket
import os
import errno
from unittest import TestCase, skipUnless
from test import support as test_support
threading = test_support.import_module('threading')
HOST = test_support.HOST
PORT = 0
SUPPORTS_SSL = False
if hasattr(poplib, 'POP3_SSL'):
try:
import ssl
except ImportError:
assert False
SUPPORTS_SSL = True
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "pycacert.pem")
requires_ssl = skipUnless(SUPPORTS_SSL, 'SSL not supported')
# the dummy data returned by server when LIST and RETR commands are issued
LIST_RESP = b'1 1\r\n2 2\r\n3 3\r\n4 4\r\n5 5\r\n.\r\n'
RETR_RESP = b"""From: postmaster@python.org\
\r\nContent-Type: text/plain\r\n\
MIME-Version: 1.0\r\n\
Subject: Dummy\r\n\
\r\n\
line1\r\n\
line2\r\n\
line3\r\n\
.\r\n"""
class DummyPOP3Handler(asynchat.async_chat):
CAPAS = {'UIDL': [], 'IMPLEMENTATION': ['python-testlib-pop-server']}
enable_UTF8 = False
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
self.tls_active = False
self.tls_starting = False
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer)
line = str(line, 'ISO-8859-1')
self.in_buffer = []
cmd = line.split(' ')[0].lower()
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('-ERR unrecognized POP3 command "%s".' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode("ISO-8859-1") + b'\r\n')
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
if arg != "guido":
self.push("-ERR no such user")
self.push('+OK password required')
def cmd_pass(self, arg):
if arg != "python":
self.push("-ERR wrong password")
self.push('+OK 10 messages')
def cmd_stat(self, arg):
self.push('+OK 10 100')
def cmd_list(self, arg):
if arg:
self.push('+OK %s %s' % (arg, arg))
else:
self.push('+OK')
asynchat.async_chat.push(self, LIST_RESP)
cmd_uidl = cmd_list
def cmd_retr(self, arg):
self.push('+OK %s bytes' %len(RETR_RESP))
asynchat.async_chat.push(self, RETR_RESP)
cmd_top = cmd_retr
def cmd_dele(self, arg):
self.push('+OK message marked for deletion.')
def cmd_noop(self, arg):
self.push('+OK done nothing.')
def cmd_rpop(self, arg):
self.push('+OK done nothing.')
def cmd_apop(self, arg):
self.push('+OK done nothing.')
def cmd_quit(self, arg):
self.push('+OK closing.')
self.close_when_done()
def _get_capas(self):
_capas = dict(self.CAPAS)
if not self.tls_active and SUPPORTS_SSL:
_capas['STLS'] = []
return _capas
def cmd_capa(self, arg):
self.push('+OK Capability list follows')
if self._get_capas():
for cap, params in self._get_capas().items():
_ln = [cap]
if params:
_ln.extend(params)
self.push(' '.join(_ln))
self.push('.')
def cmd_utf8(self, arg):
self.push('+OK I know RFC6856'
if self.enable_UTF8
else '-ERR What is UTF8?!')
if SUPPORTS_SSL:
def cmd_stls(self, arg):
if self.tls_active is False:
self.push('+OK Begin TLS negotiation')
context = ssl.SSLContext()
context.load_cert_chain(CERTFILE)
tls_sock = context.wrap_socket(self.socket,
server_side=True,
do_handshake_on_connect=False,
suppress_ragged_eofs=False)
self.del_channel()
self.set_socket(tls_sock)
self.tls_active = True
self.tls_starting = True
self.in_buffer = []
self._do_tls_handshake()
else:
self.push('-ERR Command not permitted when TLS active')
def _do_tls_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self.tls_active = True
self.tls_starting = False
def handle_read(self):
if self.tls_starting:
self._do_tls_handshake()
else:
try:
asynchat.async_chat.handle_read(self)
except ssl.SSLEOFError:
self.handle_close()
class DummyPOP3Server(asyncore.dispatcher, threading.Thread):
handler = DummyPOP3Handler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
try:
while self.active and asyncore.socket_map:
with self.active_lock:
asyncore.loop(timeout=0.1, count=1)
finally:
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestPOP3Class(TestCase):
def assertOK(self, resp):
self.assertTrue(resp.startswith(b"+OK"))
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port, timeout=3)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(),
b'+OK dummy pop3 server ready. <timestamp>')
def test_exceptions(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd, 'echo -err')
def test_user(self):
self.assertOK(self.client.user('guido'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_pass_(self):
self.assertOK(self.client.pass_('python'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_stat(self):
self.assertEqual(self.client.stat(), (10, 100))
def test_list(self):
self.assertEqual(self.client.list()[1:],
([b'1 1', b'2 2', b'3 3', b'4 4', b'5 5'],
25))
self.assertTrue(self.client.list('1').endswith(b"OK 1 1"))
def test_retr(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy',
b'', b'line1', b'line2', b'line3'],
113)
foo = self.client.retr('foo')
self.assertEqual(foo, expected)
def test_too_long_lines(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd,
'echo +%s' % ((poplib._MAXLINE + 10) * 'a'))
def test_dele(self):
self.assertOK(self.client.dele('foo'))
def test_noop(self):
self.assertOK(self.client.noop())
def test_rpop(self):
self.assertOK(self.client.rpop('foo'))
def test_apop_normal(self):
self.assertOK(self.client.apop('foo', 'dummypassword'))
def test_apop_REDOS(self):
# Replace welcome with very long evil welcome.
# NB The upper bound on welcome length is currently 2048.
# At this length, evil input makes each apop call take
# on the order of milliseconds instead of microseconds.
evil_welcome = b'+OK' + (b'<' * 1000000)
with test_support.swap_attr(self.client, 'welcome', evil_welcome):
# The evil welcome is invalid, so apop should throw.
self.assertRaises(poplib.error_proto, self.client.apop, 'a', 'kb')
def test_top(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy', b'',
b'line1', b'line2', b'line3'],
113)
self.assertEqual(self.client.top(1, 1), expected)
def test_uidl(self):
self.client.uidl()
self.client.uidl('foo')
def test_utf8_raises_if_unsupported(self):
self.server.handler.enable_UTF8 = False
self.assertRaises(poplib.error_proto, self.client.utf8)
def test_utf8(self):
self.server.handler.enable_UTF8 = True
expected = b'+OK I know RFC6856'
result = self.client.utf8()
self.assertEqual(result, expected)
def test_capa(self):
capa = self.client.capa()
self.assertTrue('IMPLEMENTATION' in capa.keys())
def test_quit(self):
resp = self.client.quit()
self.assertTrue(resp)
self.assertIsNone(self.client.sock)
self.assertIsNone(self.client.file)
@requires_ssl
def test_stls_capa(self):
capa = self.client.capa()
self.assertTrue('STLS' in capa.keys())
@requires_ssl
def test_stls(self):
expected = b'+OK Begin TLS negotiation'
resp = self.client.stls()
self.assertEqual(resp, expected)
@requires_ssl
def test_stls_context(self):
expected = b'+OK Begin TLS negotiation'
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.load_verify_locations(CAFILE)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = True
with self.assertRaises(ssl.CertificateError):
resp = self.client.stls(context=ctx)
self.client = poplib.POP3("localhost", self.server.port, timeout=3)
resp = self.client.stls(context=ctx)
self.assertEqual(resp, expected)
if SUPPORTS_SSL:
from test.test_ftplib import SSLConnection
class DummyPOP3_SSLHandler(SSLConnection, DummyPOP3Handler):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.secure_connection()
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
self.tls_active = True
self.tls_starting = False
@requires_ssl
class TestPOP3_SSLClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3_SSL
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.handler = DummyPOP3_SSLHandler
self.server.start()
self.client = poplib.POP3_SSL(self.server.host, self.server.port)
def test__all__(self):
self.assertIn('POP3_SSL', poplib.__all__)
def test_context(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, certfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE,
certfile=CERTFILE, context=ctx)
self.client.quit()
self.client = poplib.POP3_SSL(self.server.host, self.server.port,
context=ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.assertIs(self.client.sock.context, ctx)
self.assertTrue(self.client.noop().startswith(b'+OK'))
def test_stls(self):
self.assertRaises(poplib.error_proto, self.client.stls)
test_stls_context = test_stls
def test_stls_capa(self):
capa = self.client.capa()
self.assertFalse('STLS' in capa.keys())
@requires_ssl
class TestPOP3_TLSClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3.stls()
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port, timeout=3)
self.client.stls()
def tearDown(self):
if self.client.file is not None and self.client.sock is not None:
try:
self.client.quit()
except poplib.error_proto:
# happens in the test_too_long_lines case; the overlong
# response will be treated as response to QUIT and raise
# this exception
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
def test_stls(self):
self.assertRaises(poplib.error_proto, self.client.stls)
test_stls_context = test_stls
def test_stls_capa(self):
capa = self.client.capa()
self.assertFalse(b'STLS' in capa.keys())
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(60) # Safety net. Look issue 11812
self.port = test_support.bind_port(self.sock)
self.thread = threading.Thread(target=self.server, args=(self.evt,self.sock))
self.thread.setDaemon(True)
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
# Explicitly clear the attribute to prevent dangling thread
self.thread = None
def server(self, evt, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
conn.send(b"+ Hola mundo\n")
conn.close()
except socket.timeout:
pass
finally:
serv.close()
def testTimeoutDefault(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.close()
def testTimeoutNone(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(pop.sock.gettimeout())
pop.close()
def testTimeoutValue(self):
pop = poplib.POP3(HOST, self.port, timeout=30)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.close()
def test_main():
tests = [TestPOP3Class, TestTimeouts,
TestPOP3_SSLClass, TestPOP3_TLSClass]
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
| repo: onnx/onnx-mxnet | path: /setup.py | license: Apache-2.0 (permissive) | Python | 1,409 bytes | setup.py |
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# pylint: disable=invalid-name, exec-used
"""Setup onnx-mxnet package"""
# To build and upload a new version, follow the steps below.
# Notes:
# - this is a "Universal Wheels" package that is pure Python and supports both Python2 and Python3
# - Twine is a secure PyPi upload package
# $ pip install twine
# $ pip install wheel
# $ python setup.py bdist_wheel --universal
# $ twine upload dist/*
from setuptools import setup, find_packages
pkgs = find_packages()
setup(
name='onnx-mxnet',
version='0.4.2',
description='ONNX-MXNet Model converter',
url='https://github.com/onnx/onnx-mxnet',
keywords='ONNX MXNet model converter deep learning',
packages=pkgs,
install_requires=['mxnet>=0.11.0', 'onnx>=1.0.1'],
tests_require=['pytest', 'pylint'],
include_package_data=True,
license='Apache 2.0'
)
| repo: richardkiss/pycoin | path: /pycoin/symbols/btdx.py | license: MIT (permissive) | Python | 455 bytes | btdx.py |
from pycoin.networks.bitcoinish import create_bitcoinish_network
network = create_bitcoinish_network(
symbol="BTDX", network_name="Bitcloud", subnet_name="mainnet",
wif_prefix_hex="99", sec_prefix="BTDXSEC:", address_prefix_hex="19", pay_to_script_prefix_hex="05",
bip32_prv_prefix_hex="0488ADE4", bip32_pub_prefix_hex="0488B21E",
magic_header_hex="E4E8BDFD", default_port=8329,
dns_bootstrap=[
"seed.bitcloud.network"
])
| repo: geekcomputers/Python | path: /oneeven.py | licenses: LicenseRef-scancode-unknown, GPL-1.0-or-later, MIT (permissive) | Python | 233 bytes | oneeven.py |
# Python Program to Print Even Numbers from 1 to N
maximum = int(input(" Please Enter the Maximum Value : "))
number = 1
while number <= maximum:
if number % 2 == 0:
print("{0}".format(number))
number = number + 1
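# Example run: entering 6 at the prompt prints 2, 4 and 6, one number per line.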
| repo: smicallef/spiderfoot | path: /test/conftest.py | licenses: Python-2.0, MIT (permissive) | Python | 1,738 bytes | conftest.py |
import pytest
from spiderfoot import SpiderFootHelpers
@pytest.fixture(autouse=True)
def default_options(request):
request.cls.default_options = {
'_debug': False,
'__logging': True, # Logging in general
'__outputfilter': None, # Event types to filter from modules' output
'_useragent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0', # User-Agent to use for HTTP requests
'_dnsserver': '', # Override the default resolver
'_fetchtimeout': 5, # number of seconds before giving up on a fetch
'_internettlds': 'https://publicsuffix.org/list/effective_tld_names.dat',
'_internettlds_cache': 72,
'_genericusers': ",".join(SpiderFootHelpers.usernamesFromWordlists(['generic-usernames'])),
'__database': f"{SpiderFootHelpers.dataPath()}/spiderfoot.test.db", # note: test database file
'__modules__': None, # List of modules. Will be set after start-up.
'__correlationrules__': None, # List of correlation rules. Will be set after start-up.
'_socks1type': '',
'_socks2addr': '',
'_socks3port': '',
'_socks4user': '',
'_socks5pwd': '',
'__logstdout': False
}
request.cls.web_default_options = {
'root': '/'
}
request.cls.cli_default_options = {
"cli.debug": False,
"cli.silent": False,
"cli.color": True,
"cli.output": "pretty",
"cli.history": True,
"cli.history_file": "",
"cli.spool": False,
"cli.spool_file": "",
"cli.ssl_verify": True,
"cli.username": "",
"cli.password": "",
"cli.server_baseurl": "http://127.0.0.1:5001"
}
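# Usage sketch (illustrative names; the autouse fixture above attaches the dicts to
# request.cls, so class-based tests in this suite can read them as plain attributes):
#
#   class TestExample:
#       def test_fetch_timeout_default(self):
#           assert self.default_options["_fetchtimeout"] == 5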
| repo: MolecularAI/Reinvent | path: /running_modes/reinforcement_learning/logging/console_message.py | licenses: LicenseRef-scancode-proprietary-license, Apache-2.0 (permissive) | Python | 2,333 bytes | console_message.py |
import time
from reinvent_scoring.scoring.score_summary import FinalSummary
from reinvent_chemistry.logging import fraction_valid_smiles
class ConsoleMessage:
def create(self, start_time, n_steps, step, smiles,
mean_score, score_summary: FinalSummary, score,
agent_likelihood, prior_likelihood, augmented_likelihood):
time_message = self._time_progress(start_time, n_steps, step, smiles, mean_score)
score_message = self._score_profile(score_summary.scored_smiles, agent_likelihood, prior_likelihood,
augmented_likelihood, score)
score_breakdown = self._score_summary_breakdown(score_summary)
message = time_message + score_message + score_breakdown
return message
def _time_progress(self, start_time, n_steps, step, smiles, mean_score):
time_elapsed = int(time.time() - start_time)
time_left = (time_elapsed * ((n_steps - step) / (step + 1)))
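# (simple linear extrapolation: average time per completed step times the steps left)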
valid_fraction = fraction_valid_smiles(smiles)
message = (f"\n Step {step} Fraction valid SMILES: {valid_fraction:4.1f} Score: {mean_score:.4f} "
f"Time elapsed: {time_elapsed} "
f"Time left: {time_left:.1f}\n")
return message
def _score_profile(self, smiles, agent_likelihood, prior_likelihood, augmented_likelihood, score):
# Convert to numpy arrays so that we can print them
augmented_likelihood = augmented_likelihood.data.cpu().numpy()
agent_likelihood = agent_likelihood.data.cpu().numpy()
message = " ".join([" Agent", "Prior", "Target", "Score"] + ["SMILES\n"])
for i in range(min(10, len(smiles))):
message += f'{agent_likelihood[i]:6.2f} {prior_likelihood[i]:6.2f} ' \
f'{augmented_likelihood[i]:6.2f} {score[i]:6.2f} '
message += f" {smiles[i]}\n"
return message
def _score_summary_breakdown(self, score_summary: FinalSummary):
message = " ".join([c.name for c in score_summary.profile])
message += "\n"
for i in range(min(10, len(score_summary.scored_smiles))):
for summary in score_summary.profile:
message += f"{summary.score[i]} "
message += "\n"
return message
| repo: walkccc/LeetCode | path: /solutions/2337. Move Pieces to Obtain a String/2337.py | license: MIT (permissive) | Python | 555 bytes | 2337.py |
class Solution:
def canChange(self, start: str, target: str) -> bool:
n = len(start)
i = 0 # start's index
j = 0 # target's index
while i <= n and j <= n:
while i < n and start[i] == '_':
i += 1
while j < n and target[j] == '_':
j += 1
if i == n or j == n:
return i == n and j == n
if start[i] != target[j]:
return False
if start[i] == 'R' and i > j:
return False
if start[i] == 'L' and i < j:
return False
i += 1
j += 1
return True
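# Illustrative trace: Solution().canChange("_L__R__R_", "L______RR") returns True,
# because the 'L' at index 1 only needs to move left and both 'R's only need to move
# right; the two-pointer scan above checks exactly these constraints (letters match
# in order, an 'L' is never forced right, an 'R' is never forced left).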
| repo: vgrem/Office365-REST-Python-Client | path: /examples/insights/list_used.py | license: MIT (permissive) | Python | 455 bytes | list_used.py |
"""
Calculate and list the documents that a user has viewed or modified.
https://learn.microsoft.com/en-us/graph/api/insights-list-used?view=graph-rest-1.0&tabs=http
"""
import json
from office365.graph_client import GraphClient
from tests.graph_case import acquire_token_by_username_password
client = GraphClient(acquire_token_by_username_password)
result = client.me.insights.used.get().execute_query()
print(json.dumps(result.to_json(), indent=4))
| repo: decile-team/cords | path: /cords/selectionstrategies/SL/weightedrandomexplorationstrategy.py | license: MIT (permissive) | Python | 4,894 bytes | weightedrandomexplorationstrategy.py |
import numpy as np
import torch, time
import pickle
from torch.nn import Softmax
import math
def pickle2dict(file_name, key):
"""
Load dictionary from pickle file
"""
with open(file_name, "rb") as fIn:
stored_data = pickle.load(fIn)
value = stored_data[key]
return value
def taylor_softmax_v1(x, dim=1, n=2, use_log=False):
assert n % 2 == 0 and n > 0
fn = torch.ones_like(x)
denor = 1.
for i in range(1, n + 1):
denor *= i
fn = fn + x.pow(i) / denor
out = fn / fn.sum(dim=dim, keepdims=True)
if use_log: out = out.log()
return out
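# Illustrative check (values rounded): with the default n=2 the function uses
# 1 + x + x**2/2 in place of exp(x), so
#   taylor_softmax_v1(torch.tensor([[0.0, 1.0]]))  # ~ tensor([[0.2857, 0.7143]])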
class WeightedRandomExplorationStrategy(object):
"""
Implementation of the Weighted Random Exploration Strategy class defined in the paper :footcite:`killamsetty2023milo`, where we select a set of points based on a global ordering of the dataset.
Global Ordering has to be provided in prior for selection. We provide a way to compute global ordering for text and image datasets
using various submodular functions as a util function.
Parameters
----------
trainloader: class
Loading the training data using pytorch DataLoader
"""
def __init__(self, trainloader, global_order_file, online=False, temperature=1, per_class=False):
"""
Constructor method
"""
self.trainloader = trainloader
self.N_trn = len(trainloader.sampler.data_source)
self.online = online
self.indices = None
self.gammas = None
globalorder = pickle2dict(global_order_file, 'globalorder')
self.global_idxs = np.array([x[0] for x in globalorder])
self.global_gains = np.array([x[1] for x in globalorder])
self.global_gains = self.global_gains - self.global_gains.min()
self.global_gains = np.maximum(self.global_gains, 1e-10)
self.temperature = temperature
self.probs = taylor_softmax_v1(torch.from_numpy(np.array([self.global_gains])/self.temperature)).numpy()[0]
self.cluster_idxs = pickle2dict(global_order_file, 'cluster_idxs')
self.per_class = per_class
self.num_classes = len(list(self.cluster_idxs.keys()))
#self.probs = softmax(torch.from_numpy(np.array([self.global_gains])/self.temperature)).numpy()[0]
def select(self, budget):
"""
Samples subset of size budget from the generated probability distribution.
Parameters
----------
budget: int
The number of data points to be selected
Returns
----------
indices: ndarray
Array of indices of size budget selected randomly
gammas: Tensor
Gradient weight values of selected indices
"""
if self.per_class:
per_cls_cnt = [len(self.cluster_idxs[key]) for key in self.cluster_idxs.keys()]
min_cls_cnt = min(per_cls_cnt)
total_sample_cnt = sum(per_cls_cnt)
if min_cls_cnt < math.ceil(budget/self.num_classes):
per_cls_budget = [min_cls_cnt]*self.num_classes
while sum(per_cls_budget) < budget:
for cls in range(self.num_classes):
if per_cls_budget[cls] < per_cls_cnt[cls]:
per_cls_budget[cls] += 1
else:
per_cls_budget = [math.ceil(budget/self.num_classes) for _ in per_cls_cnt]
cluster_labels = list(self.cluster_idxs.keys())
if self.online:
self.indices = []
for i in range(len(cluster_labels)):
per_cls_idxs = self.cluster_idxs[cluster_labels[i]]
rng = np.random.default_rng(int(time.time()))
sel_idxs = rng.choice(per_cls_idxs, size=per_cls_budget[i], replace=False, p=self.probs[per_cls_idxs]/self.probs[per_cls_idxs].sum())
sel_idxs = [int(x) for x in sel_idxs]
self.indices.extend(sel_idxs)
elif self.indices is None:
self.indices = []
for i in range(len(cluster_labels)):
per_cls_idxs = self.cluster_idxs[cluster_labels[i]]
sel_idxs = per_cls_idxs[:per_cls_budget[i]]
# sel_idxs = [x.item() for x in sel_idxs]
self.indices.extend(sel_idxs)
else:
if self.online:
rng = np.random.default_rng(int(time.time()))
self.indices = rng.choice(self.global_idxs, size=budget, replace=False, p=self.probs)
self.indices = [int(x) for x in self.indices]
#self.gammas = torch.ones(budget)
elif self.indices is None:
self.indices = self.global_idxs[:budget]
self.gammas = torch.ones(len(self.indices))
return self.indices, self.gammas
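# Usage sketch (file name and budget are illustrative; the pickle is expected to hold
# 'globalorder' as (index, gain) pairs and 'cluster_idxs' as a class-to-indices dict,
# matching the loading code in __init__ above):
#
#   strategy = WeightedRandomExplorationStrategy(trainloader, "global_order.pkl",
#                                                online=True, temperature=1)
#   subset_indices, gammas = strategy.select(budget=1000)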
| repo: projectacrn/acrn-hypervisor | path: /misc/config_tools/scenario_config/upgrader.py | license: BSD-3-Clause (permissive) | Python | 47,353 bytes | upgrader.py |
#!/usr/bin/env python3
#
# Copyright (C) 2022 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import argparse, logging
import re
from functools import lru_cache, partialmethod
from collections import defaultdict, namedtuple
import lxml.etree as etree
from scenario_transformer import ScenarioTransformer
from pipeline import PipelineObject, PipelineStage, PipelineEngine
from lxml_loader import LXMLLoadStage
from schema_slicer import SlicingSchemaByVMTypeStage
class VirtualUartConnections:
class VirtualUartEndpoint:
# The BDF of PCI virtual UARTs starts from 00:10.0
next_dev = defaultdict(lambda: 16)
@classmethod
def from_endpoint_definition(cls, element):
# For v2.x style scenario XML, the name of the VM is a child of a `vm` node.
vm_name = element.xpath("ancestor::vm/name/text()").pop()
if "legacy" in element.tag:
base = element.find("base").text
io_port = \
"0x3F8" if base.endswith("COM1_BASE") else \
"0x2F8" if base.endswith("COM2_BASE") else \
"0x3E8" if base.endswith("COM3_BASE") else \
"0x2E8" if base.endswith("COM4_BASE") else \
base
return cls(vm_name, io_port = io_port)
else:
dev = cls.next_dev[vm_name]
cls.next_dev[vm_name] += 1
return cls(vm_name, pci_bdf = f"00:{dev:02x}.0")
def __init__(self, vm_name, io_port = None, pci_bdf = None):
self.vm_name = vm_name
self.io_port = io_port
self.pci_bdf = pci_bdf
class VirtualUartConnection:
next_id = 1
@classmethod
def from_connection_definition(cls, element):
name = element.find("name").text
ty = element.find("type").text
conn = cls(name = name, ty = ty)
for endpoint in element.findall("endpoint"):
vm_name_node = endpoint.find("vm_name")
vm_name = vm_name_node.text if vm_name_node is not None else ""
io_port_node = endpoint.find("io_port")
io_port = io_port_node.text if io_port_node is not None else None
vbdf_node = endpoint.find("vbdf")
vbdf = vbdf_node.text if vbdf_node is not None else None
conn.add_endpoint(VirtualUartConnections.VirtualUartEndpoint(vm_name, io_port, vbdf))
return conn
def __init__(self, name = None, ty = "legacy"):
if name:
self.name = name
else:
self.name = f"vUART connection {self.next_id}"
self.__class__.next_id += 1
self.ty = ty
self.endpoints = []
def add_endpoint(self, endpoint):
self.endpoints.append(endpoint)
def __init__(self):
self.conns = [] # List of connections
self.dangling_conns = {} # (vm_id, vuart_id) -> conn whose target is the key
def add_endpoint(self, element):
"""Parse the vUART endpoint definition in ACRN v2.x. Returns True if and only if the element is parsed properly."""
try:
key = (element.xpath("ancestor::vm/@id").pop(), element.xpath("@id").pop())
if key in self.dangling_conns.keys():
conn = self.dangling_conns.pop(key)
conn.add_endpoint(self.VirtualUartEndpoint.from_endpoint_definition(element))
self.conns.append(conn)
else:
ty = "legacy" if "legacy" in element.tag else "pci"
conn = self.VirtualUartConnection(ty = ty)
conn.add_endpoint(self.VirtualUartEndpoint.from_endpoint_definition(element))
self.dangling_conns[(element.xpath("target_vm_id/text()").pop(), element.xpath("target_uart_id/text()").pop())] = conn
return True
except Exception as e:
# Skip vUART endpoint definition not satisfying the schema. The discarded-data warnings will report those
# unmigrated data.
logging.debug(e)
return False
def add_connection(self, element):
"""Parse the vUART connection definition in ACRN v3.x"""
self.conns.append(self.VirtualUartConnection.from_connection_definition(element))
return True
def format_xml_elements(self, xsd_element_node):
new_parent_node = etree.Element(xsd_element_node.get("name"))
for conn in self.conns:
new_node = etree.Element("vuart_connection")
etree.SubElement(new_node, "name").text = conn.name
etree.SubElement(new_node, "type").text = conn.ty
for endpoint in conn.endpoints:
new_endpoint_node = etree.SubElement(new_node, "endpoint")
etree.SubElement(new_endpoint_node, "vm_name").text = endpoint.vm_name
if endpoint.io_port:
etree.SubElement(new_endpoint_node, "io_port").text = endpoint.io_port
if endpoint.pci_bdf:
etree.SubElement(new_endpoint_node, "vbdf").text = endpoint.pci_bdf
new_parent_node.append(new_node)
return [new_parent_node]
class SharedMemoryRegions:
class SharedMemoryRegion(namedtuple("SharedMemoryRegion", ["provided_by", "name", "size", "shared_vms"])):
# The BDF of IVSHMEM PCI functions starts from 00:08.0
next_dev = defaultdict(lambda: 8)
nr_regions = 0
@classmethod
def from_encoding(cls, text, old_xml_etree):
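# Illustrative v2.x encoding (assumed format, inferred from the parsing below):
# "hv:/shm_region_0, 2, 0:2" -> provided_by "Hypervisor", name "shm_region_0",
# size "2", shared between the VMs with ids 0 and 2.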
provided_by = "Device Model" if text.startswith("dm:/") else "Hypervisor"
parts = [p.strip() for p in text[text.find("/") + 1 :].split(",")]
name = parts[0]
size = parts[1]
shared_vm_ids = parts[2].split(":")
shared_vms = []
for vm_id in shared_vm_ids:
vm_name_node = old_xml_etree.xpath(f"//vm[@id='{vm_id}']/name")
if not vm_name_node:
logging.warning(f"VM {vm_id}, which is referred by shared memory region {name}, is not defined in the scenario.")
continue
vm_name = vm_name_node[0].text
dev = cls.next_dev[vm_name]
cls.next_dev[vm_name] += 1
shared_vms.append((vm_name, f"00:{dev:02x}.0"))
return cls(provided_by, name, size, shared_vms)
@classmethod
def from_launch_xml_node(cls, node):
text = node.text
provided_by = "Device Model"
parts = [p.strip() for p in text[text.find("/") + 1 :].split(",")]
name = parts[0]
size = parts[1]
shared_vms = []
vm_name = node.xpath("ancestor::user_vm/vm_name/text()")
if vm_name:
vm_name = vm_name[0]
dev = cls.next_dev[vm_name]
cls.next_dev[vm_name] += 1
shared_vms.append((vm_name, f"00:{dev:02x}.0"))
return cls(provided_by, name, size, shared_vms)
@classmethod
def from_xml_node(cls, node):
cls.nr_regions += 1
name = node.get("name") if "name" in node.keys() else \
node.find("NAME").text if node.find("NAME") is not None else \
f"shared_memory_region_{nr_regions}"
provided_by = node.find("PROVIDED_BY").text if node.find("PROVIDED_BY") is not None else "Hypervisor"
size = node.find("IVSHMEM_SIZE").text
shared_vms = []
for shared_vm_node in node.find("IVSHMEM_VMS"):
vm_name = shared_vm_node.find("VM_NAME").text
vbdf = shared_vm_node.find("VBDF").text
shared_vms.append((vm_name, vbdf))
return cls(provided_by, name, size, shared_vms)
def extend(self, region):
self.shared_vms.extend(region.shared_vms)
def format_xml_element(self):
node = etree.Element("IVSHMEM_REGION")
etree.SubElement(node, "NAME").text = self.name
etree.SubElement(node, "PROVIDED_BY").text = self.provided_by
etree.SubElement(node, "IVSHMEM_SIZE").text = self.size
vms_node = etree.SubElement(node, "IVSHMEM_VMS")
for vm_name, vbdf in self.shared_vms:
vm_node = etree.SubElement(vms_node, "IVSHMEM_VM")
etree.SubElement(vm_node, "VM_NAME").text = vm_name
etree.SubElement(vm_node, "VBDF").text = vbdf
return node
def __init__(self, old_xml_etree):
self.old_xml_etree = old_xml_etree
self.regions = {}
def add_ivshmem_region(self, ivshmem_region_node):
"""Parse IVSHMEM_REGION nodes in either v2.x and v3.x format."""
if len(ivshmem_region_node) == 0:
if ivshmem_region_node.tag == "IVSHMEM_REGION" and ivshmem_region_node.text is not None:
# ACRN v2.x scenario XML format
region = self.SharedMemoryRegion.from_encoding(ivshmem_region_node.text, self.old_xml_etree)
self.regions[region.name] = region
elif ivshmem_region_node.tag == "shm_region":
# ACRN v2.x launch XML format
if ivshmem_region_node.text:
region = self.SharedMemoryRegion.from_launch_xml_node(ivshmem_region_node)
if region.name in self.regions.keys():
self.regions[region.name].extend(region)
else:
self.regions[region.name] = region
else:
# ACRN v3.x format
region = self.SharedMemoryRegion.from_xml_node(ivshmem_region_node)
self.regions[region.name] = region
def format_xml_element(self):
node = etree.Element("IVSHMEM")
for region in self.regions.values():
node.append(region.format_xml_element())
return node
class VirtioDevices(object):
def __init__(self, old_xml_etree):
self.gpu = []
self.blocks = []
self.inputs = []
self.networks = []
self.consoles = []
def console_encoding(self, console):
if console.text is not None:
use_type = "Virtio console" if console.text.startswith("@") else "Virtio serial port"
backend_type = console.text.split(":")[0].replace("@", "")
file_path = console.text.split("=")[1].split(":")[0] if "=" in console.text else None
else:
use_type = console.xpath("./use_type")[0].text if console.xpath("./use_type") else None
backend_type = console.xpath("./backend_type")[0].text if console.xpath("./backend_type") else None
file_path = console.xpath("./file_path")[0].text if console.xpath("./file_path") else None
self.consoles.append((use_type, backend_type, file_path))
def gpu_encoding(self, gpu):
if gpu.text is not None:
window_regex = re.compile(f"geometry=([0-9]+x[0-9]+)\+([0-9]+)\+([0-9]+)")
m = window_regex.match(gpu.text)
if m is not None:
self.gpu.append(("Window", m.group(1), m.group(2), m.group(3)))
else:
self.gpu.append(("Full screen", gpu.text.split(':')[1]))
else:
display_type = gpu.xpath("./display_type")[0].text
for display in gpu.xpath("./displays/display"):
if display_type == "Window":
window_resolutions = display.xpath("./window_resolutions")[0].text if display.xpath("./window_resolutions") else None
horizontal_offset = display.xpath("./horizontal_offset")[0].text if display.xpath("./horizontal_offset") else None
vertical_offset = display.xpath("./vertical_offset")[0].text if display.xpath("./vertical_offset") else None
self.gpu.append((display_type, window_resolutions, horizontal_offset, vertical_offset))
elif display_type == "Full screen":
monitor_id = display.xpath("./monitor_id")[0].text if display.xpath("./monitor_id") else None
self.gpu.append((display_type, monitor_id))
def format_console_element(self, console):
node = etree.Element("console")
if console[0] is not None:
etree.SubElement(node, "use_type").text = console[0]
if console[1] is not None:
etree.SubElement(node, "backend_type").text = console[1]
if console[1] == "socket":
etree.SubElement(node, "sock_file_path").text = console[2]
if console[1] == "tty":
etree.SubElement(node, "tty_device_path").text = console[2]
if console[1] == "file":
etree.SubElement(node, "output_file_path").text = console[2]
return node
def format_network_element(self, network):
node = etree.Element("network")
if network[0] is not None:
etree.SubElement(node, "virtio_framework").text = network[0]
if network[1] is not None:
etree.SubElement(node, "interface_name").text = network[1]
return node
def format_input_element(self, input):
node = etree.Element("input")
if input[0] is not None:
etree.SubElement(node, "backend_device_file").text = input[0]
if input[1] is not None:
etree.SubElement(node, "id").text = input[1]
return node
def format_block_element(self, block):
node = etree.Element("block")
node.text = block
return node
def format_gpu_element(self, displays):
node = etree.Element("gpu")
if len(displays) > 0:
etree.SubElement(node, "display_type").text = displays[0][0]
displays_node = etree.SubElement(node, "displays")
for display in displays:
if display[0] == "Window":
display_node = etree.SubElement(displays_node, "display")
etree.SubElement(display_node, "window_resolutions").text = display[1]
etree.SubElement(display_node, "horizontal_offset").text = display[2]
etree.SubElement(display_node, "vertical_offset").text = display[3]
elif display[0] == "Full screen":
display_node = etree.SubElement(displays_node, "display")
etree.SubElement(display_node, "monitor_id").text = display[1]
return node
def format_xml_element(self):
node = etree.Element("virtio_devices")
for console in self.consoles:
node.append(self.format_console_element(console))
for network in self.networks:
node.append(self.format_network_element(network))
for input in self.inputs:
node.append(self.format_input_element(input))
for block in self.blocks:
node.append(self.format_block_element(block))
node.append(self.format_gpu_element(self.gpu))
return node
def add_virtio_devices(self, virtio_device_node):
if virtio_device_node.xpath("./network")[0].text is not None:
for network in virtio_device_node.xpath("./network"):
self.networks.append((None, network.text))
else:
for network in virtio_device_node.xpath("./network"):
virtio_framework = network.xpath("./virtio_framework")[0].text if network.xpath("./virtio_framework") else None
interface_name = network.xpath("./interface_name")[0].text if network.xpath("./interface_name") else None
self.networks.append((virtio_framework, interface_name))
if len(virtio_device_node.xpath("./input")) > 0:
if virtio_device_node.xpath("./input")[0].text is not None:
for input in virtio_device_node.xpath("./input"):
self.inputs.append((None, input.text))
else:
for input in virtio_device_node.xpath("./input"):
backend_device_file = input.xpath("./backend_device_file")[0].text if input.xpath("./backend_device_file") else None
id = input.xpath("./id")[0].text if input.xpath("./id") else None
self.inputs.append((backend_device_file, id))
for console in virtio_device_node.xpath("./console"):
self.console_encoding(console)
for block in virtio_device_node.xpath("./block"):
self.blocks.append(block.text)
for gpu in virtio_device_node.xpath("./gpu"):
self.gpu_encoding(gpu)
class ScenarioUpgrader(ScenarioTransformer):
@classmethod
def get_node(cls, element, xpath):
return next(iter(element.xpath(xpath, namespaces=cls.xpath_ns)), None)
def __init__(self, xsd_etree, old_xml_etree, old_launch_etree = None):
super().__init__(xsd_etree, visit_optional_node=True)
self.old_xml_etree = old_xml_etree
self.old_launch_etree = old_launch_etree
if old_launch_etree is not None:
service_vm_id = old_xml_etree.xpath("//vm[.//load_order = 'SERVICE_VM' or .//vm_type = 'SERVICE_VM']/@id")
if not service_vm_id:
self.old_launch_etree = None
else:
self.service_vm_id = int(service_vm_id[0])
# Collect all nodes in old_xml_etree which will be used to track data not moved
self.old_data_nodes = set()
for node in old_xml_etree.iter():
if node.text:
self.old_data_nodes.add(node)
if self.old_launch_etree is not None:
for node in self.old_launch_etree.iter():
if node.text:
self.old_data_nodes.add(node)
self.hv_vm_node_map = {}
def get_from_old_data(self, new_parent_node, xpath):
hv_vm_node = new_parent_node
if hv_vm_node.tag not in ["vm", "hv"]:
hv_vm_node = next(new_parent_node.iterancestors(["vm", "hv"]), None)
old_hv_vm_node = self.hv_vm_node_map[hv_vm_node]
old_data_node = old_hv_vm_node.xpath(xpath)
return old_data_node
def get_from_old_launch_data(self, new_parent_node, xpath):
if self.old_launch_etree is None:
return []
vm_node = new_parent_node
if vm_node.tag != "vm":
vm_node = next(new_parent_node.iterancestors("vm"), None)
if vm_node is None:
return []
old_vm_node = self.hv_vm_node_map[vm_node]
user_vm_id = int(old_vm_node.get("id")) - self.service_vm_id
user_vm_node = self.old_launch_etree.xpath(f"//user_vm[@id = '{user_vm_id}']")
old_data_node = user_vm_node[0].xpath(xpath) if user_vm_node else []
return old_data_node
def move_build_type(self, xsd_element_node, xml_parent_node, new_nodes):
old_data_node = self.get_node(self.old_xml_etree, f"//hv//RELEASE")
if old_data_node is not None:
new_node = etree.Element(xsd_element_node.get("name"))
new_node.text = "release" if old_data_node.text == "y" else "debug"
new_nodes.append(new_node)
self.old_data_nodes.discard(old_data_node)
else:
self.move_data_by_xpath(".//BUILD_TYPE", xsd_element_node, xml_parent_node, new_nodes)
return False
def move_virtio_devices(self, xsd_element_node, xml_parent_node, new_nodes):
virtio = VirtioDevices(self.old_xml_etree)
try:
old_data_virtio = self.get_from_old_data(xml_parent_node, ".//virtio_devices").pop()
except IndexError as e:
logging.debug(e)
return
virtio.add_virtio_devices(old_data_virtio)
for child in old_data_virtio.iter():
self.old_data_nodes.discard(child)
new_nodes.append(virtio.format_xml_element())
return False
def move_memory(self, xsd_element_node, xml_parent_node, new_nodes):
new_node = etree.Element(xsd_element_node.get("name"))
memory_node = self.hv_vm_node_map[xml_parent_node].xpath("./memory")
old_data_start_hpa = []
old_data_size_hpa = []
old_data_whole = []
if len(memory_node) != 0:
for element in memory_node[0]:
if "start_hpa" in element.tag:
old_data_start_hpa.append(element)
elif "size" in element.tag:
if "0x" in element.text:
element.text = str(int(element.text, 16) // 1024 // 1024)
old_data_size_hpa.append(element)
elif "whole" in element.tag:
old_data_whole.append(element)
elif "hpa_region" in element.tag:
for subelement in element:
if "start_hpa" in subelement.tag:
old_data_start_hpa.append(subelement)
elif "size" in subelement.tag:
old_data_size_hpa.append(subelement)
elif "whole" in subelement.tag:
old_data_whole.append(subelement)
if len(old_data_start_hpa) != 0 and len(old_data_size_hpa) != 0:
for i in range(len(old_data_start_hpa)):
if int(old_data_start_hpa[i].text, 16) != 0 and int(old_data_size_hpa[i].text, 16) != 0:
hpa_region_node = etree.SubElement(new_node, 'hpa_region')
old_data_size_hpa[i].tag = "size_hpa"
hpa_region_node.append(old_data_start_hpa[i])
hpa_region_node.append(old_data_size_hpa[i])
elif len(old_data_whole) != 0 or (len(old_data_start_hpa) == 0 and len(old_data_size_hpa) != 0):
if len(old_data_whole) != 0:
for i in range(len(old_data_whole)):
old_data_whole[i].tag = "size"
new_node.append(old_data_whole[i])
else:
for i in range(len(old_data_size_hpa)):
old_data_size_hpa[i].tag = "size"
new_node.append(old_data_size_hpa[i])
new_nodes.append(new_node)
for n in old_data_start_hpa:
self.old_data_nodes.discard(n)
for n in old_data_size_hpa:
self.old_data_nodes.discard(n)
for n in old_data_whole:
self.old_data_nodes.discard(n)
return False
def move_console_vuart(self, xsd_element_node, xml_parent_node, new_nodes):
new_node = etree.Element(xsd_element_node.get("name"))
new_node.text = "None"
new_nodes.append(new_node)
vm_load_order = next(iter(self.get_from_old_data(xml_parent_node, ".//load_order/text()")), None)
legacy_vuart = self.get_from_old_data(xml_parent_node, ".//legacy_vuart[@id = '0']")
legacy_vuart = legacy_vuart[0] if legacy_vuart else None
console_vuart = self.get_from_old_data(xml_parent_node, ".//console_vuart")
console_vuart = console_vuart[0] if console_vuart else None
launch_console_vuart = self.get_from_old_launch_data(xml_parent_node, ".//console_vuart")
launch_console_vuart = launch_console_vuart[0] if launch_console_vuart else None
if legacy_vuart is None and console_vuart is None and launch_console_vuart is None:
return False
if console_vuart is not None and console_vuart.text:
new_node.text = console_vuart.text
elif legacy_vuart is not None and legacy_vuart.find("type").text == "VUART_LEGACY_PIO":
vuart_base = legacy_vuart.find("base").text
if vuart_base == "CONFIG_COM_BASE":
# The new schema does not support arbitrary configuration of console vUART bases. Report the data as lost.
return False
elif vuart_base.endswith("COM1_BASE"):
new_node.text = "COM Port 1"
elif vuart_base.endswith("COM2_BASE"):
new_node.text = "COM Port 2"
elif vuart_base.endswith("COM3_BASE"):
new_node.text = "COM Port 3"
elif vuart_base.endswith("COM4_BASE"):
new_node.text = "COM Port 4"
if vm_load_order == "SERVICE_VM":
logging.info(f"The console virtual UART of the service VM is moved to {new_node.text}. Please double check the console= command line option in the OS bootargs of the service VM.")
elif console_vuart is not None:
if console_vuart.find("base") == "PCI_VUART":
new_node.text = "PCI"
else:
new_node.text = console_vuart.text
elif launch_console_vuart and launch_console_vuart.text != "Disable":
new_node.text = "PCI"
if legacy_vuart is not None:
for n in legacy_vuart.iter():
self.old_data_nodes.discard(n)
if console_vuart is not None:
for n in console_vuart.iter():
self.old_data_nodes.discard(n)
if launch_console_vuart is not None:
for n in launch_console_vuart.iter():
self.old_data_nodes.discard(n)
return False
def move_vuart_connections(self, xsd_element_node, xml_parent_node, new_nodes):
conns = VirtualUartConnections()
# Fetch vUART endpoints in the old data
vuart_endpoints = self.old_xml_etree.xpath("//legacy_vuart[@id != '0' and base != 'INVALID_COM_BASE'] | //communication_vuart[base != 'INVALID_PCI_BASE']")
vuart_connections = self.old_xml_etree.xpath("//vuart_connection")
for endpoint in vuart_endpoints:
if conns.add_endpoint(endpoint):
for child in endpoint.iter():
self.old_data_nodes.discard(child)
for connection in vuart_connections:
if conns.add_connection(connection):
for child in connection.iter():
self.old_data_nodes.discard(child)
new_nodes.extend(conns.format_xml_elements(xsd_element_node))
# Disconnected endpoints are not migrated, but such nodes are still removed from old_data_nodes to avoid raising
# data-is-discarded warnings.
for n in self.old_xml_etree.xpath("//legacy_vuart[@id != '0' and base = 'INVALID_COM_BASE'] | //communication_vuart[base = 'INVALID_PCI_BASE']"):
for child in n.iter():
self.old_data_nodes.discard(child)
return False
def move_ivshmem(self, xsd_element_node, xml_parent_node, new_nodes):
regions = SharedMemoryRegions(self.old_xml_etree)
for old_region in self.old_xml_etree.xpath("//IVSHMEM_REGION"):
regions.add_ivshmem_region(old_region)
for child in old_region.iter():
self.old_data_nodes.discard(child)
if self.old_launch_etree:
for old_region in self.old_launch_etree.xpath("//shm_region"):
regions.add_ivshmem_region(old_region)
for child in old_region.iter():
self.old_data_nodes.discard(child)
new_nodes.append(regions.format_xml_element())
return False
def move_vm_type(self, xsd_element_node, xml_parent_node, new_nodes):
try:
old_vm_type_node = self.get_from_old_data(xml_parent_node, ".//vm_type").pop()
except IndexError as e:
logging.debug(e)
return
old_guest_flag_nodes = self.get_from_old_data(xml_parent_node, ".//guest_flag[text() = 'GUEST_FLAG_RT']")
old_rtos_type_nodes = self.get_from_old_launch_data(xml_parent_node, ".//rtos_type")
new_node = etree.Element(xsd_element_node.get("name"))
if old_vm_type_node.text in ["PRE_RT_VM", "POST_RT_VM"] or \
old_guest_flag_nodes or \
(old_rtos_type_nodes and old_rtos_type_nodes[0].text in ["Soft RT", "Hard RT"]):
new_node.text = "RTVM"
elif old_vm_type_node.text in ["SAFETY_VM", "PRE_STD_VM", "POST_STD_VM", "SERVICE_VM", "SOS_VM"]:
new_node.text = "STANDARD_VM"
else:
new_node.text = old_vm_type_node.text
new_nodes.append(new_node)
self.old_data_nodes.discard(old_vm_type_node)
for n in old_guest_flag_nodes:
self.old_data_nodes.discard(n)
for n in old_rtos_type_nodes:
self.old_data_nodes.discard(n)
return False
def move_pcpu(self, xsd_element_node, xml_parent_node, new_nodes):
vm_type = self.get_node(xml_parent_node, "parent::vm/vm_type/text()")
pcpus = self.get_from_old_launch_data(xml_parent_node, "cpu_affinity/pcpu_id[text() != '']")
if not pcpus:
pcpus = self.get_from_old_data(xml_parent_node, "cpu_affinity/pcpu_id[text() != '']")
if pcpus:
for n in pcpus:
new_node = etree.Element(xsd_element_node.get("name"))
etree.SubElement(new_node, "pcpu_id").text = n.text
if vm_type == "RTVM":
etree.SubElement(new_node, "real_time_vcpu").text = "y"
new_nodes.append(new_node)
self.old_data_nodes.discard(n)
else:
for n in self.get_from_old_data(xml_parent_node, "cpu_affinity/pcpu"):
new_nodes.append(n)
for child in n.iter():
self.old_data_nodes.discard(child)
return False
def move_os_type(self, xsd_element_node, xml_parent_node, new_nodes):
old_os_type_nodes = self.get_from_old_launch_data(xml_parent_node, ".//user_vm_type")
if old_os_type_nodes:
new_node = etree.Element(xsd_element_node.get("name"))
if old_os_type_nodes[0].text == "WINDOWS":
new_node.text = "Windows OS"
else:
new_node.text = "Non-Windows OS"
new_nodes.append(new_node)
for n in old_os_type_nodes:
self.old_data_nodes.discard(n)
else:
self.move_data_by_same_tag(xsd_element_node, xml_parent_node, new_nodes)
return False
def move_guest_flag(self, guest_flag, xsd_element_node, xml_parent_node, new_nodes):
old_data_nodes = self.get_from_old_data(xml_parent_node, f".//guest_flag[text() = '{guest_flag}']")
if old_data_nodes:
new_node = etree.Element(xsd_element_node.get("name"))
new_node.text = "y"
new_nodes.append(new_node)
for n in old_data_nodes:
self.old_data_nodes.discard(n)
else:
self.move_data_by_same_tag(xsd_element_node, xml_parent_node, new_nodes)
return False
def move_lapic_passthrough(self, xsd_element_node, xml_parent_node, new_nodes):
old_rtos_type_nodes = self.get_from_old_launch_data(xml_parent_node, ".//rtos_type")
if old_rtos_type_nodes and old_rtos_type_nodes[0].text == "Hard RT":
new_node = etree.Element(xsd_element_node.get("name"))
new_node.text = "y"
new_nodes.append(new_node)
# The rtos_type node will be consumed by the vm_type mover
else:
self.move_guest_flag("GUEST_FLAG_LAPIC_PASSTHROUGH", xsd_element_node, xml_parent_node, new_nodes)
return False
def move_enablement(self, xpath, xsd_element_node, xml_parent_node, new_nodes, values_as_enabled = ["Enable"], values_as_disabled = ["Disable"]):
ret = self.move_data_by_xpath(xpath, xsd_element_node, xml_parent_node, new_nodes)
for n in new_nodes:
if n.text in values_as_enabled:
n.text = "y"
elif n.text in values_as_disabled:
n.text = "n"
return ret
def move_hierarchy(self, xsd_element_node, xml_parent_node, new_nodes):
element_tag = xsd_element_node.get("name")
for n in self.get_from_old_data(xml_parent_node, f"//{element_tag}"):
new_nodes.append(n)
for child in n.iter():
self.old_data_nodes.discard(child)
def move_data_by_xpath(self, xpath, xsd_element_node, xml_parent_node, new_nodes, scenario_xml_only = False, launch_xml_only = False):
element_tag = xsd_element_node.get("name")
old_data_nodes = []
if not launch_xml_only:
old_data_nodes = self.get_from_old_data(xml_parent_node, xpath)
if not scenario_xml_only and not old_data_nodes and self.old_launch_etree is not None:
old_data_nodes = self.get_from_old_launch_data(xml_parent_node, xpath)
if self.complex_type_of_element(xsd_element_node) is None:
max_occurs_raw = xsd_element_node.get("maxOccurs")
# Use `len(old_data_nodes)` to ensure that all old data nodes are moved if an unbounded number of
# occurrences is allowed.
max_occurs = \
len(old_data_nodes) if max_occurs_raw == "unbounded" else \
1 if max_occurs_raw is None else \
int(max_occurs_raw)
if len(old_data_nodes) <= max_occurs:
for n in old_data_nodes:
new_node = etree.Element(element_tag)
new_node.text = n.text
for k, v in n.items():
if k in ["id", "name"]:
new_node.set(k, v)
new_nodes.append(new_node)
self.old_data_nodes.discard(n)
return False
else:
# For each complex type containing multiple configuration items, this method creates at most one
# node, as there is no way for the default data movers to migrate multiple pieces of data of the same
# type to the new XML.
if old_data_nodes:
if old_data_nodes[0].tag == "usb_xhci":
old_data_nodes[0].attrib.clear()
new_node = etree.Element(element_tag)
for k, v in old_data_nodes[0].items():
new_node.set(k, v)
new_nodes.append(new_node)
return True
def move_data_by_same_tag(self, xsd_element_node, xml_parent_node, new_nodes):
element_tag = xsd_element_node.get("name")
return self.move_data_by_xpath(f".//{element_tag}", xsd_element_node, xml_parent_node, new_nodes)
def rename_data(self, old_xpath, new_xpath, xsd_element_node, xml_parent_node, new_nodes):
ret = self.move_data_by_xpath(old_xpath, xsd_element_node, xml_parent_node, new_nodes)
if not new_nodes:
ret = self.move_data_by_xpath(new_xpath, xsd_element_node, xml_parent_node, new_nodes)
return ret
def move_data_from_either_xml(self, scenario_xpath, launch_xpath, xsd_element_node, xml_parent_node, new_nodes):
# When moving data from either XML file, data in the launch XML takes precedence.
ret = self.move_data_by_xpath(launch_xpath, xsd_element_node, xml_parent_node, new_nodes, launch_xml_only = True)
if not new_nodes:
ret = self.move_data_by_xpath(scenario_xpath, xsd_element_node, xml_parent_node, new_nodes, scenario_xml_only = True)
else:
self.move_data_by_xpath(scenario_xpath, xsd_element_node, xml_parent_node, list(), scenario_xml_only = True)
return ret
def move_data_from_both_xmls(self, scenario_xpath, launch_xpath, xsd_element_node, xml_parent_node, new_nodes):
ret_scenario = self.move_data_by_xpath(scenario_xpath, xsd_element_node, xml_parent_node, new_nodes, scenario_xml_only = True)
ret_launch = self.move_data_by_xpath(launch_xpath, xsd_element_node, xml_parent_node, new_nodes, launch_xml_only = True)
return ret_scenario or ret_launch
def create_node_if(self, scenario_xpath, launch_xpath, xsd_element_node, xml_parent_node, new_nodes):
if self.get_from_old_data(xml_parent_node, scenario_xpath) or \
self.get_from_old_launch_data(xml_parent_node, launch_xpath):
new_node = etree.Element(xsd_element_node.get("name"))
new_nodes.append(new_node)
return True
return False
def move_data_null(self, xsd_element_node, xml_parent_node, new_nodes):
return False
data_movers = {
"vm/name": partialmethod(move_data_from_either_xml, "name", "vm_name"),
"pcpu": move_pcpu,
"pcpu_id": partialmethod(move_data_from_either_xml, "cpu_affinity/pcpu_id[text() != '']", "cpu_affinity/pcpu_id[text() != '']"),
"pci_dev": partialmethod(move_data_from_both_xmls, ".//pci_devs/pci_dev[text()]", "passthrough_devices/*[text()] | sriov/*[text()]"),
"PTM": partialmethod(move_data_from_either_xml, ".//PTM", "enable_ptm"),
# Configuration items with the same name but under different parents
"os_config/name": partialmethod(move_data_by_xpath, ".//os_config/name"),
"epc_section/base": partialmethod(move_data_by_xpath, ".//epc_section/base"),
"console_vuart/base": partialmethod(move_data_by_xpath, ".//console_vuart/base"),
"epc_section/size": partialmethod(move_data_by_xpath, ".//epc_section/size"),
"memory/size": partialmethod(move_data_by_xpath, ".//memory/size"),
# Guest flags
"lapic_passthrough": move_lapic_passthrough,
"io_completion_polling": partialmethod(move_guest_flag, "GUEST_FLAG_IO_COMPLETION_POLLING"),
"nested_virtualization_support": partialmethod(move_guest_flag, "GUEST_FLAG_NVMX_ENABLED"),
"virtual_cat_support": partialmethod(move_guest_flag, "GUEST_FLAG_VCAT_ENABLED"),
"secure_world_support": partialmethod(move_guest_flag, "GUEST_FLAG_SECURITY_VM"),
"hide_mtrr_support": partialmethod(move_guest_flag, "GUEST_FLAG_HIDE_MTRR"),
"security_vm": partialmethod(move_guest_flag, "GUEST_FLAG_SECURITY_VM"),
# Feature enabling or disabling
"vuart0": partialmethod(move_enablement, ".//vuart0"),
"vbootloader": partialmethod(move_enablement, ".//vbootloader", values_as_enabled = ["ovmf", "Enable"], values_as_disabled = ["no", "Disable"]),
"MCE_ON_PSC_ENABLED": partialmethod(move_enablement, ".//MCE_ON_PSC_DISABLED", values_as_enabled = ["n"], values_as_disabled = ["y"]),
"SPLIT_LOCK_DETECTION_ENABLED": partialmethod(move_enablement, ".//ENFORCE_TURNOFF_AC", values_as_enabled = ["n"], values_as_disabled = ["y"]),
"UC_LOCK_DETECTION_ENABLED": partialmethod(move_enablement, ".//ENFORCE_TURNOFF_GP", values_as_enabled = ["n"], values_as_disabled = ["y"]),
# Intermediate nodes
"pci_devs": partialmethod(create_node_if, ".//pci_devs", ".//passthrough_devices/*[text() != ''] | .//sriov/*[text() != '']"),
"BUILD_TYPE": move_build_type,
"RELOC_ENABLED": partialmethod(rename_data, "FEATURES/RELOC", "FEATURES/RELOC_ENABLED"),
"MULTIBOOT2_ENABLED": partialmethod(rename_data, "FEATURES/MULTIBOOT2", "FEATURES/MULTIBOOT2_ENABLED"),
"console_vuart": move_console_vuart,
"vuart_connections": move_vuart_connections,
"IVSHMEM": move_ivshmem,
"vm_type": move_vm_type,
"os_type": move_os_type,
"virtio_devices": move_virtio_devices,
"memory": move_memory,
"CACHE_REGION": move_hierarchy,
"default": move_data_by_same_tag,
}
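# Dispatch note: add_missing_nodes() below looks up "<parent tag>/<element tag>" (e.g. "vm/name")
# first, then the bare element tag (e.g. "memory"), and finally falls back to the "default" mover;
# that is why ambiguous tags such as "name" or "base" are listed above together with their parents.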
def add_missing_nodes(self, xsd_element_node, xml_parent_node, xml_anchor_node):
new_nodes = []
def call_mover(mover):
if isinstance(mover, list):
ret = False
for fn in mover:
ret = call_mover(fn)
return ret
elif isinstance(mover, partialmethod):
return mover.__get__(self, type(self))(xsd_element_node, xml_parent_node, new_nodes)
else:
return mover(self, xsd_element_node, xml_parent_node, new_nodes)
# Common names (such as 'name' or 'base') may be used as tags in multiple places, each of which has a different
# meaning. In such cases it is ambiguous to query old data by that common tag alone.
element_tag = xsd_element_node.get("name")
element_tag_with_parent = f"{xml_parent_node.tag}/{element_tag}"
mover_key = \
element_tag_with_parent if element_tag_with_parent in self.data_movers.keys() else \
element_tag if element_tag in self.data_movers.keys() else \
"default"
visit_children = call_mover(self.data_movers[mover_key])
if xml_anchor_node is not None:
for n in new_nodes:
xml_anchor_node.addprevious(n)
else:
xml_parent_node.extend(new_nodes)
if visit_children:
return new_nodes
else:
return []
@property
@lru_cache
def upgraded_etree(self):
new_xml_etree = etree.ElementTree(etree.Element(self.old_xml_etree.getroot().tag))
root_node = new_xml_etree.getroot()
# Migrate the HV and VM nodes, which are needed to kick off a thorough traversal of the existing scenario.
for old_node in self.old_xml_etree.getroot():
new_node = etree.Element(old_node.tag)
if old_node.tag == "vm":
# FIXME: Here we still hard code how the load order of a VM is specified in different versions of
# schemas. While it is not subject to frequent changes, it would be better to use a more generic
# approach instead.
load_order_node = etree.SubElement(new_node, "load_order")
# Historically there have been two ways of specifying the load order of a VM: either by vm_type or by
# load_order.
vm_type = old_node.xpath(".//vm_type/text()")
old_load_order_node = old_node.xpath(".//load_order")
if old_load_order_node:
load_order_node.text = old_load_order_node[0].text
self.old_data_nodes.discard(old_load_order_node[0])
elif vm_type:
if vm_type[0].startswith("PRE_") or vm_type[0] in ["SAFETY_VM"]:
load_order_node.text = "PRE_LAUNCHED_VM"
elif vm_type[0].startswith("POST_"):
load_order_node.text = "POST_LAUNCHED_VM"
else:
load_order_node.text = "SERVICE_VM"
else:
logging.error(f"Cannot infer the loader order of VM {self.old_xml_etree.getelementpath(old_node)}")
continue
root_node.append(new_node)
for k, v in old_node.items():
new_node.set(k, v)
self.hv_vm_node_map[new_node] = old_node
# Now fill in the rest of the configuration items using the old data
self.transform(new_xml_etree)
return new_xml_etree
class UpgradingScenarioStage(PipelineStage):
uses = {"schema_etree", "scenario_etree"}
provides = {"scenario_etree"}
def __init__(self, has_launch_xml = False):
self.has_launch_xml = has_launch_xml
if has_launch_xml:
self.uses.add("launch_etree")
class DiscardedDataFilter(namedtuple("DiscardedDataFilter", ["path", "data", "info"])):
def filter(self, path, data):
simp_path = re.sub(r"\[[^\]]*\]", "", path)
if not simp_path.endswith(self.path):
return False
if self.data and data != self.data:
return False
if self.info:
logging.info(f"{path} = '{data}': {self.info}")
return True
filters = [
DiscardedDataFilter("hv/FEATURES/IVSHMEM", None, "IVSHMEM is now automatically enabled if any IVSHMEM region is specified."),
DiscardedDataFilter("hv/FEATURES/NVMX_ENABLED", None, "Nest virtualization support is now automatically included if enabled for any VM."),
DiscardedDataFilter("hv/CAPACITIES/IOMMU_BUS_NUM", None, "The maximum bus number to be supported by ACRN IOMMU configuration is now inferred from board data."),
DiscardedDataFilter("hv/MISC_CFG/UEFI_OS_LOADER_NAME", None, None),
DiscardedDataFilter("vm/guest_flags/guest_flag", "0", None),
DiscardedDataFilter("vm/clos/vcpu_clos", None, "clos nodes are no longer needed in scenario definitions."),
DiscardedDataFilter("vm/epc_section/base", "0", "Post-launched VMs cannot have EPC sections."),
DiscardedDataFilter("vm/epc_section/size", "0", "Post-launched VMs cannot have EPC sections."),
DiscardedDataFilter("vm/os_config/name", None, "Guest OS names are no longer needed in scenario definitions."),
]
def run(self, obj):
if self.has_launch_xml:
upgrader = ScenarioUpgrader(obj.get("schema_etree"), obj.get("scenario_etree"), obj.get("launch_etree"))
else:
upgrader = ScenarioUpgrader(obj.get("schema_etree"), obj.get("scenario_etree"))
new_scenario_etree = upgrader.upgraded_etree
discarded_data = [(n.getroottree().getelementpath(n), n.text) for n in upgrader.old_data_nodes]
for path, data in sorted(discarded_data):
if not any(map(lambda x: x.filter(path, data), self.filters)):
escaped_data = data.replace("\n", "\\n")
logging.warning(f"{path} = '{escaped_data}' is discarded")
obj.set("scenario_etree", new_scenario_etree)
def main(args):
if args.launch:
pipeline = PipelineEngine(["schema_path", "scenario_path", "launch_path"])
pipeline.add_stages([
LXMLLoadStage("schema"),
LXMLLoadStage("scenario"),
LXMLLoadStage("launch"),
SlicingSchemaByVMTypeStage(),
UpgradingScenarioStage(has_launch_xml=True),
])
else:
pipeline = PipelineEngine(["schema_path", "scenario_path"])
pipeline.add_stages([
LXMLLoadStage("schema"),
LXMLLoadStage("scenario"),
SlicingSchemaByVMTypeStage(),
UpgradingScenarioStage(),
])
obj = PipelineObject(schema_path = args.schema, scenario_path = args.scenario, launch_path=args.launch)
pipeline.run(obj)
# We know we are using lxml to parse the scenario XML, so it is OK to use lxml-specific write options here.
obj.get("scenario_etree").write(args.out, pretty_print=True)
if __name__ == "__main__":
config_tools_dir = os.path.join(os.path.dirname(__file__), "..")
schema_dir = os.path.join(config_tools_dir, "schema")
parser = argparse.ArgumentParser(description="Try adapting data in a scenario XML to the latest schema.")
parser.add_argument("scenario", help="Path to the scenario XML file from users")
parser.add_argument("out", nargs="?", default="out.xml", help="Path where the output is placed")
parser.add_argument("--schema", default=os.path.join(schema_dir, "config.xsd"), help="the XML schema that defines the syntax of scenario XMLs")
parser.add_argument("--launch", default=None, help="Path to the launch XML file")
args = parser.parse_args()
logging.basicConfig(level="INFO")
main(args)
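# Typical invocation (the script and file names below are illustrative):
#
#   python3 upgrader.py my_scenario.xml upgraded_scenario.xml --launch my_launch.xml
#
# When --launch is omitted, only the scenario XML is migrated; the output path
# defaults to out.xml if not given.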
|
e6c1facd802bfb0181a17b0a71104123ac50e64b
|
9a2dfbb97f375ed497e5d033344641a04a2710d9
|
/sdk/python/touca/__init__.py
|
e1fc11ae549a72c155099d9afc09480773e66d57
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
trytouca/trytouca
|
50eca4164635c293a891dcdd2e1836cb60627274
|
f69d7314dd861cd25a85d02739af5d01b4846916
|
refs/heads/main
| 2023-06-07T05:52:25.159264
| 2023-03-30T21:20:05
| 2023-03-30T21:20:05
| 468,843,509
| 425
| 27
|
Apache-2.0
| 2023-09-14T06:50:11
| 2022-03-11T17:32:38
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 2,668
|
py
|
__init__.py
|
# Copyright 2023 Touca, Inc. Subject to Apache-2.0 License.
"""
Entry-point to the Touca SDK for Python.
You can install this SDK via ``pip install touca`` and import it in your code via::
import touca
Alternatively, you can import individual functions which may be useful in rare
cases if and when you want to call them from production code::
from touca import check
If you are just getting started with Touca, we generally recommend that you
install the SDK as a development-only dependency.
"""
from typing import Any, Callable, Dict, List, Optional, Type
from touca._client import Client
from touca._rules import ComparisonRule, decimal_rule
from touca._runner import Workflow, run, workflow
from touca._transport import __version__
from touca._utils import scoped_timer
def clientmethod(f):
import inspect
f.__doc__ = inspect.getdoc(getattr(Client, f.__name__))
return f
@clientmethod
def configure(**kwargs) -> bool:
return Client.instance().configure(**kwargs)
@clientmethod
def is_configured() -> bool:
return Client.instance().is_configured()
@clientmethod
def configuration_error() -> str:
return Client.instance().configuration_error()
@clientmethod
def declare_testcase(name: str):
Client.instance().declare_testcase(name)
@clientmethod
def forget_testcase(name: str):
Client.instance().forget_testcase(name)
@clientmethod
def check(key: str, value: Any, *, rule: Optional[ComparisonRule] = None):
Client.instance().check(key, value, rule=rule)
@clientmethod
def check_file(key: str, value: Any):
Client.instance().check_file(key, value)
@clientmethod
def assume(key: str, value: Any):
Client.instance().assume(key, value)
@clientmethod
def add_array_element(key: str, value: Any):
Client.instance().add_array_element(key, value)
@clientmethod
def add_hit_count(key: str):
Client.instance().add_hit_count(key)
@clientmethod
def add_metric(key: str, milliseconds: int):
Client.instance().add_metric(key, milliseconds)
@clientmethod
def start_timer(key: str):
Client.instance().start_timer(key)
@clientmethod
def stop_timer(key: str):
Client.instance().stop_timer(key)
@clientmethod
def add_serializer(datatype: Type, serializer: Callable[[Any], Dict]):
Client.instance().add_serializer(datatype, serializer)
@clientmethod
def save_binary(key: str, cases: List[str] = []):
Client.instance().save_binary(key, cases)
@clientmethod
def save_json(key: str, cases: List[str] = []):
Client.instance().save_json(key, cases)
@clientmethod
def post():
Client.instance().post()
@clientmethod
def seal():
Client.instance().seal()
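# A minimal usage sketch based on the functions exported above; the workflow
# name and the checked value are hypothetical:
#
#   import touca
#
#   @touca.workflow
#   def students(username: str):
#       touca.check("username", username)
#
#   if __name__ == "__main__":
#       touca.run()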
|
cd4fe5f316b3a2d28023ff37a9ec649a14d0d0d1
|
b2a0015525eb65d143891c911a30f9f6c30f246a
|
/setup.py
|
dc634f57f72f9dc16a8bf40264a7b1e1b810e2d4
|
[
"MIT"
] |
permissive
|
qhgz2013/anime-face-detector
|
65a381848ff89293af422ed16aaa18ebb1c7101a
|
94d75475a17f48c7636345cd316c2eeae242a58e
|
refs/heads/master
| 2022-03-08T09:33:30.513973
| 2022-02-21T11:48:33
| 2022-02-21T11:48:33
| 143,052,594
| 240
| 37
|
MIT
| 2022-02-21T11:48:34
| 2018-07-31T18:31:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
setup.py
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import sys
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
# Custom build_ext subclass; no additional compiler customization is applied here.
class custom_build_ext(build_ext):
def build_extensions(self):
build_ext.build_extensions(self)
ext_modules = [
Extension(
"nms.cpu_nms",
["nms/cpu_nms.pyx"],
extra_compile_args=["-Wno-cpp", "-Wno-unused-function"] if sys.platform == 'linux' else [],
include_dirs = [numpy_include]
)
]
setup(
name='tf_faster_rcnn',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
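# To build the Cython NMS extension in place (standard distutils/Cython usage):
#
#   python setup.py build_ext --inplace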
|
43bf996ba4d67f087832cdf5f210b5408e77c337
|
bed3ac926beac0f4e0293303d7b2a6031ee476c9
|
/Modules/Filtering/DisplacementField/wrapping/test/itkDisplacementFieldTransformTest.py
|
146f7360d9ab0616d0cb626e18cfe55a33d83307
|
[
"IJG",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"SMLNJ",
"BSD-3-Clause",
"BSD-4.3TAHOE",
"LicenseRef-scancode-free-unknown",
"Spencer-86",
"LicenseRef-scancode-llnl",
"FSFUL",
"Libpng",
"libtiff",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-hdf5",
"MIT",
"NTP",
"LicenseRef-scancode-mit-old-style",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"MPL-2.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
InsightSoftwareConsortium/ITK
|
ed9dbbc5b8b3f7511f007c0fc0eebb3ad37b88eb
|
3eb8fd7cdfbc5ac2d0c2e5e776848a4cbab3d7e1
|
refs/heads/master
| 2023-08-31T17:21:47.754304
| 2023-08-31T00:58:51
| 2023-08-31T14:12:21
| 800,928
| 1,229
| 656
|
Apache-2.0
| 2023-09-14T17:54:00
| 2010-07-27T15:48:04
|
C++
|
UTF-8
|
Python
| false
| false
| 1,934
|
py
|
itkDisplacementFieldTransformTest.py
|
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import itk
itk.auto_progress(2)
ScalarType = itk.F
VectorDimension = 2
VectorType = itk.Vector[ScalarType,VectorDimension]
ImageDimension = 2
ImageType = itk.Image[VectorType, ImageDimension]
image_size = [10, 10]
transform = itk.DisplacementFieldTransform[ScalarType, ImageDimension].New()
# Test setting image of vectors
pixel_value = 5
image = ImageType.New()
image.SetRegions(image_size)
image.Allocate()
image.FillBuffer([pixel_value] * VectorDimension)
transform.SetDisplacementField(image)
# Verify all parameters match expected value
for value in list(transform.GetParameters()):
assert value == pixel_value
# Test setting vector image
pixel_value = 20
vector_image = itk.VectorImage[ScalarType, ImageDimension].New()
vector_image.SetRegions(image_size)
vector_image.SetVectorLength(VectorDimension)
vector_image.Allocate()
pixel_default = itk.VariableLengthVector[ScalarType]()
pixel_default.SetSize(VectorDimension)
pixel_default.Fill(pixel_value)
vector_image.FillBuffer(pixel_default)
transform.SetDisplacementField(vector_image)
# Verify all parameters match expected value
for value in list(transform.GetParameters()):
assert value == pixel_value
|
0397e692e51cbb40c012c333ecc631b31a402321
|
1b0804b390ce6c9bc52359bf894aee29b7545de1
|
/demo/wildcard.py
|
926e43a51ebfb6010f0ac1d2a1f360042e620a9c
|
[
"Apache-2.0"
] |
permissive
|
iogf/crocs
|
b8ebd76311292a19a0e78d85cdcd0164913eb7ba
|
83c9b27d20b26787a9f76b9a34cd19944c295657
|
refs/heads/master
| 2022-07-03T09:45:14.526745
| 2022-01-23T00:06:55
| 2022-01-23T00:06:55
| 96,660,373
| 565
| 29
| null | 2017-08-03T20:37:37
| 2017-07-09T04:11:37
|
Python
|
UTF-8
|
Python
| false
| false
| 82
|
py
|
wildcard.py
|
from crocs.regex import Pattern, X
e = Pattern('a', X(), 'b')
e.test()
e.hits()
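# Note: X() stands for the regex wildcard, so the pattern built here should
# correspond to the regex 'a.b'. test() generates a sample string matching the
# pattern and verifies it; hits() prints example matches. The exact output
# format depends on the crocs version.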
|
9171fe936c20bdbdddbc7aa76086715ad089147e
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/bootcamp/BCBattleResult.py
|
02989fb3a56aa28020c7a9456cd327faed4b052c
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 6,607
|
py
|
BCBattleResult.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/bootcamp/BCBattleResult.py
import BigWorld
from CurrentVehicle import g_currentVehicle
from gui.Scaleform.Waiting import Waiting
from gui.Scaleform.daapi.view.meta.BCBattleResultMeta import BCBattleResultMeta
from gui.Scaleform.genConsts.BOOTCAMP_BATTLE_RESULT_CONSTANTS import BOOTCAMP_BATTLE_RESULT_CONSTANTS as AWARD
from gui.shared import event_bus_handlers, events, EVENT_BUS_SCOPE
from helpers import dependency
from bootcamp.Bootcamp import g_bootcamp, BOOTCAMP_SOUND
from gui.sounds.ambients import BattleResultsEnv
import SoundGroups
from bootcamp.BootCampEvents import g_bootcampEvents
from gui.app_loader import settings as app_settings
from gui import GUI_CTRL_MODE_FLAG as _CTRL_FLAG
from PlayerEvents import g_playerEvents
from skeletons.gui.app_loader import IAppLoader
from skeletons.gui.battle_results import IBattleResultsService
from uilogging.deprecated.bootcamp.loggers import BootcampLogger
from uilogging.deprecated.decorators import loggerTarget, loggerEntry, simpleLog
from uilogging.deprecated.bootcamp.constants import BC_LOG_ACTIONS as DEPRECATED_BC_LOG_ACTIONS, BC_LOG_KEYS, BC_AWARDS_MAP
from uilogging.deprecated.bootcamp.loggers import BootcampUILogger
_SNDID_ACHIEVEMENT = 'result_screen_achievements'
_SNDID_BONUS = 'result_screen_bonus'
_AMBIENT_SOUND = 'bc_result_screen_ambient'
@loggerTarget(logKey=BC_LOG_KEYS.BC_RESULT_SCREEN, loggerCls=BootcampUILogger)
class BCBattleResult(BCBattleResultMeta):
battleResults = dependency.descriptor(IBattleResultsService)
appLoader = dependency.descriptor(IAppLoader)
uiBootcampLogger = BootcampLogger(BC_LOG_KEYS.BC_RESULT_SCREEN)
__sound_env__ = BattleResultsEnv
__metaclass__ = event_bus_handlers.EventBusListener
def __init__(self, ctx=None):
super(BCBattleResult, self).__init__()
if 'arenaUniqueID' not in ctx:
raise UserWarning('Key "arenaUniqueID" is not found in context', ctx)
if not ctx['arenaUniqueID']:
raise UserWarning('Value of "arenaUniqueID" must be greater than 0')
self.__arenaUniqueID = ctx['arenaUniqueID']
self.__hasShowRewards = False
self.__hasBonusInMedals = False
self.__hasBonusInStats = False
self.__awardSounds = []
self.__music = None
return
def onFocusIn(self, alias):
if self.__music is None:
if self.alias == alias:
self.__music = SoundGroups.g_instance.getSound2D(_AMBIENT_SOUND)
self.__music.play()
elif self.alias != alias:
self.__music.stop()
self.__music = None
return
def click(self):
self.destroy()
Waiting.show('exit_battle')
BigWorld.callback(0.5, self.delayedFinish)
@staticmethod
def delayedFinish():
g_bootcampEvents.onResultScreenFinished()
def onVideoButtonClick(self, index):
g_bootcampEvents.onInterludeVideoStarted(index)
for sound in self.__awardSounds:
sound.stop()
@event_bus_handlers.eventBusHandler(events.HideWindowEvent.HIDE_BATTLE_RESULT_WINDOW, EVENT_BUS_SCOPE.LOBBY)
def selectVehicle(self, inventoryId):
g_currentVehicle.selectVehicle(inventoryId)
return g_currentVehicle.invID == inventoryId
@simpleLog(argsIndex=0, resetTime=False, logOnce=True, argMap=BC_AWARDS_MAP, argMapSection=lambda : g_bootcamp.getContext()['lessonNum'])
def onToolTipShow(self, rendererId):
pass
def onAnimationAwardStart(self, id):
if not self.battleResults.areResultsPosted(self.__arenaUniqueID):
return
else:
soundid = _SNDID_ACHIEVEMENT if self.__hasShowRewards else None
if id == AWARD.MEDAlS_LIST and self.__hasBonusInMedals:
soundid = _SNDID_BONUS
elif id == AWARD.STATS_LIST and self.__hasBonusInStats:
soundid = _SNDID_BONUS
if soundid is not None:
sound = SoundGroups.g_instance.getSound2D(soundid)
self.__awardSounds.append(sound)
sound.play()
return
@loggerEntry
def _populate(self):
g_bootcampEvents.onResultScreenFinished += self.__onResultScreenFinished
g_bootcampEvents.onInterludeVideoStarted += self.__onInterludeVideoStarted
g_bootcampEvents.onInterludeVideoFinished += self.__onInterludeVideoFinished
g_playerEvents.onDisconnected += self._onDisconnected
self.__music = SoundGroups.g_instance.getSound2D(_AMBIENT_SOUND)
if self.__music is not None:
self.__music.play()
self.soundManager.playSound(BOOTCAMP_SOUND.NEW_UI_ELEMENT_SOUND)
super(BCBattleResult, self)._populate()
if self.battleResults.areResultsPosted(self.__arenaUniqueID):
self.__setBattleResults()
self.app.as_loadLibrariesS(['guiControlsLobbyBattleDynamic.swf', 'guiControlsLobbyDynamic.swf'])
self.appLoader.attachCursor(app_settings.APP_NAME_SPACE.SF_LOBBY, _CTRL_FLAG.GUI_ENABLED)
g_bootcampEvents.onResultScreenPopulated()
return
@simpleLog(action=DEPRECATED_BC_LOG_ACTIONS.CONTINUE_BUTTON_PRESSED)
def _dispose(self):
g_bootcampEvents.onResultScreenFinished -= self.__onResultScreenFinished
g_bootcampEvents.onInterludeVideoStarted -= self.__onInterludeVideoStarted
g_bootcampEvents.onInterludeVideoFinished -= self.__onInterludeVideoFinished
g_playerEvents.onDisconnected -= self._onDisconnected
for sound in self.__awardSounds:
sound.stop()
del self.__awardSounds[:]
if self.__music is not None:
self.__music.stop()
self.__music = None
super(BCBattleResult, self)._dispose()
return
def __setBattleResults(self):
vo = self.battleResults.getResultsVO(self.__arenaUniqueID)
self.as_setDataS(vo)
self.uiBootcampLogger.log(action=DEPRECATED_BC_LOG_ACTIONS.SHOW, finishReason=vo.get('finishReason', None))
self.__hasShowRewards = vo['showRewards']
self.__hasBonusInMedals = vo['hasUnlocks']
self.__hasBonusInStats = vo['xp']['value'] > 0 or vo['credits']['value'] > 0
return
def __onResultScreenFinished(self):
self.destroy()
def __onInterludeVideoStarted(self, _):
self.onFocusIn(alias='')
def __onInterludeVideoFinished(self):
self.onFocusIn(alias=self.alias)
def _onDisconnected(self):
self.destroy()
|
aeefd84886a4c7830526a73274204cac27e9e9c1
|
ea910d9946ff96f44bf8895508d053f3075f7c3a
|
/dist/bbct/build.py
|
a650bed3af35038ce45594a1c1255bc2673097f6
|
[
"BSD-2-Clause",
"GPL-1.0-or-later",
"MIT",
"GPL-3.0-only",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain"
] |
permissive
|
davidgiven/cowgol
|
290e4ba4e6e87b4619a5f69c0c156145f31cee88
|
ec33ae1293df3937c6397575a0f7defb5e3b4a4d
|
refs/heads/master
| 2023-07-13T20:31:01.261973
| 2023-03-06T19:06:34
| 2023-03-06T19:06:34
| 93,265,830
| 180
| 29
|
BSD-2-Clause
| 2023-03-05T19:29:03
| 2017-06-03T18:12:02
|
C
|
UTF-8
|
Python
| false
| false
| 1,940
|
py
|
build.py
|
from build.ab2 import export, Rule, Target, normalrule
from tools.build import tocpm, mkdfs
@Rule
def bbcify(self, name, src: Target = None):
normalrule(
replaces=self,
ins=[src],
outs=[self.localname + ".txt"],
commands=[
r"""sed -e 's/include "\(.*\)\.coh"/include "h.\1"/' < {ins} | expand -t4 | tr '\n' '\r' > {outs}"""
],
label="BBCIFY",
)
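# The pipeline above rewrites Cowgol include directives to the BBC DFS naming
# scheme, expands tabs to four spaces, and converts LF line endings to CR.
# For example (illustrative):
#
#   include "cowgol.coh"  ->  include "h.cowgol"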
bbcify(name="mandelcow", src="examples/mandel.cow")
bbcify(name="cowgolcoh", src="rt/bbct/cowgol.coh")
bbcify(name="commoncoh", src="rt/common.coh")
mkdfs(
name="ssd",
flags=[
["-f", Target("./!boot")],
[
"-f",
Target("src/cowfe+cowfe-for-16bit-with-bbct"),
"-e0x400",
"-l0x400",
"-ncowfe",
],
[
"-f",
Target("src/cowbe+cowbe-for-6502-with-bbct"),
"-e0x400",
"-l0x400",
"-ncowbe",
],
[
"-f",
Target("src/cowlink+cowlink-for-bbct-with-bbct"),
"-e0x400",
"-l0x400",
"-ncowlink",
],
["-f", Target("rt/bbct+cowgolcoo"), "-no.cowgol"],
["-f", Target("+cowgolcoh"), "-nh.cowgol"],
["-f", Target("+commoncoh"), "-nh.common"],
["-f", Target("+mandelcow"), "-nw.source"],
"-B3",
],
)
export(
name="bbct",
items={
"bin/dist/bbct/!boot": "./!boot",
"bin/dist/bbct/mandel.cow": "+mandelcow",
"bin/dist/bbct/cowgol.coh": "+cowgolcoh",
"bin/dist/bbct/common.coh": "+commoncoh",
"bin/dist/bbct/cowgol.coo": "rt/bbct+cowgolcoo",
"bin/dist/bbct/cowfe.com": "src/cowfe+cowfe-for-16bit-with-bbct",
"bin/dist/bbct/cowbe.com": "src/cowbe+cowbe-for-6502-with-bbct",
"bin/dist/bbct/cowlink.com": "src/cowlink+cowlink-for-bbct-with-bbct",
"bin/dist/bbct.ssd": "+ssd",
},
)
|
fa9a439c9a6a5e22c9adca425adef475e4e114b7
|
5770a3fc8bd224d926d4aff5b7d8f1863f145cab
|
/quarkchain/cluster/subscription.py
|
11b6c84a749d648f3b467374aeeb675124730978
|
[
"MIT"
] |
permissive
|
QuarkChain/pyquarkchain
|
d06a59d630fd0c4a07e1c10548ba044329da95ba
|
2068153c9386a1eacb5eccb8cf93d98f87537203
|
refs/heads/master
| 2023-02-27T14:16:07.419575
| 2022-04-18T20:35:59
| 2022-04-18T20:35:59
| 143,354,339
| 253
| 133
|
MIT
| 2023-02-07T21:54:01
| 2018-08-02T23:28:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,764
|
py
|
subscription.py
|
import asyncio
import json
from typing import List, Dict, Tuple, Optional, Callable
from jsonrpcserver.exceptions import InvalidParams
from websockets import WebSocketServerProtocol
from quarkchain.core import MinorBlock
SUB_NEW_HEADS = "newHeads"
SUB_NEW_PENDING_TX = "newPendingTransactions"
SUB_LOGS = "logs"
SUB_SYNC = "syncing"
class SubscriptionManager:
def __init__(self):
self.subscribers = {
SUB_NEW_HEADS: {},
SUB_NEW_PENDING_TX: {},
SUB_LOGS: {},
SUB_SYNC: {},
} # type: Dict[str, Dict[str, WebSocketServerProtocol]]
self.log_filter_gen = {} # type: Dict[str, Callable]
def add_subscriber(self, sub_type, sub_id, conn, extra=None):
if sub_type not in self.subscribers:
raise InvalidParams("Invalid subscription")
self.subscribers[sub_type][sub_id] = conn
if sub_type == SUB_LOGS:
assert extra and isinstance(extra, Callable)
self.log_filter_gen[sub_id] = extra
def remove_subscriber(self, sub_id):
for sub_type, subscriber_dict in self.subscribers.items():
if sub_id in subscriber_dict:
del subscriber_dict[sub_id]
if sub_type == SUB_LOGS:
del self.log_filter_gen[sub_id]
return
raise InvalidParams("subscription not found")
async def notify_new_heads(self, blocks: List[MinorBlock]):
from quarkchain.cluster.jsonrpc import minor_block_header_encoder
if len(self.subscribers[SUB_NEW_HEADS]) == 0:
return
tasks = []
for block in blocks:
header = block.header
data = minor_block_header_encoder(header)
for sub_id, websocket in self.subscribers[SUB_NEW_HEADS].items():
response = self.response_encoder(sub_id, data)
tasks.append(websocket.send(json.dumps(response)))
await asyncio.gather(*tasks)
async def notify_new_pending_tx(self, tx_hashes: List[bytes]):
tasks = []
for sub_id, websocket in self.subscribers[SUB_NEW_PENDING_TX].items():
for tx_hash in tx_hashes:
tx_hash = "0x" + tx_hash.hex()
response = self.response_encoder(sub_id, tx_hash)
tasks.append(websocket.send(json.dumps(response)))
await asyncio.gather(*tasks)
async def notify_log(
self, candidate_blocks: List[MinorBlock], is_removed: bool = False
):
from quarkchain.cluster.jsonrpc import loglist_encoder
tasks = []
for sub_id, websocket in self.subscribers[SUB_LOGS].items():
log_filter = self.log_filter_gen[sub_id](candidate_blocks)
logs = log_filter.run()
log_list = loglist_encoder(logs, is_removed)
for log in log_list:
response = self.response_encoder(sub_id, log)
tasks.append(websocket.send(json.dumps(response)))
await asyncio.gather(*tasks)
async def notify_sync(self, data: Optional[Tuple[int, ...]] = None):
result = {"syncing": bool(data)}
if data:
tip_height, highest_block = data
result["status"] = {
"currentBlock": tip_height,
"highestBlock": highest_block,
}
for sub_id, websocket in self.subscribers[SUB_SYNC].items():
response = self.response_encoder(sub_id, result)
asyncio.ensure_future(websocket.send(json.dumps(response)))
@staticmethod
def response_encoder(sub_id, result):
return {
"jsonrpc": "2.0",
"method": "subscription",
"params": {"subscription": sub_id, "result": result},
}
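# Rough usage sketch (the subscription id and connection object are hypothetical):
#
#   manager = SubscriptionManager()
#   manager.add_subscriber(SUB_NEW_HEADS, "0x1", websocket_conn)
#   await manager.notify_new_heads([minor_block])
#   manager.remove_subscriber("0x1")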
|
d2dddb54f5d04fef324abf2ae628b52aef3452fe
|
71b8b60c5627ace1bbda39f679f93f60b55543ca
|
/tensorflow_federated/examples/program/program_logic.py
|
e3f0801aa97b45d649d11441db31549940359fcb
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/federated
|
ff94b63e9f4af448795bae77cee5b627dcae9051
|
ad4bca66f4b483e09d8396e9948630813a343d27
|
refs/heads/main
| 2023-08-31T11:46:28.559047
| 2023-08-31T02:04:38
| 2023-08-31T02:09:59
| 161,556,784
| 2,297
| 631
|
Apache-2.0
| 2023-09-13T22:54:14
| 2018-12-12T23:15:35
|
Python
|
UTF-8
|
Python
| false
| false
| 18,541
|
py
|
program_logic.py
|
# Copyright 2022, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of program logic to use in a federated program.
The program logic is abstracted into a separate function to illustrate the
boundary between the program and the program logic. Note the Python types of the
function signature: this program logic depends only on the abstract interfaces
defined by TFF's federated program API and does not depend on the platform;
therefore this program logic is portable across platforms.
Note: This example focuses on the federated program API and does not use TFF's
domain specific APIs (e.g. `tff.learning`), though it is an example of a
federated learning training loop.
"""
import asyncio
import typing
from typing import NamedTuple, Optional
import tensorflow_federated as tff
class UnexpectedTypeSignatureError(Exception):
pass
def _check_expected_type_signatures(
*,
initialize: tff.Computation,
train: tff.Computation,
train_data_source: tff.program.FederatedDataSource,
evaluation: tff.Computation,
evaluation_data_source: tff.program.FederatedDataSource,
) -> None:
"""Checks the computations and data sources for the expected type signatures.
Note: These kind of checks may not be useful for all program logic. For
example, if you are using a `tff.learning.templates.LearningProcess` as an
input to the program logic, then these checks might not make sense because the
the `tff.learning.templates.LearningProcess` has already validated that those
`tff.Computation` have the expected type signatures.
See `train_federated_model` for more information on the expected type
signatures of the computations and data sources.
Args:
initialize: A `tff.Computation` to invoke before training.
train: A `tff.Computation` to invoke during training.
train_data_source: A `tff.program.FederatedDataSource` which returns client
data used during training.
evaluation: A `tff.Computation` to invoke to evaluate the model produced
after training.
evaluation_data_source: A `tff.program.FederatedDataSource` which returns
client data used during evaluation.
Raises:
UnexpectedTypeSignatureError: If the computations or data sources have an
unexpected type signature.
"""
try:
# Check initialize type.
initialize.type_signature.check_function()
# Check initialize parameter type.
if initialize.type_signature.parameter is not None:
raise UnexpectedTypeSignatureError(
'Expected `initialize` to have no parameters, found '
f'{initialize.type_signature.parameter}.'
)
# Check initialize result type.
initialize.type_signature.result.check_federated()
if initialize.type_signature.result.placement is not tff.SERVER: # pytype: disable=attribute-error
raise UnexpectedTypeSignatureError(
'Expected the result of `initialize` to be placed at `tff.SERVER`, '
f'found {initialize.type_signature.result.placement}.' # pytype: disable=attribute-error
)
# Check train data source type.
if train_data_source.federated_type.placement is not tff.CLIENTS:
raise UnexpectedTypeSignatureError(
'Expected the data returned by `train_data_source` to be placed at '
'`tff.CLIENTS`, found '
f'{train_data_source.federated_type.placement}.'
)
# Check train type.
train.type_signature.check_function()
# Check train result type.
train.type_signature.result.check_struct()
if len(train.type_signature.result) != 2: # pytype: disable=wrong-arg-types
raise UnexpectedTypeSignatureError(
'Expected `train` to return two values, found '
f'{train.type_signature.result}.'
)
train_result_state_type, train_result_metrics_type = (
train.type_signature.result
) # pytype: disable=attribute-error
# Check train result state type.
train_result_state_type.check_federated()
if train_result_state_type.placement is not tff.SERVER:
raise UnexpectedTypeSignatureError(
'Expected the first result of `train` to be placed at `tff.SERVER`, '
f'found {train_result_state_type.placement}.'
)
# Check train result metrics type.
train_result_metrics_type.check_federated()
if train_result_metrics_type.placement is not tff.SERVER:
raise UnexpectedTypeSignatureError(
'Expected the second result of `train` to be placed at `tff.SERVER`, '
f'found {train_result_metrics_type.placement}.'
)
# Check train parameter type.
train.type_signature.parameter.check_struct() # pytype: disable=attribute-error
if len(train.type_signature.parameter) != 2: # pytype: disable=wrong-arg-types
raise UnexpectedTypeSignatureError(
'Expected `train` to have two parameters, found '
f'{train.type_signature.parameter}.'
)
train_parameter_state_type, train_parameter_client_data_type = (
train.type_signature.parameter
) # pytype: disable=attribute-error
# Check train parameter state type.
train_parameter_state_type.check_federated()
if train_parameter_state_type.placement is not tff.SERVER:
raise UnexpectedTypeSignatureError(
'Expected the first parameter of `train` to be placed at'
f' `tff.SERVER`, found {train_parameter_state_type.placement}.'
)
train_parameter_state_type.check_assignable_from(
initialize.type_signature.result
)
train_parameter_state_type.check_assignable_from(train_result_state_type)
# Check train parameter client data type.
train_parameter_client_data_type.check_federated()
if train_parameter_client_data_type.placement is not tff.CLIENTS:
raise UnexpectedTypeSignatureError(
'Expected the second parameter of `train` to be placed at '
f'`tff.CLIENTS`, found {train_parameter_client_data_type.placement}.'
)
train_parameter_client_data_type.check_assignable_from(
train_data_source.federated_type
)
# Check evaluation data source type.
if evaluation_data_source.federated_type.placement is not tff.CLIENTS:
raise UnexpectedTypeSignatureError(
'Expected the data returned by `evaluation_data_source` to be placed '
'at `tff.CLIENTS`, found '
f'{evaluation_data_source.federated_type.placement}.'
)
# Check evaluation type.
evaluation.type_signature.check_function()
# Check evaluation result type.
evaluation.type_signature.result.check_federated()
if evaluation.type_signature.result.placement is not tff.SERVER: # pytype: disable=attribute-error
raise UnexpectedTypeSignatureError(
'Expected the result of `evaluation` to be placed at `tff.SERVER`, '
f'found {evaluation.type_signature.result.placement}.' # pytype: disable=attribute-error
)
# Check evaluation parameter type.
evaluation.type_signature.parameter.check_struct() # pytype: disable=attribute-error
if len(evaluation.type_signature.parameter) != 2: # pytype: disable=wrong-arg-types
raise UnexpectedTypeSignatureError(
'Expected `evaluation` to have two parameters, found '
f'{evaluation.type_signature.parameter}.'
)
evaluation_parameter_state_type, evaluation_parameter_client_data_type = (
evaluation.type_signature.parameter
) # pytype: disable=attribute-error
# Check evaluation parameter state type.
evaluation_parameter_state_type.check_federated()
if evaluation_parameter_state_type.placement is not tff.SERVER:
raise UnexpectedTypeSignatureError(
'Expected the first parameter of `evaluation` to be placed at '
f'`tff.SERVER`, found {evaluation_parameter_state_type.placement}.'
)
evaluation_parameter_state_type.check_assignable_from(
train_result_state_type
)
# Check evaluation parameter client data type.
evaluation_parameter_client_data_type.check_federated()
if evaluation_parameter_client_data_type.placement is not tff.CLIENTS:
raise UnexpectedTypeSignatureError(
'Expected the second parameter of `evaluation` to be placed at '
'`tff.CLIENTS`, found '
f'{evaluation_parameter_client_data_type.placement}.'
)
evaluation_parameter_client_data_type.check_assignable_from(
evaluation_data_source.federated_type
)
except TypeError as e:
raise UnexpectedTypeSignatureError() from e
class _TaskGroup:
"""An asynchronous context manager holding a group of tasks.
Tasks are used to schedule coroutines concurrently. Tasks can be added to the
group using `_TaskGroup.create_task()`. All tasks are awaited when the context
manager exits.
This is a simplified version of
[`asyncio.TaskGroup`](https://docs.python.org/3/library/asyncio-task.html#task-groups)
with less sophisticated error handling. It can be removed once Python 3.11 is
the minimum supported version of Python.
"""
def __init__(self):
self._tasks = set()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_value, traceback):
if self._tasks:
await asyncio.wait(self._tasks)
def create_task(self, coro):
task = asyncio.create_task(coro)
self._tasks.add(task)
def _task_done(task):
self._tasks.discard(task)
task.add_done_callback(_task_done)
return task
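# Illustrative usage of _TaskGroup (the coroutines shown are hypothetical): tasks
# added via create_task() run concurrently and are awaited when the `async with`
# block exits.
#
#   async with _TaskGroup() as task_group:
#     task_group.create_task(release_metrics())
#     task_group.create_task(save_state())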
class _ProgramState(NamedTuple):
"""Defines the intermediate program state of the program logic.
The program logic is responsible for defining the data required to restore the
execution of the program logic after a failure.
Important: Updating the fields of this program state will impact the ability
of the program logic to load previously saved program state. If this is
required, it may be useful to version the structure of the program state.
Attributes:
state: The server state produced at `round_num`.
round_num: The training round.
"""
state: object
round_num: int
async def train_federated_model(
*,
initialize: tff.Computation,
train: tff.Computation,
train_data_source: tff.program.FederatedDataSource,
evaluation: tff.Computation,
evaluation_data_source: tff.program.FederatedDataSource,
total_rounds: int,
num_clients: int,
train_metrics_manager: Optional[
tff.program.ReleaseManager[tff.program.ReleasableStructure, int]
] = None,
evaluation_metrics_manager: Optional[
tff.program.ReleaseManager[tff.program.ReleasableStructure, int]
] = None,
model_output_manager: Optional[
tff.program.ReleaseManager[
tff.program.ReleasableStructure, Optional[object]
]
] = None,
program_state_manager: Optional[
tff.program.ProgramStateManager[tff.program.ProgramStateStructure]
] = None,
) -> None:
"""Trains a federated model for some number of rounds.
The following type signatures are required:
1. `initialize`: `( -> S@SERVER)`
2. `train`: `(<S@SERVER, D1@CLIENTS> -> <S@SERVER, M1@SERVER>)`
3. `evaluation`: `(<S@SERVER, D2@CLIENTS> -> M2@SERVER)`
And
4. `train_data_source`: `D1@CLIENTS`
5. `evaluation_data_source`: `D2@CLIENTS`
Where:
* `S`: The server state.
* `M1`: The train metrics.
* `M2`: The evaluation metrics.
* `D1`: The train client data.
* `D2`: The evaluation client data.
Note: `S`, `D1`, and `D2` are only required to be assignable as described
below, not necessarily identical.
This function invokes `initialize` to construct a local `state` and then runs
`total_rounds` rounds updating this `state`. At each round, this update occurs
by invoking `train` with the `state` and the `client_data` selected from the
`train_data_source`. Each round, the training metrics are released to the
`train_metrics_manager` and the updated `state` is used in the next round of
training.
* Round 0 represents the initialized state
* Round 1 through `total_rounds` represent the training rounds
After training, this function invokes `evaluation` once with the updated
`state` and the `client_data` selected from the `evaluation_data_source`, and
the evaluation metrics are released to the `evaluation_metrics_manager`.
Finally, `state` is released to the `model_output_manager`.
Args:
initialize: A `tff.Computation` to invoke before training.
train: A `tff.Computation` to invoke during training.
train_data_source: A `tff.program.FederatedDataSource` which returns client
data used during training.
evaluation: A `tff.Computation` to invoke to evaluate the model produced
after training.
evaluation_data_source: A `tff.program.FederatedDataSource` which returns
client data used during evaluation.
total_rounds: The number of training rounds to run.
num_clients: The number of clients for each round of training and for
evaluation.
train_metrics_manager: An optional `tff.program.ReleaseManager` used to
release training metrics.
evaluation_metrics_manager: An optional `tff.program.ReleaseManager` used to
release evaluation metrics.
model_output_manager: An optional `tff.program.ReleaseManager` used to
release training output.
program_state_manager: An optional `tff.program.ProgramStateManager` used to
save program state for fault tolerance.
"""
tff.program.check_in_federated_context()
_check_expected_type_signatures(
initialize=initialize,
train=train,
train_data_source=train_data_source,
evaluation=evaluation,
evaluation_data_source=evaluation_data_source,
)
# Cast the `program_state_manager` to a more specific type: a manager that
# loads and saves `_ProgramState`s instead of a manager that loads and saves
# `tff.program.ProgramStateStructure`s. This allows the program logic to:
# * Keep `_ProgramState` private.
# * Have static typing within the program logic.
# * Require callers to provide a `program_state_manager` capable of handling
# any `tff.program.ProgramStateStructure`.
program_state_manager = typing.cast(
Optional[tff.program.ProgramStateManager[_ProgramState]],
program_state_manager,
)
initial_state = initialize()
# Try to load the latest program state. If the program logic failed on a
# previous run, this program state can be used to restore the execution of
# this program logic and skip unnecessary steps.
if program_state_manager is not None:
initial_state = await tff.program.materialize_value(initial_state)
structure = _ProgramState(initial_state, round_num=0)
program_state, version = await program_state_manager.load_latest(structure)
# TODO: b/271445312 - Cast `program_state` to `_ProgramState`. `TypeVar`s
# are lost from async function signatures.
program_state = typing.cast(_ProgramState, program_state)
else:
program_state = None
version = 0
  # Assign the inputs to the program logic using the loaded program state if it
  # is available, or the initialized state otherwise.
if program_state is not None:
state = program_state.state
start_round = program_state.round_num + 1
else:
state = initial_state
start_round = 1
  # Construct an async context manager to group and run tasks concurrently. The
  # program logic releases values and saves program state; these functions
  # are asynchronous and can be run concurrently. However, it is possible to
# schedule these functions differently using
# [asyncio](https://docs.python.org/3/library/asyncio.html).
async with _TaskGroup() as task_group:
# Construct an iterator from the `train_data_source` which returns client
# data used during training.
train_data_iterator = train_data_source.iterator()
# Train `state` for some number of rounds. Both `state` and `start_round`
# are inputs to this loop and are saved using the `program_state_manager`.
# This means that if there is a failure during training, previously trained
# rounds will be skipped.
for round_num in range(start_round, total_rounds + 1):
# Run one round of training.
train_data = train_data_iterator.select(num_clients)
state, metrics = train(state, train_data)
# Release the training metrics.
if train_metrics_manager is not None:
_, metrics_type = train.type_signature.result # pytype: disable=attribute-error
metrics_type = metrics_type.member
task_group.create_task(
train_metrics_manager.release(metrics, metrics_type, round_num)
)
# Save the current program state.
if program_state_manager is not None:
program_state = _ProgramState(state, round_num)
version = version + 1
task_group.create_task(
program_state_manager.save(program_state, version)
)
# Run one round of evaluation. This is similar to running one round of
# training above, except using the `evaluation` computation and the
# `evaluation_data_source`.
evaluation_data_iterator = evaluation_data_source.iterator()
evaluation_data = evaluation_data_iterator.select(num_clients)
evaluation_metrics = evaluation(state, evaluation_data)
# Release the evaluation metrics.
if evaluation_metrics_manager is not None:
evaluation_metrics_type = evaluation.type_signature.result.member # pytype: disable=attribute-error
task_group.create_task(
evaluation_metrics_manager.release(
evaluation_metrics, evaluation_metrics_type, total_rounds + 1
)
)
# Release the model output.
if model_output_manager is not None:
_, state_type = train.type_signature.result # pytype: disable=attribute-error
state_type = state_type.member
task_group.create_task(
model_output_manager.release(state, state_type, None)
)
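# --- Editor's sketch (not part of the original program logic) ----------------
# The comments above note that releasing metrics and saving program state are
# asynchronous and can be scheduled however the caller prefers. Below is a
# minimal, stand-alone illustration of the same idea using plain `asyncio`; the
# coroutine arguments are hypothetical placeholders, not TFF APIs.
import asyncio
async def _release_and_save_concurrently_sketch(release_coro, save_coro) -> None:
  """Awaits a metrics-release coroutine and a state-save coroutine together."""
  # `asyncio.gather` runs both awaitables concurrently and waits for both to
  # finish, which is roughly what the task group above does each round.
  await asyncio.gather(release_coro, save_coro)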
|
6316b387ba016ca99f6290e10b30c5b12a206923
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/ironstubs/default_settings.py
|
d445574bc29c1bf3f2aa9b7306ae8abf7068ed67
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
default_settings.py
|
import os
PATHS = [
# | Local Binaries
# | Revit
'C:\\Program Files\\Autodesk\\Revit 2017',
'C:\\Program Files\\Autodesk\\Revit 2017\\en-US',
# | Tekla Structures
'C:\\Program Files\\Tekla Structures\\2017\\nt\\bin\\plugins',
# | Dynamo
    'C:\\Program Files\\Dynamo\\Dynamo Core\\1.2',
    'C:\\Program Files\\Dynamo\\Dynamo Core\\1.3',
    'C:\\Program Files\\Dynamo\\Dynamo Revit\\1.2\\Revit_2017',
    'C:\\Program Files\\Dynamo\\Dynamo Revit\\1.3\\Revit_2017',
# | Rhino
'C:\\Program Files\\Rhinoceros 5 (64-bit)\\System',
# Grasshopper
'C:\\Users\\gtalarico\\AppData\\Roaming\\McNeel\\Rhinoceros\\5.0\\Plug-ins\\Grasshopper (b45a29b1-4343-4035-989e-044e8580d9cf)\\0.9.76.0'
]
ASSEMBLIES = [
# | Ironpython
'IronPython.Wpf',
# | Windows
'PresentationCore',
'PresentationFramework',
'WindowsBase',
# | System
'System',
'System.Drawing',
'System.Windows.Forms',
# | Dynamo
'ProtoGeometry',
'DSCoreNodes',
'DSOffice',
'Tessellation',
# | Rhino
'Rhino3dmIO',
'RhinoCommon',
# | Grasshopper
'Grasshopper',
'GH_IO',
# 'GH_Util',
# | Tekla Structures
'Tekla.Structures',
'Tekla.Structures.Drawing',
'Tekla.Structures.Model',
'Tekla.Structures.Plugins',
]
BUILTINS = [
'clr',
'wpf'
]
ASSEMBLIES.extend(BUILTINS)
ASSEMBLIES.sort()
REVIT_ASSEMBLIES = [
# | Revit
'RevitAPI',
'RevitAPIUI',
'RevitServices',
'RevitNodes',
]
# | If running inside Revit, Process Revit Assemblies Only
try:
__revit__
except NameError:
pass
else:
ASSEMBLIES = REVIT_ASSEMBLIES
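# --- Editor's illustrative sketch (not part of the original settings file) ---
# A typical IronPython consumer of these settings would extend sys.path with
# PATHS and load each assembly before importing stubs. Kept commented out
# because `clr` is only available under IronPython; the loader actually used
# by this project lives elsewhere in the repository.
# import sys
# import clr
# sys.path.extend(PATHS)
# for assembly in ASSEMBLIES:
#     clr.AddReference(assembly)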
|
d3517ffb64796590f32ef40fe61b966ac5e2c25d
|
83e7dc1281874779c46dfadcc15b2bb66d8e599c
|
/examples/event/lv_example_event_4.py
|
48399039cc6080515d8178ef59ac37936999eb00
|
[
"MIT"
] |
permissive
|
lvgl/lvgl
|
7d51d6774d6ac71df7101fc7ded56fea4b70be01
|
5c984b4a5364b6455966eb3a860153806c51626f
|
refs/heads/master
| 2023-08-30T22:39:20.283922
| 2023-08-30T19:55:29
| 2023-08-30T19:55:29
| 60,667,730
| 9,296
| 2,218
|
MIT
| 2023-09-14T17:59:34
| 2016-06-08T04:14:34
|
C
|
UTF-8
|
Python
| false
| false
| 1,693
|
py
|
lv_example_event_4.py
|
class LV_Example_Event_4:
def __init__(self):
#
# Demonstrate the usage of draw event
#
self.size = 0
self.size_dec = False
self.cont = lv.obj(lv.scr_act())
self.cont.set_size(200, 200)
self.cont.center()
self.cont.add_event(self.event_cb, lv.EVENT.DRAW_TASK_ADDED, None)
self.cont.add_flag(lv.obj.FLAG.SEND_DRAW_TASK_EVENTS)
lv.timer_create(self.timer_cb, 30, None)
def timer_cb(self,timer) :
self.cont.invalidate()
if self.size_dec :
self.size -= 1
else :
self.size += 1
if self.size == 50 :
self.size_dec = True
elif self.size == 0:
self.size_dec = False
def event_cb(self,e) :
obj = e.get_target_obj()
dsc = e.get_draw_task()
base_dsc = lv.draw_dsc_base_t.__cast__(dsc.draw_dsc)
if base_dsc.part == lv.PART.MAIN:
a = lv.area_t()
a.x1 = 0
a.y1 = 0
a.x2 = self.size
a.y2 = self.size
coords = lv.area_t()
obj.get_coords(coords)
coords.align(a, lv.ALIGN.CENTER, 0, 0)
draw_dsc = lv.draw_rect_dsc_t()
draw_dsc.init()
draw_dsc.bg_color = lv.color_hex(0xffaaaa)
draw_dsc.radius = lv.RADIUS_CIRCLE
draw_dsc.border_color = lv.color_hex(0xff5555)
draw_dsc.border_width = 2
draw_dsc.outline_color = lv.color_hex(0xff0000)
draw_dsc.outline_pad = 3
draw_dsc.outline_width = 2
lv.draw_rect(base_dsc.layer, draw_dsc, a)
lv_example_event_4 = LV_Example_Event_4()
|
6c2512d55971633eaacd685acd26fa0007200199
|
86366739c4613bc96193680001ab80b704a69e17
|
/loaddata.py
|
1f327886b9ecbc82c8d4e268efe67403bfafa053
|
[
"MIT"
] |
permissive
|
JunjH/Visualizing-CNNs-for-monocular-depth-estimation
|
3b2ecfa38e5ae0ae51beaa50b159e88254546459
|
9fb5124e7a907817de6a3eecf1b3ec854c8adf85
|
refs/heads/master
| 2022-06-21T05:52:43.671488
| 2022-06-10T03:12:04
| 2022-06-10T03:12:04
| 180,339,370
| 148
| 29
| null | 2019-04-10T06:38:26
| 2019-04-09T10:06:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,427
|
py
|
loaddata.py
|
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
import random
from nyu_transform import *
class depthDataset(Dataset):
    """NYU Depth dataset: loads (RGB image, depth map) pairs listed in a CSV file."""
def __init__(self, csv_file, transform=None):
self.frame = pd.read_csv(csv_file, header=None)
self.transform = transform
def __getitem__(self, idx):
        # `DataFrame.ix` was removed from pandas; use positional indexing instead.
        image_name = self.frame.iloc[idx, 0]
        depth_name = self.frame.iloc[idx, 1]
image = Image.open(image_name)
depth = Image.open(depth_name)
sample = {'image': image, 'depth': depth}
if self.transform:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.frame)
def getTrainingData(batch_size=64):
__imagenet_pca = {
'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
'eigvec': torch.Tensor([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
])
}
__imagenet_stats = {'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]}
transformed_training = depthDataset(csv_file='./data/nyu2_train.csv',
transform=transforms.Compose([
Scale(240),
RandomHorizontalFlip(),
RandomRotate(5),
CenterCrop([304, 228], [152, 114]),
ToTensor(),
Lighting(0.1, __imagenet_pca[
'eigval'], __imagenet_pca['eigvec']),
ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
),
Normalize(__imagenet_stats['mean'],
__imagenet_stats['std'])
]))
dataloader_training = DataLoader(transformed_training, batch_size,
shuffle=True, num_workers=4, pin_memory=False)
return dataloader_training
def getTestingData(batch_size=64):
__imagenet_stats = {'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]}
# scale = random.uniform(1, 1.5)
transformed_testing = depthDataset(csv_file='./data/nyu2_test.csv',
transform=transforms.Compose([
Scale(240),
CenterCrop([304, 228], [152, 114]),
ToTensor(is_test=True),
Normalize(__imagenet_stats['mean'],
__imagenet_stats['std'])
]))
dataloader_testing = DataLoader(transformed_testing, batch_size,
shuffle=False, num_workers=4, pin_memory=False)
return dataloader_testing
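# --- Editor's usage sketch (not part of the original module) -----------------
# Kept commented out because it assumes ./data/nyu2_train.csv and the image
# files it lists are present on disk.
# train_loader = getTrainingData(batch_size=8)
# for sample in train_loader:
#     images, depths = sample['image'], sample['depth']  # NCHW float tensors
#     break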
|
090117dcb8a6f5a81b7502e7854946d427e80233
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/unit/utils/test_xmlutil.py
|
42826e626fae0a8d7bd6608b7e46b6d0646d89e7
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 6,241
|
py
|
test_xmlutil.py
|
import xml.etree.ElementTree as ET
import pytest
import salt.utils.xmlutil as xml
@pytest.fixture
def xml_doc():
return ET.fromstring(
"""
<domain>
<name>test01</name>
<memory unit="MiB">1024</memory>
<cpu>
<topology sockets="1"/>
</cpu>
<vcpus>
<vcpu enabled="yes" id="1"/>
</vcpus>
<memtune>
<hugepages>
<page size="128"/>
</hugepages>
</memtune>
</domain>
"""
)
def test_change_xml_text(xml_doc):
ret = xml.change_xml(
xml_doc, {"name": "test02"}, [{"path": "name", "xpath": "name"}]
)
assert ret
assert "test02" == xml_doc.find("name").text
def test_change_xml_text_nochange(xml_doc):
ret = xml.change_xml(
xml_doc, {"name": "test01"}, [{"path": "name", "xpath": "name"}]
)
assert not ret
def test_change_xml_equals_nochange(xml_doc):
ret = xml.change_xml(
xml_doc,
{"mem": 1023},
[
{
"path": "mem",
"xpath": "memory",
"get": lambda n: int(n.text),
"equals": lambda o, n: abs(o - n) <= 1,
}
],
)
assert not ret
def test_change_xml_text_notdefined(xml_doc):
ret = xml.change_xml(xml_doc, {}, [{"path": "name", "xpath": "name"}])
assert not ret
def test_change_xml_text_removed(xml_doc):
ret = xml.change_xml(xml_doc, {"name": None}, [{"path": "name", "xpath": "name"}])
assert ret
assert xml_doc.find("name") is None
def test_change_xml_text_add(xml_doc):
ret = xml.change_xml(
xml_doc,
{"cpu": {"vendor": "ACME"}},
[{"path": "cpu:vendor", "xpath": "cpu/vendor"}],
)
assert ret
assert "ACME" == xml_doc.find("cpu/vendor").text
def test_change_xml_convert(xml_doc):
ret = xml.change_xml(
xml_doc,
{"mem": 2},
[{"path": "mem", "xpath": "memory", "convert": lambda v: v * 1024}],
)
assert ret
assert "2048" == xml_doc.find("memory").text
def test_change_xml_attr(xml_doc):
ret = xml.change_xml(
xml_doc,
{"cpu": {"topology": {"cores": 4}}},
[
{
"path": "cpu:topology:cores",
"xpath": "cpu/topology",
"get": lambda n: int(n.get("cores")) if n.get("cores") else None,
"set": lambda n, v: n.set("cores", str(v)),
"del": xml.del_attribute("cores"),
}
],
)
assert ret
assert "4" == xml_doc.find("cpu/topology").get("cores")
def test_change_xml_attr_unchanged(xml_doc):
ret = xml.change_xml(
xml_doc,
{"cpu": {"topology": {"sockets": 1}}},
[
{
"path": "cpu:topology:sockets",
"xpath": "cpu/topology",
"get": lambda n: int(n.get("sockets")) if n.get("sockets") else None,
"set": lambda n, v: n.set("sockets", str(v)),
"del": xml.del_attribute("sockets"),
}
],
)
assert not ret
def test_change_xml_attr_remove(xml_doc):
ret = xml.change_xml(
xml_doc,
{"cpu": {"topology": {"sockets": None}}},
[
{
"path": "cpu:topology:sockets",
"xpath": "./cpu/topology",
"get": lambda n: int(n.get("sockets")) if n.get("sockets") else None,
"set": lambda n, v: n.set("sockets", str(v)),
"del": xml.del_attribute("sockets"),
}
],
)
assert ret
assert xml_doc.find("cpu") is None
def test_change_xml_not_simple_value(xml_doc):
ret = xml.change_xml(
xml_doc,
{"cpu": {"topology": {"sockets": None}}},
[{"path": "cpu", "xpath": "vcpu", "get": lambda n: int(n.text)}],
)
assert not ret
def test_change_xml_template(xml_doc):
ret = xml.change_xml(
xml_doc,
{"cpu": {"vcpus": {2: {"enabled": True}, 4: {"enabled": False}}}},
[
{
"path": "cpu:vcpus:{id}:enabled",
"xpath": "vcpus/vcpu[@id='$id']",
"convert": lambda v: "yes" if v else "no",
"get": lambda n: n.get("enabled"),
"set": lambda n, v: n.set("enabled", v),
"del": xml.del_attribute("enabled", ["id"]),
},
],
)
assert ret
assert xml_doc.find("vcpus/vcpu[@id='1']") is None
assert "yes" == xml_doc.find("vcpus/vcpu[@id='2']").get("enabled")
assert "no" == xml_doc.find("vcpus/vcpu[@id='4']").get("enabled")
def test_change_xml_template_remove(xml_doc):
ret = xml.change_xml(
xml_doc,
{"cpu": {"vcpus": None}},
[
{
"path": "cpu:vcpus:{id}:enabled",
"xpath": "vcpus/vcpu[@id='$id']",
"convert": lambda v: "yes" if v else "no",
"get": lambda n: n.get("enabled"),
"set": lambda n, v: n.set("enabled", v),
"del": xml.del_attribute("enabled", ["id"]),
},
],
)
assert ret
assert xml_doc.find("vcpus") is None
def test_change_xml_template_list(xml_doc):
ret = xml.change_xml(
xml_doc,
{"memtune": {"hugepages": [{"size": "1024"}, {"size": "512"}]}},
[
{
"path": "memtune:hugepages:{id}:size",
"xpath": "memtune/hugepages/page[$id]",
"get": lambda n: n.get("size"),
"set": lambda n, v: n.set("size", v),
"del": xml.del_attribute("size"),
},
],
)
assert ret
assert ["1024", "512"] == [
n.get("size") for n in xml_doc.findall("memtune/hugepages/page")
]
def test_strip_spaces():
xml_str = """<domain>
<name>test01</name>
<memory unit="MiB" >1024</memory>
</domain>
"""
expected_str = (
b'<domain><name>test01</name><memory unit="MiB">1024</memory></domain>'
)
node = ET.fromstring(xml_str)
assert expected_str == ET.tostring(xml.strip_spaces(node))
|
acbfad4422fddf48dfe4605cc292e88acd04b5d0
|
56d6257e932e1397ab03b1e7ccc6231378665b04
|
/ATOMSQ/view_toggle.py
|
3cedfda2af394aedd8ec6aa9b736214264386e7d
|
[] |
no_license
|
gluon/AbletonLive10.1_MIDIRemoteScripts
|
e6c8dc4956cff9630aaa36f3667994387ad1d0cf
|
2468b51eba7e5082b06f9e381b3e72027c5f272c
|
refs/heads/master
| 2023-01-10T18:37:46.504180
| 2022-12-23T09:21:48
| 2022-12-23T09:21:48
| 213,423,555
| 205
| 59
| null | 2021-02-12T16:15:01
| 2019-10-07T15:44:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,092
|
py
|
view_toggle.py
|
#Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/ATOMSQ/view_toggle.py
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import listens
from ableton.v2.control_surface import Component
from ableton.v2.control_surface.control import ToggleButtonControl
class ViewToggleComponent(Component):
detail_view_toggle_button = ToggleButtonControl(untoggled_color=u'View.DetailOff', toggled_color=u'View.DetailOn')
main_view_toggle_button = ToggleButtonControl(untoggled_color=u'View.MainOff', toggled_color=u'View.MainOn')
clip_view_toggle_button = ToggleButtonControl(untoggled_color=u'View.ClipOff', toggled_color=u'View.ClipOn')
browser_view_toggle_button = ToggleButtonControl(untoggled_color=u'View.BrowserOff', toggled_color=u'View.BrowserOn')
def __init__(self, *a, **k):
super(ViewToggleComponent, self).__init__(*a, **k)
self.__on_detail_view_visibility_changed.subject = self.application.view
self.__on_main_view_visibility_changed.subject = self.application.view
self.__on_clip_view_visibility_changed.subject = self.application.view
self.__on_browser_view_visibility_changed.subject = self.application.view
self.__on_detail_view_visibility_changed()
self.__on_main_view_visibility_changed()
self.__on_clip_view_visibility_changed()
self.__on_browser_view_visibility_changed()
@detail_view_toggle_button.toggled
def detail_view_toggle_button(self, is_toggled, _):
self._show_or_hide_view(is_toggled, u'Detail')
@main_view_toggle_button.toggled
def main_view_toggle_button(self, is_toggled, _):
self._show_or_hide_view(is_toggled, u'Session')
@clip_view_toggle_button.toggled
def clip_view_toggle_button(self, is_toggled, _):
self._show_or_hide_view(is_toggled, u'Detail/Clip')
@browser_view_toggle_button.toggled
def browser_view_toggle_button(self, is_toggled, _):
self._show_or_hide_view(is_toggled, u'Browser')
def _show_or_hide_view(self, show_view, view_name):
if show_view:
self.application.view.show_view(view_name)
else:
self.application.view.hide_view(view_name)
@listens(u'is_view_visible', u'Detail')
def __on_detail_view_visibility_changed(self):
self.detail_view_toggle_button.is_toggled = self.application.view.is_view_visible(u'Detail')
@listens(u'is_view_visible', u'Session')
def __on_main_view_visibility_changed(self):
self.main_view_toggle_button.is_toggled = self.application.view.is_view_visible(u'Session')
@listens(u'is_view_visible', u'Detail/Clip')
def __on_clip_view_visibility_changed(self):
self.clip_view_toggle_button.is_toggled = self.application.view.is_view_visible(u'Detail/Clip')
@listens(u'is_view_visible', u'Browser')
def __on_browser_view_visibility_changed(self):
self.browser_view_toggle_button.is_toggled = self.application.view.is_view_visible(u'Browser')
|
1d2072e95f00ace2721963a3a30e0cb9072da1f5
|
f1c2e4b3147af77e23306f841610aafd6db1c6b0
|
/dev-support/mini-submarine/submarine/image_classification.py
|
aa3aa2b9f5fe8ee45be18b7813fbac061b9ff6d3
|
[
"Apache-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"MIT",
"CDDL-1.1",
"Classpath-exception-2.0"
] |
permissive
|
apache/submarine
|
a2927f5f4f7f5faff4701139f2f0f88a98195e7f
|
0c10613f39b707d5e446c515c12fa28295c8052e
|
refs/heads/master
| 2023-08-30T14:35:43.145942
| 2023-08-20T00:19:54
| 2023-08-24T23:50:49
| 209,459,144
| 663
| 269
|
Apache-2.0
| 2023-09-03T09:05:06
| 2019-09-19T04:00:17
|
Java
|
UTF-8
|
Python
| false
| false
| 21,376
|
py
|
image_classification.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import random
import tarfile
import time
import mxnet as mx
import numpy as np
from mxnet import autograd as ag
from mxnet import gluon, profiler
from mxnet.contrib.io import DataLoaderIter
from mxnet.gluon.data import DataLoader
from mxnet.gluon.data.vision import ImageFolderDataset
from mxnet.gluon.model_zoo import vision as models
from mxnet.metric import Accuracy, CompositeEvalMetric, TopKAccuracy
from mxnet.test_utils import get_cifar10, get_mnist_iterator
# logging
logging.basicConfig(level=logging.INFO)
fh = logging.FileHandler("image-classification.log")
logger = logging.getLogger()
logger.addHandler(fh)
formatter = logging.Formatter("%(message)s")
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logging.debug("\n%s", "-" * 100)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
fh.setFormatter(formatter)
# CLI
parser = argparse.ArgumentParser(description="Train a model for image classification.")
parser.add_argument(
"--dataset",
type=str,
default="cifar10",
help="dataset to use. options are mnist, cifar10, caltech101, imagenet and dummy.",
)
parser.add_argument(
"--data-dir",
type=str,
default="",
help="training directory of imagenet images, contains train/val subdirs.",
)
parser.add_argument(
"--num-worker",
"-j",
dest="num_workers",
default=4,
type=int,
help="number of workers for dataloader",
)
parser.add_argument("--batch-size", type=int, default=32, help="training batch size per device (CPU/GPU).")
parser.add_argument(
"--gpus",
type=str,
default="",
help='ordinates of gpus to use, can be "0,1,2" or empty for cpu only.',
)
parser.add_argument("--epochs", type=int, default=120, help="number of training epochs.")
parser.add_argument("--lr", type=float, default=0.1, help="learning rate. default is 0.1.")
parser.add_argument(
"--momentum", type=float, default=0.9, help="momentum value for optimizer, default is 0.9."
)
parser.add_argument("--wd", type=float, default=0.0001, help="weight decay rate. default is 0.0001.")
parser.add_argument("--seed", type=int, default=123, help="random seed to use. Default=123.")
parser.add_argument(
"--mode",
type=str,
help="mode in which to train the model. options are symbolic, imperative, hybrid",
)
parser.add_argument(
"--model", type=str, required=True, help="type of model to use. see vision_model for options."
)
parser.add_argument(
"--use_thumbnail", action="store_true", help="use thumbnail or not in resnet. default is false."
)
parser.add_argument(
"--batch-norm",
action="store_true",
help="enable batch normalization or not in vgg. default is false.",
)
parser.add_argument("--use-pretrained", action="store_true", help="enable using pretrained model from gluon.")
parser.add_argument(
"--prefix",
default="",
type=str,
help="path to checkpoint prefix, default is current working dir",
)
parser.add_argument(
"--start-epoch", default=0, type=int, help="starting epoch, 0 for fresh training, > 0 to resume"
)
parser.add_argument("--resume", type=str, default="", help="path to saved weight where you want resume")
parser.add_argument("--lr-factor", default=0.1, type=float, help="learning rate decay ratio")
parser.add_argument(
"--lr-steps", default="30,60,90", type=str, help="list of learning rate decay epochs as in str"
)
parser.add_argument(
"--dtype", default="float32", type=str, help="data type, float32 or float16 if applicable"
)
parser.add_argument(
"--save-frequency",
default=10,
type=int,
help="epoch frequence to save model, best model will always be saved",
)
parser.add_argument("--kvstore", type=str, default="device", help="kvstore to use for trainer/module.")
parser.add_argument("--log-interval", type=int, default=50, help="Number of batches to wait before logging.")
parser.add_argument(
"--profile",
action="store_true",
help=(
"Option to turn on memory profiling for front-end, "
"and prints out the memory usage by python function at the end."
),
)
parser.add_argument("--builtin-profiler", type=int, default=0, help="Enable built-in profiler (0=off, 1=on)")
opt = parser.parse_args()
# global variables
logger.info("Starting new image-classification task: %s", opt)
mx.random.seed(opt.seed)
model_name = opt.model
dataset_classes = {"mnist": 10, "cifar10": 10, "caltech101": 101, "imagenet": 1000, "dummy": 1000}
batch_size, dataset, classes = opt.batch_size, opt.dataset, dataset_classes[opt.dataset]
context = [mx.gpu(int(i)) for i in opt.gpus.split(",")] if opt.gpus.strip() else [mx.cpu()]
num_gpus = len(context)
batch_size *= max(1, num_gpus)
lr_steps = [int(x) for x in opt.lr_steps.split(",") if x.strip()]
metric = CompositeEvalMetric([Accuracy(), TopKAccuracy(5)])
kv = mx.kv.create(opt.kvstore)
def get_cifar10_iterator(batch_size, data_shape, resize=-1, num_parts=1, part_index=0):
get_cifar10()
train = mx.io.ImageRecordIter(
path_imgrec="data/cifar/train.rec",
resize=resize,
data_shape=data_shape,
batch_size=batch_size,
rand_crop=True,
rand_mirror=True,
num_parts=num_parts,
part_index=part_index,
)
val = mx.io.ImageRecordIter(
path_imgrec="data/cifar/test.rec",
resize=resize,
rand_crop=False,
rand_mirror=False,
data_shape=data_shape,
batch_size=batch_size,
num_parts=num_parts,
part_index=part_index,
)
return train, val
def get_imagenet_transforms(data_shape=224, dtype="float32"):
def train_transform(image, label):
image, _ = mx.image.random_size_crop(image, (data_shape, data_shape), 0.08, (3 / 4.0, 4 / 3.0))
image = mx.nd.image.random_flip_left_right(image)
image = mx.nd.image.to_tensor(image)
image = mx.nd.image.normalize(image, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
return mx.nd.cast(image, dtype), label
def val_transform(image, label):
image = mx.image.resize_short(image, data_shape + 32)
image, _ = mx.image.center_crop(image, (data_shape, data_shape))
image = mx.nd.image.to_tensor(image)
image = mx.nd.image.normalize(image, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
return mx.nd.cast(image, dtype), label
return train_transform, val_transform
def get_imagenet_iterator(root, batch_size, num_workers, data_shape=224, dtype="float32"):
"""Dataset loader with preprocessing."""
train_dir = os.path.join(root, "train")
train_transform, val_transform = get_imagenet_transforms(data_shape, dtype)
logging.info("Loading image folder %s, this may take a bit long...", train_dir)
train_dataset = ImageFolderDataset(train_dir, transform=train_transform)
train_data = DataLoader(
train_dataset, batch_size, shuffle=True, last_batch="discard", num_workers=num_workers
)
val_dir = os.path.join(root, "val")
if not os.path.isdir(os.path.expanduser(os.path.join(root, "val", "n01440764"))):
user_warning = (
"Make sure validation images are stored in one subdir per category, a helper script is"
" available at https://git.io/vNQv1"
)
raise ValueError(user_warning)
logging.info("Loading image folder %s, this may take a bit long...", val_dir)
val_dataset = ImageFolderDataset(val_dir, transform=val_transform)
val_data = DataLoader(val_dataset, batch_size, last_batch="keep", num_workers=num_workers)
return DataLoaderIter(train_data, dtype), DataLoaderIter(val_data, dtype)
def get_caltech101_data():
url = "https://s3.us-east-2.amazonaws.com/mxnet-public/101_ObjectCategories.tar.gz"
dataset_name = "101_ObjectCategories"
data_folder = "data"
if not os.path.isdir(data_folder):
os.makedirs(data_folder)
tar_path = mx.gluon.utils.download(url, path=data_folder)
if not os.path.isdir(os.path.join(data_folder, "101_ObjectCategories")) or not os.path.isdir(
os.path.join(data_folder, "101_ObjectCategories_test")
):
tar = tarfile.open(tar_path, "r:gz")
tar.extractall(data_folder)
tar.close()
print("Data extracted")
training_path = os.path.join(data_folder, dataset_name)
testing_path = os.path.join(data_folder, f"{dataset_name}_test")
return training_path, testing_path
def get_caltech101_iterator(batch_size, num_workers, dtype):
def transform(image, label):
# resize the shorter edge to 224, the longer edge will be greater or equal to 224
resized = mx.image.resize_short(image, 224)
# center and crop an area of size (224,224)
cropped, crop_info = mx.image.center_crop(resized, (224, 224))
# transpose the channels to be (3,224,224)
transposed = mx.nd.transpose(cropped, (2, 0, 1))
return transposed, label
training_path, testing_path = get_caltech101_data()
dataset_train = ImageFolderDataset(root=training_path, transform=transform)
dataset_test = ImageFolderDataset(root=testing_path, transform=transform)
train_data = DataLoader(dataset_train, batch_size, shuffle=True, num_workers=num_workers)
test_data = DataLoader(dataset_test, batch_size, shuffle=False, num_workers=num_workers)
return DataLoaderIter(train_data), DataLoaderIter(test_data)
class DummyIter(mx.io.DataIter):
def __init__(self, batch_size, data_shape, batches=100):
super().__init__(batch_size)
self.data_shape = (batch_size,) + data_shape
self.label_shape = (batch_size,)
self.provide_data = [("data", self.data_shape)]
self.provide_label = [("softmax_label", self.label_shape)]
self.batch = mx.io.DataBatch(
data=[mx.nd.zeros(self.data_shape)], label=[mx.nd.zeros(self.label_shape)]
)
self._batches = 0
self.batches = batches
def next(self):
if self._batches < self.batches:
self._batches += 1
return self.batch
else:
self._batches = 0
raise StopIteration
def dummy_iterator(batch_size, data_shape):
return DummyIter(batch_size, data_shape), DummyIter(batch_size, data_shape)
class ImagePairIter(mx.io.DataIter):
def __init__(self, path, data_shape, label_shape, batch_size=64, flag=0, input_aug=None, target_aug=None):
def is_image_file(fn):
return any(fn.endswith(ext) for ext in [".png", ".jpg", ".jpeg"])
super().__init__(batch_size)
self.data_shape = (batch_size,) + data_shape
self.label_shape = (batch_size,) + label_shape
self.input_aug = input_aug
self.target_aug = target_aug
self.provide_data = [("data", self.data_shape)]
self.provide_label = [("label", self.label_shape)]
self.filenames = [os.path.join(path, x) for x in os.listdir(path) if is_image_file(x)]
self.count = 0
self.flag = flag
random.shuffle(self.filenames)
def next(self):
from PIL import Image
if self.count + self.batch_size <= len(self.filenames):
data = []
label = []
for i in range(self.batch_size):
fn = self.filenames[self.count]
self.count += 1
image = Image.open(fn).convert("YCbCr").split()[0]
if image.size[0] > image.size[1]:
image = image.transpose(Image.TRANSPOSE)
image = mx.nd.expand_dims(mx.nd.array(image), axis=2)
target = image.copy()
for aug in self.input_aug:
image = aug(image)
for aug in self.target_aug:
target = aug(target)
data.append(image)
label.append(target)
data = mx.nd.concat(*[mx.nd.expand_dims(d, axis=0) for d in data], dim=0)
label = mx.nd.concat(*[mx.nd.expand_dims(d, axis=0) for d in label], dim=0)
data = [mx.nd.transpose(data, axes=(0, 3, 1, 2)).astype("float32") / 255]
label = [mx.nd.transpose(label, axes=(0, 3, 1, 2)).astype("float32") / 255]
return mx.io.DataBatch(data=data, label=label)
else:
raise StopIteration
def reset(self):
self.count = 0
random.shuffle(self.filenames)
def get_model(model, ctx, opt):
"""Model initialization."""
kwargs = {"ctx": ctx, "pretrained": opt.use_pretrained, "classes": classes}
if model.startswith("resnet"):
kwargs["thumbnail"] = opt.use_thumbnail
elif model.startswith("vgg"):
kwargs["batch_norm"] = opt.batch_norm
net = models.get_model(model, **kwargs)
if opt.resume:
net.load_parameters(opt.resume)
elif not opt.use_pretrained:
if model in ["alexnet"]:
net.initialize(mx.init.Normal())
else:
net.initialize(mx.init.Xavier(magnitude=2))
net.cast(opt.dtype)
return net
net = get_model(opt.model, context, opt)
def get_data_iters(dataset, batch_size, opt):
"""get dataset iterators"""
if dataset == "mnist":
train_data, val_data = get_mnist_iterator(
batch_size, (1, 28, 28), num_parts=kv.num_workers, part_index=kv.rank
)
elif dataset == "cifar10":
train_data, val_data = get_cifar10_iterator(
batch_size, (3, 32, 32), num_parts=kv.num_workers, part_index=kv.rank
)
elif dataset == "imagenet":
shape_dim = 299 if model_name == "inceptionv3" else 224
if not opt.data_dir:
            raise ValueError(
                "Dir containing raw images in train/val is required for imagenet. "
                "Please specify \"--data-dir\""
            )
train_data, val_data = get_imagenet_iterator(
opt.data_dir, batch_size, opt.num_workers, shape_dim, opt.dtype
)
elif dataset == "caltech101":
train_data, val_data = get_caltech101_iterator(batch_size, opt.num_workers, opt.dtype)
elif dataset == "dummy":
shape_dim = 299 if model_name == "inceptionv3" else 224
train_data, val_data = dummy_iterator(batch_size, (3, shape_dim, shape_dim))
return train_data, val_data
def test(ctx, val_data):
metric.reset()
val_data.reset()
for batch in val_data:
data = gluon.utils.split_and_load(
batch.data[0].astype(opt.dtype, copy=False), ctx_list=ctx, batch_axis=0
)
label = gluon.utils.split_and_load(
batch.label[0].astype(opt.dtype, copy=False), ctx_list=ctx, batch_axis=0
)
outputs = [net(X) for X in data]
metric.update(label, outputs)
return metric.get()
def update_learning_rate(lr, trainer, epoch, ratio, steps):
"""Set the learning rate to the initial value decayed by ratio every N epochs."""
new_lr = lr * (ratio ** int(np.sum(np.array(steps) < epoch)))
trainer.set_learning_rate(new_lr)
return trainer
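# Editor's note (illustrative, not part of the original script): with the
# defaults --lr 0.1, --lr-factor 0.1 and --lr-steps 30,60,90, the step decay
# above passes two boundaries by epoch 65 (np.sum(np.array([30, 60, 90]) < 65)
# == 2), so new_lr = 0.1 * 0.1 ** 2 = 0.001.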
def save_checkpoint(epoch, top1, best_acc):
if opt.save_frequency and (epoch + 1) % opt.save_frequency == 0:
fname = os.path.join(opt.prefix, "%s_%d_acc_%.4f.params" % (opt.model, epoch, top1))
net.save_parameters(fname)
logger.info("[Epoch %d] Saving checkpoint to %s with Accuracy: %.4f", epoch, fname, top1)
if top1 > best_acc[0]:
best_acc[0] = top1
fname = os.path.join(opt.prefix, "%s_best.params" % (opt.model))
net.save_parameters(fname)
logger.info("[Epoch %d] Saving checkpoint to %s with Accuracy: %.4f", epoch, fname, top1)
def train(opt, ctx):
if isinstance(ctx, mx.Context):
ctx = [ctx]
train_data, val_data = get_data_iters(dataset, batch_size, opt)
net.collect_params().reset_ctx(ctx)
trainer = gluon.Trainer(
net.collect_params(),
"sgd",
optimizer_params={
"learning_rate": opt.lr,
"wd": opt.wd,
"momentum": opt.momentum,
"multi_precision": True,
},
kvstore=kv,
)
loss = gluon.loss.SoftmaxCrossEntropyLoss()
total_time = 0
num_epochs = 0
best_acc = [0]
for epoch in range(opt.start_epoch, opt.epochs):
trainer = update_learning_rate(opt.lr, trainer, epoch, opt.lr_factor, lr_steps)
tic = time.time()
train_data.reset()
metric.reset()
btic = time.time()
for i, batch in enumerate(train_data):
data = gluon.utils.split_and_load(batch.data[0].astype(opt.dtype), ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(batch.label[0].astype(opt.dtype), ctx_list=ctx, batch_axis=0)
outputs = []
Ls = []
with ag.record():
for x, y in zip(data, label):
z = net(x)
L = loss(z, y)
# store the loss and do backward after we have done forward
# on all GPUs for better speed on multiple GPUs.
Ls.append(L)
outputs.append(z)
ag.backward(Ls)
trainer.step(batch.data[0].shape[0])
metric.update(label, outputs)
if opt.log_interval and not (i + 1) % opt.log_interval:
name, acc = metric.get()
logger.info(
"Epoch[%d] Batch [%d]\tSpeed: %f samples/sec\t%s=%f, %s=%f"
% (
epoch,
i,
batch_size / (time.time() - btic),
name[0],
acc[0],
name[1],
acc[1],
)
)
btic = time.time()
epoch_time = time.time() - tic
        # The first epoch will usually be much slower than the subsequent epochs,
        # so don't factor it into the average.
if num_epochs > 0:
total_time = total_time + epoch_time
num_epochs = num_epochs + 1
name, acc = metric.get()
logger.info("[Epoch %d] training: %s=%f, %s=%f" % (epoch, name[0], acc[0], name[1], acc[1]))
logger.info("[Epoch %d] time cost: %f" % (epoch, epoch_time))
name, val_acc = test(ctx, val_data)
logger.info("[Epoch %d] validation: %s=%f, %s=%f" % (epoch, name[0], val_acc[0], name[1], val_acc[1]))
# save model if meet requirements
save_checkpoint(epoch, val_acc[0], best_acc)
if num_epochs > 1:
print(f"Average epoch time: {float(total_time) / (num_epochs - 1)}")
def main():
if opt.builtin_profiler > 0:
profiler.set_config(profile_all=True, aggregate_stats=True)
profiler.set_state("run")
if opt.mode == "symbolic":
data = mx.sym.var("data")
if opt.dtype == "float16":
data = mx.sym.Cast(data=data, dtype=np.float16)
out = net(data)
if opt.dtype == "float16":
out = mx.sym.Cast(data=out, dtype=np.float32)
softmax = mx.sym.SoftmaxOutput(out, name="softmax")
mod = mx.mod.Module(softmax, context=context)
train_data, val_data = get_data_iters(dataset, batch_size, opt)
mod.fit(
train_data,
eval_data=val_data,
num_epoch=opt.epochs,
kvstore=kv,
batch_end_callback=mx.callback.Speedometer(batch_size, max(1, opt.log_interval)),
epoch_end_callback=mx.callback.do_checkpoint("image-classifier-%s" % opt.model),
optimizer="sgd",
optimizer_params={
"learning_rate": opt.lr,
"wd": opt.wd,
"momentum": opt.momentum,
"multi_precision": True,
},
initializer=mx.init.Xavier(magnitude=2),
)
mod.save_parameters("image-classifier-%s-%d-final.params" % (opt.model, opt.epochs))
else:
if opt.mode == "hybrid":
net.hybridize()
train(opt, context)
if opt.builtin_profiler > 0:
profiler.set_state("stop")
print(profiler.dumps())
if __name__ == "__main__":
if opt.profile:
import hotshot
import hotshot.stats
prof = hotshot.Profile(f"image-classifier-{opt.model}-{opt.mode}.prof")
prof.runcall(main)
prof.close()
stats = hotshot.stats.load(f"image-classifier-{opt.model}-{opt.mode}.prof")
stats.strip_dirs()
stats.sort_stats("cumtime", "calls")
stats.print_stats()
else:
main()
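# --- Editor's note: illustrative invocations (not part of the original) ------
# Derived from the argument parser above; dataset/model names are examples.
#   python image_classification.py --model resnet18 --dataset cifar10 --mode hybrid
#   python image_classification.py --model vgg16 --batch-norm --dataset dummy --epochs 1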
|
c2b4e654862590e57338c843c71ec677e9455530
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/23_设计类/设计版本控制(可持久化)/可持久化数组/完全可持久化数组-数据结构.py
|
fd490ebd1b0453895c1dbf66e1cda831e05e24b2
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,982
|
py
|
完全可持久化数组-数据结构.py
|
# # Initial state of the array
# A = [2, 0, 1, 9]
# # Update operations
# operations = [('a.0', 2, 2, 'a.1'), ('a.1', 0, 1, 'a.2'), ('a.2', 1, 1, 'a.3'),
#               ('a.1', 3, 0, 'b.2'), ('a.3', 3, 3, 'a.4')]
# # Queries
# queries = [('a.1', 2), ('a.4', 1), ('a.4', 3), ('b.2', 3)]
# https://qiita.com/wotsushi/items/72e7f8cdd674741ffd61#%E5%8F%82%E8%80%83%E8%A8%98%E4%BA%8B
# !Fully persistent array
from typing import List
from PersistentArray2 import PersistentArray
class OnlineQuery:
    """A persistent array wrapper that records every version of the array."""
def __init__(self, nums: List[int]) -> None:
self.arr = PersistentArray.create(nums)
        self.git = dict({"a.0": self.arr})  # store the persistent array of each version
    def update(self, curVersion: str, index: int, value: int, nextVersion: str) -> None:
        """Update position `index` of the `curVersion` array to `value`, producing the `nextVersion` array."""
preArr = self.git.get(curVersion, None)
if preArr is None:
            raise Exception(f"Version {curVersion} does not exist")
curArr = preArr.update(index, value)
self.git[nextVersion] = curArr
    def query(self, version: str, index: int) -> int:
        """Return the value at position `index` in the `version` array."""
arr = self.git.get(version, None)
if arr is None:
            raise Exception(f"Version {version} does not exist")
return arr.get(index)
if __name__ == "__main__":
persist = OnlineQuery([2, 0, 1, 9] * 2500)
for _ in range(2000):
persist.update("a.0", 2, 2, "a.1")
persist.update("a.1", 0, 1, "a.2")
persist.update("a.2", 1, 1, "a.3")
persist.update("a.1", 3, 0, "b.2")
persist.update("a.3", 3, 3, "a.4")
for _ in range(2500):
assert persist.query("a.1", 2) == 2
assert persist.query("a.4", 1) == 1
assert persist.query("a.4", 3) == 3
assert persist.query("b.2", 3) == 0
|
dc2a036569a42630cce431374858085679351fde
|
126884e6916e9d78201527419681c0369bc61e9a
|
/Python/Human_Detector/human_detector.py
|
7e981d0c76a64c1aaaaf5c08def8c4661db2ceb3
|
[
"MIT"
] |
permissive
|
HarshCasper/Rotten-Scripts
|
adb9d9b707958f0353f7f7dda44f406da123e64a
|
31fd3fb1233f39ea2252a7a44160ff8a2140f7bd
|
refs/heads/master
| 2023-08-27T20:10:27.180869
| 2023-05-07T19:14:31
| 2023-07-25T11:59:06
| 240,786,294
| 1,474
| 754
|
MIT
| 2023-07-25T11:59:08
| 2020-02-15T20:53:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,355
|
py
|
human_detector.py
|
import cv2
import imutils
import argparse
HOGCV = cv2.HOGDescriptor()
HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
def detect(frame):
bounding_box_cordinates, weights = HOGCV.detectMultiScale(
frame, winStride=(4, 4), padding=(8, 8), scale=1.03
)
person = 1
for x, y, w, h in bounding_box_cordinates:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(
frame,
f"person {person}",
(x, y),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 0, 255),
1,
)
person += 1
cv2.putText(
frame,
"Status : Detecting ",
(40, 40),
cv2.FONT_HERSHEY_DUPLEX,
0.8,
(255, 0, 0),
2,
)
cv2.putText(
frame,
f"Total Persons : {person - 1}",
(40, 70),
cv2.FONT_HERSHEY_DUPLEX,
0.8,
(255, 0, 0),
2,
)
cv2.imshow("output", frame)
return frame
def humanDetector(args):
image_path = args["image"]
video_path = args["video"]
if str(args["camera"]) == "true":
camera = True
else:
camera = False
writer = None
if args["output"] is not None and image_path is None:
writer = cv2.VideoWriter(
args["output"], cv2.VideoWriter_fourcc(*"MJPG"), 10, (600, 600)
)
if camera:
print("[INFO] Opening Web Cam.")
detectByCamera(writer)
elif video_path is not None:
print("[INFO] Opening Video from path.")
detectByPathVideo(video_path, writer)
elif image_path is not None:
print("[INFO] Opening Image from path.")
detectByPathImage(image_path, args["output"])
def detectByCamera(writer):
print("Here")
video = cv2.VideoCapture(0)
print("Detecting people...")
while True:
check, frame = video.read()
frame = detect(frame)
if writer is not None:
writer.write(frame)
key = cv2.waitKey(1)
if key == ord("q"):
break
video.release()
cv2.destroyAllWindows()
def detectByPathVideo(path, writer):
video = cv2.VideoCapture(path)
check, frame = video.read()
if check is False:
print(
"Video Not Found. Please Enter a Valid Path (Full path of Video Should be Provided)."
)
return
print("Detecting people...")
while video.isOpened():
# check is True if reading was successful
check, frame = video.read()
if check:
frame = imutils.resize(frame, width=min(800, frame.shape[1]))
frame = detect(frame)
if writer is not None:
writer.write(frame)
key = cv2.waitKey(1)
if key == ord("q"):
break
else:
break
video.release()
cv2.destroyAllWindows()
def detectByPathImage(path, output_path):
cv2.namedWindow("output", cv2.WINDOW_NORMAL)
image = cv2.imread(path)
image = imutils.resize(image, width=min(1800, image.shape[1]))
result_image = detect(image)
if output_path is not None:
cv2.imwrite(output_path, result_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def argsParser():
arg_parse = argparse.ArgumentParser()
arg_parse.add_argument("-v", "--video", default=None, help="path to Video File ")
arg_parse.add_argument("-i", "--image", default=None, help="path to Image File ")
arg_parse.add_argument(
"-c", "--camera", default=False, help="Set true if you want to use the camera."
)
arg_parse.add_argument(
"-o", "--output", type=str, help="path to optional output video file"
)
args = vars(arg_parse.parse_args())
return args
if __name__ == "__main__":
HOGCV = cv2.HOGDescriptor()
HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
args = argsParser()
humanDetector(args)
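# Editor's note: illustrative invocations derived from argsParser() above; the
# file names are hypothetical.
# python human_detector.py -c true                  # detect people from the webcam
# python human_detector.py -v walk.mp4 -o out.avi   # detect people in a video file
# python human_detector.py -i people.jpg -o out.jpg # detect people in an image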
|
169acd2fe8a4ef1990fd5356bbf221d4fa238da7
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/2154. Keep Multiplying Found Values by Two/2155.py
|
e9c418a40e15cfc68b18de0fc5d70db4749e0b7a
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 178
|
py
|
2155.py
|
from typing import List
class Solution:
def findFinalValue(self, nums: List[int], original: int) -> int:
numsSet = set(nums)
while original in numsSet:
original *= 2
return original
|
46f763d2a8309c08b34b8d77f07f140c0ddd3d25
|
be240c2147e7e1456b934d40d3fe47115e443bc3
|
/meta_learning/meta_example.py
|
4b2daa9796c3ec9522c546f1c7a3233fa9b2d328
|
[
"Apache-2.0"
] |
permissive
|
google-research/tensor2robot
|
811c416663a505ec110759dca0c9b31c7eb86720
|
f93f378db3183cceaad1b96828b199cf40cad606
|
refs/heads/master
| 2023-09-01T07:31:43.167332
| 2023-07-31T17:19:19
| 2023-07-31T17:20:07
| 186,457,418
| 539
| 104
|
Apache-2.0
| 2023-07-12T09:25:12
| 2019-05-13T16:28:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,520
|
py
|
meta_example.py
|
# coding=utf-8
# Copyright 2023 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility function for dealing with meta-examples.
"""
from typing import List, Union
import six
import tensorflow.compat.v1 as tf # tf
Example = Union[tf.train.Example, tf.train.SequenceExample]
def make_meta_example(
condition_examples,
inference_examples,
):
"""Creates a single MetaExample from train_examples and val_examples."""
if isinstance(condition_examples[0], tf.train.Example):
meta_example = tf.train.Example()
append_fn = append_example
else:
meta_example = tf.train.SequenceExample()
append_fn = append_sequence_example
for i, train_example in enumerate(condition_examples):
append_fn(meta_example, train_example, 'condition_ep{:d}'.format(i))
for i, val_example in enumerate(inference_examples):
append_fn(meta_example, val_example, 'inference_ep{:d}'.format(i))
return meta_example
def append_example(example, ep_example, prefix):
"""Add episode Example to Meta TFExample with a prefix."""
context_feature_map = example.features.feature
for key, feature in six.iteritems(ep_example.features.feature):
context_feature_map[six.ensure_str(prefix) + '/' +
six.ensure_str(key)].CopyFrom(feature)
def append_sequence_example(meta_example, ep_example, prefix):
"""Add episode SequenceExample to the Meta SequenceExample with a prefix."""
context_feature_map = meta_example.context.feature
# Append context features.
for key, feature in six.iteritems(ep_example.context.feature):
context_feature_map[six.ensure_str(prefix) + '/' +
six.ensure_str(key)].CopyFrom(feature)
# Append Sequential features.
sequential_feature_map = meta_example.feature_lists.feature_list
for key, feature_list in six.iteritems(ep_example.feature_lists.feature_list):
sequential_feature_map[six.ensure_str(prefix) + '/' +
six.ensure_str(key)].CopyFrom(feature_list)
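# --- Editor's usage sketch (not part of the original module) -----------------
# Builds two toy `tf.train.Example`s and merges them into one meta-example; the
# feature name 'obs' and its values are hypothetical.
def _make_meta_example_sketch():
  def _toy_example(value):
    example = tf.train.Example()
    example.features.feature['obs'].float_list.value.append(value)
    return example
  meta = make_meta_example(
      condition_examples=[_toy_example(1.0)],
      inference_examples=[_toy_example(2.0)])
  # The merged feature keys are prefixed per episode, e.g. 'condition_ep0/obs'
  # and 'inference_ep0/obs'.
  return sorted(meta.features.feature.keys())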
|
c9a1601113afd6508cfcd3e6e4bb2d65e0895c9f
|
967968e56ec17a2ee641af84cfca669c1d16a6f1
|
/tests/ad/directory/test_directory_schema.py
|
73a9c315af2c992c281465e0163cb00363c1d019
|
[
"MIT"
] |
permissive
|
tenable/pyTenable
|
72108c2564682e65cba181ded6ef6a9c990ef004
|
4e31049891f55016168b14ae30d332a965523640
|
refs/heads/master
| 2023-08-30T23:26:33.161062
| 2023-08-08T04:39:04
| 2023-08-08T04:39:04
| 114,689,090
| 300
| 211
|
MIT
| 2023-08-08T04:39:05
| 2017-12-18T21:23:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,308
|
py
|
test_directory_schema.py
|
'''test directory schema'''
import pytest
from marshmallow import INCLUDE, ValidationError
from tenable.ad.directories.schema import DirectorySchema
@pytest.fixture
def directory_schema():
return {'infrastructureId': 1,
'id': 1,
'name': 'test',
'ip': '172.32.68.1',
'dns': 'company.tld',
'type': 'type',
'ldapPort': 321,
'globalCatalogPort': 0,
'smbPort': 0
}
def test_directory_schema(directory_schema):
'''
tests the directory schema
'''
test_response = {'dns': 'company.tld',
'global_catalog_port': 3268,
'id': 13,
'infrastructure_id': 2,
'ip': '172.16.0.1',
'ldapInitialized': None,
'ldap_port': 389,
'name': 'dheeraj2',
'smb_port': 445,
'sysvolInitialized': None,
'type': 'type'
}
schema = DirectorySchema()
assert test_response['dns'] == \
schema.dump(schema.load(directory_schema))['dns']
with pytest.raises(ValidationError):
directory_schema['new_val'] = 'something'
schema.load(directory_schema)
|
328f3d26be985dae05587f625ddb3f4d56b477a5
|
017b1261bac4a6ed7e613474f328239188366491
|
/src/rpdk/core/contract/type_configuration.py
|
209b2574f8e1c9fed8c6ec63f7f9fbd87e3b04cd
|
[
"Apache-2.0"
] |
permissive
|
aws-cloudformation/cloudformation-cli
|
bd4834bfe8b39c9fc926f9c77710b2c6d1b167c1
|
75bed278bcec94739e4c132e2b3d88a4fddb5bf4
|
refs/heads/master
| 2023-08-07T18:24:56.153849
| 2023-07-31T22:54:23
| 2023-07-31T22:54:23
| 143,929,054
| 270
| 164
|
Apache-2.0
| 2023-08-31T16:06:04
| 2018-08-07T21:33:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,013
|
py
|
type_configuration.py
|
import json
import logging
import os
from rpdk.core.exceptions import InvalidProjectError
LOG = logging.getLogger(__name__)
class TypeConfiguration:
TYPE_CONFIGURATION = None
@staticmethod
def get_type_configuration(typeconfigloc):
if typeconfigloc:
type_config_file_path = typeconfigloc
else:
type_config_file_path = "~/.cfn-cli/typeConfiguration.json"
LOG.debug(
"Loading type configuration setting file at %s",
type_config_file_path,
)
if TypeConfiguration.TYPE_CONFIGURATION is None:
try:
with open(
os.path.expanduser(type_config_file_path), encoding="utf-8"
) as f:
TypeConfiguration.TYPE_CONFIGURATION = json.load(f)
except json.JSONDecodeError as json_decode_error:
LOG.debug(
"Type configuration file '%s' is invalid",
type_config_file_path,
)
raise InvalidProjectError(
"Type configuration file '%s' is invalid" % type_config_file_path
) from json_decode_error
except FileNotFoundError:
LOG.debug(
"Type configuration file '%s' not Found, do nothing",
type_config_file_path,
)
return TypeConfiguration.TYPE_CONFIGURATION
@staticmethod
def get_hook_configuration(typeconfigloc):
type_configuration = TypeConfiguration.get_type_configuration(typeconfigloc)
if type_configuration:
try:
return type_configuration.get("CloudFormationConfiguration", {})[
"HookConfiguration"
]["Properties"]
except KeyError as e:
LOG.warning("Hook configuration is invalid")
raise InvalidProjectError("Hook configuration is invalid") from e
return type_configuration
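# --- Editor's illustrative sketch (not part of the original module) ----------
# get_hook_configuration() walks CloudFormationConfiguration ->
# HookConfiguration -> Properties, so a minimal typeConfiguration.json that
# satisfies it looks roughly like the following; the property name and value
# are hypothetical placeholders.
_EXAMPLE_TYPE_CONFIGURATION_JSON = """
{
    "CloudFormationConfiguration": {
        "HookConfiguration": {
            "Properties": {
                "endpoint": "https://hook.example.invalid"
            }
        }
    }
}
"""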
|
6557f38aefd0ceb4bf4a794a320b65546b9a11ee
|
ed865aed525556fd7aa5ac5a024af720de8438e3
|
/cli/src/pcluster/cli/commands/configure/subnet_computation.py
|
31c04e4831ffc6640ce67c8c18e26ea306df7b4c
|
[
"Python-2.0",
"GPL-1.0-or-later",
"MPL-2.0",
"MIT",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"MIT-0",
"BSD-2-Clause"
] |
permissive
|
aws/aws-parallelcluster
|
7bb33a6e175168f63a1e0acb1a9a7e9cbc405eff
|
a213978a09ea7fc80855bf55c539861ea95259f9
|
refs/heads/develop
| 2023-09-05T15:12:18.533270
| 2023-09-05T14:38:59
| 2023-09-05T14:38:59
| 19,718,034
| 520
| 226
|
Apache-2.0
| 2023-09-14T15:56:30
| 2014-05-12T22:42:19
|
Python
|
UTF-8
|
Python
| false
| false
| 5,959
|
py
|
subnet_computation.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
# the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
from ipaddress import ip_address, ip_network, summarize_address_range
def unicode(ip_addr):
return "{0}".format(ip_addr)
def get_subnet_cidr(vpc_cidr, occupied_cidr, min_subnet_size):
"""
    Decide a suitable CIDR for the ParallelCluster compute fleet subnet.
:param vpc_cidr: the vpc_cidr in which the suitable subnet should be
:param occupied_cidr: a list of cidr of the already occupied subnets in the vpc
:param min_subnet_size: the minimum size of the subnet
    :return: the CIDR of a suitable subnet, or None if no suitable subnet fits
"""
default_target_size = 4000
target_size = max(default_target_size, 2 * min_subnet_size)
cidr = evaluate_cidr(vpc_cidr, occupied_cidr, target_size)
while cidr is None:
if target_size < min_subnet_size:
return None
target_size = target_size // 2
cidr = evaluate_cidr(vpc_cidr, occupied_cidr, target_size)
return cidr
def evaluate_cidr(vpc_cidr, occupied_cidrs, target_size):
"""
Decide the first smallest suitable CIDR for a subnet with size >= target_size.
    In order to find space in between all the subnets we have, we first make every occupied subnet at least as big as
    the one we are targeting: if a subnet is smaller than target_size, we expand it to the larger subnet that contains
    it.
    After that, we sort the subnets by cidr and then look for space between the end of one subnet and the beginning of
    the next, not forgetting to also look for space at the beginning and end of the VPC range. (A worked example is
    sketched at the end of this module.)
:param vpc_cidr: the vpc_cidr in which the suitable subnet should be
:param occupied_cidrs: a list of cidr of the already occupied subnets in the vpc
:param target_size: the minimum target size of the subnet
:return: the suitable CIDR if found, else None
"""
subnet_size, subnet_bitmask = _evaluate_subnet_size(target_size)
vpc_begin_address_decimal, vpc_end_address_decimal = _get_cidr_limits_as_decimal(vpc_cidr)
# if we do not have enough space
if vpc_end_address_decimal - vpc_begin_address_decimal + 1 < subnet_size:
return None
# if we have space and no occupied cidr
if not occupied_cidrs:
return _decimal_ip_limits_to_cidr(vpc_begin_address_decimal, vpc_begin_address_decimal + subnet_size)
lower_limit_index = 0
upper_limit_index = 1
# Get subnets limits
occupied_cidrs = _align_subnet_cidrs(occupied_cidrs, subnet_bitmask)
subnets_limits = [_get_cidr_limits_as_decimal(subnet) for subnet in occupied_cidrs]
subnets_limits.sort(key=lambda x: x[upper_limit_index])
# Looking at space between occupied cidrs
resulting_cidr = None
subnets_limits.append((vpc_end_address_decimal, vpc_end_address_decimal))
for index, subnet_limit in enumerate(subnets_limits):
current_lower_limit = subnet_limit[lower_limit_index]
# In the first case, vpc_begin_address is free, whereas upper_limit_index is not
previous_upper_limit = (
subnets_limits[index - 1][upper_limit_index] if index > 0 else vpc_begin_address_decimal - 1
)
if current_lower_limit - previous_upper_limit > subnet_size:
resulting_cidr = _decimal_ip_limits_to_cidr(previous_upper_limit + 1, previous_upper_limit + subnet_size)
break
return resulting_cidr
def _align_subnet_cidrs(occupied_cidr, target_bitmask):
"""Transform the subnet cidr that are smaller than the minimum bitmask to bigger ones."""
correct_cidrs = set()
for subnet_cidr in occupied_cidr:
if _get_bitmask(subnet_cidr) > target_bitmask:
correct_cidrs.add(expand_cidr(subnet_cidr, target_bitmask))
else:
correct_cidrs.add(subnet_cidr)
return list(correct_cidrs)
def _get_bitmask(cidr):
return int(cidr.split("/")[1])
def _evaluate_subnet_size(target_size):
aws_reserved_ip = 6
min_bitmask = 28
subnet_bitmask = min(32 - ((next_power_of_2(target_size + aws_reserved_ip) - 1).bit_length()), min_bitmask)
subnet_size = 2 ** (32 - subnet_bitmask)
return subnet_size, subnet_bitmask
def _decimal_ip_limits_to_cidr(begin, end):
"""Given begin and end ip (as decimals number), return the CIDR that begins with begin ip and ends with end ip."""
return str(next(summarize_address_range(ip_address(begin), ip_address(end))))
def _get_cidr_limits_as_decimal(cidr):
"""
Given a cidr, return the begin ip and the end ip as decimal.
    For example, given the cidr 10.0.0.0/24, it will return 167772160, which is 10.0.0.0, and 167772415,
    which is 10.0.0.255.
    :param cidr: the cidr to convert
:return: a tuple (decimal begin address, decimal end address)
"""
address = ip_network(unicode(cidr))
return _ip_to_decimal(str(address[0])), _ip_to_decimal(str(address[-1]))
def _ip_to_decimal(ip_addr):
"""Transform an ip into its decimal representation."""
return int(ip_address(unicode(ip_addr)))
def expand_cidr(cidr, new_size):
"""
    Given a cidr, upgrade its netmask to new_size.
    :param cidr: the cidr to expand
:param new_size: the minimum bitmask required
"""
ip_addr = ip_network(unicode(cidr))
return str(ip_addr.supernet(new_prefix=new_size))
def next_power_of_2(number):
"""Given a number returns the following power of 2 of that number."""
return 1 if number == 0 else 2 ** (number - 1).bit_length()
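# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hedged example of how the sizing helpers above behave. The concrete
# numbers below are worked out from the formulas in this file, not taken from any
# external documentation.
if __name__ == "__main__":
    # next_power_of_2 rounds up to the nearest power of two.
    assert next_power_of_2(0) == 1
    assert next_power_of_2(5) == 8
    assert next_power_of_2(64) == 64
    # Requesting room for 50 hosts: 50 + 6 AWS-reserved addresses = 56, rounded up
    # to 64 addresses, i.e. a /26; the /28 cap only widens smaller requests.
    assert _evaluate_subnet_size(50) == (64, 26)
    # A tiny request is still widened to the /28 minimum (16 addresses).
    assert _evaluate_subnet_size(1) == (16, 28)
    # _get_bitmask simply reads the prefix length out of the CIDR string.
    assert _get_bitmask("10.0.0.8/29") == 29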
|
f9d1781f4ec4f0a7fd3e6d613f9256cc1e787695
|
f9e7d65cb784c01a0200145ba8d289afe41d4a56
|
/extra/tigertool/ecusb/stm32usb.py
|
875251329f362d814fb9fbcb679221c5e073f984
|
[
"BSD-3-Clause"
] |
permissive
|
FrameworkComputer/EmbeddedController
|
ad7086769e87d0a4179eae96a7c9ff5e383ff54e
|
f6d6b927eed71550d3475411cfc3e59abe5cef2a
|
refs/heads/hx20-hx30
| 2023-08-08T20:45:10.621169
| 2023-05-26T07:03:59
| 2023-05-26T07:03:59
| 447,021,040
| 846
| 48
|
BSD-3-Clause
| 2023-05-26T07:04:59
| 2022-01-12T00:11:14
|
C
|
UTF-8
|
Python
| false
| false
| 3,545
|
py
|
stm32usb.py
|
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Allows creation of an interface via stm32 usb."""
import usb
class SusbError(Exception):
"""Class for exceptions of Susb."""
def __init__(self, msg, value=0):
"""SusbError constructor.
Args:
msg: string, message describing error in detail
value: integer, value of error when non-zero status returned. Default=0
"""
super(SusbError, self).__init__(msg, value)
self.msg = msg
self.value = value
class Susb(object):
"""Provide stm32 USB functionality.
Instance Variables:
_read_ep: pyUSB read endpoint for this interface
_write_ep: pyUSB write endpoint for this interface
"""
READ_ENDPOINT = 0x81
WRITE_ENDPOINT = 0x1
TIMEOUT_MS = 100
def __init__(self, vendor=0x18d1,
product=0x5027, interface=1, serialname=None, logger=None):
"""Susb constructor.
Discovers and connects to stm32 USB endpoints.
Args:
vendor: usb vendor id of stm32 device.
product: usb product id of stm32 device.
interface: interface number ( 1 - 4 ) of stm32 device to use.
serialname: string of device serialname.
logger: none
Raises:
SusbError: An error accessing Susb object
"""
self._vendor = vendor
self._product = product
self._interface = interface
self._serialname = serialname
self._find_device()
def _find_device(self):
"""Set up the usb endpoint"""
# Find the stm32.
dev_g = usb.core.find(idVendor=self._vendor, idProduct=self._product,
find_all=True)
dev_list = list(dev_g)
if not dev_list:
raise SusbError('USB device not found')
# Check if we have multiple stm32s and we've specified the serial.
dev = None
if self._serialname:
for d in dev_list:
dev_serial = usb.util.get_string(d, d.iSerialNumber)
if dev_serial == self._serialname:
dev = d
break
if dev is None:
raise SusbError('USB device(%s) not found' % self._serialname)
else:
try:
dev = dev_list[0]
except IndexError:
raise SusbError('USB device %04x:%04x not found' % (
self._vendor, self._product))
# If we can't set configuration, it's already been set.
try:
dev.set_configuration()
except usb.core.USBError:
pass
self._dev = dev
# Get an endpoint instance.
cfg = dev.get_active_configuration()
intf = usb.util.find_descriptor(cfg, bInterfaceNumber=self._interface)
self._intf = intf
if not intf:
raise SusbError('Interface %04x:%04x - 0x%x not found' % (
self._vendor, self._product, self._interface))
# Detach raiden.ko if it is loaded. CCD endpoints support either a kernel
# module driver that produces a ttyUSB, or direct endpoint access, but
# can't do both at the same time.
if dev.is_kernel_driver_active(intf.bInterfaceNumber) is True:
dev.detach_kernel_driver(intf.bInterfaceNumber)
read_ep_number = intf.bInterfaceNumber + self.READ_ENDPOINT
read_ep = usb.util.find_descriptor(intf, bEndpointAddress=read_ep_number)
self._read_ep = read_ep
write_ep_number = intf.bInterfaceNumber + self.WRITE_ENDPOINT
write_ep = usb.util.find_descriptor(intf, bEndpointAddress=write_ep_number)
self._write_ep = write_ep
def close(self):
usb.util.dispose_resources(self._dev)
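# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hedged example of opening an attached stm32 device and issuing one
# command over the raw endpoints. It assumes a device with the default VID:PID
# 18d1:5027 is plugged in and that interface 1 is a console-style interface; the
# "version" command string is an assumption used purely for illustration.
if __name__ == '__main__':
    susb = Susb(vendor=0x18d1, product=0x5027, interface=1)
    try:
        # The class only exposes the raw pyUSB endpoints, so write/read them directly.
        susb._write_ep.write(b'version\n', susb.TIMEOUT_MS)
        data = susb._read_ep.read(64, susb.TIMEOUT_MS)
        print(''.join(chr(c) for c in data))
    finally:
        susb.close()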
|
3c5befea3657e8a159ab2f0288fae6e2e677527f
|
6416b746ee71d897789eab1e450000831674dbd0
|
/tests/unit/hpo/test_resource_manager.py
|
46beb4ac0662e98fb82a40c05ac2e6956bcd568e
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/training_extensions
|
c921f83ad52311af96ff45ae0b88d0aecddd855b
|
80454808b38727e358e8b880043eeac0f18152fb
|
refs/heads/develop
| 2023-08-31T06:29:07.229339
| 2023-08-31T01:57:26
| 2023-08-31T01:57:26
| 154,843,614
| 397
| 230
|
Apache-2.0
| 2023-09-14T06:17:01
| 2018-10-26T14:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 6,268
|
py
|
test_resource_manager.py
|
import pytest
from otx.hpo.resource_manager import (
CPUResourceManager,
GPUResourceManager,
_remove_none_from_dict,
get_resource_manager,
)
from tests.test_suite.e2e_test_system import e2e_pytest_component
@pytest.fixture
def cpu_resource_manager():
return CPUResourceManager(num_parallel_trial=4)
@pytest.fixture
def gpu_resource_manager():
return GPUResourceManager(num_gpu_for_single_trial=1, available_gpu="0,1,2,3")
class TestCPUResourceManager:
@e2e_pytest_component
@pytest.mark.parametrize("num_parallel_trial", [1, 5, 10])
def test_init(self, num_parallel_trial):
CPUResourceManager(num_parallel_trial)
@e2e_pytest_component
@pytest.mark.parametrize("num_parallel_trial", [-1, 0])
def test_init_with_not_positive_num_parallel_trial(self, num_parallel_trial):
with pytest.raises(ValueError):
CPUResourceManager(num_parallel_trial)
@e2e_pytest_component
def test_reserve_resource(self, cpu_resource_manager):
num_parallel_trial = cpu_resource_manager._num_parallel_trial
for i in range(num_parallel_trial):
assert cpu_resource_manager.reserve_resource(i) == {}
for i in range(10):
assert cpu_resource_manager.reserve_resource(i) is None
@e2e_pytest_component
def test_reserve_resource_reserved_already(self, cpu_resource_manager):
cpu_resource_manager.reserve_resource(0)
with pytest.raises(RuntimeError):
cpu_resource_manager.reserve_resource(0)
@e2e_pytest_component
def test_release_resource(self, cpu_resource_manager):
cpu_resource_manager.reserve_resource(1)
cpu_resource_manager.release_resource(1)
@e2e_pytest_component
def test_release_unreserved_resource(self, cpu_resource_manager):
cpu_resource_manager.release_resource(1)
@e2e_pytest_component
def test_have_available_resource(self, cpu_resource_manager):
num_parallel_trial = cpu_resource_manager._num_parallel_trial
for i in range(num_parallel_trial):
assert cpu_resource_manager.have_available_resource()
cpu_resource_manager.reserve_resource(i)
assert not cpu_resource_manager.have_available_resource()
class TestGPUResourceManager:
@e2e_pytest_component
def test_init(self):
GPUResourceManager(num_gpu_for_single_trial=1, available_gpu="0,1,2")
@e2e_pytest_component
@pytest.mark.parametrize("num_gpu_for_single_trial", [-1, 0])
def test_init_not_positive_num_gpu(self, num_gpu_for_single_trial):
with pytest.raises(ValueError):
GPUResourceManager(num_gpu_for_single_trial=num_gpu_for_single_trial)
@e2e_pytest_component
@pytest.mark.parametrize("available_gpu", [",", "a,b", "0,a", ""])
def test_init_wrong_available_gpu_value(self, available_gpu):
with pytest.raises(ValueError):
GPUResourceManager(available_gpu=available_gpu)
@e2e_pytest_component
def test_reserve_resource(self):
num_gpu_for_single_trial = 2
num_gpus = 8
max_parallel = num_gpus // num_gpu_for_single_trial
gpu_resource_manager = GPUResourceManager(
num_gpu_for_single_trial=num_gpu_for_single_trial,
available_gpu=",".join([str(val) for val in range(num_gpus)]),
)
num_gpus = len(gpu_resource_manager._available_gpu)
for i in range(max_parallel):
env = gpu_resource_manager.reserve_resource(i)
assert env is not None
assert "CUDA_VISIBLE_DEVICES" in env
assert len(env["CUDA_VISIBLE_DEVICES"].split(",")) == num_gpu_for_single_trial
for i in range(max_parallel, max_parallel + 10):
assert gpu_resource_manager.reserve_resource(i) is None
@e2e_pytest_component
def test_reserve_resource_reserved_already(self, gpu_resource_manager):
gpu_resource_manager.reserve_resource(0)
with pytest.raises(RuntimeError):
gpu_resource_manager.reserve_resource(0)
@e2e_pytest_component
def test_release_resource(self, gpu_resource_manager):
gpu_resource_manager.reserve_resource(1)
gpu_resource_manager.release_resource(1)
@e2e_pytest_component
def test_release_unreserved_resource(self, gpu_resource_manager):
gpu_resource_manager.release_resource(1)
@e2e_pytest_component
def test_have_available_resource(self):
num_gpu_for_single_trial = 2
num_gpus = 8
max_parallel = num_gpus // num_gpu_for_single_trial
gpu_resource_manager = GPUResourceManager(
num_gpu_for_single_trial=num_gpu_for_single_trial,
available_gpu=",".join([str(val) for val in range(num_gpus)]),
)
num_gpus = len(gpu_resource_manager._available_gpu)
for i in range(max_parallel):
assert gpu_resource_manager.have_available_resource()
gpu_resource_manager.reserve_resource(i)
for i in range(max_parallel, max_parallel + 10):
assert not gpu_resource_manager.have_available_resource()
@e2e_pytest_component
def test_get_resource_manager_cpu():
manager = get_resource_manager(resource_type="cpu", num_parallel_trial=4)
assert isinstance(manager, CPUResourceManager)
@e2e_pytest_component
def test_get_resource_manager_gpu():
num_gpu_for_single_trial = 1
available_gpu = "0,1,2,3"
manager = get_resource_manager(
resource_type="gpu", num_gpu_for_single_trial=num_gpu_for_single_trial, available_gpu=available_gpu
)
assert isinstance(manager, GPUResourceManager)
@e2e_pytest_component
def test_get_resource_manager_wrong_resource_type():
with pytest.raises(ValueError):
get_resource_manager("wrong")
@e2e_pytest_component
def test_get_resource_manager_gpu_without_available_gpu(mocker):
mock_is_available = mocker.patch("otx.hpo.resource_manager.torch.cuda.is_available")
mock_is_available.return_value = False
manager = get_resource_manager("gpu")
assert isinstance(manager, CPUResourceManager)
@e2e_pytest_component
def test_remove_none_from_dict():
some_dict = {"a": 1, "b": None}
ret = _remove_none_from_dict(some_dict)
assert ret == {"a": 1}
|
c8085d6e9a589badcfd443f96b8c6f781faee7f6
|
279f415dd1e06c594c6c87deda57e201c73c4542
|
/test/test_transformer_decode.py
|
5c92c019095b0644c99579eecb005634e5cfc862
|
[
"Apache-2.0"
] |
permissive
|
espnet/espnet
|
f7ba47271c1a6b1ed606dbbfb04a7f14220bb585
|
bcd20948db7846ee523443ef9fd78c7a1248c95e
|
refs/heads/master
| 2023-08-28T23:43:34.238336
| 2023-08-23T02:51:39
| 2023-08-23T02:51:39
| 114,054,873
| 7,242
| 2,244
|
Apache-2.0
| 2023-09-14T08:01:11
| 2017-12-13T00:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 4,486
|
py
|
test_transformer_decode.py
|
import numpy
import pytest
import torch
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
RTOL = 1e-4
@pytest.mark.parametrize("normalize_before", [True, False])
def test_decoder_cache(normalize_before):
adim = 4
odim = 5
decoder = Decoder(
odim=odim,
attention_dim=adim,
linear_units=3,
num_blocks=2,
normalize_before=normalize_before,
dropout_rate=0.0,
)
dlayer = decoder.decoders[0]
memory = torch.randn(2, 5, adim)
x = torch.randn(2, 5, adim) * 100
mask = subsequent_mask(x.shape[1]).unsqueeze(0)
prev_mask = mask[:, :-1, :-1]
decoder.eval()
with torch.no_grad():
# layer-level test
y = dlayer(x, mask, memory, None)[0]
cache = dlayer(x[:, :-1], prev_mask, memory, None)[0]
y_fast = dlayer(x, mask, memory, None, cache=cache)[0]
numpy.testing.assert_allclose(y.numpy(), y_fast.numpy(), rtol=RTOL)
# decoder-level test
x = torch.randint(0, odim, x.shape[:2])
y, _ = decoder.forward_one_step(x, mask, memory)
y_, cache = decoder.forward_one_step(
x[:, :-1], prev_mask, memory, cache=decoder.init_state(None)
)
y_fast, _ = decoder.forward_one_step(x, mask, memory, cache=cache)
numpy.testing.assert_allclose(y.numpy(), y_fast.numpy(), rtol=RTOL)
@pytest.mark.parametrize("normalize_before", [True, False])
def test_encoder_cache(normalize_before):
adim = 4
idim = 5
encoder = Encoder(
idim=idim,
attention_dim=adim,
linear_units=3,
num_blocks=2,
normalize_before=normalize_before,
dropout_rate=0.0,
input_layer="embed",
)
elayer = encoder.encoders[0]
x = torch.randn(2, 5, adim)
mask = subsequent_mask(x.shape[1]).unsqueeze(0)
prev_mask = mask[:, :-1, :-1]
encoder.eval()
with torch.no_grad():
# layer-level test
y = elayer(x, mask, None)[0]
cache = elayer(x[:, :-1], prev_mask, None)[0]
y_fast = elayer(x, mask, cache=cache)[0]
numpy.testing.assert_allclose(y.numpy(), y_fast.numpy(), rtol=RTOL)
# encoder-level test
x = torch.randint(0, idim, x.shape[:2])
y = encoder.forward_one_step(x, mask)[0]
y_, _, cache = encoder.forward_one_step(x[:, :-1], prev_mask)
y_fast, _, _ = encoder.forward_one_step(x, mask, cache=cache)
numpy.testing.assert_allclose(y.numpy(), y_fast.numpy(), rtol=RTOL)
if __name__ == "__main__":
# benchmark with synth dataset
from time import time
import matplotlib.pyplot as plt
adim = 4
odim = 5
model = "decoder"
if model == "decoder":
decoder = Decoder(
odim=odim,
attention_dim=adim,
linear_units=3,
num_blocks=2,
dropout_rate=0.0,
)
decoder.eval()
else:
encoder = Encoder(
idim=odim,
attention_dim=adim,
linear_units=3,
num_blocks=2,
dropout_rate=0.0,
input_layer="embed",
)
encoder.eval()
xlen = 100
xs = torch.randint(0, odim, (1, xlen))
memory = torch.randn(2, 500, adim)
mask = subsequent_mask(xlen).unsqueeze(0)
result = {"cached": [], "baseline": []}
n_avg = 10
for key, value in result.items():
cache = None
print(key)
for i in range(xlen):
x = xs[:, : i + 1]
m = mask[:, : i + 1, : i + 1]
start = time()
for _ in range(n_avg):
with torch.no_grad():
if key == "baseline":
cache = None
if model == "decoder":
y, new_cache = decoder.forward_one_step(
x, m, memory, cache=cache
)
else:
y, _, new_cache = encoder.forward_one_step(x, m, cache=cache)
if key == "cached":
cache = new_cache
dur = (time() - start) / n_avg
value.append(dur)
plt.plot(range(xlen), value, label=key)
plt.xlabel("hypothesis length")
plt.ylabel("average time [sec]")
plt.grid()
plt.legend()
plt.savefig(f"benchmark_{model}.png")
|
1887c43e839ae986c6d56e55ef873ff235c56b23
|
12f0bd77926127cdacc2452d6f9cfed91806b2fe
|
/idaes/models/properties/modular_properties/examples/HC_PR.py
|
7826a12f8b896e028092da65054094d471762882
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
IDAES/idaes-pse
|
e03d2583ae1ba968a7099f9f439fd8c3efa12904
|
deacf4c422bc9e50cb347e11a8cbfa0195bd4274
|
refs/heads/main
| 2023-08-16T19:13:00.355572
| 2023-08-04T04:19:29
| 2023-08-04T04:19:29
| 168,622,088
| 173
| 227
|
NOASSERTION
| 2023-09-11T16:04:55
| 2019-02-01T01:12:51
|
Python
|
UTF-8
|
Python
| false
| false
| 41,092
|
py
|
HC_PR.py
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
Hydrocarbon processing phase equilibrium package using Peng-Robinson EoS.
Example property package using the Generic Property Package Framework.
This example shows how to set up a property package to do hydrocarbon
processing phase equilibrium in the generic framework using Peng-Robinson
equation along with methods drawn from the pre-built IDAES property libraries.
The example includes a dictionary named configuration, which contains parameters
for calculating VLE phase equilibrium and properties for hydrocarbon processing.
"""
# Import Python libraries
import logging
# Import Pyomo units
from pyomo.environ import units as pyunits
# Import IDAES cores
from idaes.core import LiquidPhase, VaporPhase, Component, PhaseType as PT
from idaes.models.properties.modular_properties.state_definitions import FTPx
from idaes.models.properties.modular_properties.eos.ceos import Cubic, CubicType
from idaes.models.properties.modular_properties.phase_equil import SmoothVLE
from idaes.models.properties.modular_properties.phase_equil.bubble_dew import (
LogBubbleDew,
)
from idaes.models.properties.modular_properties.phase_equil.forms import log_fugacity
from idaes.models.properties.modular_properties.pure import Perrys
from idaes.models.properties.modular_properties.pure import RPP4
from idaes.models.properties.modular_properties.pure import RPP5
# Set up logger
_log = logging.getLogger(__name__)
# ---------------------------------------------------------------------
# Configuration dictionary for a Peng-Robinson alkene system
# Data Sources:
# [1] The Properties of Gases and Liquids (1987)
# 4th edition, Chemical Engineering Series - Robert C. Reid
# [2] Perry's Chemical Engineers' Handbook 7th Ed.
# Converted to J/mol.K, mol/m^3
# [3] Engineering Toolbox, https://www.engineeringtoolbox.com
# Retrieved 15th September, 2020
# [4] The Properties of Gases and Liquids (2001)
# 5th edition, Chemical Engineering Series - Robert C. Reid
configuration = {
# Specifying components
"components": {
"hydrogen": {
"type": Component,
"elemental_composition": {"H": 2, "C": 0},
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"valid_phase_types": PT.vaporPhase,
"parameter_data": {
"mw": (2.016e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (12.9e5, pyunits.Pa), # [1]
"temperature_crit": (33.2, pyunits.K), # [1]
"omega": -0.218,
"cp_mol_ig_comp_coeff": {
"A": (2.714e1, pyunits.J / pyunits.mol / pyunits.K), # [1]
"B": (9.274e-3, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (-1.981e-5, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (7.645e-9, pyunits.J / pyunits.mol / pyunits.K**4),
},
"entr_mol_form_vap_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (0.0, pyunits.J / pyunits.mol),
},
},
"methane": {
"type": Component,
"elemental_composition": {"H": 4, "C": 1},
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"valid_phase_types": PT.vaporPhase,
"parameter_data": {
"mw": (16.043e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (46e5, pyunits.Pa), # [1]
"temperature_crit": (190.4, pyunits.K), # [1]
"omega": 0.011,
"cp_mol_ig_comp_coeff": {
"A": (1.925e1, pyunits.J / pyunits.mol / pyunits.K),
"B": (5.213e-2, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (1.197e-5, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (-1.132e-8, pyunits.J / pyunits.mol / pyunits.K**4),
},
"entr_mol_form_vap_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (-74600, pyunits.J / pyunits.mol),
},
},
"ethane": {
"type": Component,
"elemental_composition": {"H": 6, "C": 2},
"dens_mol_liq_comp": Perrys,
"enth_mol_liq_comp": Perrys,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP5,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (30.070e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (48.8e5, pyunits.Pa), # [1]
"temperature_crit": (305.4, pyunits.K), # [1]
"omega": 0.099,
"dens_mol_liq_comp_coeff": {
"eqn_type": 1,
"1": (1.9122, pyunits.kmol * pyunits.m**-3), # [2] pg. 2-98
"2": (0.27937, None),
"3": (305.32, pyunits.K),
"4": (0.29187, None),
},
"cp_mol_ig_comp_coeff": {
"A": (5.409e0, pyunits.J / pyunits.mol / pyunits.K),
"B": (1.781e-1, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (-6.938e-5, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (8.713e-8, pyunits.J / pyunits.mol / pyunits.K**4),
},
"cp_mol_liq_comp_coeff": {
"1": (
90.9,
pyunits.J * pyunits.kmol**-1 * pyunits.K**-1,
), # [2]
"2": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-2),
"3": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-3),
"4": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-4),
"5": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-5),
},
"enth_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_vap_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (-83800, pyunits.J / pyunits.mol), # [3]
"pressure_sat_comp_coeff": {
"A": (3.95405, None), # [4]
"B": (663.720, pyunits.K),
"C": (256.681, pyunits.K),
},
},
},
"propane": {
"type": Component,
"elemental_composition": {"H": 8, "C": 3},
"dens_mol_liq_comp": Perrys,
"enth_mol_liq_comp": Perrys,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP5,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (44.094e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (42.5e5, pyunits.Pa), # [1]
"temperature_crit": (369.8, pyunits.K), # [1]
"omega": 0.144,
"dens_mol_liq_comp_coeff": {
"eqn_type": 1,
"1": (1.3757, pyunits.kmol * pyunits.m**-3), # [2] pg. 2-98
"2": (0.274253, None),
"3": (369.83, pyunits.K),
"4": (0.29147, None),
},
"cp_mol_ig_comp_coeff": {
"A": (-4.224e0, pyunits.J / pyunits.mol / pyunits.K),
"B": (3.063e-1, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (-1.586e-4, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (3.215e-8, pyunits.J / pyunits.mol / pyunits.K**4),
},
"cp_mol_liq_comp_coeff": {
"1": (
112.71,
pyunits.J * pyunits.kmol**-1 * pyunits.K**-1,
), # [2]
"2": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-2),
"3": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-3),
"4": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-4),
"5": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-5),
},
"enth_mol_form_liq_comp_ref": (0.0, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_vap_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (-104700, pyunits.J / pyunits.mol), # [3]
"pressure_sat_comp_coeff": {
"A": (3.92828, None), # [4]
"B": (803.9970, pyunits.K),
"C": (247.040, pyunits.K),
},
},
},
"nbutane": {
"type": Component,
"elemental_composition": {"H": 10, "C": 4},
"dens_mol_liq_comp": Perrys,
"enth_mol_liq_comp": Perrys,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP5,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (58.124e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (38.0e5, pyunits.Pa), # [1]
"temperature_crit": (425.2, pyunits.K), # [1]
"omega": 0.199,
"dens_mol_liq_comp_coeff": {
"eqn_type": 1,
"1": (1.0677, pyunits.kmol * pyunits.m**-3), # [2] pg. 2-98
"2": (0.27188, None),
"3": (425.12, pyunits.K),
"4": (0.28688, None),
},
"cp_mol_ig_comp_coeff": {
"A": (9.487e0, pyunits.J / pyunits.mol / pyunits.K),
"B": (3.313e-1, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (-1.108e-4, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (-2.822e-9, pyunits.J / pyunits.mol / pyunits.K**4),
},
"cp_mol_liq_comp_coeff": {
"1": (
132.42,
pyunits.J * pyunits.kmol**-1 * pyunits.K**-1,
), # [2]
"2": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-2),
"3": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-3),
"4": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-4),
"5": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-5),
},
"enth_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_vap_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (-125600, pyunits.J / pyunits.mol), # [3]
"pressure_sat_comp_coeff": {
"A": (3.93266, None), # [4]
"B": (935.7730, pyunits.K),
"C": (238.789, pyunits.K),
},
},
},
"ibutane": {
"type": Component,
"elemental_composition": {"H": 10, "C": 4},
"dens_mol_liq_comp": Perrys,
"enth_mol_liq_comp": Perrys,
"entr_mol_liq_comp": Perrys,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP5,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (58.124e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (36.5e5, pyunits.Pa), # [1]
"temperature_crit": (408.2, pyunits.K), # [1]
"omega": 0.183,
"dens_mol_liq_comp_coeff": {
"eqn_type": 1,
"1": (1.0463, pyunits.kmol * pyunits.m**-3), # [2] pg. 2-98
"2": (0.27294, None),
"3": (408.14, pyunits.K),
"4": (0.27301, None),
},
"cp_mol_ig_comp_coeff": {
"A": (-1.390e0, pyunits.J / pyunits.mol / pyunits.K),
"B": (3.847e-1, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (-1.846e-4, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (2.895e-8, pyunits.J / pyunits.mol / pyunits.K**4),
},
"cp_mol_liq_comp_coeff": {
"1": (
1.7237e5,
pyunits.J * pyunits.kmol**-1 * pyunits.K**-1,
), # [2]
"2": (-1.7839e3, pyunits.J * pyunits.kmol**-1 * pyunits.K**-2),
"3": (1.4759e1, pyunits.J * pyunits.kmol**-1 * pyunits.K**-3),
"4": (-4.7909e-2, pyunits.J * pyunits.kmol**-1 * pyunits.K**-4),
"5": (5.8050e-5, pyunits.J * pyunits.kmol**-1 * pyunits.K**-5),
},
"enth_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (-135600, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_vap_comp_ref": (
310.1,
pyunits.J / pyunits.mol / pyunits.K,
), # [3]
"pressure_sat_comp_coeff": {
"A": (4.00272, None), # [4]
"B": (947.5400, pyunits.K),
"C": (248.870, pyunits.K),
},
},
},
"ethylene": {
"type": Component,
"elemental_composition": {"H": 4, "C": 2},
"dens_mol_liq_comp": Perrys,
"enth_mol_liq_comp": Perrys,
"entr_mol_liq_comp": Perrys,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP5,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (28.054e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (50.5e5, pyunits.Pa), # [1]
"temperature_crit": (282.4, pyunits.K), # [1]
"omega": 0.089,
"dens_mol_liq_comp_coeff": {
"eqn_type": 1,
"1": (2.0961, pyunits.kmol * pyunits.m**-3), # [2] pg. 2-98
"2": (0.27657, None),
"3": (282.34, pyunits.K),
"4": (0.29147, None),
},
"cp_mol_ig_comp_coeff": {
"A": (3.806e0, pyunits.J / pyunits.mol / pyunits.K),
"B": (1.566e-1, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (-8.348e-5, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (1.755e-8, pyunits.J / pyunits.mol / pyunits.K**4),
},
"cp_mol_liq_comp_coeff": {
"1": (
2.4739e5,
pyunits.J * pyunits.kmol**-1 * pyunits.K**-1,
), # [2]
"2": (-4.4280e3, pyunits.J * pyunits.kmol**-1 * pyunits.K**-2),
"3": (4.0936e1, pyunits.J * pyunits.kmol**-1 * pyunits.K**-3),
"4": (-1.6970e-1, pyunits.J * pyunits.kmol**-1 * pyunits.K**-4),
"5": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-5),
},
"enth_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (
52283.264,
pyunits.J / pyunits.mol,
), # [3]
"entr_mol_form_vap_comp_ref": (
219.5,
pyunits.J / pyunits.mol / pyunits.K,
), # [3]
"pressure_sat_comp_coeff": {
"A": (3.67374, None), # [4]
"B": (528.6700, pyunits.K),
"C": (228.790, pyunits.K),
},
},
},
"propene": {
"type": Component,
"elemental_composition": {"H": 6, "C": 3},
"dens_mol_liq_comp": Perrys,
"enth_mol_liq_comp": Perrys,
"entr_mol_liq_comp": Perrys,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP5,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (42.081e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (46.2e5, pyunits.Pa), # [1]
"temperature_crit": (365.0, pyunits.K), # [1]
"omega": 0.144,
"dens_mol_liq_comp_coeff": {
"eqn_type": 1,
"1": (1.4094, pyunits.kmol * pyunits.m**-3), # [2] pg. 2-98
"2": (0.26465, None),
"3": (365.57, pyunits.K),
"4": (0.295, None),
},
"cp_mol_ig_comp_coeff": {
"A": (3.710e0, pyunits.J / pyunits.mol / pyunits.K),
"B": (2.345e-1, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (-1.160e-4, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (2.205e-8, pyunits.J / pyunits.mol / pyunits.K**4),
},
"cp_mol_liq_comp_coeff": {
"1": (
7.1720e5,
pyunits.J * pyunits.kmol**-1 * pyunits.K**-1,
), # [2]
"2": (-3.8632e2, pyunits.J * pyunits.kmol**-1 * pyunits.K**-2),
"3": (1.2348e0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-3),
"4": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-4),
"5": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-5),
},
"enth_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (
20413.736,
pyunits.J / pyunits.mol,
), # [3]
"entr_mol_form_vap_comp_ref": (
266.9,
pyunits.J / pyunits.mol / pyunits.K,
), # [3]
"pressure_sat_comp_coeff": {
"A": (3.95606, None), # [4]
"B": (789.6200, pyunits.K),
"C": (247.580, pyunits.K),
},
},
},
"butene": {
"type": Component,
"elemental_composition": {"H": 8, "C": 4},
"dens_mol_liq_comp": Perrys,
"enth_mol_liq_comp": Perrys,
"entr_mol_liq_comp": Perrys,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP5,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (56.104e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (40.2e5, pyunits.Pa), # [1]
"temperature_crit": (419.3, pyunits.K), # [1]
"omega": 0.191,
"dens_mol_liq_comp_coeff": {
"eqn_type": 1,
"1": (1.0972, pyunits.kmol * pyunits.m**-3), # [2] pg. 2-98
"2": (0.2649, None),
"3": (419.95, pyunits.K),
"4": (0.29043, None),
},
"cp_mol_ig_comp_coeff": {
"A": (-2.994e0, pyunits.J / pyunits.mol / pyunits.K),
"B": (3.532e-1, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (-1.990e-4, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (4.463e-8, pyunits.J / pyunits.mol / pyunits.K**4),
},
"cp_mol_liq_comp_coeff": {
"1": (
1.3589e5,
pyunits.J * pyunits.kmol**-1 * pyunits.K**-1,
), # [2]
"2": (-4.7739e2, pyunits.J * pyunits.kmol**-1 * pyunits.K**-2),
"3": (2.1835e0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-3),
"4": (-2.2230e-3, pyunits.J * pyunits.kmol**-1 * pyunits.K**-4),
"5": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-5),
},
"enth_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (1171.52, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_vap_comp_ref": (
305.6,
pyunits.J / pyunits.mol / pyunits.K,
), # [3]
"pressure_sat_comp_coeff": {
"A": (3.96640, None), # [4]
"B": (927.2100, pyunits.K),
"C": (238.630, pyunits.K),
},
},
},
"pentene": {
"type": Component,
"elemental_composition": {"H": 10, "C": 5},
"dens_mol_liq_comp": Perrys,
"enth_mol_liq_comp": Perrys,
"entr_mol_liq_comp": Perrys,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP5,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (70.135e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (40.5e5, pyunits.Pa), # [1]
"temperature_crit": (464.7, pyunits.K), # [1]
"omega": 0.233, # [1]
"dens_mol_liq_comp_coeff": {
"eqn_type": 1,
"1": (0.9038, pyunits.kmol * pyunits.m**-3), # [2] pg. 2-98
"2": (0.26648, None),
"3": (464.78, pyunits.K),
"4": (0.2905, None),
},
"cp_mol_ig_comp_coeff": {
"A": (-1.340e-1, pyunits.J / pyunits.mol / pyunits.K),
"B": (4.329e-1, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (-2.317e-4, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (4.681e-8, pyunits.J / pyunits.mol / pyunits.K**4),
},
"cp_mol_liq_comp_coeff": {
"1": (
1.5467e5,
pyunits.J * pyunits.kmol**-1 * pyunits.K**-1,
), # [2]
"2": (-4.2600e2, pyunits.J * pyunits.kmol**-1 * pyunits.K**-2),
"3": (1.964, pyunits.J * pyunits.kmol**-1 * pyunits.K**-3),
"4": (-1.8034e-3, pyunits.J * pyunits.kmol**-1 * pyunits.K**-4),
"5": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-5),
},
"enth_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (
-20920.00,
pyunits.J / pyunits.mol,
), # [3]
"entr_mol_form_vap_comp_ref": (
347.82,
pyunits.J / pyunits.mol / pyunits.K,
), # [3]
"pressure_sat_comp_coeff": {
"A": (3.96914, None), # [4]
"B": (1044.010, pyunits.K),
"C": (233.450, pyunits.K),
},
},
},
"hexene": {
"type": Component,
"elemental_composition": {"H": 12, "C": 6},
"dens_mol_liq_comp": Perrys,
"enth_mol_liq_comp": Perrys,
"entr_mol_liq_comp": Perrys,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP5,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (84.162e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (31.7e5, pyunits.Pa), # [1]
"temperature_crit": (504.0, pyunits.K), # [1]
"omega": 0.285, # [1]
"dens_mol_liq_comp_coeff": {
"eqn_type": 1,
"1": (0.7389, pyunits.kmol * pyunits.m**-3), # [2] pg. 2-98
"2": (0.26147, None),
"3": (504.03, pyunits.K),
"4": (0.2902, None),
},
"cp_mol_ig_comp_coeff": {
"A": (-1.746e0, pyunits.J / pyunits.mol / pyunits.K),
"B": (5.309e-1, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (-2.903e-4, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (6.054e-8, pyunits.J / pyunits.mol / pyunits.K**4),
},
"cp_mol_liq_comp_coeff": {
"1": (
1.9263e5,
pyunits.J * pyunits.kmol**-1 * pyunits.K**-1,
), # [2]
"2": (-5.7116e2, pyunits.J * pyunits.kmol**-1 * pyunits.K**-2),
"3": (2.4004e0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-3),
"4": (-1.9758e-3, pyunits.J * pyunits.kmol**-1 * pyunits.K**-4),
"5": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-5),
},
"enth_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (
-41672.64,
pyunits.J / pyunits.mol,
), # [3]
"entr_mol_form_vap_comp_ref": (
385.97,
pyunits.J / pyunits.mol / pyunits.K,
), # [3]
"pressure_sat_comp_coeff": {
"A": (3.98260, None), # [4]
"B": (1148.620, pyunits.K),
"C": (225.340, pyunits.K),
},
},
},
"heptene": {
"type": Component,
"elemental_composition": {"H": 14, "C": 7},
"dens_mol_liq_comp": Perrys,
"enth_mol_liq_comp": Perrys,
"entr_mol_liq_comp": Perrys,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP5,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (98.189e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (25.4e5, pyunits.Pa), # [1]
"temperature_crit": (537.2, pyunits.K), # [1]
"omega": 0.358, # [1]
"dens_mol_liq_comp_coeff": {
"eqn_type": 1,
"1": (0.63734, pyunits.kmol * pyunits.m**-3), # [2] pg. 2-98
"2": (0.26319, None),
"3": (537.29, pyunits.K),
"4": (0.27375, None),
},
"cp_mol_ig_comp_coeff": {
"A": (-3.303e0, pyunits.J / pyunits.mol / pyunits.K),
"B": (6.297e-1, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (-3.512e-4, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (7.607e-8, pyunits.J / pyunits.mol / pyunits.K**4),
},
"cp_mol_liq_comp_coeff": {
"1": (
1.8997e5,
pyunits.J * pyunits.kmol**-1 * pyunits.K**-1,
), # [2]
"2": (-1.5670e2, pyunits.J * pyunits.kmol**-1 * pyunits.K**-2),
"3": (3.4300e-1, pyunits.J * pyunits.kmol**-1 * pyunits.K**-3),
"4": (1.5222e-3, pyunits.J * pyunits.kmol**-1 * pyunits.K**-4),
"5": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-5),
},
"enth_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (
-62299.76,
pyunits.J / pyunits.mol,
), # [3]
"entr_mol_form_vap_comp_ref": (
424,
pyunits.J / pyunits.mol / pyunits.K,
), # [3]
"pressure_sat_comp_coeff": {
"A": (4.02677, None), # [4]
"B": (1258.340, pyunits.K),
"C": (219.300, pyunits.K),
},
},
},
"octene": {
"type": Component,
"elemental_composition": {"H": 16, "C": 8},
"dens_mol_liq_comp": Perrys,
"enth_mol_liq_comp": Perrys,
"entr_mol_liq_comp": Perrys,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP5,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (112.216e-3, pyunits.kg / pyunits.mol), # [1]
"pressure_crit": (26.2e5, pyunits.Pa), # [1]
"temperature_crit": (566.6, pyunits.K), # [1]
"omega": 0.386, # [1]
"dens_mol_liq_comp_coeff": {
"eqn_type": 1,
"1": (0.5871, pyunits.kmol * pyunits.m**-3), # [2] pg. 2-98
"2": (0.27005, None),
"3": (566.65, pyunits.K),
"4": (0.27187, None),
},
"cp_mol_ig_comp_coeff": {
"A": (-4.099e0, pyunits.J / pyunits.mol / pyunits.K),
"B": (7.239e-1, pyunits.J / pyunits.mol / pyunits.K**2),
"C": (-4.036e-4, pyunits.J / pyunits.mol / pyunits.K**3),
"D": (8.675e-8, pyunits.J / pyunits.mol / pyunits.K**4),
},
"cp_mol_liq_comp_coeff": {
"1": (
3.7930e5,
pyunits.J * pyunits.kmol**-1 * pyunits.K**-1,
), # [2]
"2": (-2.1175e3, pyunits.J * pyunits.kmol**-1 * pyunits.K**-2),
"3": (8.2362e0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-3),
"4": (-9.0093e-3, pyunits.J * pyunits.kmol**-1 * pyunits.K**-4),
"5": (0, pyunits.J * pyunits.kmol**-1 * pyunits.K**-5),
},
"enth_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol), # [3]
"entr_mol_form_liq_comp_ref": (0, pyunits.J / pyunits.mol / pyunits.K),
"enth_mol_form_vap_comp_ref": (
-82926.88,
pyunits.J / pyunits.mol,
), # [3]
"entr_mol_form_vap_comp_ref": (
454.36,
pyunits.J / pyunits.mol / pyunits.K,
), # [3]
"pressure_sat_comp_coeff": {
"A": (4.05985, None), # [4]
"B": (1355.460, pyunits.K),
"C": (213.050, pyunits.K),
},
},
},
},
# Specifying phases
"phases": {
"Liq": {
"type": LiquidPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {"type": CubicType.PR},
},
"Vap": {
"type": VaporPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {"type": CubicType.PR},
},
},
# Set base units of measurement
"base_units": {
"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K,
},
# Specifying state definition
"state_definition": FTPx,
"state_bounds": {
"flow_mol": (0, 100, 1000, pyunits.mol / pyunits.s),
"temperature": (273.15, 300, 1500, pyunits.K),
"pressure": (5e4, 1e5, 1e7, pyunits.Pa),
},
"pressure_ref": (101325, pyunits.Pa),
"temperature_ref": (298.15, pyunits.K),
# Defining phase equilibria
"phases_in_equilibrium": [("Vap", "Liq")],
"phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
"bubble_dew_method": LogBubbleDew,
"parameter_data": {
"PR_kappa": {
("hydrogen", "hydrogen"): 0.000,
("hydrogen", "methane"): 0.000,
("hydrogen", "ethane"): 0.000,
("hydrogen", "propane"): 0.000,
("hydrogen", "nbutane"): 0.000,
("hydrogen", "ibutane"): 0.000,
("hydrogen", "ethylene"): 0.000,
("hydrogen", "propene"): 0.000,
("hydrogen", "butene"): 0.000,
("hydrogen", "pentene"): 0.000,
("hydrogen", "hexene"): 0.000,
("hydrogen", "heptene"): 0.000,
("hydrogen", "octene"): 0.000,
("methane", "hydrogen"): 0.000,
("methane", "methane"): 0.000,
("methane", "ethane"): 0.000,
("methane", "propane"): 0.000,
("methane", "nbutane"): 0.000,
("methane", "ibutane"): 0.000,
("methane", "ethylene"): 0.000,
("methane", "propene"): 0.000,
("methane", "butene"): 0.000,
("methane", "pentene"): 0.000,
("methane", "hexene"): 0.000,
("methane", "heptene"): 0.000,
("methane", "octene"): 0.000,
("ethane", "hydrogen"): 0.000,
("ethane", "methane"): 0.000,
("ethane", "ethane"): 0.000,
("ethane", "propane"): 0.000,
("ethane", "nbutane"): 0.000,
("ethane", "ibutane"): 0.000,
("ethane", "ethylene"): 0.000,
("ethane", "propene"): 0.000,
("ethane", "butene"): 0.000,
("ethane", "pentene"): 0.000,
("ethane", "hexene"): 0.000,
("ethane", "heptene"): 0.000,
("ethane", "octene"): 0.000,
("propane", "hydrogen"): 0.000,
("propane", "methane"): 0.000,
("propane", "ethane"): 0.000,
("propane", "propane"): 0.000,
("propane", "nbutane"): 0.000,
("propane", "ibutane"): 0.000,
("propane", "ethylene"): 0.000,
("propane", "propene"): 0.000,
("propane", "butene"): 0.000,
("propane", "pentene"): 0.000,
("propane", "hexene"): 0.000,
("propane", "heptene"): 0.000,
("propane", "octene"): 0.000,
("nbutane", "hydrogen"): 0.000,
("nbutane", "methane"): 0.000,
("nbutane", "ethane"): 0.000,
("nbutane", "propane"): 0.000,
("nbutane", "nbutane"): 0.000,
("nbutane", "ibutane"): 0.000,
("nbutane", "ethylene"): 0.000,
("nbutane", "propene"): 0.000,
("nbutane", "butene"): 0.000,
("nbutane", "pentene"): 0.000,
("nbutane", "hexene"): 0.000,
("nbutane", "heptene"): 0.000,
("nbutane", "octene"): 0.000,
("ibutane", "hydrogen"): 0.000,
("ibutane", "methane"): 0.000,
("ibutane", "ethane"): 0.000,
("ibutane", "propane"): 0.000,
("ibutane", "nbutane"): 0.000,
("ibutane", "ibutane"): 0.000,
("ibutane", "ethylene"): 0.000,
("ibutane", "propene"): 0.000,
("ibutane", "butene"): 0.000,
("ibutane", "pentene"): 0.000,
("ibutane", "hexene"): 0.000,
("ibutane", "heptene"): 0.000,
("ibutane", "octene"): 0.000,
("ethylene", "hydrogen"): 0.000,
("ethylene", "methane"): 0.000,
("ethylene", "ethane"): 0.000,
("ethylene", "propane"): 0.000,
("ethylene", "nbutane"): 0.000,
("ethylene", "ibutane"): 0.000,
("ethylene", "ethylene"): 0.000,
("ethylene", "propene"): 0.000,
("ethylene", "butene"): 0.000,
("ethylene", "pentene"): 0.000,
("ethylene", "hexene"): 0.000,
("ethylene", "heptene"): 0.000,
("ethylene", "octene"): 0.000,
("propene", "hydrogen"): 0.000,
("propene", "methane"): 0.000,
("propene", "ethane"): 0.000,
("propene", "propane"): 0.000,
("propene", "nbutane"): 0.000,
("propene", "ibutane"): 0.000,
("propene", "ethylene"): 0.000,
("propene", "propene"): 0.000,
("propene", "butene"): 0.000,
("propene", "pentene"): 0.000,
("propene", "hexene"): 0.000,
("propene", "heptene"): 0.000,
("propene", "octene"): 0.000,
("butene", "hydrogen"): 0.000,
("butene", "methane"): 0.000,
("butene", "ethane"): 0.000,
("butene", "propane"): 0.000,
("butene", "nbutane"): 0.000,
("butene", "ibutane"): 0.000,
("butene", "ethylene"): 0.000,
("butene", "propene"): 0.000,
("butene", "butene"): 0.000,
("butene", "pentene"): 0.000,
("butene", "hexene"): 0.000,
("butene", "heptene"): 0.000,
("butene", "octene"): 0.000,
("pentene", "hydrogen"): 0.000,
("pentene", "methane"): 0.000,
("pentene", "ethane"): 0.000,
("pentene", "propane"): 0.000,
("pentene", "nbutane"): 0.000,
("pentene", "ibutane"): 0.000,
("pentene", "ethylene"): 0.000,
("pentene", "propene"): 0.000,
("pentene", "butene"): 0.000,
("pentene", "pentene"): 0.000,
("pentene", "hexene"): 0.000,
("pentene", "heptene"): 0.000,
("pentene", "octene"): 0.000,
("hexene", "hydrogen"): 0.000,
("hexene", "methane"): 0.000,
("hexene", "ethane"): 0.000,
("hexene", "propane"): 0.000,
("hexene", "nbutane"): 0.000,
("hexene", "ibutane"): 0.000,
("hexene", "ethylene"): 0.000,
("hexene", "propene"): 0.000,
("hexene", "butene"): 0.000,
("hexene", "pentene"): 0.000,
("hexene", "hexene"): 0.000,
("hexene", "heptene"): 0.000,
("hexene", "octene"): 0.000,
("heptene", "hydrogen"): 0.000,
("heptene", "methane"): 0.000,
("heptene", "ethane"): 0.000,
("heptene", "propane"): 0.000,
("heptene", "nbutane"): 0.000,
("heptene", "ibutane"): 0.000,
("heptene", "ethylene"): 0.000,
("heptene", "propene"): 0.000,
("heptene", "butene"): 0.000,
("heptene", "pentene"): 0.000,
("heptene", "hexene"): 0.000,
("heptene", "heptene"): 0.000,
("heptene", "octene"): 0.000,
("octene", "hydrogen"): 0.000,
("octene", "methane"): 0.000,
("octene", "ethane"): 0.000,
("octene", "propane"): 0.000,
("octene", "nbutane"): 0.000,
("octene", "ibutane"): 0.000,
("octene", "ethylene"): 0.000,
("octene", "propene"): 0.000,
("octene", "butene"): 0.000,
("octene", "pentene"): 0.000,
("octene", "hexene"): 0.000,
("octene", "heptene"): 0.000,
("octene", "octene"): 0.000,
}
},
}
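# --- Illustrative access sketch (added for clarity; not part of the original example) ---
# A minimal, hedged look at how the nested "configuration" dictionary above is laid
# out. It only inspects the dictionary itself; building an IDAES flowsheet with it is
# out of scope here, and the printed values are simply those defined above.
if __name__ == "__main__":
    ethane = configuration["components"]["ethane"]["parameter_data"]
    mw_value, mw_units = ethane["mw"]
    print("ethane molecular weight:", mw_value, mw_units)  # 0.03007 kg/mol
    print("phases:", list(configuration["phases"]))  # ['Liq', 'Vap']
    print("binary interaction pairs:", len(configuration["parameter_data"]["PR_kappa"]))  # 13 x 13 = 169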
|
ee8acc3943d3e3400b004d6a08f2ea7a3b00421b
|
3f763cf893b09a3be562858613c928703ff349e4
|
/client/verta/verta/_cli/deployment/get.py
|
a0ffec426dec2f5bf287fd89d3bf89aca5e4f0c9
|
[
"Apache-2.0"
] |
permissive
|
VertaAI/modeldb
|
636e46fc025b01a514d599b10e228c8735503357
|
ec9ac7712500adb13fd815dfd476ce9f536c6921
|
refs/heads/main
| 2023-08-31T00:45:37.220628
| 2023-08-30T18:45:13
| 2023-08-30T18:45:13
| 71,305,435
| 844
| 142
|
Apache-2.0
| 2023-09-14T19:24:13
| 2016-10-19T01:07:26
|
Java
|
UTF-8
|
Python
| false
| false
| 809
|
py
|
get.py
|
# -*- coding: utf-8 -*-
import click
from .deployment import deployment
from ... import Client
@deployment.group()
def get():
"""Get detailed information about an entity related to deployment.
For example, to see information about an endpoint, run
`verta deployment get endpoint "<endpoint path>"`
"""
pass
@get.command(name="endpoint")
@click.argument("path", nargs=1, required=True)
@click.option("--workspace", "-w", help="Workspace to use.")
def get_endpoint(path, workspace):
"""Get detailed information about a deployment endpoint."""
client = Client()
try:
endpoint = client.get_endpoint(path, workspace=workspace)
except ValueError:
raise click.BadParameter("endpoint {} not found".format(path))
click.echo()
click.echo(endpoint)
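# --- Illustrative CLI sketch (added for clarity; not part of the original module) ---
# Example invocations of the command defined above, assuming the Verta client has
# been configured (e.g. via the usual host/credential environment variables); the
# endpoint path and workspace name below are made up:
#
#   verta deployment get endpoint "/my-model"
#   verta deployment get endpoint "/my-model" --workspace my-team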
|
64d3e87bc6b3a68030f88a962f0c546f27b59a35
|
7ebb2f0458d3813737dd045473d7c1398d08392d
|
/pyclesperanto_prototype/_tier4/_proximal_neighbor_count.py
|
5c0e37d4ee91dca8eddf6c357c2cbc5973656e8e
|
[
"Python-2.0",
"BSD-3-Clause"
] |
permissive
|
clEsperanto/pyclesperanto_prototype
|
b3192d6984f45571fe0a7dfcceee2058bc4debbe
|
b465c8669f8e9326874139cf4b9c9af22c22757c
|
refs/heads/master
| 2023-09-04T11:07:55.828329
| 2023-08-25T17:18:30
| 2023-08-25T17:18:30
| 248,206,619
| 152
| 36
|
BSD-3-Clause
| 2023-05-23T09:44:51
| 2020-03-18T10:56:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,361
|
py
|
_proximal_neighbor_count.py
|
from .._tier0 import plugin_function
from .._tier0 import Image
import numpy as np
from .._tier0 import create_none
@plugin_function(categories=['label measurement'], output_creator=create_none)
def proximal_neighbor_count(source : Image, destination : Image = None, min_distance : float = 0, max_distance : float = np.finfo(np.float32).max) -> Image:
"""Takes a label map, determines which labels are within a give distance range
and returns the number of those in a vector.
Parameters
----------
source : Image
destination : Image, optional
min_distance : float, optional
default : 0
max_distance : float, optional
default: maximum float value
Returns
-------
destination
"""
from .._tier1 import set_column
from .._tier9 import centroids_of_labels
from .._tier1 import generate_distance_matrix
from .._tier3 import generate_proximal_neighbors_matrix
from .._tier1 import count_touching_neighbors
pointlist = centroids_of_labels(source)
distance_matrix = generate_distance_matrix(pointlist, pointlist)
touch_matrix = generate_proximal_neighbors_matrix(distance_matrix, min_distance=min_distance, max_distance=max_distance)
destination = count_touching_neighbors(touch_matrix, destination)
set_column(destination, 0, 0)
return destination
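# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hedged example on a tiny made-up label image. It assumes an OpenCL
# device is available (plugin_function pushes the numpy array to the device); the
# label values and distance threshold below are arbitrary.
if __name__ == "__main__":
    labels = np.asarray([[1, 1, 0, 2, 2],
                         [0, 0, 0, 0, 0],
                         [3, 3, 0, 0, 4]])
    # For every label, count how many other label centroids lie within 4 pixels.
    counts = proximal_neighbor_count(labels, max_distance=4)
    print(counts)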
|
07409aa6d1b50b75674b4143d3a281f0956e48c8
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/building/TutorialBuildingAI.py
|
c2426e50314e4e06df5804fa7e75533f43a29a40
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,772
|
py
|
TutorialBuildingAI.py
|
from panda3d.core import *
from direct.directnotify import DirectNotifyGlobal
from . import DistributedDoorAI
from . import DistributedTutorialInteriorAI
from . import FADoorCodes
from . import DoorTypes
from toontown.toon import NPCToons
from toontown.toonbase import TTLocalizer
# This is not a distributed class... It just owns and manages some distributed
# classes.
class TutorialBuildingAI:
def __init__(self, air, exteriorZone, interiorZone, blockNumber):
# While this is not a distributed object, it needs to know about
# the repository.
self.air = air
self.exteriorZone = exteriorZone
self.interiorZone = interiorZone
# This is because we are "pretending" to be a DistributedBuilding.
# The DistributedTutorialInterior takes a peek at savedBy. It really
# should make a function call. Perhaps TutorialBuildingAI and
# DistributedBuildingAI should inherit from each other somehow,
# but I can't see an easy way to do that.
self.savedBy = None
self.setup(blockNumber)
def cleanup(self):
self.interior.requestDelete()
del self.interior
self.door.requestDelete()
del self.door
self.insideDoor.requestDelete()
del self.insideDoor
self.gagShopNPC.requestDelete()
del self.gagShopNPC
return
def setup(self, blockNumber):
# Put an NPC in here. Give him id# 20000. When he has assigned
# his quest, he will unlock the interior door.
self.gagShopNPC = NPCToons.createNPC(
self.air, 20000,
(self.interiorZone,
TTLocalizer.NPCToonNames[20000],
("dll" ,"ms" ,"m" ,"m" ,7 ,0 ,7 ,7 ,2 ,6 ,2 ,6 ,2 ,16), "m", 1, NPCToons.NPC_REGULAR),
self.interiorZone,
questCallback=self.unlockInteriorDoor)
# Flag him as being part of tutorial
self.gagShopNPC.setTutorial(1)
npcId = self.gagShopNPC.getDoId()
# Toon interior (with tutorial flag set to 1)
self.interior=DistributedTutorialInteriorAI.DistributedTutorialInteriorAI(
blockNumber, self.air, self.interiorZone, self, npcId)
self.interior.generateWithRequired(self.interiorZone)
# Outside door:
door=DistributedDoorAI.DistributedDoorAI(self.air, blockNumber,
DoorTypes.EXT_STANDARD,
lockValue=FADoorCodes.DEFEAT_FLUNKY_TOM)
# Inside door. Locked until you get your gags.
insideDoor=DistributedDoorAI.DistributedDoorAI(
self.air,
blockNumber,
DoorTypes.INT_STANDARD,
lockValue=FADoorCodes.TALK_TO_TOM)
# Tell them about each other:
door.setOtherDoor(insideDoor)
insideDoor.setOtherDoor(door)
door.zoneId=self.exteriorZone
insideDoor.zoneId=self.interiorZone
# Now that they both now about each other, generate them:
door.generateWithRequired(self.exteriorZone)
#door.sendUpdate("setDoorIndex", [door.getDoorIndex()])
insideDoor.generateWithRequired(self.interiorZone)
#insideDoor.sendUpdate("setDoorIndex", [door.getDoorIndex()])
# keep track of them:
self.door=door
self.insideDoor=insideDoor
return
def unlockInteriorDoor(self):
self.insideDoor.setDoorLock(FADoorCodes.UNLOCKED)
def battleOverCallback(self):
# There is an if statement here because it is possible for
# the callback to get called after cleanup has already taken
# place.
if hasattr(self, "door"):
self.door.setDoorLock(FADoorCodes.TALK_TO_HQ_TOM)
|
6ee6a8673d13535a58a34b820cb5bd9865401aaf
|
2181883c8faac55bfc969a97d22d9b24a3e81ab3
|
/Pythonwin/pywin/framework/editor/__init__.py
|
16e3158184e55234a845dbbcc792c2b352939a24
|
[
"PSF-2.0"
] |
permissive
|
mhammond/pywin32
|
574bf121cfeac8c7a9d28f94ee0f2069a425e8ab
|
2a7137f21965013020ef9e4f27565db6dea59003
|
refs/heads/main
| 2023-09-02T13:16:52.307262
| 2023-08-17T19:42:26
| 2023-08-17T19:42:26
| 108,187,130
| 4,757
| 907
| null | 2023-08-23T01:45:49
| 2017-10-24T21:44:27
|
C++
|
UTF-8
|
Python
| false
| false
| 2,923
|
py
|
__init__.py
|
# __init__ for the Pythonwin editor package.
#
# We used to support optional editors - eg, color or non-color.
#
# This really isn't necessary with Scintilla, and Scintilla
# is getting so deeply embedded that it was too much work.
import win32ui
defaultCharacterFormat = (-402653169, 0, 200, 0, 0, 0, 49, "Courier New")
##def GetDefaultEditorModuleName():
## import pywin
## # If someone has set pywin.editormodulename, then this is what we use
## try:
## prefModule = pywin.editormodulename
## except AttributeError:
## prefModule = win32ui.GetProfileVal("Editor","Module", "")
## return prefModule
##
##def WriteDefaultEditorModule(module):
## try:
## module = module.__name__
## except:
## pass
## win32ui.WriteProfileVal("Editor", "Module", module)
def LoadDefaultEditor():
pass
## prefModule = GetDefaultEditorModuleName()
## restorePrefModule = None
## mod = None
## if prefModule:
## try:
## mod = __import__(prefModule)
## except 'xx':
## msg = "Importing your preferred editor ('%s') failed.\n\nError %s: %s\n\nAn attempt will be made to load the default editor.\n\nWould you like this editor disabled in the future?" % (prefModule, sys.exc_info()[0], sys.exc_info()[1])
## rc = win32ui.MessageBox(msg, "Error importing editor", win32con.MB_YESNO)
## if rc == win32con.IDNO:
## restorePrefModule = prefModule
## WriteDefaultEditorModule("")
## del rc
##
## try:
## # Try and load the default one - dont catch errors here.
## if mod is None:
## prefModule = "pywin.framework.editor.color.coloreditor"
## mod = __import__(prefModule)
##
## # Get at the real module.
## mod = sys.modules[prefModule]
##
## # Do a "from mod import *"
## globals().update(mod.__dict__)
##
## finally:
## # Restore the users default editor if it failed and they requested not to disable it.
## if restorePrefModule:
## WriteDefaultEditorModule(restorePrefModule)
def GetEditorOption(option, defaultValue, min=None, max=None):
rc = win32ui.GetProfileVal("Editor", option, defaultValue)
if min is not None and rc < min:
rc = defaultValue
if max is not None and rc > max:
rc = defaultValue
return rc
def SetEditorOption(option, newValue):
win32ui.WriteProfileVal("Editor", option, newValue)
def DeleteEditorOption(option):
try:
win32ui.WriteProfileVal("Editor", option, None)
except win32ui.error:
pass
# Load and save font tuples
def GetEditorFontOption(option, default=None):
if default is None:
default = defaultCharacterFormat
fmt = GetEditorOption(option, "")
if fmt == "":
return default
try:
return eval(fmt)
except:
print("WARNING: Invalid font setting in registry - setting ignored")
return default
def SetEditorFontOption(option, newValue):
SetEditorOption(option, str(newValue))
from pywin.framework.editor.color.coloreditor import editorTemplate
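# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hedged example of the option helpers above. It assumes Pythonwin is
# installed so that win32ui profile values are available; the option names below
# are used purely for illustration.
if __name__ == "__main__":
    # Falls back to the default (4) whenever the stored value is missing or lies
    # outside the [1, 16] range.
    tab_size = GetEditorOption("Tab Size", 4, min=1, max=16)
    print("Tab Size:", tab_size)
    # Reading an option that was never written returns the built-in font tuple.
    print("Fallback font tuple:", GetEditorFontOption("Nonexistent Option"))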
|
076472973dcdc01873e19df03580b1fe9c7734c2
|
fa3f6d4e9169fb95f828013d179d03accdff381b
|
/grr/server/grr_response_server/export_converters/launchd_plist_test.py
|
44bdf5fabf1422d9a2f4778db1926d887816107b
|
[
"Apache-2.0"
] |
permissive
|
google/grr
|
c51a2bd251ed2f7adae538541990a2cc01fdcc8c
|
44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6
|
refs/heads/master
| 2023-09-05T20:02:36.823914
| 2023-07-26T09:34:09
| 2023-07-26T09:34:09
| 14,909,673
| 4,683
| 927
|
Apache-2.0
| 2023-07-26T09:34:10
| 2013-12-04T00:17:53
|
Python
|
UTF-8
|
Python
| false
| false
| 5,127
|
py
|
launchd_plist_test.py
|
#!/usr/bin/env python
from absl import app
from grr_response_core.lib.rdfvalues import plist as rdf_plist
from grr_response_server.export_converters import launchd_plist
from grr.test_lib import export_test_lib
from grr.test_lib import test_lib
class LaunchdPlistConverterTest(export_test_lib.ExportTestBase):
"""Tests for LaunchdPlist converter."""
def testExportsValueCorrectly(self):
sample = rdf_plist.LaunchdPlist(
path="/etc/foo.plist",
Label="label",
Disabled=True,
UserName="someuser",
GroupName="somegroup",
Program="foo",
ProgramArguments=["-h", "-k"],
RootDirectory="/foo",
WorkingDirectory="/bar",
OnDemand=True,
RunAtLoad=True,
StartCalendarInterval=[
rdf_plist.LaunchdStartCalendarIntervalEntry(
Minute=1, Hour=2, Day=3, Weekday=4, Month=5),
rdf_plist.LaunchdStartCalendarIntervalEntry(
Minute=2, Hour=3, Day=4, Weekday=5, Month=6),
],
EnvironmentVariables=[
rdf_plist.PlistStringDictEntry(name="foo", value="bar"),
rdf_plist.PlistStringDictEntry(name="foo2", value="bar2"),
],
KeepAlive=True,
KeepAliveDict=rdf_plist.LaunchdKeepAlive(
SuccessfulExit=True,
NetworkState=True,
PathState=[
rdf_plist.PlistBoolDictEntry(name="foo", value=True),
rdf_plist.PlistBoolDictEntry(name="bar", value=False),
],
OtherJobEnabled=[
rdf_plist.PlistBoolDictEntry(name="foo2", value=True),
rdf_plist.PlistBoolDictEntry(name="bar2", value=False),
],
),
StandardInPath="in_path",
StandardOutPath="out_path",
StandardErrorPath="error_path",
LimitLoadToHosts=["host1", "host2"],
LimitLoadFromHosts=["host3", "host4"],
LimitLoadToSessionType=["type1", "type2"],
EnableGlobbing=True,
EnableTransactions=True,
Umask=42,
TimeOut=43,
ExitTimeOut=44,
ThrottleInterval=45,
InitGroups=True,
WatchPaths=["path1", "path2"],
QueueDirectories=["dir1", "dir2"],
StartOnMount=True,
StartInterval=46,
Debug=True,
WaitForDebugger=True,
Nice=47,
ProcessType="sometype",
AbandonProcessGroup=True,
LowPriorityIO=True,
LaunchOnlyOnce=True,
inetdCompatibilityWait=True,
SoftResourceLimits=True,
HardResourceLimits=True,
Sockets=True)
converter = launchd_plist.LaunchdPlistConverter()
converted = list(converter.Convert(self.metadata, sample))
self.assertLen(converted, 1)
c = converted[0]
self.assertIsInstance(c, launchd_plist.ExportedLaunchdPlist)
self.assertEqual(c.metadata, self.metadata)
self.assertEqual(c.launchd_file_path, "/etc/foo.plist")
self.assertEqual(c.label, "label")
self.assertTrue(c.disabled)
self.assertEqual(c.user_name, "someuser")
self.assertEqual(c.group_name, "somegroup")
self.assertEqual(c.program, "foo")
self.assertEqual(c.program_arguments, "-h -k")
self.assertEqual(c.root_directory, "/foo")
self.assertEqual(c.working_directory, "/bar")
self.assertTrue(c.on_demand)
self.assertTrue(c.run_at_load)
self.assertEqual(c.start_calendar_interval, "5-4-3-2-1 6-5-4-3-2")
self.assertEqual(c.environment_variables, "foo=bar foo2=bar2")
self.assertEqual(c.standard_in_path, "in_path")
self.assertEqual(c.standard_out_path, "out_path")
self.assertEqual(c.standard_error_path, "error_path")
self.assertEqual(c.limit_load_to_hosts, "host1 host2")
self.assertEqual(c.limit_load_from_hosts, "host3 host4")
self.assertEqual(c.limit_load_to_session_type, "type1 type2")
self.assertTrue(c.enable_globbing)
self.assertTrue(c.enable_transactions)
self.assertEqual(c.umask, 42)
self.assertEqual(c.time_out, 43)
self.assertEqual(c.exit_time_out, 44)
self.assertEqual(c.throttle_interval, 45)
self.assertTrue(c.init_groups)
self.assertEqual(c.watch_paths, "path1 path2")
self.assertEqual(c.queue_directories, "dir1 dir2")
self.assertTrue(c.start_on_mount)
self.assertEqual(c.start_interval, 46)
self.assertTrue(c.debug)
self.assertTrue(c.wait_for_debugger)
self.assertEqual(c.nice, 47)
self.assertEqual(c.process_type, "sometype")
self.assertTrue(c.abandon_process_group)
self.assertTrue(c.low_priority_io)
self.assertTrue(c.launch_only_once)
self.assertTrue(c.inetd_compatibility_wait)
self.assertTrue(c.soft_resource_limits)
self.assertTrue(c.hard_resource_limits)
self.assertTrue(c.sockets)
self.assertTrue(c.keep_alive)
self.assertTrue(c.keep_alive_successful_exit)
self.assertTrue(c.keep_alive_network_state)
self.assertEqual(c.keep_alive_path_state, "foo=True bar=False")
self.assertEqual(c.keep_alive_other_job_enabled, "foo2=True bar2=False")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
bfc0da789a88411f021cdaac7c245917719426c3
|
99dcb18a9e3ea367272f740b8cbf3c34285a0c08
|
/google/cloud/aiplatform/compat/types/__init__.py
|
fb72dc7103a20107d85449250cdfa3d6420425ff
|
[
"Apache-2.0"
] |
permissive
|
googleapis/python-aiplatform
|
926a4873f35dbea15b2fd86c0e16b5e6556d803e
|
76b95b92c1d3b87c72d754d8c02b1bca652b9a27
|
refs/heads/main
| 2023-08-19T23:49:02.180075
| 2023-08-19T13:25:59
| 2023-08-19T13:27:27
| 298,017,988
| 418
| 240
|
Apache-2.0
| 2023-09-14T21:08:33
| 2020-09-23T15:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 10,897
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.aiplatform_v1beta1.types import (
accelerator_type as accelerator_type_v1beta1,
annotation as annotation_v1beta1,
annotation_spec as annotation_spec_v1beta1,
artifact as artifact_v1beta1,
batch_prediction_job as batch_prediction_job_v1beta1,
completion_stats as completion_stats_v1beta1,
context as context_v1beta1,
custom_job as custom_job_v1beta1,
data_item as data_item_v1beta1,
data_labeling_job as data_labeling_job_v1beta1,
dataset as dataset_v1beta1,
dataset_service as dataset_service_v1beta1,
deployed_index_ref as matching_engine_deployed_index_ref_v1beta1,
deployed_model_ref as deployed_model_ref_v1beta1,
deployment_resource_pool as deployment_resource_pool_v1beta1,
deployment_resource_pool_service as deployment_resource_pool_service_v1beta1,
encryption_spec as encryption_spec_v1beta1,
endpoint as endpoint_v1beta1,
endpoint_service as endpoint_service_v1beta1,
entity_type as entity_type_v1beta1,
env_var as env_var_v1beta1,
event as event_v1beta1,
execution as execution_v1beta1,
explanation as explanation_v1beta1,
explanation_metadata as explanation_metadata_v1beta1,
feature as feature_v1beta1,
feature_monitoring_stats as feature_monitoring_stats_v1beta1,
feature_selector as feature_selector_v1beta1,
featurestore as featurestore_v1beta1,
featurestore_monitoring as featurestore_monitoring_v1beta1,
featurestore_online_service as featurestore_online_service_v1beta1,
featurestore_service as featurestore_service_v1beta1,
index as index_v1beta1,
index_endpoint as index_endpoint_v1beta1,
hyperparameter_tuning_job as hyperparameter_tuning_job_v1beta1,
io as io_v1beta1,
job_service as job_service_v1beta1,
job_state as job_state_v1beta1,
lineage_subgraph as lineage_subgraph_v1beta1,
machine_resources as machine_resources_v1beta1,
manual_batch_tuning_parameters as manual_batch_tuning_parameters_v1beta1,
match_service as match_service_v1beta1,
metadata_schema as metadata_schema_v1beta1,
metadata_service as metadata_service_v1beta1,
metadata_store as metadata_store_v1beta1,
model as model_v1beta1,
model_evaluation as model_evaluation_v1beta1,
model_evaluation_slice as model_evaluation_slice_v1beta1,
model_deployment_monitoring_job as model_deployment_monitoring_job_v1beta1,
model_garden_service as model_garden_service_v1beta1,
model_service as model_service_v1beta1,
model_monitoring as model_monitoring_v1beta1,
operation as operation_v1beta1,
pipeline_failure_policy as pipeline_failure_policy_v1beta1,
pipeline_job as pipeline_job_v1beta1,
pipeline_service as pipeline_service_v1beta1,
pipeline_state as pipeline_state_v1beta1,
prediction_service as prediction_service_v1beta1,
publisher_model as publisher_model_v1beta1,
schedule as schedule_v1beta1,
schedule_service as schedule_service_v1beta1,
specialist_pool as specialist_pool_v1beta1,
specialist_pool_service as specialist_pool_service_v1beta1,
study as study_v1beta1,
tensorboard as tensorboard_v1beta1,
tensorboard_data as tensorboard_data_v1beta1,
tensorboard_experiment as tensorboard_experiment_v1beta1,
tensorboard_run as tensorboard_run_v1beta1,
tensorboard_service as tensorboard_service_v1beta1,
tensorboard_time_series as tensorboard_time_series_v1beta1,
training_pipeline as training_pipeline_v1beta1,
types as types_v1beta1,
vizier_service as vizier_service_v1beta1,
)
from google.cloud.aiplatform_v1.types import (
accelerator_type as accelerator_type_v1,
annotation as annotation_v1,
annotation_spec as annotation_spec_v1,
artifact as artifact_v1,
batch_prediction_job as batch_prediction_job_v1,
completion_stats as completion_stats_v1,
context as context_v1,
custom_job as custom_job_v1,
data_item as data_item_v1,
data_labeling_job as data_labeling_job_v1,
dataset as dataset_v1,
dataset_service as dataset_service_v1,
deployed_index_ref as matching_engine_deployed_index_ref_v1,
deployed_model_ref as deployed_model_ref_v1,
encryption_spec as encryption_spec_v1,
endpoint as endpoint_v1,
endpoint_service as endpoint_service_v1,
entity_type as entity_type_v1,
env_var as env_var_v1,
event as event_v1,
execution as execution_v1,
explanation as explanation_v1,
explanation_metadata as explanation_metadata_v1,
feature as feature_v1,
feature_monitoring_stats as feature_monitoring_stats_v1,
feature_selector as feature_selector_v1,
featurestore as featurestore_v1,
featurestore_online_service as featurestore_online_service_v1,
featurestore_service as featurestore_service_v1,
hyperparameter_tuning_job as hyperparameter_tuning_job_v1,
index as index_v1,
index_endpoint as index_endpoint_v1,
io as io_v1,
job_service as job_service_v1,
job_state as job_state_v1,
lineage_subgraph as lineage_subgraph_v1,
machine_resources as machine_resources_v1,
manual_batch_tuning_parameters as manual_batch_tuning_parameters_v1,
metadata_service as metadata_service_v1,
metadata_schema as metadata_schema_v1,
metadata_store as metadata_store_v1,
model as model_v1,
model_evaluation as model_evaluation_v1,
model_evaluation_slice as model_evaluation_slice_v1,
model_deployment_monitoring_job as model_deployment_monitoring_job_v1,
model_service as model_service_v1,
model_monitoring as model_monitoring_v1,
operation as operation_v1,
pipeline_failure_policy as pipeline_failure_policy_v1,
pipeline_job as pipeline_job_v1,
pipeline_service as pipeline_service_v1,
pipeline_state as pipeline_state_v1,
prediction_service as prediction_service_v1,
publisher_model as publisher_model_v1,
schedule as schedule_v1,
schedule_service as schedule_service_v1,
specialist_pool as specialist_pool_v1,
specialist_pool_service as specialist_pool_service_v1,
study as study_v1,
tensorboard as tensorboard_v1,
tensorboard_data as tensorboard_data_v1,
tensorboard_experiment as tensorboard_experiment_v1,
tensorboard_run as tensorboard_run_v1,
tensorboard_service as tensorboard_service_v1,
tensorboard_time_series as tensorboard_time_series_v1,
training_pipeline as training_pipeline_v1,
types as types_v1,
vizier_service as vizier_service_v1,
)
__all__ = (
# v1
accelerator_type_v1,
annotation_v1,
annotation_spec_v1,
artifact_v1,
batch_prediction_job_v1,
completion_stats_v1,
context_v1,
custom_job_v1,
data_item_v1,
data_labeling_job_v1,
dataset_v1,
dataset_service_v1,
deployed_model_ref_v1,
encryption_spec_v1,
endpoint_v1,
endpoint_service_v1,
entity_type_v1,
env_var_v1,
event_v1,
execution_v1,
explanation_v1,
explanation_metadata_v1,
feature_v1,
feature_monitoring_stats_v1,
feature_selector_v1,
featurestore_v1,
featurestore_online_service_v1,
featurestore_service_v1,
hyperparameter_tuning_job_v1,
io_v1,
job_service_v1,
job_state_v1,
lineage_subgraph_v1,
machine_resources_v1,
manual_batch_tuning_parameters_v1,
matching_engine_deployed_index_ref_v1,
index_v1,
index_endpoint_v1,
metadata_service_v1,
metadata_schema_v1,
metadata_store_v1,
model_v1,
model_evaluation_v1,
model_evaluation_slice_v1,
model_deployment_monitoring_job_v1,
model_service_v1,
model_monitoring_v1,
operation_v1,
pipeline_failure_policy_v1,
pipeline_job_v1,
pipeline_service_v1,
pipeline_state_v1,
prediction_service_v1,
publisher_model_v1,
schedule_v1,
schedule_service_v1,
specialist_pool_v1,
specialist_pool_service_v1,
tensorboard_v1,
tensorboard_data_v1,
tensorboard_experiment_v1,
tensorboard_run_v1,
tensorboard_service_v1,
tensorboard_time_series_v1,
training_pipeline_v1,
types_v1,
study_v1,
vizier_service_v1,
# v1beta1
accelerator_type_v1beta1,
annotation_v1beta1,
annotation_spec_v1beta1,
artifact_v1beta1,
batch_prediction_job_v1beta1,
completion_stats_v1beta1,
context_v1beta1,
custom_job_v1beta1,
data_item_v1beta1,
data_labeling_job_v1beta1,
dataset_v1beta1,
dataset_service_v1beta1,
deployment_resource_pool_v1beta1,
deployment_resource_pool_service_v1beta1,
deployed_model_ref_v1beta1,
encryption_spec_v1beta1,
endpoint_v1beta1,
endpoint_service_v1beta1,
entity_type_v1beta1,
env_var_v1beta1,
event_v1beta1,
execution_v1beta1,
explanation_v1beta1,
explanation_metadata_v1beta1,
feature_v1beta1,
feature_monitoring_stats_v1beta1,
feature_selector_v1beta1,
featurestore_v1beta1,
featurestore_monitoring_v1beta1,
featurestore_online_service_v1beta1,
featurestore_service_v1beta1,
hyperparameter_tuning_job_v1beta1,
io_v1beta1,
job_service_v1beta1,
job_state_v1beta1,
lineage_subgraph_v1beta1,
machine_resources_v1beta1,
manual_batch_tuning_parameters_v1beta1,
matching_engine_deployed_index_ref_v1beta1,
index_v1beta1,
index_endpoint_v1beta1,
match_service_v1beta1,
metadata_service_v1beta1,
metadata_schema_v1beta1,
metadata_store_v1beta1,
model_v1beta1,
model_evaluation_v1beta1,
model_evaluation_slice_v1beta1,
model_deployment_monitoring_job_v1beta1,
model_garden_service_v1beta1,
model_service_v1beta1,
model_monitoring_v1beta1,
operation_v1beta1,
pipeline_failure_policy_v1beta1,
pipeline_job_v1beta1,
pipeline_service_v1beta1,
pipeline_state_v1beta1,
prediction_service_v1beta1,
publisher_model_v1beta1,
schedule_v1beta1,
schedule_service_v1beta1,
specialist_pool_v1beta1,
specialist_pool_service_v1beta1,
study_v1beta1,
tensorboard_v1beta1,
tensorboard_data_v1beta1,
tensorboard_experiment_v1beta1,
tensorboard_run_v1beta1,
tensorboard_service_v1beta1,
tensorboard_time_series_v1beta1,
training_pipeline_v1beta1,
types_v1beta1,
vizier_service_v1beta1,
)
|
6a70f4c2d7eb6f54f9e71dfc6bcaef2119b1556b
|
2311e4ef8feb8c54cb3ca0db817c5219804364a7
|
/setup.py
|
98f89a371d8830656584e05e7c13f79a6885ff45
|
[
"MIT"
] |
permissive
|
vBaiCai/python-pesq
|
4d4095f2e6241c5c1edd4e729e30e1a25f34e0b3
|
a76902d7dd371a172bfcbb52d80b2d1f1f9251dd
|
refs/heads/master
| 2023-04-28T19:14:39.979390
| 2023-04-24T12:05:46
| 2023-04-24T12:05:46
| 161,613,833
| 334
| 71
|
MIT
| 2023-04-24T12:05:47
| 2018-12-13T09:18:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,544
|
py
|
setup.py
|
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext as _build_ext
import os
includes = ['pypesq']
try:
import numpy as np
includes += [os.path.join(np.get_include(), 'numpy')]
except:
pass
extension = Extension("pesq_core",
sources=["pypesq/pesq.c", "pypesq/dsp.c", "pypesq/pesqdsp.c",
"pypesq/pesqio.c", "pypesq/pesqmain.c", "pypesq/pesqmod.c"],
include_dirs=includes,
language='c')
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
try:
__builtins__.__NUMPY_SETUP__ = False
except AttributeError:
print("Cannot set '__builtins__.__NUMPY_SETUP__ = False' This is not needed if numpy is already installed.")
import numpy
self.include_dirs.append(numpy.get_include())
setup(name='pypesq',
version='1.2.4',
description="A package to compute pesq score.",
url='https://github.com/vBaiCai/python-pesq',
author_email='zhuroubaicai@gmail.com',
keywords=['pesq', 'speech', 'speech quality'],
license='MIT',
packages=find_packages(),
ext_modules=[extension],
cmdclass={'build_ext': build_ext},
setup_requires=['numpy'],
py_modules=['numpy'],
zip_safe=False,
install_requires=['numpy'],
python_requires='!=3.0.*, !=3.1.*, !=3.2.*, <4',
)
|
ab9782cc55ac964a93a4c8eedf13a638ba48e157
|
4f458456fa868a2824eaefaa110b477aaa9bf434
|
/source/accounts/forms.py
|
79a3c3d0210fe3337156f14811d407eddfc9977a
|
[] |
permissive
|
egorsmkv/simple-django-login-and-register
|
9e1e2d2fb02a4851492fb22bebd5dbe976c8c6aa
|
9389911dbc98d8f411922e1e8b5dd9247ac9a95b
|
refs/heads/master
| 2023-07-31T20:41:23.368663
| 2023-07-13T12:52:04
| 2023-07-13T12:52:04
| 99,043,550
| 832
| 470
|
BSD-3-Clause
| 2023-09-02T18:01:42
| 2017-08-01T21:11:28
|
Python
|
UTF-8
|
Python
| false
| false
| 6,641
|
py
|
forms.py
|
from datetime import timedelta
from django import forms
from django.forms import ValidationError
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.utils import timezone
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
class UserCacheMixin:
user_cache = None
class SignIn(UserCacheMixin, forms.Form):
password = forms.CharField(label=_('Password'), strip=False, widget=forms.PasswordInput)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if settings.USE_REMEMBER_ME:
self.fields['remember_me'] = forms.BooleanField(label=_('Remember me'), required=False)
def clean_password(self):
password = self.cleaned_data['password']
if not self.user_cache:
return password
if not self.user_cache.check_password(password):
raise ValidationError(_('You entered an invalid password.'))
return password
class SignInViaUsernameForm(SignIn):
username = forms.CharField(label=_('Username'))
@property
def field_order(self):
if settings.USE_REMEMBER_ME:
return ['username', 'password', 'remember_me']
return ['username', 'password']
def clean_username(self):
username = self.cleaned_data['username']
user = User.objects.filter(username=username).first()
if not user:
raise ValidationError(_('You entered an invalid username.'))
if not user.is_active:
raise ValidationError(_('This account is not active.'))
self.user_cache = user
return username
class EmailForm(UserCacheMixin, forms.Form):
email = forms.EmailField(label=_('Email'))
def clean_email(self):
email = self.cleaned_data['email']
user = User.objects.filter(email__iexact=email).first()
if not user:
raise ValidationError(_('You entered an invalid email address.'))
if not user.is_active:
raise ValidationError(_('This account is not active.'))
self.user_cache = user
return email
class SignInViaEmailForm(SignIn, EmailForm):
@property
def field_order(self):
if settings.USE_REMEMBER_ME:
return ['email', 'password', 'remember_me']
return ['email', 'password']
class EmailOrUsernameForm(UserCacheMixin, forms.Form):
email_or_username = forms.CharField(label=_('Email or Username'))
def clean_email_or_username(self):
email_or_username = self.cleaned_data['email_or_username']
user = User.objects.filter(Q(username=email_or_username) | Q(email__iexact=email_or_username)).first()
if not user:
raise ValidationError(_('You entered an invalid email address or username.'))
if not user.is_active:
raise ValidationError(_('This account is not active.'))
self.user_cache = user
return email_or_username
class SignInViaEmailOrUsernameForm(SignIn, EmailOrUsernameForm):
@property
def field_order(self):
if settings.USE_REMEMBER_ME:
return ['email_or_username', 'password', 'remember_me']
return ['email_or_username', 'password']
class SignUpForm(UserCreationForm):
class Meta:
model = User
fields = settings.SIGN_UP_FIELDS
email = forms.EmailField(label=_('Email'), help_text=_('Required. Enter an existing email address.'))
def clean_email(self):
email = self.cleaned_data['email']
user = User.objects.filter(email__iexact=email).exists()
if user:
raise ValidationError(_('You can not use this email address.'))
return email
class ResendActivationCodeForm(UserCacheMixin, forms.Form):
email_or_username = forms.CharField(label=_('Email or Username'))
def clean_email_or_username(self):
email_or_username = self.cleaned_data['email_or_username']
user = User.objects.filter(Q(username=email_or_username) | Q(email__iexact=email_or_username)).first()
if not user:
raise ValidationError(_('You entered an invalid email address or username.'))
if user.is_active:
raise ValidationError(_('This account has already been activated.'))
activation = user.activation_set.first()
if not activation:
raise ValidationError(_('Activation code not found.'))
now_with_shift = timezone.now() - timedelta(hours=24)
if activation.created_at > now_with_shift:
raise ValidationError(_('Activation code has already been sent. You can request a new code in 24 hours.'))
self.user_cache = user
return email_or_username
class ResendActivationCodeViaEmailForm(UserCacheMixin, forms.Form):
email = forms.EmailField(label=_('Email'))
def clean_email(self):
email = self.cleaned_data['email']
user = User.objects.filter(email__iexact=email).first()
if not user:
raise ValidationError(_('You entered an invalid email address.'))
if user.is_active:
raise ValidationError(_('This account has already been activated.'))
activation = user.activation_set.first()
if not activation:
raise ValidationError(_('Activation code not found.'))
now_with_shift = timezone.now() - timedelta(hours=24)
if activation.created_at > now_with_shift:
raise ValidationError(_('Activation code has already been sent. You can request a new code in 24 hours.'))
self.user_cache = user
return email
class RestorePasswordForm(EmailForm):
pass
class RestorePasswordViaEmailOrUsernameForm(EmailOrUsernameForm):
pass
class ChangeProfileForm(forms.Form):
first_name = forms.CharField(label=_('First name'), max_length=30, required=False)
last_name = forms.CharField(label=_('Last name'), max_length=150, required=False)
class ChangeEmailForm(forms.Form):
email = forms.EmailField(label=_('Email'))
def __init__(self, user, *args, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
def clean_email(self):
email = self.cleaned_data['email']
if email == self.user.email:
raise ValidationError(_('Please enter another email.'))
user = User.objects.filter(Q(email__iexact=email) & ~Q(id=self.user.id)).exists()
if user:
            raise ValidationError(_('You can not use this email address.'))
return email
class RemindUsernameForm(EmailForm):
pass
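# --- Illustrative usage sketch (editor addition, not part of the original forms.py) ---
# The forms above are driven by two project settings: USE_REMEMBER_ME toggles the extra
# checkbox in the sign-in forms, and SIGN_UP_FIELDS selects the fields exposed by
# SignUpForm. A minimal, hypothetical way to exercise SignInViaEmailForm from a Django
# shell or test (credential values are placeholders) could look like:
#
#   form = SignInViaEmailForm(data={'email': 'user@example.com', 'password': 'secret'})
#   if form.is_valid():
#       user = form.user_cache  # populated by EmailForm.clean_email() above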
|
525b502b72f02d30859567571d5b9f897cded29c
|
3afe7348e830a0c5139fb7cf393736e18b59ab4a
|
/src/clusterfuzz/_internal/tests/core/bot/tasks/utasks/minimize_task_test.py
|
26cc1d2c469bd93c68c7d7228afba18608c9491d
|
[
"Apache-2.0"
] |
permissive
|
google/clusterfuzz
|
00845899e081dbbb89b70a75ce0b7eba3da73b02
|
6501a839b27a264500244f32bace8bee4d5cb9a2
|
refs/heads/master
| 2023-09-03T17:34:17.821599
| 2023-09-01T16:11:51
| 2023-09-01T16:11:51
| 168,060,021
| 5,420
| 639
|
Apache-2.0
| 2023-09-13T16:40:54
| 2019-01-29T00:19:40
|
Python
|
UTF-8
|
Python
| false
| false
| 8,068
|
py
|
minimize_task_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for corpus_pruning_task."""
import os
import shutil
import tempfile
import unittest
# pylint: disable=unused-argument
from unittest import mock
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.bot.fuzzers import init as fuzzers_init
from clusterfuzz._internal.bot.tasks.utasks import minimize_task
from clusterfuzz._internal.bot.tasks.utasks import uworker_io
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import blobs
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.tests.test_libs import helpers
from clusterfuzz._internal.tests.test_libs import test_utils
from clusterfuzz._internal.tests.test_libs import untrusted_runner_helpers
TEST_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'minimize_task_data')
@test_utils.with_cloud_emulators('datastore', 'pubsub')
class LibFuzzerMinimizeTaskTest(unittest.TestCase):
"""libFuzzer Minimize task tests."""
def setUp(self):
helpers.patch_environ(self)
helpers.patch(self, [
'clusterfuzz._internal.bot.tasks.utasks.minimize_task._run_libfuzzer_testcase',
'clusterfuzz._internal.bot.tasks.utasks.minimize_task._run_libfuzzer_tool',
])
test_utils.setup_pubsub(utils.get_application_id())
environment.set_value('APP_ARGS', '%TESTCASE% fuzz_target')
environment.set_value('APP_DIR', '/libfuzzer')
environment.set_value('APP_NAME', '')
environment.set_value('APP_PATH', '')
environment.set_value('BOT_TMPDIR', '/bot_tmpdir')
environment.set_value('CRASH_STACKTRACES_DIR', '/crash_stacks')
environment.set_value('FUZZER_DIR', '/fuzzer_dir')
environment.set_value('INPUT_DIR', '/input_dir')
environment.set_value('JOB_NAME', 'libfuzzer_asan_test')
environment.set_value('USER_PROFILE_IN_MEMORY', True)
def test_libfuzzer_skip_minimization_initial_crash_state(self):
"""Test libFuzzer minimization skipping with a valid initial crash state."""
# TODO(ochang): Fix circular import.
from clusterfuzz._internal.crash_analysis.crash_result import CrashResult
data_types.Job(name='libfuzzer_asan_job').put()
testcase = data_types.Testcase(
minimized_keys='',
fuzzed_keys='FUZZED_KEY',
job_type='libfuzzer_asan_job',
security_flag=True)
testcase.put()
stacktrace = (
'==14970==ERROR: AddressSanitizer: heap-buffer-overflow on address '
'0x61b00001f7d0 at pc 0x00000064801b bp 0x7ffce478dbd0 sp '
'0x7ffce478dbc8 READ of size 4 at 0x61b00001f7d0 thread T0\n'
'#0 0x64801a in frame0() src/test.cpp:1819:15\n'
'#1 0x647ac5 in frame1() src/test.cpp:1954:25\n'
'#2 0xb1dee7 in frame2() src/test.cpp:160:9\n'
'#3 0xb1ddd8 in frame3() src/test.cpp:148:34\n')
self.mock._run_libfuzzer_testcase.return_value = CrashResult( # pylint: disable=protected-access
1, 1.0, stacktrace)
self.mock._run_libfuzzer_tool.return_value = (None, None) # pylint: disable=protected-access
minimize_task.do_libfuzzer_minimization(testcase, '/testcase_file_path')
testcase = data_handler.get_testcase_by_id(testcase.key.id())
self.assertEqual('Heap-buffer-overflow', testcase.crash_type)
self.assertEqual('frame0\nframe1\nframe2\n', testcase.crash_state)
self.assertEqual('0x61b00001f7d0', testcase.crash_address)
self.assertEqual(
'+----------------------------------------Release Build Stacktrace'
'----------------------------------------+\n%s' % stacktrace,
testcase.crash_stacktrace)
class MinimizeTaskTestUntrusted(
untrusted_runner_helpers.UntrustedRunnerIntegrationTest):
"""Minimize task tests for untrusted."""
def setUp(self):
"""Set up."""
super().setUp()
environment.set_value('JOB_NAME', 'libfuzzer_asan_job')
patcher = mock.patch(
'clusterfuzz._internal.bot.fuzzers.libFuzzer.fuzzer.LibFuzzer.fuzzer_directory',
new_callable=mock.PropertyMock)
mock_fuzzer_directory = patcher.start()
self.addCleanup(patcher.stop)
mock_fuzzer_directory.return_value = os.path.join(
environment.get_value('ROOT_DIR'), 'src', 'clusterfuzz', '_internal',
'bot', 'fuzzers', 'libFuzzer')
job = data_types.Job(
name='libfuzzer_asan_job',
environment_string=(
'RELEASE_BUILD_BUCKET_PATH = '
'gs://clusterfuzz-test-data/test_libfuzzer_builds/'
'test-libfuzzer-build-([0-9]+).zip\n'
'REVISION_VARS_URL = https://commondatastorage.googleapis.com/'
'clusterfuzz-test-data/test_libfuzzer_builds/'
'test-libfuzzer-build-%s.srcmap.json\n'))
job.put()
data_types.FuzzTarget(
engine='libFuzzer', binary='test_fuzzer', project='test-project').put()
data_types.FuzzTargetJob(
fuzz_target_name='libFuzzer_test_fuzzer',
engine='libFuzzer',
job='libfuzzer_asan_job').put()
environment.set_value('USE_MINIJAIL', True)
data_types.Fuzzer(
revision=1,
file_size='builtin',
source='builtin',
name='libFuzzer',
max_testcases=4,
builtin=True).put()
self.temp_dir = tempfile.mkdtemp(dir=environment.get_value('FUZZ_INPUTS'))
def tearDown(self):
super().tearDown()
shutil.rmtree(self.temp_dir, ignore_errors=True)
def test_minimize(self):
"""Test minimize."""
helpers.patch(self, ['clusterfuzz._internal.base.utils.is_oss_fuzz'])
self.mock.is_oss_fuzz.return_value = True
testcase_file_path = os.path.join(self.temp_dir, 'testcase')
with open(testcase_file_path, 'wb') as f:
f.write(b'EEE')
with open(testcase_file_path) as f:
fuzzed_keys = blobs.write_blob(f)
testcase_path = os.path.join(self.temp_dir, 'testcase')
testcase = data_types.Testcase(
crash_type='Null-dereference WRITE',
crash_address='',
crash_state='Foo\n',
crash_stacktrace='',
crash_revision=1337,
fuzzed_keys=fuzzed_keys,
fuzzer_name='libFuzzer',
overridden_fuzzer_name='libFuzzer_test_fuzzer',
job_type='libfuzzer_asan_job',
absolute_path=testcase_path,
minimized_arguments='%TESTCASE% test_fuzzer')
testcase.put()
data_types.FuzzTarget(engine='libFuzzer', binary='test_fuzzer').put()
fuzzers_init.run()
self._setup_env(job_type='libfuzzer_asan_job')
environment.set_value('APP_ARGS', testcase.minimized_arguments)
environment.set_value('LIBFUZZER_MINIMIZATION_ROUNDS', 3)
environment.set_value('UBSAN_OPTIONS',
'unneeded_option=1:silence_unsigned_overflow=1')
uworker_input = uworker_io.DeserializedUworkerMsg(
job_type='libfuzzer_asan_job', testcase_id=str(testcase.key.id()))
minimize_task.utask_main(uworker_input)
testcase = data_handler.get_testcase_by_id(testcase.key.id())
self.assertNotEqual('', testcase.minimized_keys)
self.assertNotEqual('NA', testcase.minimized_keys)
self.assertNotEqual(testcase.fuzzed_keys, testcase.minimized_keys)
self.assertEqual({
'ASAN_OPTIONS': {},
'UBSAN_OPTIONS': {
'silence_unsigned_overflow': 1
}
}, testcase.get_metadata('env'))
blobs.read_blob_to_disk(testcase.minimized_keys, testcase_path)
with open(testcase_path, 'rb') as f:
self.assertEqual(1, len(f.read()))
|
3531bbdcb663a6fd1ceb0428ec7e9d4dcca6309c
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-py/tests/testdir_misc/pyunit_citibike_consistency_check.py
|
cafef032ed9e8d75c8371753d943ce56af3ea688
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,521
|
py
|
pyunit_citibike_consistency_check.py
|
from builtins import zip
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
# Check to make sure the small and large citibike demos have not diverged
import os
def consistency_check():
try:
small = pyunit_utils.locate("h2o-py/demos/citi_bike_small.ipynb")
except ValueError:
small = pyunit_utils.locate("h2o-py/demos/citi_bike_small_NOPASS.ipynb")
try:
large = pyunit_utils.locate("h2o-py/demos/citi_bike_large.ipynb")
except ValueError:
large = pyunit_utils.locate("h2o-py/demos/citi_bike_large_NOPASS.ipynb")
results_dir = pyunit_utils.locate("results")
s = os.path.join(results_dir, os.path.basename(small).split('.')[0]+".py")
l = os.path.join(results_dir, os.path.basename(large).split('.')[0]+".py")
from tests import pydemo_utils
pydemo_utils.ipy_notebook_exec(small, save_and_norun = s)
pydemo_utils.ipy_notebook_exec(large, save_and_norun = l)
small_list = list(open(s, 'r'))
large_list = list(open(l, 'r'))
for s, l in zip(small_list, large_list):
if s != l:
assert s == "data = h2o.import_file(path=small_test)\n" and \
l != "data = h2o.import_file(path=large_test)\n", \
"This difference is not allowed between the small and large citibike demos.\nCitibike small: {0}" \
"Citibike large: {1}".format(s,l)
if __name__ == "__main__":
pyunit_utils.standalone_test(consistency_check)
else:
consistency_check()
|
f4a7aeaad31e5c74a3c7283f1d7d0acb0ed864f5
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/gen/view_models/views/lobby/customization/progressive_items_view/item_level_info_model.py
|
3891b116eaaa0ef4ffc083c29f09fd866ee011f7
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,086
|
py
|
item_level_info_model.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/customization/progressive_items_view/item_level_info_model.py
from frameworks.wulf import ViewModel
from gui.impl.gen.view_models.views.lobby.customization.progressive_items_view.progress_block import ProgressBlock
class ItemLevelInfoModel(ViewModel):
__slots__ = ()
def __init__(self, properties=8, commands=0):
super(ItemLevelInfoModel, self).__init__(properties=properties, commands=commands)
@property
def progressBlock(self):
return self._getViewModel(0)
@staticmethod
def getProgressBlockType():
return ProgressBlock
def getLevel(self):
return self._getNumber(1)
def setLevel(self, value):
self._setNumber(1, value)
def getLevelText(self):
return self._getString(2)
def setLevelText(self, value):
self._setString(2, value)
def getUnlocked(self):
return self._getBool(3)
def setUnlocked(self, value):
self._setBool(3, value)
def getInProgress(self):
return self._getBool(4)
def setInProgress(self, value):
self._setBool(4, value)
def getSelected(self):
return self._getBool(5)
def setSelected(self, value):
self._setBool(5, value)
def getIcon(self):
return self._getString(6)
def setIcon(self, value):
self._setString(6, value)
def getTooltipId(self):
return self._getString(7)
def setTooltipId(self, value):
self._setString(7, value)
def _initialize(self):
super(ItemLevelInfoModel, self)._initialize()
self._addViewModelProperty('progressBlock', ProgressBlock())
self._addNumberProperty('level', -1)
self._addStringProperty('levelText', '')
self._addBoolProperty('unlocked', False)
self._addBoolProperty('inProgress', False)
self._addBoolProperty('selected', False)
self._addStringProperty('icon', '')
self._addStringProperty('tooltipId', '')
|
71f33f00a988f190955b5a61c96e5fe8133fc353
|
c2c212ba42ebfa35f3b6122344978bc94ec8fa67
|
/tests/test_onehundredonecookbooks.py
|
f37b16c4bc485cdd16b997ed90b8b90ccc645ac2
|
[
"MIT"
] |
permissive
|
hhursev/recipe-scrapers
|
0cd6b7db4ef23ca825f2354f5d1ba76076a14813
|
8ced0227b3b16c532fc5ebf3060c99ee0452adab
|
refs/heads/main
| 2023-09-03T07:33:29.684121
| 2023-09-01T21:15:50
| 2023-09-01T21:15:50
| 42,446,168
| 1,276
| 443
|
MIT
| 2023-09-14T16:34:09
| 2015-09-14T12:05:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,390
|
py
|
test_onehundredonecookbooks.py
|
from recipe_scrapers.onehundredonecookbooks import OneHundredOneCookBooks
from tests import ScraperTest
class TestOneHundredOneCookBooksScraper(ScraperTest):
scraper_class = OneHundredOneCookBooks
def test_host(self):
self.assertEqual("101cookbooks.com", self.harvester_class.host())
def test_author(self):
self.assertEqual("Heidi Swanson", self.harvester_class.author())
def test_title(self):
self.assertEqual("Coconut Broccoli Soup", self.harvester_class.title())
def test_total_time(self):
self.assertEqual(20, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("8", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://images.101cookbooks.com/coconut_broccoli_soup.jpg?w=680&auto=format",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertEqual(
[
"1 14-ounce can of full fat coconut milk",
"3 cloves garlic, smashed",
"1 large yellow onion, chopped",
"1 small serrano chile, stemmed and chopped",
"2 teaspoons fine grain sea salt",
"4 1/2 cups water",
"2-3 large heads of broccoli (~1 1/2 lb.), cut into small florets",
"2-3 large handfuls of spinach",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
self.assertEqual(
"Scoop a big spoonful of thick coconut cream from the top of the coconut milk can. Add it to a large pan over medium-high heat. When hot, stir in the garlic, onions, chile, and salt. Sauté for a couple minutes, just long enough for everything to soften up. Add the remaining coconut milk, and the water, and bring to a simmer before adding the broccoli and spinach. Simmer just long enough for the broccoli to get tender throughout, 2 - 4 minutes. Immediately remove the soup from heat and puree with an immersion blender. Add more water if you feel the need to thin the soup out. Taste and add more salt if needed.\nServe sprinkled with tofu cubes, toasted almonds, and lots of scallions.",
self.harvester_class.instructions(),
)
def test_ratings(self):
self.assertEqual(None, self.harvester_class.ratings())
|
7108edbc93d1426a13c7facaf75f9a45affdcbb7
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/wpt_tools/wpt/tools/wpt/android.py
|
366502cc6c2b63ca47ce01caef9b12146b2ee9f7
|
[
"BSD-3-Clause",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 5,386
|
py
|
android.py
|
# mypy: allow-untyped-defs
import argparse
import os
import platform
import shutil
import subprocess
import requests
from .wpt import venv_dir
android_device = None
here = os.path.abspath(os.path.dirname(__file__))
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
def do_delayed_imports():
global android_device
from mozrunner.devices import android_device
android_device.TOOLTOOL_PATH = os.path.join(os.path.dirname(__file__),
os.pardir,
"third_party",
"tooltool",
"tooltool.py")
def get_parser_install():
parser = argparse.ArgumentParser()
parser.add_argument("--reinstall", action="store_true", default=False,
help="Force reinstall even if the emulator already exists")
return parser
def get_parser_start():
return get_parser_install()
def get_sdk_path(dest):
if dest is None:
# os.getcwd() doesn't include the venv path
dest = os.path.join(wpt_root, venv_dir())
dest = os.path.join(dest, 'android-sdk')
return os.path.abspath(os.environ.get('ANDROID_SDK_PATH', dest))
def uninstall_sdk(dest=None):
path = get_sdk_path(dest)
if os.path.exists(path) and os.path.isdir(path):
shutil.rmtree(path)
def install_sdk(logger, dest=None):
sdk_path = get_sdk_path(dest)
if os.path.isdir(sdk_path):
logger.info("Using SDK installed at %s" % sdk_path)
return sdk_path, False
if not os.path.exists(sdk_path):
os.makedirs(sdk_path)
os_name = platform.system().lower()
if os_name not in ["darwin", "linux", "windows"]:
logger.critical("Unsupported platform %s" % os_name)
raise NotImplementedError
os_name = 'darwin' if os_name == 'macosx' else os_name
# TODO: either always use the latest version or have some way to
# configure a per-product version if there are strong requirements
# to use a specific version.
url = f'https://dl.google.com/android/repository/sdk-tools-{os_name}-4333796.zip'
logger.info("Getting SDK from %s" % url)
temp_path = os.path.join(sdk_path, url.rsplit("/", 1)[1])
try:
with open(temp_path, "wb") as f:
with requests.get(url, stream=True) as resp:
shutil.copyfileobj(resp.raw, f)
# Python's zipfile module doesn't seem to work here
subprocess.check_call(["unzip", temp_path], cwd=sdk_path)
finally:
os.unlink(temp_path)
return sdk_path, True
def install_android_packages(logger, sdk_path, no_prompt=False):
sdk_manager_path = os.path.join(sdk_path, "tools", "bin", "sdkmanager")
if not os.path.exists(sdk_manager_path):
raise OSError("Can't find sdkmanager at %s" % sdk_manager_path)
packages = ["platform-tools",
"build-tools;33.0.1",
"platforms;android-33",
"emulator"]
    # TODO: make this work non-interactively
logger.info("Installing SDK packages")
cmd = [sdk_manager_path] + packages
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE)
if no_prompt:
data = "Y\n" * 100 if no_prompt else None
proc.communicate(data)
else:
proc.wait()
if proc.returncode != 0:
raise subprocess.CalledProcessError(proc.returncode, cmd)
def get_emulator(sdk_path, device_serial=None):
if android_device is None:
do_delayed_imports()
if "ANDROID_SDK_ROOT" not in os.environ:
os.environ["ANDROID_SDK_ROOT"] = sdk_path
substs = {"top_srcdir": wpt_root, "TARGET_CPU": "x86"}
emulator = android_device.AndroidEmulator("*", substs=substs, device_serial=device_serial)
emulator.emulator_path = os.path.join(sdk_path, "emulator", "emulator")
return emulator
def install(logger, reinstall=False, no_prompt=False, device_serial=None):
if reinstall:
uninstall_sdk()
dest, new_install = install_sdk(logger)
if new_install:
install_android_packages(logger, dest, no_prompt)
if "ANDROID_SDK_ROOT" not in os.environ:
os.environ["ANDROID_SDK_ROOT"] = dest
emulator = get_emulator(dest, device_serial=device_serial)
return emulator
def start(logger, emulator=None, reinstall=False, device_serial=None):
if reinstall:
        install(logger, reinstall=True)
sdk_path = get_sdk_path(None)
if emulator is None:
emulator = get_emulator(sdk_path, device_serial=device_serial)
if not emulator.check_avd():
logger.critical("Android AVD not found, please run |mach bootstrap|")
raise NotImplementedError
emulator.start()
emulator.wait_for_start()
return emulator
def run_install(venv, **kwargs):
try:
import logging
logging.basicConfig()
logger = logging.getLogger()
install(logger, **kwargs)
except Exception:
import traceback
traceback.print_exc()
import pdb
pdb.post_mortem()
def run_start(venv, **kwargs):
try:
import logging
logging.basicConfig()
logger = logging.getLogger()
start(logger, **kwargs)
except Exception:
import traceback
traceback.print_exc()
import pdb
pdb.post_mortem()
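# --- Illustrative usage sketch (editor addition, not part of the upstream wpt module) ---
# Tying the helpers above together: install() fetches the SDK and packages when missing
# and returns an emulator handle, start() boots it. Logger setup mirrors run_install().
#
#   import logging
#   logging.basicConfig()
#   logger = logging.getLogger()
#   emulator = install(logger)          # reuses an existing SDK unless reinstall=True
#   start(logger, emulator=emulator)    # blocks until the AVD has started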
|
7d708c536b35f528587dbfe9b701be918cc148a2
|
dcc7dd6c65cb13d3619689b2c794b450e503b100
|
/src/poetry/vcs/git/backend.py
|
fcafc430605d76f26d02a496077ea27070fdcd8a
|
[
"MIT"
] |
permissive
|
python-poetry/poetry
|
6b83f8db6a15b132fd252b68ed3bbee51b4e64f0
|
02448cf7f184dea204156f7dcb620a4f01a0068e
|
refs/heads/master
| 2023-09-04T12:23:02.700442
| 2023-09-02T10:46:06
| 2023-09-02T10:46:06
| 123,303,402
| 20,127
| 2,081
|
MIT
| 2023-09-12T09:41:09
| 2018-02-28T15:23:47
|
Python
|
UTF-8
|
Python
| false
| false
| 15,939
|
py
|
backend.py
|
from __future__ import annotations
import dataclasses
import logging
import re
from pathlib import Path
from subprocess import CalledProcessError
from typing import TYPE_CHECKING
from urllib.parse import urljoin
from dulwich import porcelain
from dulwich.client import HTTPUnauthorized
from dulwich.client import get_transport_and_path
from dulwich.config import ConfigFile
from dulwich.config import parse_submodules
from dulwich.errors import NotGitRepository
from dulwich.refs import ANNOTATED_TAG_SUFFIX
from dulwich.repo import Repo
from poetry.console.exceptions import PoetryConsoleError
from poetry.utils.authenticator import get_default_authenticator
from poetry.utils.helpers import remove_directory
if TYPE_CHECKING:
from dulwich.client import FetchPackResult
from dulwich.client import GitClient
logger = logging.getLogger(__name__)
def is_revision_sha(revision: str | None) -> bool:
return re.match(r"^\b[0-9a-f]{5,40}\b$", revision or "") is not None
def annotated_tag(ref: str | bytes) -> bytes:
if isinstance(ref, str):
ref = ref.encode("utf-8")
return ref + ANNOTATED_TAG_SUFFIX
@dataclasses.dataclass
class GitRefSpec:
branch: str | None = None
revision: str | None = None
tag: str | None = None
ref: bytes = dataclasses.field(default_factory=lambda: b"HEAD")
def resolve(self, remote_refs: FetchPackResult) -> None:
"""
Resolve the ref using the provided remote refs.
"""
self._normalise(remote_refs=remote_refs)
self._set_head(remote_refs=remote_refs)
def _normalise(self, remote_refs: FetchPackResult) -> None:
"""
Internal helper method to determine if given revision is
1. a branch or tag; if so, set corresponding properties.
2. a short sha; if so, resolve full sha and set as revision
"""
if self.revision:
ref = f"refs/tags/{self.revision}".encode()
if ref in remote_refs.refs or annotated_tag(ref) in remote_refs.refs:
# this is a tag, incorrectly specified as a revision, tags take priority
self.tag = self.revision
self.revision = None
elif (
self.revision.encode("utf-8") in remote_refs.refs
or f"refs/heads/{self.revision}".encode() in remote_refs.refs
):
# this is most likely a ref spec or a branch incorrectly specified
self.branch = self.revision
self.revision = None
elif (
self.branch
and f"refs/heads/{self.branch}".encode() not in remote_refs.refs
and (
f"refs/tags/{self.branch}".encode() in remote_refs.refs
or annotated_tag(f"refs/tags/{self.branch}") in remote_refs.refs
)
):
# this is a tag incorrectly specified as a branch
self.tag = self.branch
self.branch = None
if self.revision and self.is_sha_short:
# revision is a short sha, resolve to full sha
short_sha = self.revision.encode("utf-8")
for sha in remote_refs.refs.values():
if sha.startswith(short_sha):
self.revision = sha.decode("utf-8")
break
def _set_head(self, remote_refs: FetchPackResult) -> None:
"""
        Internal helper method to populate ref and set its sha as the remote's head
and default ref.
"""
self.ref = remote_refs.symrefs[b"HEAD"]
if self.revision:
head = self.revision.encode("utf-8")
else:
if self.tag:
ref = f"refs/tags/{self.tag}".encode()
annotated = annotated_tag(ref)
self.ref = annotated if annotated in remote_refs.refs else ref
elif self.branch:
self.ref = (
self.branch.encode("utf-8")
if self.is_ref
else f"refs/heads/{self.branch}".encode()
)
head = remote_refs.refs[self.ref]
remote_refs.refs[self.ref] = remote_refs.refs[b"HEAD"] = head
@property
def key(self) -> str:
return self.revision or self.branch or self.tag or self.ref.decode("utf-8")
@property
def is_sha(self) -> bool:
return is_revision_sha(revision=self.revision)
@property
def is_ref(self) -> bool:
return self.branch is not None and self.branch.startswith("refs/")
@property
def is_sha_short(self) -> bool:
return self.revision is not None and self.is_sha and len(self.revision) < 40
@dataclasses.dataclass
class GitRepoLocalInfo:
repo: dataclasses.InitVar[Repo | Path]
origin: str = dataclasses.field(init=False)
revision: str = dataclasses.field(init=False)
def __post_init__(self, repo: Repo | Path) -> None:
repo = Git.as_repo(repo=repo) if not isinstance(repo, Repo) else repo
self.origin = Git.get_remote_url(repo=repo, remote="origin")
self.revision = Git.get_revision(repo=repo)
class Git:
@staticmethod
def as_repo(repo: Path) -> Repo:
return Repo(str(repo))
@staticmethod
def get_remote_url(repo: Repo, remote: str = "origin") -> str:
with repo:
config = repo.get_config()
section = (b"remote", remote.encode("utf-8"))
url = ""
if config.has_section(section):
value = config.get(section, b"url")
url = value.decode("utf-8")
return url
@staticmethod
def get_revision(repo: Repo) -> str:
with repo:
return repo.head().decode("utf-8")
@classmethod
def info(cls, repo: Repo | Path) -> GitRepoLocalInfo:
return GitRepoLocalInfo(repo=repo)
@staticmethod
def get_name_from_source_url(url: str) -> str:
return re.sub(r"(.git)?$", "", url.rsplit("/", 1)[-1])
@classmethod
def _fetch_remote_refs(cls, url: str, local: Repo) -> FetchPackResult:
"""
Helper method to fetch remote refs.
"""
client: GitClient
path: str
kwargs: dict[str, str] = {}
credentials = get_default_authenticator().get_credentials_for_git_url(url=url)
if credentials.password and credentials.username:
# we do this conditionally as otherwise, dulwich might complain if these
# parameters are passed in for an ssh url
kwargs["username"] = credentials.username
kwargs["password"] = credentials.password
config = local.get_config_stack()
client, path = get_transport_and_path(url, config=config, **kwargs)
with local:
result: FetchPackResult = client.fetch(
path,
local,
determine_wants=local.object_store.determine_wants_all,
)
return result
@staticmethod
def _clone_legacy(url: str, refspec: GitRefSpec, target: Path) -> Repo:
"""
Helper method to facilitate fallback to using system provided git client via
subprocess calls.
"""
from poetry.vcs.git.system import SystemGit
logger.debug("Cloning '%s' using system git client", url)
if target.exists():
remove_directory(path=target, force=True)
revision = refspec.tag or refspec.branch or refspec.revision or "HEAD"
try:
SystemGit.clone(url, target)
except CalledProcessError:
raise PoetryConsoleError(
f"Failed to clone {url}, check your git configuration and permissions"
" for this repository."
)
if revision:
revision.replace("refs/head/", "")
revision.replace("refs/tags/", "")
try:
SystemGit.checkout(revision, target)
except CalledProcessError:
raise PoetryConsoleError(f"Failed to checkout {url} at '{revision}'")
repo = Repo(str(target))
return repo
@classmethod
def _clone(cls, url: str, refspec: GitRefSpec, target: Path) -> Repo:
"""
        Helper method to clone a remote repository at the given `url` at the specified
ref spec.
"""
local: Repo
if not target.exists():
local = Repo.init(str(target), mkdir=True)
porcelain.remote_add(local, "origin", url)
else:
local = Repo(str(target))
remote_refs = cls._fetch_remote_refs(url=url, local=local)
logger.debug(
"Cloning <c2>%s</> at '<c2>%s</>' to <c1>%s</>", url, refspec.key, target
)
try:
refspec.resolve(remote_refs=remote_refs)
except KeyError: # branch / ref does not exist
raise PoetryConsoleError(
f"Failed to clone {url} at '{refspec.key}', verify ref exists on"
" remote."
)
# ensure local HEAD matches remote
local.refs[b"HEAD"] = remote_refs.refs[b"HEAD"]
if refspec.is_ref:
# set ref to current HEAD
local.refs[refspec.ref] = local.refs[b"HEAD"]
for base, prefix in {
(b"refs/remotes/origin", b"refs/heads/"),
(b"refs/tags", b"refs/tags"),
}:
local.refs.import_refs(
base=base,
other={
n[len(prefix) :]: v
for (n, v) in remote_refs.refs.items()
if n.startswith(prefix) and not n.endswith(ANNOTATED_TAG_SUFFIX)
},
)
try:
with local:
local.reset_index()
except (AssertionError, KeyError) as e:
# this implies the ref we need does not exist or is invalid
if isinstance(e, KeyError):
# the local copy is at a bad state, lets remove it
logger.debug(
"Removing local clone (<c1>%s</>) of repository as it is in a"
" broken state.",
local.path,
)
remove_directory(Path(local.path), force=True)
if isinstance(e, AssertionError) and "Invalid object name" not in str(e):
raise
logger.debug(
"\nRequested ref (<c2>%s</c2>) was not fetched to local copy and"
" cannot be used. The following error was"
" raised:\n\n\t<warning>%s</>",
refspec.key,
e,
)
raise PoetryConsoleError(
f"Failed to clone {url} at '{refspec.key}', verify ref exists on"
" remote."
)
return local
@classmethod
def _clone_submodules(cls, repo: Repo) -> None:
"""
Helper method to identify configured submodules and clone them recursively.
"""
repo_root = Path(repo.path)
modules_config = repo_root.joinpath(".gitmodules")
# A relative URL by definition starts with ../ or ./
relative_submodule_regex = re.compile(r"^\.{1,2}/")
if modules_config.exists():
config = ConfigFile.from_path(str(modules_config))
url: bytes
path: bytes
submodules = parse_submodules(config)
for path, url, name in submodules:
path_relative = Path(path.decode("utf-8"))
path_absolute = repo_root.joinpath(path_relative)
url_string = url.decode("utf-8")
if relative_submodule_regex.search(url_string):
url_string = urljoin(f"{Git.get_remote_url(repo)}/", url_string)
source_root = path_absolute.parent
source_root.mkdir(parents=True, exist_ok=True)
with repo:
try:
revision = repo.open_index()[path].sha.decode("utf-8")
except KeyError:
logger.debug(
"Skip submodule %s in %s, path %s not found",
name,
repo.path,
path,
)
continue
cls.clone(
url=url_string,
source_root=source_root,
name=path_relative.name,
revision=revision,
clean=path_absolute.exists()
and not path_absolute.joinpath(".git").is_dir(),
)
@staticmethod
def is_using_legacy_client() -> bool:
from poetry.config.config import Config
legacy_client: bool = Config.create().get(
"experimental.system-git-client", False
)
return legacy_client
@staticmethod
def get_default_source_root() -> Path:
from poetry.config.config import Config
return Path(Config.create().get("cache-dir")) / "src"
@classmethod
def clone(
cls,
url: str,
name: str | None = None,
branch: str | None = None,
tag: str | None = None,
revision: str | None = None,
source_root: Path | None = None,
clean: bool = False,
) -> Repo:
source_root = source_root or cls.get_default_source_root()
source_root.mkdir(parents=True, exist_ok=True)
name = name or cls.get_name_from_source_url(url=url)
target = source_root / name
refspec = GitRefSpec(branch=branch, revision=revision, tag=tag)
if target.exists():
if clean:
# force clean the local copy if it exists, do not reuse
remove_directory(target, force=True)
else:
# check if the current local copy matches the requested ref spec
try:
current_repo = Repo(str(target))
with current_repo:
current_sha = current_repo.head().decode("utf-8")
except (NotGitRepository, AssertionError, KeyError):
# something is wrong with the current checkout, clean it
remove_directory(target, force=True)
else:
if not is_revision_sha(revision=current_sha):
# head is not a sha, this will cause issues later, lets reset
remove_directory(target, force=True)
elif (
refspec.is_sha
and refspec.revision is not None
and current_sha.startswith(refspec.revision)
):
# if revision is used short-circuit remote fetch head matches
return current_repo
try:
if not cls.is_using_legacy_client():
local = cls._clone(url=url, refspec=refspec, target=target)
cls._clone_submodules(repo=local)
return local
except HTTPUnauthorized:
# we do this here to handle http authenticated repositories as dulwich
# does not currently support using credentials from git-credential helpers.
# upstream issue: https://github.com/jelmer/dulwich/issues/873
#
# this is a little inefficient, however preferred as this is transparent
# without additional configuration or changes for existing projects that
# use http basic auth credentials.
logger.debug(
"Unable to fetch from private repository '%s', falling back to"
" system git",
url,
)
# fallback to legacy git client
return cls._clone_legacy(url=url, refspec=refspec, target=target)
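# --- Illustrative usage sketch (editor addition, not part of poetry's backend.py) ---
# Git.clone() resolves the requested branch/tag/revision against the remote refs and
# returns a dulwich Repo checked out at the resolved sha. The URL, branch and directory
# below are placeholders, not values taken from the original module.
#
#   from pathlib import Path
#   repo = Git.clone(
#       url="https://github.com/python-poetry/poetry.git",
#       branch="master",
#       source_root=Path("/tmp/poetry-src"),
#   )
#   print(Git.info(repo).revision)  # full sha of the checked-out HEAD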
|
3562c60602c45a713e7aae07310fab6cc39cec20
|
3b87eaa7f1b8290d1a74ac2bec9573f81aab831d
|
/python/python/examples/class_or_func_def_stmt.py
|
9d066a3b46b73cac29b706d91e009bc63818d85a
|
[
"MIT"
] |
permissive
|
antlr/grammars-v4
|
1f6ba461f9fb2c8f04335ca495249ab6eab8e0ae
|
98c2bc3b68eff9ad4b809d21a6c9d858c5b9ddfa
|
refs/heads/master
| 2023-08-16T13:37:23.691676
| 2023-08-13T15:20:52
| 2023-08-13T15:20:52
| 5,958,314
| 9,255
| 4,577
|
MIT
| 2023-09-13T21:17:22
| 2012-09-25T23:45:11
|
ANTLR
|
UTF-8
|
Python
| false
| false
| 205
|
py
|
class_or_func_def_stmt.py
|
# class_or_func_def_stmt: decorator+ (classdef | funcdef);
# decorator classdef
@decorator
class foo:
pass
# decorator decorator funcdef
@accepts(int,int)
@returns(float)
def bar(low,high):
pass
|
afffec838ee574de9f737ceb24271fc6b00ffe8e
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/examples/research_projects/movement-pruning/emmental/modules/binarizer.py
|
b4a801d56d9de27da30d82d4c2f7b16f40a13ccd
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127
| 2023-09-05T19:21:33
| 2023-09-05T19:21:33
| 155,220,641
| 102,193
| 22,284
|
Apache-2.0
| 2023-09-14T20:44:49
| 2018-10-29T13:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 5,822
|
py
|
binarizer.py
|
# coding=utf-8
# Copyright 2020-present, AllenAI Authors, University of Illinois Urbana-Champaign,
# Intel Nervana Systems and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Binarizers take a (real value) matrix as input and produce a binary (values in {0,1}) mask of the same shape.
"""
import torch
from torch import autograd
class ThresholdBinarizer(autograd.Function):
"""
    Threshold binarizer.
Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j} > \tau`
where `\tau` is a real value threshold.
Implementation is inspired from:
https://github.com/arunmallya/piggyback
Piggyback: Adapting a Single Network to Multiple Tasks by Learning to Mask Weights
Arun Mallya, Dillon Davis, Svetlana Lazebnik
"""
@staticmethod
def forward(ctx, inputs: torch.tensor, threshold: float, sigmoid: bool):
"""
Args:
inputs (`torch.FloatTensor`)
The input matrix from which the binarizer computes the binary mask.
threshold (`float`)
The threshold value (in R).
sigmoid (`bool`)
If set to ``True``, we apply the sigmoid function to the `inputs` matrix before comparing to `threshold`.
In this case, `threshold` should be a value between 0 and 1.
Returns:
mask (`torch.FloatTensor`)
Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is
retained, 0 - the associated weight is pruned).
"""
nb_elems = inputs.numel()
nb_min = int(0.005 * nb_elems) + 1
if sigmoid:
mask = (torch.sigmoid(inputs) > threshold).type(inputs.type())
else:
mask = (inputs > threshold).type(inputs.type())
if mask.sum() < nb_min:
# We limit the pruning so that at least 0.5% (half a percent) of the weights are remaining
k_threshold = inputs.flatten().kthvalue(max(nb_elems - nb_min, 1)).values
mask = (inputs > k_threshold).type(inputs.type())
return mask
@staticmethod
def backward(ctx, gradOutput):
return gradOutput, None, None
class TopKBinarizer(autograd.Function):
"""
Top-k Binarizer.
Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}`
is among the k% highest values of S.
Implementation is inspired from:
https://github.com/allenai/hidden-networks
What's hidden in a randomly weighted neural network?
Vivek Ramanujan*, Mitchell Wortsman*, Aniruddha Kembhavi, Ali Farhadi, Mohammad Rastegari
"""
@staticmethod
def forward(ctx, inputs: torch.tensor, threshold: float):
"""
Args:
inputs (`torch.FloatTensor`)
The input matrix from which the binarizer computes the binary mask.
threshold (`float`)
The percentage of weights to keep (the rest is pruned).
`threshold` is a float between 0 and 1.
Returns:
mask (`torch.FloatTensor`)
Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is
retained, 0 - the associated weight is pruned).
"""
# Get the subnetwork by sorting the inputs and using the top threshold %
mask = inputs.clone()
_, idx = inputs.flatten().sort(descending=True)
j = int(threshold * inputs.numel())
# flat_out and mask access the same memory.
flat_out = mask.flatten()
flat_out[idx[j:]] = 0
flat_out[idx[:j]] = 1
return mask
@staticmethod
def backward(ctx, gradOutput):
return gradOutput, None
class MagnitudeBinarizer(object):
"""
Magnitude Binarizer.
Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}`
is among the k% highest values of |S| (absolute value).
Implementation is inspired from https://github.com/NervanaSystems/distiller/blob/2291fdcc2ea642a98d4e20629acb5a9e2e04b4e6/distiller/pruning/automated_gradual_pruner.py#L24
"""
@staticmethod
    def apply(inputs: torch.Tensor, threshold: float):
"""
Args:
inputs (`torch.FloatTensor`)
The input matrix from which the binarizer computes the binary mask.
                This input matrix is typically the weight matrix.
threshold (`float`)
The percentage of weights to keep (the rest is pruned).
`threshold` is a float between 0 and 1.
Returns:
mask (`torch.FloatTensor`)
Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is
retained, 0 - the associated weight is pruned).
"""
# Get the subnetwork by sorting the inputs and using the top threshold %
mask = inputs.clone()
_, idx = inputs.abs().flatten().sort(descending=True)
j = int(threshold * inputs.numel())
# flat_out and mask access the same memory.
flat_out = mask.flatten()
flat_out[idx[j:]] = 0
flat_out[idx[:j]] = 1
return mask
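if __name__ == "__main__":
    # Added smoke-test sketch (not part of the original module): the random score
    # matrix and the 0.5 threshold / keep-ratio below are illustrative values only.
    torch.manual_seed(0)
    scores = torch.randn(8, 8)
    masks = {
        "threshold": ThresholdBinarizer.apply(scores, 0.5, True),
        "top-k": TopKBinarizer.apply(scores, 0.5),
        "magnitude": MagnitudeBinarizer.apply(scores, 0.5),
    }
    for name, mask in masks.items():
        print(f"{name}: keeping {int(mask.sum().item())}/{mask.numel()} weights")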
|
a8fbc5826339abb319d4a75e70ef95e07f9ceca9
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/doc/source/serve/doc_code/tutorial_pytorch.py
|
66bd2fb6314f899e8074b47c7693d05255b43d55
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
tutorial_pytorch.py
|
# fmt: off
# __doc_import_begin__
from ray import serve
from io import BytesIO
from PIL import Image
from starlette.requests import Request
from typing import Dict
import torch
from torchvision import transforms
from torchvision.models import resnet18
# __doc_import_end__
# fmt: on
# __doc_define_servable_begin__
@serve.deployment
class ImageModel:
def __init__(self):
self.model = resnet18(pretrained=True).eval()
self.preprocessor = transforms.Compose(
[
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Lambda(lambda t: t[:3, ...]), # remove alpha channel
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
async def __call__(self, starlette_request: Request) -> Dict:
image_payload_bytes = await starlette_request.body()
pil_image = Image.open(BytesIO(image_payload_bytes))
print("[1/3] Parsed image data: {}".format(pil_image))
pil_images = [pil_image] # Our current batch size is one
input_tensor = torch.cat(
[self.preprocessor(i).unsqueeze(0) for i in pil_images]
)
print("[2/3] Images transformed, tensor shape {}".format(input_tensor.shape))
with torch.no_grad():
output_tensor = self.model(input_tensor)
print("[3/3] Inference done!")
return {"class_index": int(torch.argmax(output_tensor[0]))}
# __doc_define_servable_end__
# __doc_deploy_begin__
image_model = ImageModel.bind()
# __doc_deploy_end__
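# Added client-side sketch (not part of the original doc snippet): once the deployment
# has been started, e.g. with ``serve.run(image_model)``, an image can be classified
# over HTTP. The URL and the file name below are illustrative assumptions.
#
#     import requests
#
#     with open("dog.jpg", "rb") as f:
#         resp = requests.post("http://localhost:8000/", data=f.read())
#     print(resp.json())  # -> {"class_index": ...}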
|
5ad1e99e46420e075e09d53315b5efedc96c923d
|
e34810541899182d3a0835e02fa68389af63a805
|
/pypsa/io.py
|
8ed64623502f0c6804ebe37a66a8af2d4e109aa5
|
[
"MIT"
] |
permissive
|
PyPSA/PyPSA
|
483216289643ca496d66d316a22e000afa15706c
|
38b710c73950d05164e7d6c9dd786065ee7cde44
|
refs/heads/master
| 2023-08-19T20:55:17.329666
| 2023-08-17T10:40:50
| 2023-08-17T10:40:50
| 49,414,256
| 891
| 399
|
MIT
| 2023-09-14T14:09:38
| 2016-01-11T09:04:18
|
Python
|
UTF-8
|
Python
| false
| false
| 47,437
|
py
|
io.py
|
# -*- coding: utf-8 -*-
"""
Functions for importing and exporting data.
"""
__author__ = (
"PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html"
)
__copyright__ = (
"Copyright 2015-2023 PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html, "
"MIT License"
)
import logging
logger = logging.getLogger(__name__)
import json
import math
import os
from glob import glob
from pathlib import Path
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
import validators
from pypsa.descriptors import update_linkports_component_attrs
try:
import xarray as xr
has_xarray = True
except ImportError:
has_xarray = False
# for the writable data directory follow the XDG guidelines
# https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
_writable_dir = os.path.join(os.path.expanduser("~"), ".local", "share")
_data_dir = os.path.join(
os.environ.get("XDG_DATA_HOME", os.environ.get("APPDATA", _writable_dir)),
"pypsa-networks",
)
_data_dir = Path(_data_dir)
try:
_data_dir.mkdir(exist_ok=True)
except FileNotFoundError:
os.makedirs(_data_dir)
def _retrieve_from_url(path):
local_path = _data_dir / os.path.basename(path)
logger.info(f"Retrieving network data from {path}")
urlretrieve(path, local_path)
return str(local_path)
class ImpExper(object):
ds = None
def __enter__(self):
if self.ds is not None:
self.ds = self.ds.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.finish()
if self.ds is not None:
self.ds.__exit__(exc_type, exc_val, exc_tb)
def finish(self):
pass
class Exporter(ImpExper):
def remove_static(self, list_name):
pass
def remove_series(self, list_name, attr):
pass
class Importer(ImpExper):
pass
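# Added usage note (illustrative sketch, not part of the library): the concrete
# importer/exporter classes below are meant to be used as context managers, so that
# ``finish()`` runs and any underlying datastore is closed on exit, e.g.:
#
#     with ExporterCSV(csv_folder_name="network_csv", encoding=None) as exporter:
#         exporter.save_meta({"description": "toy network"})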
class ImporterCSV(Importer):
def __init__(self, csv_folder_name, encoding):
self.csv_folder_name = csv_folder_name
self.encoding = encoding
assert os.path.isdir(csv_folder_name), "Directory {} does not exist.".format(
csv_folder_name
)
def get_attributes(self):
fn = os.path.join(self.csv_folder_name, "network.csv")
if not os.path.isfile(fn):
return None
return dict(pd.read_csv(fn, encoding=self.encoding).iloc[0])
def get_meta(self):
fn = os.path.join(self.csv_folder_name, "meta.json")
if not os.path.isfile(fn):
return {}
return json.loads(open(fn).read())
def get_snapshots(self):
fn = os.path.join(self.csv_folder_name, "snapshots.csv")
if not os.path.isfile(fn):
return None
df = pd.read_csv(fn, index_col=0, encoding=self.encoding, parse_dates=True)
        # backwards-compatibility: level "snapshot" was renamed to "timestep"
if "snapshot" in df:
df["snapshot"] = pd.to_datetime(df.snapshot)
if "timestep" in df:
df["timestep"] = pd.to_datetime(df.timestep)
return df
def get_investment_periods(self):
fn = os.path.join(self.csv_folder_name, "investment_periods.csv")
if not os.path.isfile(fn):
return None
return pd.read_csv(fn, index_col=0, encoding=self.encoding)
def get_static(self, list_name):
fn = os.path.join(self.csv_folder_name, list_name + ".csv")
return (
pd.read_csv(fn, index_col=0, encoding=self.encoding)
if os.path.isfile(fn)
else None
)
def get_series(self, list_name):
for fn in os.listdir(self.csv_folder_name):
if fn.startswith(list_name + "-") and fn.endswith(".csv"):
attr = fn[len(list_name) + 1 : -4]
df = pd.read_csv(
os.path.join(self.csv_folder_name, fn),
index_col=0,
encoding=self.encoding,
parse_dates=True,
)
yield attr, df
class ExporterCSV(Exporter):
def __init__(self, csv_folder_name, encoding):
self.csv_folder_name = csv_folder_name
self.encoding = encoding
# make sure directory exists
if not os.path.isdir(csv_folder_name):
logger.warning(
"Directory {} does not exist, creating it".format(csv_folder_name)
)
os.mkdir(csv_folder_name)
def save_attributes(self, attrs):
name = attrs.pop("name")
df = pd.DataFrame(attrs, index=pd.Index([name], name="name"))
fn = os.path.join(self.csv_folder_name, "network.csv")
df.to_csv(fn, encoding=self.encoding)
def save_meta(self, meta):
fn = os.path.join(self.csv_folder_name, "meta.json")
open(fn, "w").write(json.dumps(meta))
def save_snapshots(self, snapshots):
fn = os.path.join(self.csv_folder_name, "snapshots.csv")
snapshots.to_csv(fn, encoding=self.encoding)
def save_investment_periods(self, investment_periods):
fn = os.path.join(self.csv_folder_name, "investment_periods.csv")
investment_periods.to_csv(fn, encoding=self.encoding)
def save_static(self, list_name, df):
fn = os.path.join(self.csv_folder_name, list_name + ".csv")
df.to_csv(fn, encoding=self.encoding)
def save_series(self, list_name, attr, df):
fn = os.path.join(self.csv_folder_name, list_name + "-" + attr + ".csv")
df.to_csv(fn, encoding=self.encoding)
def remove_static(self, list_name):
fns = glob(os.path.join(self.csv_folder_name, list_name) + "*.csv")
if fns:
for fn in fns:
os.unlink(fn)
logger.warning("Stale csv file(s) {} removed".format(", ".join(fns)))
def remove_series(self, list_name, attr):
fn = os.path.join(self.csv_folder_name, list_name + "-" + attr + ".csv")
if os.path.exists(fn):
os.unlink(fn)
class ImporterHDF5(Importer):
def __init__(self, path):
self.path = path
if isinstance(path, (str, Path)):
if validators.url(str(path)):
path = _retrieve_from_url(path)
self.ds = pd.HDFStore(path, mode="r")
self.index = {}
def get_attributes(self):
return dict(self.ds["/network"].reset_index().iloc[0])
def get_meta(self):
return json.loads(self.ds["/meta"][0] if "/meta" in self.ds else "{}")
def get_snapshots(self):
return self.ds["/snapshots"] if "/snapshots" in self.ds else None
def get_investment_periods(self):
return (
self.ds["/investment_periods"] if "/investment_periods" in self.ds else None
)
def get_static(self, list_name):
if "/" + list_name not in self.ds:
return None
if self.pypsa_version is None or self.pypsa_version < [0, 13, 1]:
df = self.ds["/" + list_name]
else:
df = self.ds["/" + list_name].set_index("name")
self.index[list_name] = df.index
return df
def get_series(self, list_name):
for tab in self.ds:
if tab.startswith("/" + list_name + "_t/"):
attr = tab[len("/" + list_name + "_t/") :]
df = self.ds[tab]
df.columns = self.index[list_name][df.columns]
yield attr, df
class ExporterHDF5(Exporter):
def __init__(self, path, **kwargs):
self.ds = pd.HDFStore(path, mode="w", **kwargs)
self.index = {}
def save_attributes(self, attrs):
name = attrs.pop("name")
self.ds.put(
"/network",
pd.DataFrame(attrs, index=pd.Index([name], name="name")),
format="table",
index=False,
)
def save_meta(self, meta):
self.ds.put("/meta", pd.Series(json.dumps(meta)))
def save_snapshots(self, snapshots):
self.ds.put("/snapshots", snapshots, format="table", index=False)
def save_investment_periods(self, investment_periods):
self.ds.put(
"/investment_periods",
investment_periods,
format="table",
index=False,
)
def save_static(self, list_name, df):
df = df.rename_axis(index="name")
self.index[list_name] = df.index
df = df.reset_index()
self.ds.put("/" + list_name, df, format="table", index=False)
def save_series(self, list_name, attr, df):
df = df.set_axis(self.index[list_name].get_indexer(df.columns), axis="columns")
self.ds.put("/" + list_name + "_t/" + attr, df, format="table", index=False)
if has_xarray:
class ImporterNetCDF(Importer):
def __init__(self, path):
self.path = path
if isinstance(path, (str, Path)):
if validators.url(str(path)):
path = _retrieve_from_url(path)
self.ds = xr.open_dataset(path)
else:
self.ds = path
def __enter__(self):
if isinstance(self.path, (str, Path)):
super(ImporterNetCDF, self).__init__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if isinstance(self.path, (str, Path)):
super(ImporterNetCDF, self).__exit__(exc_type, exc_val, exc_tb)
def get_attributes(self):
return {
attr[len("network_") :]: val
for attr, val in self.ds.attrs.items()
if attr.startswith("network_")
}
def get_meta(self):
return json.loads(self.ds.attrs.get("meta", "{}"))
def get_snapshots(self):
return self.get_static("snapshots", "snapshots")
def get_investment_periods(self):
return self.get_static("investment_periods", "investment_periods")
def get_static(self, list_name, index_name=None):
t = list_name + "_"
i = len(t)
if index_name is None:
index_name = list_name + "_i"
if index_name not in self.ds.coords:
return None
index = self.ds.coords[index_name].to_index().rename("name")
df = pd.DataFrame(index=index)
for attr in self.ds.data_vars.keys():
if attr.startswith(t) and attr[i : i + 2] != "t_":
df[attr[i:]] = self.ds[attr].to_pandas()
return df
def get_series(self, list_name):
t = list_name + "_t_"
for attr in self.ds.data_vars.keys():
if attr.startswith(t):
df = self.ds[attr].to_pandas()
df.index.name = "name"
df.columns.name = "name"
yield attr[len(t) :], df
class ExporterNetCDF(Exporter):
def __init__(
self, path, compression={"zlib": True, "complevel": 4}, float32=False
):
self.path = path
self.compression = compression
self.float32 = float32
self.ds = xr.Dataset()
def save_attributes(self, attrs):
self.ds.attrs.update(
("network_" + attr, val) for attr, val in attrs.items()
)
def save_meta(self, meta):
self.ds.attrs["meta"] = json.dumps(meta)
def save_snapshots(self, snapshots):
snapshots = snapshots.rename_axis(index="snapshots")
for attr in snapshots.columns:
self.ds["snapshots_" + attr] = snapshots[attr]
def save_investment_periods(self, investment_periods):
investment_periods = investment_periods.rename_axis(
index="investment_periods"
)
for attr in investment_periods.columns:
self.ds["investment_periods_" + attr] = investment_periods[attr]
def save_static(self, list_name, df):
df = df.rename_axis(index=list_name + "_i")
self.ds[list_name + "_i"] = df.index
for attr in df.columns:
self.ds[list_name + "_" + attr] = df[attr]
def save_series(self, list_name, attr, df):
df = df.rename_axis(
index="snapshots", columns=list_name + "_t_" + attr + "_i"
)
self.ds[list_name + "_t_" + attr] = df
def set_compression_encoding(self):
logger.debug(f"Setting compression encodings: {self.compression}")
for v in self.ds.data_vars:
if self.ds[v].dtype.kind not in ["U", "O"]:
self.ds[v].encoding.update(self.compression)
def typecast_float32(self):
            logger.debug("Typecasting float64 to float32.")
for v in self.ds.data_vars:
if self.ds[v].dtype == np.float64:
self.ds[v] = self.ds[v].astype(np.float32)
def finish(self):
if self.float32:
self.typecast_float32()
if self.compression:
self.set_compression_encoding()
if self.path is not None:
self.ds.to_netcdf(self.path)
def _export_to_exporter(network, exporter, basename, export_standard_types=False):
"""
Export to exporter.
Both static and series attributes of components are exported, but only
if they have non-default values.
Parameters
----------
exporter : Exporter
Initialized exporter instance
basename : str
Basename, used for logging
export_standard_types : boolean, default False
If True, then standard types are exported too (upon reimporting you
        should then set "ignore_standard_types" when initialising the network).
"""
# exportable component types
# what about None???? - nan is float?
allowed_types = (float, int, bool, str) + tuple(np.sctypeDict.values())
# first export network properties
attrs = dict(
(attr, getattr(network, attr))
for attr in dir(network)
if (
not attr.startswith("__")
and isinstance(getattr(network, attr), allowed_types)
)
)
exporter.save_attributes(attrs)
exporter.save_meta(network.meta)
# now export snapshots
if isinstance(network.snapshot_weightings.index, pd.MultiIndex):
network.snapshot_weightings.index.rename(["period", "timestep"], inplace=True)
else:
network.snapshot_weightings.index.rename("snapshot", inplace=True)
snapshots = network.snapshot_weightings.reset_index()
exporter.save_snapshots(snapshots)
# export investment period weightings
investment_periods = network.investment_period_weightings
exporter.save_investment_periods(investment_periods)
exported_components = []
for component in network.all_components - {"SubNetwork"}:
list_name = network.components[component]["list_name"]
attrs = network.components[component]["attrs"]
df = network.df(component)
pnl = network.pnl(component)
if not export_standard_types and component in network.standard_type_components:
df = df.drop(network.components[component]["standard_types"].index)
# first do static attributes
df = df.rename_axis(index="name")
if df.empty:
exporter.remove_static(list_name)
continue
col_export = []
for col in df.columns:
# do not export derived attributes
if col in ["sub_network", "r_pu", "x_pu", "g_pu", "b_pu"]:
continue
if (
col in attrs.index
and pd.isnull(attrs.at[col, "default"])
and pd.isnull(df[col]).all()
):
continue
if (
col in attrs.index
and df[col].dtype == attrs.at[col, "dtype"]
and (df[col] == attrs.at[col, "default"]).all()
):
continue
col_export.append(col)
exporter.save_static(list_name, df[col_export])
# now do varying attributes
for attr in pnl:
if attr not in attrs.index:
col_export = pnl[attr].columns
else:
default = attrs.at[attr, "default"]
if pd.isnull(default):
col_export = pnl[attr].columns[(~pd.isnull(pnl[attr])).any()]
else:
col_export = pnl[attr].columns[(pnl[attr] != default).any()]
if len(col_export) > 0:
df = pnl[attr].reset_index()[col_export]
exporter.save_series(list_name, attr, df)
else:
exporter.remove_series(list_name, attr)
exported_components.append(list_name)
logger.info(
f"Exported network {str(basename or '<unnamed>')} "
f"has {', '.join(exported_components)}"
)
def import_from_csv_folder(network, csv_folder_name, encoding=None, skip_time=False):
"""
Import network data from CSVs in a folder.
The CSVs must follow the standard form, see ``pypsa/examples``.
Parameters
----------
csv_folder_name : string
Name of folder
encoding : str, default None
Encoding to use for UTF when reading (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_
skip_time : bool, default False
Skip reading in time dependent attributes
Examples
    --------
>>> network.import_from_csv_folder(csv_folder_name)
"""
basename = Path(csv_folder_name).name
with ImporterCSV(csv_folder_name, encoding=encoding) as importer:
_import_from_importer(network, importer, basename=basename, skip_time=skip_time)
def export_to_csv_folder(
network, csv_folder_name, encoding=None, export_standard_types=False
):
"""
Export network and components to a folder of CSVs.
Both static and series attributes of all components are exported, but only
if they have non-default values.
If ``csv_folder_name`` does not already exist, it is created.
Static attributes are exported in one CSV file per component,
e.g. ``generators.csv``.
Series attributes are exported in one CSV file per component per
attribute, e.g. ``generators-p_set.csv``.
Parameters
----------
csv_folder_name : string
Name of folder to which to export.
encoding : str, default None
Encoding to use for UTF when reading (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_
export_standard_types : boolean, default False
If True, then standard types are exported too (upon reimporting you
should then set "ignore_standard_types" when initialising the network).
Examples
--------
>>> network.export_to_csv_folder(csv_folder_name)
"""
basename = os.path.basename(csv_folder_name)
with ExporterCSV(csv_folder_name=csv_folder_name, encoding=encoding) as exporter:
_export_to_exporter(
network,
exporter,
basename=basename,
export_standard_types=export_standard_types,
)
def import_from_hdf5(network, path, skip_time=False):
"""
Import network data from HDF5 store at `path`.
Parameters
----------
path : string, Path
Name of HDF5 store. The string could be a URL.
skip_time : bool, default False
Skip reading in time dependent attributes
"""
basename = Path(path).name
with ImporterHDF5(path) as importer:
_import_from_importer(network, importer, basename=basename, skip_time=skip_time)
def export_to_hdf5(network, path, export_standard_types=False, **kwargs):
"""
Export network and components to an HDF store.
Both static and series attributes of components are exported, but only
if they have non-default values.
If path does not already exist, it is created.
Parameters
----------
path : string
Name of hdf5 file to which to export (if it exists, it is overwritten)
export_standard_types : boolean, default False
If True, then standard types are exported too (upon reimporting you
should then set "ignore_standard_types" when initialising the network).
**kwargs
Extra arguments for pd.HDFStore to specify f.i. compression
(default: complevel=4)
Examples
--------
>>> network.export_to_hdf5(filename)
"""
kwargs.setdefault("complevel", 4)
basename = os.path.basename(path)
with ExporterHDF5(path, **kwargs) as exporter:
_export_to_exporter(
network,
exporter,
basename=basename,
export_standard_types=export_standard_types,
)
def import_from_netcdf(network, path, skip_time=False):
"""
Import network data from netCDF file or xarray Dataset at `path`.
Parameters
----------
path : string|xr.Dataset
Path to netCDF dataset or instance of xarray Dataset.
The string could be a URL.
skip_time : bool, default False
Skip reading in time dependent attributes
"""
assert has_xarray, "xarray must be installed for netCDF support."
basename = Path(path).name
with ImporterNetCDF(path=path) as importer:
_import_from_importer(network, importer, basename=basename, skip_time=skip_time)
def export_to_netcdf(
network,
path=None,
export_standard_types=False,
compression=None,
float32=False,
):
"""
Export network and components to a netCDF file.
Both static and series attributes of components are exported, but only
if they have non-default values.
If path does not already exist, it is created.
If no path is passed, no file is exported, but the xarray.Dataset
is still returned.
Be aware that this cannot export boolean attributes on the Network
class, e.g. network.my_bool = False is not supported by netCDF.
Parameters
----------
path : string|None
Name of netCDF file to which to export (if it exists, it is overwritten);
if None is passed, no file is exported.
export_standard_types : boolean, default False
If True, then standard types are exported too (upon reimporting you
should then set "ignore_standard_types" when initialising the network).
compression : dict|None
Compression level to use for all features which are being prepared.
The compression is handled via xarray.Dataset.to_netcdf(...). For details see:
https://docs.xarray.dev/en/stable/generated/xarray.Dataset.to_netcdf.html
An example compression directive is ``{'zlib': True, 'complevel': 4}``.
The default is None which disables compression.
float32 : boolean, default False
If True, typecasts values to float32.
Returns
-------
ds : xarray.Dataset
Examples
--------
>>> network.export_to_netcdf("my_file.nc")
"""
assert has_xarray, "xarray must be installed for netCDF support."
basename = os.path.basename(path) if path is not None else None
with ExporterNetCDF(path, compression, float32) as exporter:
_export_to_exporter(
network,
exporter,
basename=basename,
export_standard_types=export_standard_types,
)
return exporter.ds
def _import_from_importer(network, importer, basename, skip_time=False):
"""
Import network data from importer.
Parameters
----------
skip_time : bool
Skip importing time
"""
attrs = importer.get_attributes()
network.meta = importer.get_meta()
current_pypsa_version = [int(s) for s in network.pypsa_version.split(".")]
pypsa_version = None
if attrs is not None:
network.name = attrs.pop("name")
try:
pypsa_version = [int(s) for s in attrs.pop("pypsa_version").split(".")]
except KeyError:
pypsa_version = None
for attr, val in attrs.items():
setattr(network, attr, val)
##https://docs.python.org/3/tutorial/datastructures.html#comparing-sequences-and-other-types
if pypsa_version is None or pypsa_version < current_pypsa_version:
pypsa_version_str = (
".".join(map(str, pypsa_version)) if pypsa_version is not None else "?"
)
current_pypsa_version_str = ".".join(map(str, current_pypsa_version))
msg = (
f"Importing network from PyPSA version v{pypsa_version_str} while "
f"current version is v{current_pypsa_version_str}. Read the "
"release notes at https://pypsa.readthedocs.io/en/latest/release_notes.html "
"to prepare your network for import."
)
logger.warning(msg)
if pypsa_version is None or pypsa_version < [0, 18, 0]:
network._multi_invest = 0
importer.pypsa_version = pypsa_version
importer.current_pypsa_version = current_pypsa_version
# if there is snapshots.csv, read in snapshot data
df = importer.get_snapshots()
if df is not None:
# check if imported snapshots have MultiIndex
snapshot_levels = set(["period", "timestep", "snapshot"]).intersection(
df.columns
)
if snapshot_levels:
df.set_index(sorted(snapshot_levels), inplace=True)
network.set_snapshots(df.index)
cols = ["objective", "generators", "stores"]
if not df.columns.intersection(cols).empty:
network.snapshot_weightings = df.reindex(
index=network.snapshots, columns=cols
)
elif "weightings" in df.columns:
network.snapshot_weightings = df["weightings"].reindex(network.snapshots)
network.set_snapshots(df.index)
# read in investment period weightings
periods = importer.get_investment_periods()
if periods is not None:
network._investment_periods = periods.index
network._investment_period_weightings = periods.reindex(
network.investment_periods
)
imported_components = []
# now read in other components; make sure buses and carriers come first
for component in ["Bus", "Carrier"] + sorted(
network.all_components - {"Bus", "Carrier", "SubNetwork"}
):
list_name = network.components[component]["list_name"]
df = importer.get_static(list_name)
if df is None:
if component == "Bus":
logger.error("Error, no buses found")
return
continue
import_components_from_dataframe(network, df, component)
if component == "Link":
update_linkports_component_attrs(network)
if not skip_time:
for attr, df in importer.get_series(list_name):
df.set_index(network.snapshots, inplace=True)
import_series_from_dataframe(network, df, component, attr)
logger.debug(getattr(network, list_name))
imported_components.append(list_name)
logger.info(
f"Imported network {str(basename or network.name or '<unnamed>')} "
f"has {', '.join(imported_components)}"
)
def import_components_from_dataframe(network, dataframe, cls_name):
"""
Import components from a pandas DataFrame.
If columns are missing then defaults are used.
If extra columns are added, these are left in the resulting component dataframe.
Parameters
----------
dataframe : pandas.DataFrame
A DataFrame whose index is the names of the components and
whose columns are the non-default attributes.
cls_name : string
Name of class of component, e.g. ``"Line", "Bus", "Generator", "StorageUnit"``
Examples
--------
>>> import pandas as pd
>>> buses = ['Berlin', 'Frankfurt', 'Munich', 'Hamburg']
>>> network.import_components_from_dataframe(
pd.DataFrame({"v_nom" : 380, "control" : 'PV'},
index=buses),
"Bus")
>>> network.import_components_from_dataframe(
pd.DataFrame({"carrier" : "solar", "bus" : buses, "p_nom_extendable" : True},
index=[b+" PV" for b in buses]),
"Generator")
See Also
--------
pypsa.Network.madd
"""
attrs = network.components[cls_name]["attrs"]
static_attrs = attrs[attrs.static].drop("name")
non_static_attrs = attrs[~attrs.static]
# Clean dataframe and ensure correct types
dataframe = pd.DataFrame(dataframe)
dataframe.index = dataframe.index.astype(str)
for k in static_attrs.index:
if k not in dataframe.columns:
dataframe[k] = static_attrs.at[k, "default"]
else:
if static_attrs.at[k, "type"] == "string":
dataframe[k] = dataframe[k].replace({np.nan: ""})
dataframe[k] = dataframe[k].astype(static_attrs.at[k, "typ"])
# check all the buses are well-defined
for attr in ["bus", "bus0", "bus1"]:
if attr in dataframe.columns:
missing = dataframe.index[~dataframe[attr].isin(network.buses.index)]
if len(missing) > 0:
logger.warning(
"The following %s have buses which are not defined:\n%s",
cls_name,
missing,
)
non_static_attrs_in_df = non_static_attrs.index.intersection(dataframe.columns)
old_df = network.df(cls_name)
new_df = dataframe.drop(non_static_attrs_in_df, axis=1)
if not old_df.empty:
new_df = pd.concat((old_df, new_df), sort=False)
if not new_df.index.is_unique:
logger.error("Error, new components for {} are not unique".format(cls_name))
return
new_df.index.name = cls_name
setattr(network, network.components[cls_name]["list_name"], new_df)
# now deal with time-dependent properties
pnl = network.pnl(cls_name)
for k in non_static_attrs_in_df:
# If reading in outputs, fill the outputs
pnl[k] = pnl[k].reindex(
columns=new_df.index, fill_value=non_static_attrs.at[k, "default"]
)
pnl[k].loc[:, dataframe.index] = dataframe.loc[:, k].values
setattr(network, network.components[cls_name]["list_name"] + "_t", pnl)
def import_series_from_dataframe(network, dataframe, cls_name, attr):
"""
Import time series from a pandas DataFrame.
Parameters
----------
dataframe : pandas.DataFrame
A DataFrame whose index is ``network.snapshots`` and
whose columns are a subset of the relevant components.
cls_name : string
Name of class of component
attr : string
Name of time-varying series attribute
Examples
--------
>>> import numpy as np
>>> network.set_snapshots(range(10))
>>> network.import_series_from_dataframe(
pd.DataFrame(np.random.rand(10, 4),
columns=network.generators.index,
index=range(10)),
"Generator",
"p_max_pu")
See Also
--------
pypsa.Network.madd()
"""
df = network.df(cls_name)
pnl = network.pnl(cls_name)
list_name = network.components[cls_name]["list_name"]
dataframe.columns.name = cls_name
dataframe.index.name = "snapshot"
diff = dataframe.columns.difference(df.index)
if len(diff) > 0:
logger.warning(
f"Components {diff} for attribute {attr} of {cls_name} "
f"are not in main components dataframe {list_name}"
)
attrs = network.components[cls_name]["attrs"]
expected_attrs = attrs[lambda ds: ds.type.str.contains("series")].index
if attr not in expected_attrs:
pnl[attr] = dataframe
return
attr_series = attrs.loc[attr]
default = attr_series.default
columns = dataframe.columns
diff = network.snapshots.difference(dataframe.index)
if len(diff):
logger.warning(
f"Snapshots {diff} are missing from {attr} of {cls_name}."
f" Filling with default value '{default}'"
)
dataframe = dataframe.reindex(network.snapshots, fill_value=default)
if not attr_series.static:
pnl[attr] = pnl[attr].reindex(
columns=df.index.union(columns), fill_value=default
)
else:
pnl[attr] = pnl[attr].reindex(columns=(pnl[attr].columns.union(columns)))
pnl[attr].loc[network.snapshots, columns] = dataframe.loc[
network.snapshots, columns
]
def import_from_pypower_ppc(network, ppc, overwrite_zero_s_nom=None):
"""
Import network from PYPOWER PPC dictionary format version 2.
Converts all baseMVA to base power of 1 MVA.
For the meaning of the pypower indices, see also pypower/idx_*.
Parameters
----------
ppc : PYPOWER PPC dict
overwrite_zero_s_nom : Float or None, default None
Examples
--------
>>> from pypower.api import case30
>>> ppc = case30()
>>> network.import_from_pypower_ppc(ppc)
"""
version = ppc["version"]
if int(version) != 2:
logger.warning(
"Warning, importing from PYPOWER may not work if PPC version is not 2!"
)
    logger.warning(
        "Warning: Note that when importing from PYPOWER, some PYPOWER features are not supported: areas, gencosts, component status"
)
baseMVA = ppc["baseMVA"]
# dictionary to store pandas DataFrames of PyPower data
pdf = {}
# add buses
# integer numbering will be bus names
index = np.array(ppc["bus"][:, 0], dtype=int)
columns = [
"type",
"Pd",
"Qd",
"Gs",
"Bs",
"area",
"v_mag_pu_set",
"v_ang_set",
"v_nom",
"zone",
"v_mag_pu_max",
"v_mag_pu_min",
]
pdf["buses"] = pd.DataFrame(
index=index, columns=columns, data=ppc["bus"][:, 1 : len(columns) + 1]
)
if (pdf["buses"]["v_nom"] == 0.0).any():
logger.warning(
"Warning, some buses have nominal voltage of 0., setting the nominal voltage of these to 1."
)
pdf["buses"].loc[pdf["buses"]["v_nom"] == 0.0, "v_nom"] = 1.0
# rename controls
controls = ["", "PQ", "PV", "Slack"]
pdf["buses"]["control"] = pdf["buses"].pop("type").map(lambda i: controls[int(i)])
# add loads for any buses with Pd or Qd
pdf["loads"] = pdf["buses"].loc[
pdf["buses"][["Pd", "Qd"]].any(axis=1), ["Pd", "Qd"]
]
pdf["loads"]["bus"] = pdf["loads"].index
pdf["loads"].rename(columns={"Qd": "q_set", "Pd": "p_set"}, inplace=True)
pdf["loads"].index = ["L" + str(i) for i in range(len(pdf["loads"]))]
# add shunt impedances for any buses with Gs or Bs
shunt = pdf["buses"].loc[
pdf["buses"][["Gs", "Bs"]].any(axis=1), ["v_nom", "Gs", "Bs"]
]
# base power for shunt is 1 MVA, so no need to rebase here
shunt["g"] = shunt["Gs"] / shunt["v_nom"] ** 2
shunt["b"] = shunt["Bs"] / shunt["v_nom"] ** 2
pdf["shunt_impedances"] = shunt.reindex(columns=["g", "b"])
pdf["shunt_impedances"]["bus"] = pdf["shunt_impedances"].index
pdf["shunt_impedances"].index = [
"S" + str(i) for i in range(len(pdf["shunt_impedances"]))
]
# add gens
# it is assumed that the pypower p_max is the p_nom
# could also do gen.p_min_pu = p_min/p_nom
columns = "bus, p_set, q_set, q_max, q_min, v_set_pu, mva_base, status, p_nom, p_min, Pc1, Pc2, Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf".split(
", "
)
index = ["G" + str(i) for i in range(len(ppc["gen"]))]
pdf["generators"] = pd.DataFrame(
index=index, columns=columns, data=ppc["gen"][:, : len(columns)]
)
# make sure bus name is an integer
pdf["generators"]["bus"] = np.array(ppc["gen"][:, 0], dtype=int)
    # add branches
## branch data
# fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax
columns = "bus0, bus1, r, x, b, s_nom, rateB, rateC, tap_ratio, phase_shift, status, v_ang_min, v_ang_max".split(
", "
)
pdf["branches"] = pd.DataFrame(
columns=columns, data=ppc["branch"][:, : len(columns)]
)
pdf["branches"]["original_index"] = pdf["branches"].index
pdf["branches"]["bus0"] = pdf["branches"]["bus0"].astype(int)
pdf["branches"]["bus1"] = pdf["branches"]["bus1"].astype(int)
# s_nom = 0 indicates an unconstrained line
zero_s_nom = pdf["branches"]["s_nom"] == 0.0
if zero_s_nom.any():
if overwrite_zero_s_nom is not None:
pdf["branches"].loc[zero_s_nom, "s_nom"] = overwrite_zero_s_nom
else:
logger.warning(
"Warning: there are {} branches with s_nom equal to zero, "
"they will probably lead to infeasibilities and should be "
"replaced with a high value using the `overwrite_zero_s_nom` "
"argument.".format(zero_s_nom.sum())
)
# determine bus voltages of branches to detect transformers
v_nom = pdf["branches"].bus0.map(pdf["buses"].v_nom)
v_nom_1 = pdf["branches"].bus1.map(pdf["buses"].v_nom)
# split branches into transformers and lines
transformers = (
(v_nom != v_nom_1)
| (
(pdf["branches"].tap_ratio != 0.0) & (pdf["branches"].tap_ratio != 1.0)
) # NB: PYPOWER has strange default of 0. for tap ratio
| (pdf["branches"].phase_shift != 0)
)
pdf["transformers"] = pd.DataFrame(pdf["branches"][transformers])
pdf["lines"] = pdf["branches"][~transformers].drop(
["tap_ratio", "phase_shift"], axis=1
)
# convert transformers from base baseMVA to base s_nom
pdf["transformers"]["r"] = (
pdf["transformers"]["r"] * pdf["transformers"]["s_nom"] / baseMVA
)
pdf["transformers"]["x"] = (
pdf["transformers"]["x"] * pdf["transformers"]["s_nom"] / baseMVA
)
pdf["transformers"]["b"] = (
pdf["transformers"]["b"] * baseMVA / pdf["transformers"]["s_nom"]
)
# correct per unit impedances
pdf["lines"]["r"] = v_nom**2 * pdf["lines"]["r"] / baseMVA
pdf["lines"]["x"] = v_nom**2 * pdf["lines"]["x"] / baseMVA
pdf["lines"]["b"] = pdf["lines"]["b"] * baseMVA / v_nom**2
if (pdf["transformers"]["tap_ratio"] == 0.0).any():
logger.warning(
"Warning, some transformers have a tap ratio of 0., setting the tap ratio of these to 1."
)
pdf["transformers"].loc[
pdf["transformers"]["tap_ratio"] == 0.0, "tap_ratio"
] = 1.0
# name them nicely
pdf["transformers"].index = ["T" + str(i) for i in range(len(pdf["transformers"]))]
pdf["lines"].index = ["L" + str(i) for i in range(len(pdf["lines"]))]
# TODO
##----- OPF Data -----##
## generator cost data
# 1 startup shutdown n x1 y1 ... xn yn
# 2 startup shutdown n c(n-1) ... c0
for component in [
"Bus",
"Load",
"Generator",
"Line",
"Transformer",
"ShuntImpedance",
]:
import_components_from_dataframe(
network, pdf[network.components[component]["list_name"]], component
)
network.generators["control"] = network.generators.bus.map(network.buses["control"])
# for consistency with pypower, take the v_mag set point from the generators
network.buses.loc[network.generators.bus, "v_mag_pu_set"] = np.asarray(
network.generators["v_set_pu"]
)
def import_from_pandapower_net(
network, net, extra_line_data=False, use_pandapower_index=False
):
"""
Import PyPSA network from pandapower net.
Importing from pandapower is still in beta;
not all pandapower components are supported.
Unsupported features include:
- three-winding transformers
- switches
- in_service status and
- tap positions of transformers
Parameters
----------
net : pandapower network
extra_line_data : boolean, default: False
if True, the line data for all parameters is imported instead of only the type
use_pandapower_index : boolean, default: False
if True, use integer numbers which is the pandapower index standard
if False, use any net.name as index (e.g. 'Bus 1' (str) or 1 (int))
Examples
--------
>>> network.import_from_pandapower_net(net)
OR
>>> import pypsa
>>> import pandapower as pp
>>> import pandapower.networks as pn
>>> net = pn.create_cigre_network_mv(with_der='all')
>>> network = pypsa.Network()
>>> network.import_from_pandapower_net(net, extra_line_data=True)
"""
logger.warning(
"Warning: Importing from pandapower is still in beta; not all pandapower data is supported.\nUnsupported features include: three-winding transformers, switches, in_service status, shunt impedances and tap positions of transformers."
)
d = {}
d["Bus"] = pd.DataFrame(
{"v_nom": net.bus.vn_kv.values, "v_mag_pu_set": 1.0},
index=net.bus.name,
)
d["Bus"].loc[
net.bus.name.loc[net.gen.bus].values, "v_mag_pu_set"
] = net.gen.vm_pu.values
d["Bus"].loc[
net.bus.name.loc[net.ext_grid.bus].values, "v_mag_pu_set"
] = net.ext_grid.vm_pu.values
d["Load"] = pd.DataFrame(
{
"p_set": (net.load.scaling * net.load.p_mw).values,
"q_set": (net.load.scaling * net.load.q_mvar).values,
"bus": net.bus.name.loc[net.load.bus].values,
},
index=net.load.name,
)
# deal with PV generators
_tmp_gen = pd.DataFrame(
{
"p_set": (net.gen.scaling * net.gen.p_mw).values,
"q_set": 0.0,
"bus": net.bus.name.loc[net.gen.bus].values,
"control": "PV",
},
index=net.gen.name,
)
# deal with PQ "static" generators
_tmp_sgen = pd.DataFrame(
{
"p_set": (net.sgen.scaling * net.sgen.p_mw).values,
"q_set": (net.sgen.scaling * net.sgen.q_mvar).values,
"bus": net.bus.name.loc[net.sgen.bus].values,
"control": "PQ",
},
index=net.sgen.name,
)
_tmp_ext_grid = pd.DataFrame(
{
"control": "Slack",
"p_set": 0.0,
"q_set": 0.0,
"bus": net.bus.name.loc[net.ext_grid.bus].values,
},
index=net.ext_grid.name.fillna("External Grid"),
)
# concat all generators and index according to option
d["Generator"] = pd.concat(
[_tmp_gen, _tmp_sgen, _tmp_ext_grid], ignore_index=use_pandapower_index
)
if extra_line_data is False:
d["Line"] = pd.DataFrame(
{
"type": net.line.std_type.values,
"bus0": net.bus.name.loc[net.line.from_bus].values,
"bus1": net.bus.name.loc[net.line.to_bus].values,
"length": net.line.length_km.values,
"num_parallel": net.line.parallel.values,
},
index=net.line.name,
)
else:
r = net.line.r_ohm_per_km.values * net.line.length_km.values
x = net.line.x_ohm_per_km.values * net.line.length_km.values
# capacitance values from pandapower in nF; transformed here:
f = net.f_hz
b = net.line.c_nf_per_km.values * net.line.length_km.values * 1e-9
b = b * 2 * math.pi * f
u = net.bus.vn_kv.loc[net.line.from_bus].values
s_nom = u * net.line.max_i_ka.values
d["Line"] = pd.DataFrame(
{
"r": r,
"x": x,
"b": b,
"s_nom": s_nom,
"bus0": net.bus.name.loc[net.line.from_bus].values,
"bus1": net.bus.name.loc[net.line.to_bus].values,
"length": net.line.length_km.values,
"num_parallel": net.line.parallel.values,
},
index=net.line.name,
)
# check, if the trafo is based on a standard-type:
if net.trafo.std_type.any():
d["Transformer"] = pd.DataFrame(
{
"type": net.trafo.std_type.values,
"bus0": net.bus.name.loc[net.trafo.hv_bus].values,
"bus1": net.bus.name.loc[net.trafo.lv_bus].values,
"tap_position": net.trafo.tap_pos.values,
},
index=net.trafo.name,
)
d["Transformer"] = d["Transformer"].fillna(0)
# if it's not based on a standard-type - get the included values:
else:
s_nom = net.trafo.sn_mva.values
# documented at https://pandapower.readthedocs.io/en/develop/elements/trafo.html?highlight=transformer#impedance-values
z = net.trafo.vk_percent.values / 100.0 / net.trafo.sn_mva.values
r = net.trafo.vkr_percent.values / 100.0 / net.trafo.sn_mva.values
x = np.sqrt(z**2 - r**2)
y = net.trafo.i0_percent.values / 100.0
g = (
net.trafo.pfe_kw.values
/ net.trafo.sn_mva.values
/ 1000
/ net.trafo.sn_mva.values
)
b = np.sqrt(y**2 - g**2)
d["Transformer"] = pd.DataFrame(
{
"phase_shift": net.trafo.shift_degree.values,
"s_nom": s_nom,
"bus0": net.bus.name.loc[net.trafo.hv_bus].values,
"bus1": net.bus.name.loc[net.trafo.lv_bus].values,
"r": r,
"x": x,
"g": g,
"b": b,
"tap_position": net.trafo.tap_pos.values,
},
index=net.trafo.name,
)
d["Transformer"] = d["Transformer"].fillna(0)
# documented at https://pypsa.readthedocs.io/en/latest/components.html#shunt-impedance
g_shunt = net.shunt.p_mw.values / net.shunt.vn_kv.values**2
b_shunt = net.shunt.q_mvar.values / net.shunt.vn_kv.values**2
d["ShuntImpedance"] = pd.DataFrame(
{
"bus": net.bus.name.loc[net.shunt.bus].values,
"g": g_shunt,
"b": b_shunt,
},
index=net.shunt.name,
)
d["ShuntImpedance"] = d["ShuntImpedance"].fillna(0)
for c in [
"Bus",
"Load",
"Generator",
"Line",
"Transformer",
"ShuntImpedance",
]:
network.import_components_from_dataframe(d[c], c)
# amalgamate buses connected by closed switches
bus_switches = net.switch[(net.switch.et == "b") & net.switch.closed]
bus_switches["stays"] = bus_switches.bus.map(net.bus.name)
bus_switches["goes"] = bus_switches.element.map(net.bus.name)
to_replace = pd.Series(bus_switches.stays.values, bus_switches.goes.values)
for i in to_replace.index:
network.remove("Bus", i)
for c in network.iterate_components({"Load", "Generator", "ShuntImpedance"}):
c.df.bus.replace(to_replace, inplace=True)
for c in network.iterate_components({"Line", "Transformer"}):
c.df.bus0.replace(to_replace, inplace=True)
c.df.bus1.replace(to_replace, inplace=True)
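if __name__ == "__main__":
    # Added round-trip sketch (not part of the library): build a tiny network, export
    # it to netCDF and re-import it. Requires xarray/netCDF support; the file name and
    # component values below are illustrative only.
    import pypsa

    n = pypsa.Network()
    n.set_snapshots(range(3))
    n.add("Bus", "bus0", v_nom=380.0)
    n.add("Generator", "gen0", bus="bus0", p_nom=100.0)
    n.export_to_netcdf("roundtrip.nc")
    m = pypsa.Network("roundtrip.nc")
    print(m.buses.index.tolist(), m.generators.index.tolist())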
|
b4f95915e9ea389a5b6e09b19da73e0d28fa8460
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/KoubeiRetailWmsSupplierreportQueryResponse.py
|
a7227e7860ec96c571aeca4e27599eab41b18797
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,018
|
py
|
KoubeiRetailWmsSupplierreportQueryResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.SupplierReport import SupplierReport
class KoubeiRetailWmsSupplierreportQueryResponse(AlipayResponse):
def __init__(self):
super(KoubeiRetailWmsSupplierreportQueryResponse, self).__init__()
self._page_no = None
self._page_size = None
self._supplier_report_list = None
self._total_count = None
@property
def page_no(self):
return self._page_no
@page_no.setter
def page_no(self, value):
self._page_no = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def supplier_report_list(self):
return self._supplier_report_list
@supplier_report_list.setter
def supplier_report_list(self, value):
if isinstance(value, list):
self._supplier_report_list = list()
for i in value:
if isinstance(i, SupplierReport):
self._supplier_report_list.append(i)
else:
self._supplier_report_list.append(SupplierReport.from_alipay_dict(i))
@property
def total_count(self):
return self._total_count
@total_count.setter
def total_count(self, value):
self._total_count = value
def parse_response_content(self, response_content):
response = super(KoubeiRetailWmsSupplierreportQueryResponse, self).parse_response_content(response_content)
if 'page_no' in response:
self.page_no = response['page_no']
if 'page_size' in response:
self.page_size = response['page_size']
if 'supplier_report_list' in response:
self.supplier_report_list = response['supplier_report_list']
if 'total_count' in response:
self.total_count = response['total_count']
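# Added note (illustrative only; the field values below are made up): the
# ``parse_response_content`` method above expects a payload shaped roughly like
#
#     {"page_no": 1, "page_size": 20, "total_count": 1,
#      "supplier_report_list": [{...}]}
#
# where each list entry is converted into a ``SupplierReport`` by the property setter.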
|
a6ab7b925321d27206c771adf1ecedd0df21a3d8
|
8ca19f1a31070738b376c0370c4bebf6b7efcb43
|
/office365/planner/tasks/task_details.py
|
303a6cd1603cdc0d61a6d5b2953b580075b0bff8
|
[
"MIT"
] |
permissive
|
vgrem/Office365-REST-Python-Client
|
2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3
|
cbd245d1af8d69e013c469cfc2a9851f51c91417
|
refs/heads/master
| 2023-09-02T14:20:40.109462
| 2023-08-31T19:14:05
| 2023-08-31T19:14:05
| 51,305,798
| 1,006
| 326
|
MIT
| 2023-08-28T05:38:02
| 2016-02-08T15:24:51
|
Python
|
UTF-8
|
Python
| false
| false
| 228
|
py
|
task_details.py
|
from office365.entity import Entity
class PlannerTaskDetails(Entity):
"""
The plannerTaskDetails resource represents the additional information about a task.
Each task object has a details object.
"""
pass
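# Added note (assumption based on the docstring above, not verified against the rest of
# the client API): instances are normally reached through a task's ``details``
# navigation property rather than constructed directly.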
|
d4a7edd531f5efbdedc5026b60c635b1582b2481
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/django-axes-5.13.0/tests/test_login.py
|
39035210f08de0faf73a685fde7ace6c6b8feaa0
|
[
"MIT",
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 27,438
|
py
|
test_login.py
|
"""
Integration tests for the login handling.
TODO: Clean up the tests in this module.
"""
from importlib import import_module
from django.contrib.auth import get_user_model, login, logout
from django.http import HttpRequest
from django.test import override_settings, TestCase
from django.urls import reverse
from axes.conf import settings
from axes.helpers import get_cache, make_cache_key_list
from axes.models import AccessAttempt
from tests.base import AxesTestCase
class DjangoLoginTestCase(TestCase):
def setUp(self):
engine = import_module(settings.SESSION_ENGINE)
self.request = HttpRequest()
self.request.session = engine.SessionStore()
self.username = "john.doe"
self.password = "hunter2"
self.user = get_user_model().objects.create(username=self.username)
self.user.set_password(self.password)
self.user.save()
self.user.backend = "django.contrib.auth.backends.ModelBackend"
class DjangoContribAuthLoginTestCase(DjangoLoginTestCase):
def test_login(self):
login(self.request, self.user)
def test_logout(self):
login(self.request, self.user)
logout(self.request)
@override_settings(AXES_ENABLED=False)
class DjangoTestClientLoginTestCase(DjangoLoginTestCase):
def test_client_login(self):
self.client.login(username=self.username, password=self.password)
def test_client_logout(self):
self.client.login(username=self.username, password=self.password)
self.client.logout()
def test_client_force_login(self):
self.client.force_login(self.user)
class DatabaseLoginTestCase(AxesTestCase):
"""
Test for lockouts under different configurations and circumstances to prevent false positives and false negatives.
Always block attempted logins for the same user from the same IP.
Always allow attempted logins for a different user from a different IP.
"""
IP_1 = "10.1.1.1"
IP_2 = "10.2.2.2"
IP_3 = "10.2.2.3"
USER_1 = "valid-user-1"
USER_2 = "valid-user-2"
USER_3 = "valid-user-3"
EMAIL_1 = "valid-email-1@example.com"
EMAIL_2 = "valid-email-2@example.com"
VALID_USERNAME = USER_1
VALID_EMAIL = EMAIL_1
VALID_PASSWORD = "valid-password"
VALID_IP_ADDRESS = IP_1
WRONG_PASSWORD = "wrong-password"
LOCKED_MESSAGE = "Account locked: too many login attempts."
LOGIN_FORM_KEY = '<input type="submit" value="Log in" />'
ATTEMPT_NOT_BLOCKED = 200
ALLOWED = 302
BLOCKED = 403
def _login(self, username, password, ip_addr="127.0.0.1", **kwargs):
"""
Login a user and get the response.
IP address can be configured to test IP blocking functionality.
"""
post_data = {"username": username, "password": password}
post_data.update(kwargs)
return self.client.post(
reverse("admin:login"),
post_data,
REMOTE_ADDR=ip_addr,
HTTP_USER_AGENT="test-browser",
)
def _lockout_user_from_ip(self, username, ip_addr):
for _ in range(settings.AXES_FAILURE_LIMIT):
response = self._login(
username=username, password=self.WRONG_PASSWORD, ip_addr=ip_addr
)
return response
def _lockout_user1_from_ip1(self):
return self._lockout_user_from_ip(username=self.USER_1, ip_addr=self.IP_1)
def setUp(self):
"""
Create two valid users for authentication.
"""
super().setUp()
self.user2 = get_user_model().objects.create_superuser(
username=self.USER_2,
email=self.EMAIL_2,
password=self.VALID_PASSWORD,
is_staff=True,
is_superuser=True,
)
def test_login(self):
"""
Test a valid login for a real username.
"""
response = self._login(self.username, self.password)
self.assertNotContains(
response, self.LOGIN_FORM_KEY, status_code=self.ALLOWED, html=True
)
def test_lockout_limit_once(self):
"""
Test the login lock trying to login one more time than failure limit.
"""
response = self.lockout()
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
def test_lockout_limit_many(self):
"""
Test the login lock trying to login a lot of times more than failure limit.
"""
self.lockout()
for _ in range(settings.AXES_FAILURE_LIMIT):
response = self.login()
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
def attempt_count(self):
return AccessAttempt.objects.count()
@override_settings(AXES_RESET_ON_SUCCESS=False)
def test_reset_on_success_false(self):
self.almost_lockout()
self.login(is_valid_username=True, is_valid_password=True)
response = self.login()
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
self.assertTrue(self.attempt_count())
@override_settings(AXES_RESET_ON_SUCCESS=True)
def test_reset_on_success_true(self):
self.almost_lockout()
self.assertTrue(self.attempt_count())
self.login(is_valid_username=True, is_valid_password=True)
self.assertFalse(self.attempt_count())
response = self.lockout()
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
self.assertTrue(self.attempt_count())
@override_settings(AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP=True)
def test_lockout_by_combination_user_and_ip(self):
"""
Test login failure when AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP is True.
"""
# test until one try before the limit
for _ in range(1, settings.AXES_FAILURE_LIMIT):
response = self.login(is_valid_username=True, is_valid_password=False)
# Check if we are in the same login page
self.assertContains(response, self.LOGIN_FORM_KEY, html=True)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now
response = self.login(is_valid_username=True, is_valid_password=False)
self.assertContains(response, self.LOCKED_MESSAGE, status_code=403)
@override_settings(AXES_ONLY_USER_FAILURES=True)
def test_lockout_by_only_user_failures(self):
"""
Test login failure when AXES_ONLY_USER_FAILURES is True.
"""
# test until one try before the limit
for _ in range(1, settings.AXES_FAILURE_LIMIT):
response = self._login(self.username, self.WRONG_PASSWORD)
# Check if we are in the same login page
self.assertContains(response, self.LOGIN_FORM_KEY, html=True)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now
response = self._login(self.username, self.WRONG_PASSWORD)
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
# reset the username only and make sure we can log in now even though our IP has failed each time
self.reset(username=self.username)
response = self._login(self.username, self.password)
# Check if we are still in the login page
self.assertNotContains(
response, self.LOGIN_FORM_KEY, status_code=self.ALLOWED, html=True
)
# now create failure_limit + 1 failed logins and then we should still
# be able to login with valid_username
for _ in range(settings.AXES_FAILURE_LIMIT):
response = self._login(self.username, self.password)
# Check if we can still log in with valid user
response = self._login(self.username, self.password)
self.assertNotContains(
response, self.LOGIN_FORM_KEY, status_code=self.ALLOWED, html=True
)
# Test for true and false positives when blocking by IP *OR* user (default)
# Cache disabled. Default settings.
def test_lockout_by_ip_blocks_when_same_user_same_ip_without_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 is still blocked from IP 1.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.BLOCKED)
def test_lockout_by_ip_allows_when_same_user_diff_ip_without_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 can still login from IP 2.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
def test_lockout_by_ip_blocks_when_diff_user_same_ip_without_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 is also locked out from IP 1.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.BLOCKED)
def test_lockout_by_ip_allows_when_diff_user_diff_ip_without_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 can still login from IP 2.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
# Test for true and false positives when blocking by user only.
# Cache disabled. When AXES_ONLY_USER_FAILURES = True
@override_settings(AXES_ONLY_USER_FAILURES=True)
def test_lockout_by_user_blocks_when_same_user_same_ip_without_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 is still blocked from IP 1.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.BLOCKED)
@override_settings(AXES_ONLY_USER_FAILURES=True)
def test_lockout_by_user_blocks_when_same_user_diff_ip_without_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 is also locked out from IP 2.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.BLOCKED)
@override_settings(AXES_ONLY_USER_FAILURES=True)
def test_lockout_by_user_allows_when_diff_user_same_ip_without_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 can still login from IP 1.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(AXES_ONLY_USER_FAILURES=True)
def test_lockout_by_user_allows_when_diff_user_diff_ip_without_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 can still login from IP 2.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(AXES_ONLY_USER_FAILURES=True)
def test_lockout_by_user_with_empty_username_allows_other_users_without_cache(self):
# User with empty username is locked out from IP 1.
self._lockout_user_from_ip(username="", ip_addr=self.IP_1)
# Still possible to access the login page
response = self.client.get(reverse("admin:login"), REMOTE_ADDR=self.IP_1)
self.assertContains(response, self.LOGIN_FORM_KEY, status_code=200, html=True)
# Test for true and false positives when blocking by user and IP together.
# Cache disabled. When LOCK_OUT_BY_COMBINATION_USER_AND_IP = True
@override_settings(AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP=True)
def test_lockout_by_user_and_ip_blocks_when_same_user_same_ip_without_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 is still blocked from IP 1.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.BLOCKED)
@override_settings(AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP=True)
def test_lockout_by_user_and_ip_allows_when_same_user_diff_ip_without_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 can still login from IP 2.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP=True)
def test_lockout_by_user_and_ip_allows_when_diff_user_same_ip_without_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 can still login from IP 1.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP=True)
def test_lockout_by_user_and_ip_allows_when_diff_user_diff_ip_without_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 can still login from IP 2.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP=True)
def test_lockout_by_user_and_ip_with_empty_username_allows_other_users_without_cache(
self,
):
# User with empty username is locked out from IP 1.
self._lockout_user_from_ip(username="", ip_addr=self.IP_1)
# Still possible to access the login page
response = self.client.get(reverse("admin:login"), REMOTE_ADDR=self.IP_1)
self.assertContains(response, self.LOGIN_FORM_KEY, status_code=200, html=True)
# Test for true and false positives when blocking by IP *OR* user (default)
# With cache enabled. Default criteria.
def test_lockout_by_ip_blocks_when_same_user_same_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 is still blocked from IP 1.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.BLOCKED)
def test_lockout_by_ip_allows_when_same_user_diff_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 can still login from IP 2.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
def test_lockout_by_ip_blocks_when_diff_user_same_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 is also locked out from IP 1.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.BLOCKED)
def test_lockout_by_ip_allows_when_diff_user_diff_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 can still login from IP 2.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(AXES_ONLY_USER_FAILURES=True)
def test_lockout_by_user_with_empty_username_allows_other_users_using_cache(self):
# User with empty username is locked out from IP 1.
self._lockout_user_from_ip(username="", ip_addr=self.IP_1)
# Still possible to access the login page
response = self.client.get(reverse("admin:login"), REMOTE_ADDR=self.IP_1)
self.assertContains(response, self.LOGIN_FORM_KEY, status_code=200, html=True)
# Test for true and false positives when blocking by user only.
# With cache enabled. When AXES_ONLY_USER_FAILURES = True
@override_settings(AXES_ONLY_USER_FAILURES=True)
def test_lockout_by_user_blocks_when_same_user_same_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 is still blocked from IP 1.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.BLOCKED)
@override_settings(AXES_ONLY_USER_FAILURES=True)
def test_lockout_by_user_blocks_when_same_user_diff_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 is also locked out from IP 2.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.BLOCKED)
@override_settings(AXES_ONLY_USER_FAILURES=True)
def test_lockout_by_user_allows_when_diff_user_same_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 can still login from IP 1.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(AXES_ONLY_USER_FAILURES=True)
def test_lockout_by_user_allows_when_diff_user_diff_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 can still login from IP 2.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
# Test for true and false positives when blocking by user and IP together.
# With cache enabled. When LOCK_OUT_BY_COMBINATION_USER_AND_IP = True
@override_settings(AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP=True)
def test_lockout_by_user_and_ip_blocks_when_same_user_same_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 is still blocked from IP 1.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.BLOCKED)
@override_settings(AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP=True)
def test_lockout_by_user_and_ip_allows_when_same_user_diff_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 can still login from IP 2.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP=True)
def test_lockout_by_user_and_ip_allows_when_diff_user_same_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 can still login from IP 1.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP=True)
def test_lockout_by_user_and_ip_allows_when_diff_user_diff_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 can still login from IP 2.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP=True, AXES_FAILURE_LIMIT=2
)
def test_lockout_by_user_and_ip_allows_when_diff_user_same_ip_using_cache_multiple_attempts(
self,
):
# User 1 is locked out from IP 1.
response = self._login(self.USER_1, self.WRONG_PASSWORD, self.IP_1)
self.assertEqual(response.status_code, self.ATTEMPT_NOT_BLOCKED)
# Second attempt from different IP
response = self._login(self.USER_1, self.WRONG_PASSWORD, self.IP_2)
self.assertEqual(response.status_code, self.ATTEMPT_NOT_BLOCKED)
# Second attempt from same IP, different username
response = self._login(self.USER_2, self.WRONG_PASSWORD, self.IP_1)
self.assertEqual(response.status_code, self.ATTEMPT_NOT_BLOCKED)
# User 1 is blocked from IP 1
response = self._login(self.USER_1, self.WRONG_PASSWORD, ip_addr=self.IP_1)
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
# User 1 is blocked from IP 2
response = self._login(self.USER_1, self.WRONG_PASSWORD, ip_addr=self.IP_2)
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
        # User 2 can still log in from IP 2, though only one attempt remains for them
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP=True)
def test_lockout_by_user_and_ip_with_empty_username_allows_other_users_using_cache(
self,
):
# User with empty username is locked out from IP 1.
self._lockout_user_from_ip(username="", ip_addr=self.IP_1)
# Still possible to access the login page
response = self.client.get(reverse("admin:login"), REMOTE_ADDR=self.IP_1)
self.assertContains(response, self.LOGIN_FORM_KEY, status_code=200, html=True)
# Test for true and false positives when blocking by user or IP together.
# With cache enabled. When AXES_LOCK_OUT_BY_USER_OR_IP = True
@override_settings(AXES_LOCK_OUT_BY_USER_OR_IP=True)
def test_lockout_by_user_or_ip_blocks_when_same_user_same_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 1 is still blocked from IP 1.
response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_1)
self.assertEqual(response.status_code, self.BLOCKED)
@override_settings(AXES_LOCK_OUT_BY_USER_OR_IP=True)
    def test_lockout_by_user_or_ip_blocks_when_same_user_diff_ip_using_cache(self):
        # User 1 is locked out from IP 1.
        self._lockout_user1_from_ip1()
        # User 1 is also blocked from IP 2, since the lockout follows the username.
        response = self._login(self.USER_1, self.VALID_PASSWORD, ip_addr=self.IP_2)
        self.assertEqual(response.status_code, self.BLOCKED)
@override_settings(AXES_LOCK_OUT_BY_USER_OR_IP=True)
    def test_lockout_by_user_or_ip_blocks_when_diff_user_same_ip_using_cache(self):
        # User 1 is locked out from IP 1.
        self._lockout_user1_from_ip1()
        # User 2 is also blocked from IP 1, since the IP itself is locked out.
        response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_1)
        self.assertEqual(response.status_code, self.BLOCKED)
@override_settings(AXES_LOCK_OUT_BY_USER_OR_IP=True, AXES_FAILURE_LIMIT=3)
def test_lockout_by_user_or_ip_allows_when_diff_user_same_ip_using_cache_multiple_attempts(
self,
):
# User 1 is locked out from IP 1.
response = self._login(self.USER_1, self.WRONG_PASSWORD, self.IP_1)
self.assertEqual(response.status_code, self.ATTEMPT_NOT_BLOCKED)
# Second attempt from different IP
response = self._login(self.USER_1, self.WRONG_PASSWORD, self.IP_2)
self.assertEqual(response.status_code, self.ATTEMPT_NOT_BLOCKED)
        # User 1 already has 2 failed attempts, so the next failure locks him out on all IPs
response = self._login(self.USER_1, self.WRONG_PASSWORD, ip_addr=self.IP_2)
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
response = self._login(self.USER_1, self.WRONG_PASSWORD, ip_addr=self.IP_3)
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
        # IP 1 still has one attempt left
response = self._login(self.USER_2, self.WRONG_PASSWORD, self.IP_1)
self.assertEqual(response.status_code, self.ATTEMPT_NOT_BLOCKED)
# But now IP 1 is blocked for all attempts
response = self._login(self.USER_1, self.WRONG_PASSWORD, ip_addr=self.IP_1)
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
response = self._login(self.USER_2, self.WRONG_PASSWORD, ip_addr=self.IP_1)
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
response = self._login(self.USER_3, self.WRONG_PASSWORD, ip_addr=self.IP_1)
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
@override_settings(AXES_LOCK_OUT_BY_USER_OR_IP=True, AXES_FAILURE_LIMIT=3)
def test_lockout_by_user_or_ip_allows_when_diff_user_same_ip_using_cache_multiple_failed_attempts(
self,
):
""" Test, if the failed attempts make also impact on the attempt count """
# User 1 is locked out from IP 1.
response = self._login(self.USER_1, self.WRONG_PASSWORD, self.IP_1)
self.assertEqual(response.status_code, self.ATTEMPT_NOT_BLOCKED)
# Second attempt from different IP
response = self._login(self.USER_1, self.WRONG_PASSWORD, self.IP_2)
self.assertEqual(response.status_code, self.ATTEMPT_NOT_BLOCKED)
# Second attempt from same IP, different username
response = self._login(self.USER_2, self.WRONG_PASSWORD, self.IP_1)
self.assertEqual(response.status_code, self.ATTEMPT_NOT_BLOCKED)
# User 1 is blocked from IP 2
response = self._login(self.USER_1, self.WRONG_PASSWORD, ip_addr=self.IP_2)
self.assertContains(response, self.LOCKED_MESSAGE, status_code=self.BLOCKED)
        # On IP 2 this is only the 2nd attempt, and for user 2 it is also only the 2nd attempt -> allow login
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(AXES_LOCK_OUT_BY_USER_OR_IP=True)
def test_lockout_by_user_or_ip_allows_when_diff_user_diff_ip_using_cache(self):
# User 1 is locked out from IP 1.
self._lockout_user1_from_ip1()
# User 2 can still login from IP 2.
response = self._login(self.USER_2, self.VALID_PASSWORD, ip_addr=self.IP_2)
self.assertEqual(response.status_code, self.ALLOWED)
@override_settings(AXES_LOCK_OUT_BY_USER_OR_IP=True)
def test_lockout_by_user_or_ip_with_empty_username_allows_other_users_using_cache(
self,
):
# User with empty username is locked out from IP 1.
self._lockout_user_from_ip(username="", ip_addr=self.IP_1)
# Still possible to access the login page
response = self.client.get(reverse("admin:login"), REMOTE_ADDR=self.IP_1)
self.assertContains(response, self.LOGIN_FORM_KEY, status_code=200, html=True)
# Test the same logic with cache handler
@override_settings(AXES_HANDLER="axes.handlers.cache.AxesCacheHandler")
class CacheLoginTestCase(DatabaseLoginTestCase):
def attempt_count(self):
cache = get_cache()
keys = cache._cache
return len(keys)
def reset(self, **kwargs):
get_cache().delete(make_cache_key_list([kwargs])[0])
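# A minimal sketch of the django-axes settings the tests above exercise. The
# values below are illustrative, not part of the test suite; in a real project
# they would live in settings.py, and normally only one lockout criterion is
# enabled at a time.
AXES_LOCKOUT_SETTINGS_SKETCH = {
    "AXES_FAILURE_LIMIT": 3,  # failed attempts allowed before lockout
    "AXES_ONLY_USER_FAILURES": False,  # True: count failures per username, ignoring the IP
    "AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP": False,  # True: lock only the exact (username, IP) pair
    "AXES_LOCK_OUT_BY_USER_OR_IP": False,  # True: lock when either the username or the IP exceeds the limit
    "AXES_HANDLER": "axes.handlers.cache.AxesCacheHandler",  # cache-backed handler, as in CacheLoginTestCase
}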
# ===== apache/tvm :: python/tvm/driver/tvmc/registry.py (Apache-2.0) =====
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file contains functions for processing registry based inputs for the TVMC CLI
"""
from tvm.driver.tvmc import TVMCException
# We can't tell the type inside an Array, but all current options are strings,
# so it can default to that. Bool and Integer aren't distinguished from each
# other, as both are represented by IntImm.
INTERNAL_TO_NATIVE_TYPE = {"runtime.String": str, "IntImm": int, "Array": str}
INTERNAL_TO_HELP = {"runtime.String": " string", "IntImm": "", "Array": " options"}
def _generate_registry_option_args(parser, registry, name):
target_group = parser.add_argument_group(f"{registry.flag_registry_name} {name}")
for option_name, option_type in registry.list_registered_options(name).items():
if option_type in INTERNAL_TO_NATIVE_TYPE:
target_group.add_argument(
f"--{registry.flag_registry_name}-{name}-{option_name}",
type=INTERNAL_TO_NATIVE_TYPE[option_type],
                help=(
                    f"{registry.flag_registry_name.title()} "
                    f"{name} {option_name}{INTERNAL_TO_HELP[option_type]}"
                ),
)
def generate_registry_args(parser, registry, default=None):
"""Walks through the given registry and generates arguments for each of the available options"""
parser.add_argument(
f"--{registry.flag_registry_name}",
help=f"{registry.flag_registry_name.title()} to compile the model with",
required=False,
default=default,
)
names = registry.list_registered()
for name in names:
_generate_registry_option_args(parser, registry, name)
def _reconstruct_registry_options(args, registry, name):
options = {}
for option, option_type in registry.list_registered_options(name).items():
if option_type in INTERNAL_TO_NATIVE_TYPE:
var_name = f"{registry.flag_registry_name}_{name}_{option.replace('-', '_')}"
option_value = getattr(args, var_name)
if option_value is not None:
options[option] = option_value
return options
def reconstruct_registry_entity(args, registry):
"""Reconstructs an entity from arguments generated from a registry"""
possible_names = registry.list_registered()
name = getattr(args, registry.flag_registry_name)
if name is None:
return None
if name not in possible_names:
raise TVMCException(f'{registry.flag_registry_name.title()} "{name}" is not defined')
reconstructed = {
possible_name: _reconstruct_registry_options(args, registry, possible_name)
for possible_name in possible_names
}
for possible_name in possible_names:
if possible_name != name and reconstructed[possible_name]:
first_option = list(reconstructed[possible_name])[0]
raise TVMCException(
f"Passed --{registry.flag_registry_name}-{possible_name}-{first_option} "
f"but did not specify {possible_name} executor"
)
return registry(name, reconstructed[name])
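# A self-contained usage sketch, not part of TVM: ``_DummyRegistry`` is a made-up
# stand-in that mimics the interface the helpers above rely on
# (``flag_registry_name``, ``list_registered``, ``list_registered_options`` and
# being callable). The executor and option names are illustrative only.
def _demo_registry_args():
    import argparse

    class _DummyRegistry:
        flag_registry_name = "executor"

        @staticmethod
        def list_registered():
            return ["aot", "graph"]

        @staticmethod
        def list_registered_options(name):
            return {"interface-api": "runtime.String", "workspace-byte-alignment": "IntImm"}

        def __call__(self, name, options):
            return name, options

    registry = _DummyRegistry()
    parser = argparse.ArgumentParser()
    generate_registry_args(parser, registry, default="graph")
    args = parser.parse_args(["--executor", "aot", "--executor-aot-interface-api", "c"])
    # Expected result: ("aot", {"interface-api": "c"})
    return reconstruct_registry_entity(args, registry)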
# ===== aws-deepracer-community/deepracer-analysis :: reward/reward_sample.py (MIT / MIT-0) =====
from time import time
class Reward:
def __init__(self, verbose=False):
self.previous_steps = None
self.initial_time = None
self.verbose = verbose
@staticmethod
def get_vector_length(v):
return (v[0] ** 2 + v[1] ** 2) ** 0.5
@staticmethod
def vector(a, b):
return b[0] - a[0], b[1] - a[1]
@staticmethod
def get_time(params):
# remember: this will not return time before
# the first step has completed so the total
# time will be slightly lower
return params.get('timestamp', None) or time()
def reward_function(self, params):
if self.previous_steps is None \
or self.previous_steps > params['steps']:
# new lap!
self.initial_time = self.get_time(params)
else:
# we're continuing
pass
steering_factor = 1.0
if abs(params['steering_angle']) > 14:
steering_factor = 0.7
reward = float(steering_factor)
self.previous_steps = params['steps']
if self.verbose:
print(params)
return reward
reward_object = Reward()
def reward_function(params):
return reward_object.reward_function(params)
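# A standalone sketch of how the reward function above could be exercised locally.
# The params dict is a made-up subset of the fields DeepRacer passes in, limited
# to the keys the function actually reads.
if __name__ == '__main__':
    _sample_params = {'steps': 1, 'steering_angle': 20.0, 'timestamp': 1.0}
    print(reward_function(_sample_params))  # 0.7 -- steering beyond 14 degrees is penalised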
# ===== home-assistant/core :: homeassistant/components/script/helpers.py (Apache-2.0) =====
"""Helpers for automation integration."""
from homeassistant.components.blueprint import DomainBlueprints
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.singleton import singleton
from .const import DOMAIN, LOGGER
DATA_BLUEPRINTS = "script_blueprints"
def _blueprint_in_use(hass: HomeAssistant, blueprint_path: str) -> bool:
"""Return True if any script references the blueprint."""
from . import scripts_with_blueprint # pylint: disable=import-outside-toplevel
return len(scripts_with_blueprint(hass, blueprint_path)) > 0
@singleton(DATA_BLUEPRINTS)
@callback
def async_get_blueprints(hass: HomeAssistant) -> DomainBlueprints:
"""Get script blueprints."""
return DomainBlueprints(hass, DOMAIN, LOGGER, _blueprint_in_use)
# ===== HuangJunJie2017/BEVDet :: mmdet3d/core/points/base_points.py (Apache-2.0) =====
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import abstractmethod
import numpy as np
import torch
from ..bbox.structures.utils import rotation_3d_in_axis
class BasePoints(object):
"""Base class for Points.
Args:
tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix.
points_dim (int, optional): Number of the dimension of a point.
Each row is (x, y, z). Defaults to 3.
attribute_dims (dict, optional): Dictionary to indicate the
meaning of extra dimension. Defaults to None.
Attributes:
tensor (torch.Tensor): Float matrix of N x points_dim.
points_dim (int): Integer indicating the dimension of a point.
Each row is (x, y, z, ...).
        attribute_dims (dict): Dictionary to indicate the meaning of extra
            dimension. Defaults to None.
rotation_axis (int): Default rotation axis for points rotation.
"""
def __init__(self, tensor, points_dim=3, attribute_dims=None):
if isinstance(tensor, torch.Tensor):
device = tensor.device
else:
device = torch.device('cpu')
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that
# does not depend on the inputs (and consequently confuses jit)
tensor = tensor.reshape((0, points_dim)).to(
dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == \
points_dim, tensor.size()
self.tensor = tensor
self.points_dim = points_dim
self.attribute_dims = attribute_dims
self.rotation_axis = 0
@property
def coord(self):
"""torch.Tensor: Coordinates of each point in shape (N, 3)."""
return self.tensor[:, :3]
@coord.setter
def coord(self, tensor):
"""Set the coordinates of each point."""
try:
tensor = tensor.reshape(self.shape[0], 3)
except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray
raise ValueError(f'got unexpected shape {tensor.shape}')
if not isinstance(tensor, torch.Tensor):
tensor = self.tensor.new_tensor(tensor)
self.tensor[:, :3] = tensor
@property
def height(self):
"""torch.Tensor:
A vector with height of each point in shape (N, 1), or None."""
if self.attribute_dims is not None and \
'height' in self.attribute_dims.keys():
return self.tensor[:, self.attribute_dims['height']]
else:
return None
@height.setter
def height(self, tensor):
"""Set the height of each point."""
try:
tensor = tensor.reshape(self.shape[0])
except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray
raise ValueError(f'got unexpected shape {tensor.shape}')
if not isinstance(tensor, torch.Tensor):
tensor = self.tensor.new_tensor(tensor)
if self.attribute_dims is not None and \
'height' in self.attribute_dims.keys():
self.tensor[:, self.attribute_dims['height']] = tensor
else:
# add height attribute
if self.attribute_dims is None:
self.attribute_dims = dict()
attr_dim = self.shape[1]
self.tensor = torch.cat([self.tensor, tensor.unsqueeze(1)], dim=1)
self.attribute_dims.update(dict(height=attr_dim))
self.points_dim += 1
@property
def color(self):
"""torch.Tensor:
A vector with color of each point in shape (N, 3), or None."""
if self.attribute_dims is not None and \
'color' in self.attribute_dims.keys():
return self.tensor[:, self.attribute_dims['color']]
else:
return None
@color.setter
def color(self, tensor):
"""Set the color of each point."""
try:
tensor = tensor.reshape(self.shape[0], 3)
except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray
raise ValueError(f'got unexpected shape {tensor.shape}')
if tensor.max() >= 256 or tensor.min() < 0:
warnings.warn('point got color value beyond [0, 255]')
if not isinstance(tensor, torch.Tensor):
tensor = self.tensor.new_tensor(tensor)
if self.attribute_dims is not None and \
'color' in self.attribute_dims.keys():
self.tensor[:, self.attribute_dims['color']] = tensor
else:
# add color attribute
if self.attribute_dims is None:
self.attribute_dims = dict()
attr_dim = self.shape[1]
self.tensor = torch.cat([self.tensor, tensor], dim=1)
self.attribute_dims.update(
dict(color=[attr_dim, attr_dim + 1, attr_dim + 2]))
self.points_dim += 3
@property
def shape(self):
"""torch.Shape: Shape of points."""
return self.tensor.shape
def shuffle(self):
"""Shuffle the points.
Returns:
torch.Tensor: The shuffled index.
"""
idx = torch.randperm(self.__len__(), device=self.tensor.device)
self.tensor = self.tensor[idx]
return idx
def rotate(self, rotation, axis=None):
"""Rotate points with the given rotation matrix or angle.
Args:
rotation (float | np.ndarray | torch.Tensor): Rotation matrix
or angle.
axis (int, optional): Axis to rotate at. Defaults to None.
"""
if not isinstance(rotation, torch.Tensor):
rotation = self.tensor.new_tensor(rotation)
assert rotation.shape == torch.Size([3, 3]) or \
rotation.numel() == 1, f'invalid rotation shape {rotation.shape}'
if axis is None:
axis = self.rotation_axis
if rotation.numel() == 1:
rotated_points, rot_mat_T = rotation_3d_in_axis(
self.tensor[:, :3][None], rotation, axis=axis, return_mat=True)
self.tensor[:, :3] = rotated_points.squeeze(0)
rot_mat_T = rot_mat_T.squeeze(0)
else:
# rotation.numel() == 9
self.tensor[:, :3] = self.tensor[:, :3] @ rotation
rot_mat_T = rotation
return rot_mat_T
@abstractmethod
def flip(self, bev_direction='horizontal'):
"""Flip the points along given BEV direction.
Args:
bev_direction (str): Flip direction (horizontal or vertical).
"""
pass
def translate(self, trans_vector):
"""Translate points with the given translation vector.
Args:
trans_vector (np.ndarray, torch.Tensor): Translation
vector of size 3 or nx3.
"""
if not isinstance(trans_vector, torch.Tensor):
trans_vector = self.tensor.new_tensor(trans_vector)
trans_vector = trans_vector.squeeze(0)
if trans_vector.dim() == 1:
assert trans_vector.shape[0] == 3
elif trans_vector.dim() == 2:
assert trans_vector.shape[0] == self.tensor.shape[0] and \
trans_vector.shape[1] == 3
else:
raise NotImplementedError(
f'Unsupported translation vector of shape {trans_vector.shape}'
)
self.tensor[:, :3] += trans_vector
def in_range_3d(self, point_range):
"""Check whether the points are in the given range.
Args:
point_range (list | torch.Tensor): The range of point
(x_min, y_min, z_min, x_max, y_max, z_max)
Note:
            In the original implementation of SECOND, checking whether a box is
            in the range checks whether the points are in a convex polygon; here
            we reduce that to a simpler axis-aligned check.
Returns:
torch.Tensor: A binary vector indicating whether each point is
inside the reference range.
"""
in_range_flags = ((self.tensor[:, 0] > point_range[0])
& (self.tensor[:, 1] > point_range[1])
& (self.tensor[:, 2] > point_range[2])
& (self.tensor[:, 0] < point_range[3])
& (self.tensor[:, 1] < point_range[4])
& (self.tensor[:, 2] < point_range[5]))
return in_range_flags
@property
def bev(self):
"""torch.Tensor: BEV of the points in shape (N, 2)."""
return self.tensor[:, [0, 1]]
def in_range_bev(self, point_range):
"""Check whether the points are in the given range.
Args:
point_range (list | torch.Tensor): The range of point
in order of (x_min, y_min, x_max, y_max).
Returns:
torch.Tensor: Indicating whether each point is inside
the reference range.
"""
in_range_flags = ((self.bev[:, 0] > point_range[0])
& (self.bev[:, 1] > point_range[1])
& (self.bev[:, 0] < point_range[2])
& (self.bev[:, 1] < point_range[3]))
return in_range_flags
@abstractmethod
def convert_to(self, dst, rt_mat=None):
"""Convert self to ``dst`` mode.
Args:
dst (:obj:`CoordMode`): The target Box mode.
rt_mat (np.ndarray | torch.Tensor, optional): The rotation and
translation matrix between different coordinates.
Defaults to None.
The conversion from `src` coordinates to `dst` coordinates
usually comes along the change of sensors, e.g., from camera
to LiDAR. This requires a transformation matrix.
Returns:
:obj:`BasePoints`: The converted box of the same type
in the `dst` mode.
"""
pass
def scale(self, scale_factor):
"""Scale the points with horizontal and vertical scaling factors.
Args:
scale_factors (float): Scale factors to scale the points.
"""
self.tensor[:, :3] *= scale_factor
def __getitem__(self, item):
"""
Note:
The following usage are allowed:
1. `new_points = points[3]`:
return a `Points` that contains only one point.
2. `new_points = points[2:10]`:
return a slice of points.
3. `new_points = points[vector]`:
where vector is a torch.BoolTensor with `length = len(points)`.
Nonzero elements in the vector will be selected.
4. `new_points = points[3:11, vector]`:
return a slice of points and attribute dims.
5. `new_points = points[4:12, 2]`:
return a slice of points with single attribute.
Note that the returned Points might share storage with this Points,
subject to Pytorch's indexing semantics.
Returns:
:obj:`BasePoints`: A new object of
:class:`BasePoints` after indexing.
"""
original_type = type(self)
if isinstance(item, int):
return original_type(
self.tensor[item].view(1, -1),
points_dim=self.points_dim,
attribute_dims=self.attribute_dims)
elif isinstance(item, tuple) and len(item) == 2:
if isinstance(item[1], slice):
start = 0 if item[1].start is None else item[1].start
stop = self.tensor.shape[1] if \
item[1].stop is None else item[1].stop
step = 1 if item[1].step is None else item[1].step
item = list(item)
item[1] = list(range(start, stop, step))
item = tuple(item)
elif isinstance(item[1], int):
item = list(item)
item[1] = [item[1]]
item = tuple(item)
p = self.tensor[item[0], item[1]]
keep_dims = list(
set(item[1]).intersection(set(range(3, self.tensor.shape[1]))))
if self.attribute_dims is not None:
attribute_dims = self.attribute_dims.copy()
for key in self.attribute_dims.keys():
cur_attribute_dims = attribute_dims[key]
if isinstance(cur_attribute_dims, int):
cur_attribute_dims = [cur_attribute_dims]
intersect_attr = list(
set(cur_attribute_dims).intersection(set(keep_dims)))
if len(intersect_attr) == 1:
attribute_dims[key] = intersect_attr[0]
elif len(intersect_attr) > 1:
attribute_dims[key] = intersect_attr
else:
attribute_dims.pop(key)
else:
attribute_dims = None
elif isinstance(item, (slice, np.ndarray, torch.Tensor)):
p = self.tensor[item]
attribute_dims = self.attribute_dims
else:
raise NotImplementedError(f'Invalid slice {item}!')
assert p.dim() == 2, \
f'Indexing on Points with {item} failed to return a matrix!'
return original_type(
p, points_dim=p.shape[1], attribute_dims=attribute_dims)
def __len__(self):
"""int: Number of points in the current object."""
return self.tensor.shape[0]
def __repr__(self):
"""str: Return a strings that describes the object."""
return self.__class__.__name__ + '(\n ' + str(self.tensor) + ')'
@classmethod
def cat(cls, points_list):
"""Concatenate a list of Points into a single Points.
Args:
points_list (list[:obj:`BasePoints`]): List of points.
Returns:
:obj:`BasePoints`: The concatenated Points.
"""
assert isinstance(points_list, (list, tuple))
if len(points_list) == 0:
return cls(torch.empty(0))
assert all(isinstance(points, cls) for points in points_list)
# use torch.cat (v.s. layers.cat)
# so the returned points never share storage with input
cat_points = cls(
torch.cat([p.tensor for p in points_list], dim=0),
points_dim=points_list[0].tensor.shape[1],
attribute_dims=points_list[0].attribute_dims)
return cat_points
def to(self, device):
"""Convert current points to a specific device.
Args:
device (str | :obj:`torch.device`): The name of the device.
Returns:
            :obj:`BasePoints`: A new points object on the
                specific device.
"""
original_type = type(self)
return original_type(
self.tensor.to(device),
points_dim=self.points_dim,
attribute_dims=self.attribute_dims)
def clone(self):
"""Clone the Points.
Returns:
            :obj:`BasePoints`: Point object with the same properties
                as self.
"""
original_type = type(self)
return original_type(
self.tensor.clone(),
points_dim=self.points_dim,
attribute_dims=self.attribute_dims)
@property
def device(self):
"""str: The device of the points are on."""
return self.tensor.device
def __iter__(self):
"""Yield a point as a Tensor of shape (4,) at a time.
Returns:
torch.Tensor: A point of shape (4,).
"""
yield from self.tensor
def new_point(self, data):
"""Create a new point object with data.
        The new point and its tensor have similar properties
        to self and self.tensor, respectively.
Args:
data (torch.Tensor | numpy.array | list): Data to be copied.
Returns:
:obj:`BasePoints`: A new point object with ``data``,
the object's other properties are similar to ``self``.
"""
new_tensor = self.tensor.new_tensor(data) \
if not isinstance(data, torch.Tensor) else data.to(self.device)
original_type = type(self)
return original_type(
new_tensor,
points_dim=self.points_dim,
attribute_dims=self.attribute_dims)
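# A short illustrative sketch (assumes torch is installed). BasePoints is used
# directly here for brevity; in practice a concrete subclass providing flip()
# and convert_to() would be used. The coordinates and range are arbitrary.
if __name__ == '__main__':
    _pts = BasePoints(torch.tensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]), points_dim=3)
    _pts.translate([1.0, 1.0, 1.0])          # every point shifted by (1, 1, 1)
    _mask = _pts.in_range_3d([1, 1, 1, 5, 5, 5])
    print(_mask.tolist())                    # [False, True]: the comparison is strict
    print(_pts[_mask].shape)                 # torch.Size([1, 3])
    print(len(_pts))                         # 2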
# ===== leo-editor/leo-editor :: leo/plugins/writers/markdown.py (BSD-3-Clause / MIT) =====
#@+leo-ver=5-thin
#@+node:ekr.20140726091031.18073: * @file ../plugins/writers/markdown.py
"""The @auto write code for markdown."""
from leo.core import leoGlobals as g
from leo.core.leoNodes import Position
import leo.plugins.writers.basewriter as basewriter
#@+others
#@+node:ekr.20140726091031.18075: ** class MarkdownWriter(BaseWriter)
class MarkdownWriter(basewriter.BaseWriter):
"""The writer class for markdown files."""
#@+others
#@+node:ekr.20140726091031.18076: *3* mdw.write
def write(self, root: Position) -> None:
"""Write all the *descendants* of an @auto-markdown node."""
# Fix bug 66: errors inhibited read @auto foo.md.
# New in Leo 5.5: Skip !headlines. Convert all others to '#' sections.
self.root = root
self.write_root(root)
for p in root.subtree():
if g.app.force_at_auto_sentinels: # pragma: no cover
self.put_node_sentinel(p, '<!--', delim2='-->')
self.write_headline(p)
# Ensure that every section ends with exactly two newlines.
s = p.b.rstrip() + '\n\n'
lines = s.splitlines(False)
for s in lines:
if not g.isDirective(s):
self.put(s)
root.setVisited()
#@+node:ekr.20141110223158.20: *3* mdw.write_headline
def write_headline(self, p: Position) -> None:
"""
Write or skip the headline.
New in Leo 5.5: Always write '#' sections.
This will cause perfect import to fail.
The alternatives are much worse.
"""
level = p.level() - self.root.level()
assert level > 0, p.h
kind = p.h and p.h[0]
if kind == '!':
pass # The signal for a declaration node.
else:
self.put(f"{'#' * level} {p.h.lstrip()}") # Leo 6.6.4: preserve spacing.
#@+node:ekr.20171230170642.1: *3* mdw.write_root
def write_root(self, root: Position) -> None:
"""Write the root @auto-org node."""
lines = [z for z in g.splitLines(root.b) if not g.isDirective(z)]
for s in lines: # pragma: no cover (the root node usually contains no extra text).
self.put(s)
#@-others
#@-others
writer_dict = {
'@auto': ['@auto-md', '@auto-markdown',],
'class': MarkdownWriter,
'extensions': ['.md',],
}
#@@language python
#@@tabwidth -4
#@-leo
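# A tiny standalone sketch, independent of Leo, of the headline mapping that
# MarkdownWriter.write_headline implements: outline level N becomes an N-level
# '#' heading, while headlines starting with '!' are skipped.
def headline_to_markdown(level, headline):
    if headline.startswith('!'):
        return None  # declaration node: no heading is written
    return f"{'#' * level} {headline.lstrip()}"
# headline_to_markdown(2, 'Usage notes') -> '## Usage notes'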
# ===== zopefoundation/RestrictedPython :: src/RestrictedPython/Guards.py (ZPL-2.1) =====
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# This tiny set of safe builtins is extended by users of the module.
# AccessControl.ZopeGuards contains a large set of wrappers for builtins.
# DocumentTemplate.DT_UTil contains a few.
import builtins
from RestrictedPython._compat import IS_PY311_OR_GREATER
safe_builtins = {}
_safe_names = [
'__build_class__',
'None',
'False',
'True',
'abs',
'bool',
'bytes',
'callable',
'chr',
'complex',
'divmod',
'float',
'hash',
'hex',
'id',
'int',
'isinstance',
'issubclass',
'len',
'oct',
'ord',
'pow',
'range',
'repr',
'round',
'slice',
'sorted',
'str',
'tuple',
'zip'
]
_safe_exceptions = [
'ArithmeticError',
'AssertionError',
'AttributeError',
'BaseException',
'BufferError',
'BytesWarning',
'DeprecationWarning',
'EOFError',
'EnvironmentError',
'Exception',
'FloatingPointError',
'FutureWarning',
'GeneratorExit',
'IOError',
'ImportError',
'ImportWarning',
'IndentationError',
'IndexError',
'KeyError',
'KeyboardInterrupt',
'LookupError',
'MemoryError',
'NameError',
'NotImplementedError',
'OSError',
'OverflowError',
'PendingDeprecationWarning',
'ReferenceError',
'RuntimeError',
'RuntimeWarning',
'StopIteration',
'SyntaxError',
'SyntaxWarning',
'SystemError',
'SystemExit',
'TabError',
'TypeError',
'UnboundLocalError',
'UnicodeDecodeError',
'UnicodeEncodeError',
'UnicodeError',
'UnicodeTranslateError',
'UnicodeWarning',
'UserWarning',
'ValueError',
'Warning',
'ZeroDivisionError',
]
if IS_PY311_OR_GREATER:
_safe_exceptions.append("ExceptionGroup")
for name in _safe_names:
safe_builtins[name] = getattr(builtins, name)
for name in _safe_exceptions:
safe_builtins[name] = getattr(builtins, name)
# Wrappers provided by this module:
# delattr
# setattr
# Wrappers provided by ZopeGuards:
# __import__
# apply
# dict
# enumerate
# filter
# getattr
# hasattr
# iter
# list
# map
# max
# min
# sum
# all
# any
# Builtins that are intentionally disabled
# compile - don't let them produce new code
# dir - a general purpose introspector, probably hard to wrap
# execfile - no direct I/O
# file - no direct I/O
# globals - uncontrolled namespace access
# input - no direct I/O
# locals - uncontrolled namespace access
# open - no direct I/O
# raw_input - no direct I/O
# vars - uncontrolled namespace access
# There are several strings that describe Python. I think there's no
# point to including these, although they are obviously safe:
# copyright, credits, exit, help, license, quit
# Not provided anywhere. Do something about these? Several are
# related to new-style classes, which we are too scared of to support
# <0.3 wink>. coerce, buffer, and reload are esoteric enough that no
# one should care.
# buffer
# bytearray
# classmethod
# coerce
# eval
# intern
# memoryview
# object
# property
# reload
# staticmethod
# super
# type
def _write_wrapper():
# Construct the write wrapper class
def _handler(secattr, error_msg):
# Make a class method.
def handler(self, *args):
try:
f = getattr(self.ob, secattr)
except AttributeError:
raise TypeError(error_msg)
f(*args)
return handler
class Wrapper:
def __init__(self, ob):
self.__dict__['ob'] = ob
__setitem__ = _handler(
'__guarded_setitem__',
'object does not support item or slice assignment')
__delitem__ = _handler(
'__guarded_delitem__',
'object does not support item or slice assignment')
__setattr__ = _handler(
'__guarded_setattr__',
'attribute-less object (assign or del)')
__delattr__ = _handler(
'__guarded_delattr__',
'attribute-less object (assign or del)')
return Wrapper
def _full_write_guard():
# Nested scope abuse!
# safetypes and Wrapper variables are used by guard()
safetypes = {dict, list}
Wrapper = _write_wrapper()
def guard(ob):
# Don't bother wrapping simple types, or objects that claim to
# handle their own write security.
if type(ob) in safetypes or hasattr(ob, '_guarded_writes'):
return ob
# Hand the object to the Wrapper instance, then return the instance.
return Wrapper(ob)
return guard
full_write_guard = _full_write_guard()
def guarded_setattr(object, name, value):
setattr(full_write_guard(object), name, value)
safe_builtins['setattr'] = guarded_setattr
def guarded_delattr(object, name):
delattr(full_write_guard(object), name)
safe_builtins['delattr'] = guarded_delattr
def safer_getattr(object, name, default=None, getattr=getattr):
"""Getattr implementation which prevents using format on string objects.
format() is considered harmful:
http://lucumr.pocoo.org/2016/12/29/careful-with-str-format/
"""
if name in ('format', 'format_map') and (
isinstance(object, str) or
(isinstance(object, type) and issubclass(object, str))):
raise NotImplementedError(
'Using the format*() methods of `str` is not safe')
if name.startswith('_'):
raise AttributeError(
'"{name}" is an invalid attribute name because it '
'starts with "_"'.format(name=name)
)
return getattr(object, name, default)
safe_builtins['_getattr_'] = safer_getattr
def guarded_iter_unpack_sequence(it, spec, _getiter_):
"""Protect sequence unpacking of targets in a 'for loop'.
The target of a for loop could be a sequence.
For example "for a, b in it"
=> Each object from the iterator needs guarded sequence unpacking.
"""
# The iteration itself needs to be protected as well.
for ob in _getiter_(it):
yield guarded_unpack_sequence(ob, spec, _getiter_)
def guarded_unpack_sequence(it, spec, _getiter_):
"""Protect nested sequence unpacking.
Protect the unpacking of 'it' by wrapping it with '_getiter_'.
Furthermore for each child element, defined by spec,
guarded_unpack_sequence is called again.
Have a look at transformer.py 'gen_unpack_spec' for a more detailed
explanation.
"""
# Do the guarded unpacking of the sequence.
ret = list(_getiter_(it))
    # If the sequence is shorter than expected, the interpreter will raise
    # 'ValueError: need more than X value to unpack' anyway
    # => no children are unpacked => nothing to protect.
if len(ret) < spec['min_len']:
return ret
# For all child elements do the guarded unpacking again.
for (idx, child_spec) in spec['childs']:
ret[idx] = guarded_unpack_sequence(ret[idx], child_spec, _getiter_)
return ret
safe_globals = {'__builtins__': safe_builtins}
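# A brief, self-contained sketch exercising the guards defined above. It is not
# part of the RestrictedPython API; it simply calls the module-level helpers
# directly to show their behaviour.
def _demo_guards():
    class Plain:
        pass
    try:
        guarded_setattr(Plain(), 'x', 1)  # plain objects expose no __guarded_setattr__
    except TypeError as exc:
        print(exc)  # "attribute-less object (assign or del)"
    try:
        safer_getattr('{0}', 'format')  # str.format is blocked outright
    except NotImplementedError as exc:
        print(exc)
    return safer_getattr([1, 2, 3], 'count')  # ordinary attributes still resolve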
# ===== klauer/qtpynodeeditor :: qtpynodeeditor/__init__.py (BSD-3-Clause) =====
from .connection import Connection
from .connection_geometry import ConnectionGeometry
from .connection_graphics_object import ConnectionGraphicsObject
from .connection_painter import ConnectionPainter
from .data_model_registry import DataModelRegistry
from .enums import ConnectionPolicy, NodeValidationState, PortType
from .exceptions import (ConnectionCycleFailure, ConnectionDataTypeFailure,
ConnectionPointFailure, ConnectionPortNotEmptyFailure,
ConnectionRequiresPortFailure, ConnectionSelfFailure,
MultipleInputConnectionError, NodeConnectionFailure,
PortsAlreadyConnectedError, PortsOfSameTypeError)
from .flow_scene import FlowScene
from .flow_view import FlowView
from .node import Node, NodeDataType
from .node_connection_interaction import NodeConnectionInteraction
from .node_data import NodeData, NodeDataModel
from .node_geometry import NodeGeometry
from .node_graphics_object import NodeGraphicsObject
from .node_painter import NodePainter, NodePainterDelegate
from .node_state import NodeState
from .port import Port, opposite_port
from .style import (ConnectionStyle, FlowViewStyle, NodeStyle, Style,
StyleCollection)
from .version import __version__ # noqa: F401
__all__ = [
'Connection',
'ConnectionCycleFailure',
'ConnectionDataTypeFailure',
'ConnectionGeometry',
'ConnectionGraphicsObject',
'ConnectionPainter',
'ConnectionPointFailure',
'ConnectionPolicy',
'ConnectionPortNotEmptyFailure',
'ConnectionRequiresPortFailure',
'ConnectionSelfFailure',
'ConnectionStyle',
'DataModelRegistry',
'FlowScene',
'FlowView',
'FlowViewStyle',
'MultipleInputConnectionError',
'Node',
'NodeConnectionFailure',
'NodeConnectionInteraction',
'NodeData',
'NodeDataModel',
'NodeDataType',
'NodeGeometry',
'NodeGraphicsObject',
'NodePainter',
'NodePainterDelegate',
'NodeState',
'NodeStyle',
'NodeValidationState',
'Port',
'PortType',
'PortsAlreadyConnectedError',
'PortsOfSameTypeError',
'Style',
'StyleCollection',
'opposite_port',
]
# ===== has2k1/plotnine :: plotnine/geoms/geom_map.py (MIT) =====
from __future__ import annotations
import typing
import numpy as np
import pandas as pd
from ..doctools import document
from ..exceptions import PlotnineError
from ..utils import SIZE_FACTOR, to_rgba
from .geom import geom
from .geom_point import geom_point
from .geom_polygon import geom_polygon
if typing.TYPE_CHECKING:
from typing import Any
import numpy.typing as npt
from shapely.geometry.polygon import LinearRing, Polygon
from plotnine.iapi import panel_view
from plotnine.typing import (
Aes,
Axes,
Coord,
DataLike,
DrawingArea,
Layer,
PathPatch,
)
@document
class geom_map(geom):
"""
    Draw map features
    The map features are drawn without any special projections.
{usage}
Parameters
----------
{common_parameters}
Notes
-----
This geom is best suited for plotting a shapefile read into
geopandas dataframe. The dataframe should have a ``geometry``
column.
"""
DEFAULT_AES = {
"alpha": 1,
"color": "#111111",
"fill": "#333333",
"linetype": "solid",
"shape": "o",
"size": 0.5,
"stroke": 0.5,
}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "identity",
"na_rm": False,
}
REQUIRED_AES = {"geometry"}
def __init__(
self,
mapping: Aes | None = None,
data: DataLike | None = None,
**kwargs: Any,
):
geom.__init__(self, mapping, data, **kwargs)
# Almost all geodataframes loaded from shapefiles
# have a geometry column.
if "geometry" not in self.mapping:
self.mapping["geometry"] = "geometry"
def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:
if not len(data):
return data
        # Remove any NULL geometries, and remember:
        # all the non-null shapes in a shapefile are required to be
        # of the same shape type.
bool_idx = np.array([g is not None for g in data["geometry"]])
if not np.all(bool_idx):
data = data.loc[bool_idx]
# Add polygon limits. Scale training uses them
try:
bounds = data["geometry"].bounds
except AttributeError:
# The geometry is not a GeoSeries
# Bounds calculation is extracted from
# geopandas.base.GeoPandasBase.bounds
bounds = pd.DataFrame(
np.array([x.bounds for x in data["geometry"]]),
columns=["xmin", "ymin", "xmax", "ymax"],
index=data.index,
)
else:
bounds.rename(
columns={
"minx": "xmin",
"maxx": "xmax",
"miny": "ymin",
"maxy": "ymax",
},
inplace=True,
)
data = pd.concat([data, bounds], axis=1)
return data
def draw_panel(
self,
data: pd.DataFrame,
panel_params: panel_view,
coord: Coord,
ax: Axes,
**params: Any,
):
if not len(data):
return
data.loc[data["color"].isna(), "color"] = "none"
data.loc[data["fill"].isna(), "fill"] = "none"
data["fill"] = to_rgba(data["fill"], data["alpha"])
geom_type = data.geometry.iloc[0].geom_type
if geom_type in ("Polygon", "MultiPolygon"):
from matplotlib.collections import PatchCollection
data["size"] *= SIZE_FACTOR
patches = [PolygonPatch(g) for g in data["geometry"]]
coll = PatchCollection(
patches,
edgecolor=data["color"],
facecolor=data["fill"],
linestyle=data["linetype"],
linewidth=data["size"],
zorder=params["zorder"],
rasterized=params["raster"],
)
ax.add_collection(coll)
elif geom_type == "Point":
# Extract point coordinates from shapely geom
# and plot with geom_point
arr = np.array([list(g.coords)[0] for g in data["geometry"]])
data["x"] = arr[:, 0]
data["y"] = arr[:, 1]
for _, gdata in data.groupby("group"):
gdata.reset_index(inplace=True, drop=True)
gdata.is_copy = None
geom_point.draw_group(gdata, panel_params, coord, ax, **params)
elif geom_type == "MultiPoint":
# Where n is the length of the dataframe (no. of multipoints),
# m is the number of all points in all multipoints
#
# - MultiPoint -> List of Points (tuples) (n -> m)
            # - Explode the list, to create a dataframe where each point
# is associated with the right aesthetics (n -> m)
# - Create x & y columns from the points (m -> m)
data["points"] = [
[p.coords[0] for p in mp.geoms] for mp in data["geometry"]
]
data = data.explode("points", ignore_index=True)
data["x"] = [p[0] for p in data["points"]]
data["y"] = [p[1] for p in data["points"]]
geom_point.draw_group(data, panel_params, coord, ax, **params)
elif geom_type in ("LineString", "MultiLineString"):
from matplotlib.collections import LineCollection
data["size"] *= SIZE_FACTOR
data["color"] = to_rgba(data["color"], data["alpha"])
segments = []
for g in data["geometry"]:
if g.geom_type == "LineString":
segments.append(g.coords)
else:
segments.extend(_g.coords for _g in g.geoms)
coll = LineCollection(
segments,
edgecolor=data["color"],
linewidth=data["size"],
linestyle=data["linetype"],
zorder=params["zorder"],
rasterized=params["raster"],
)
ax.add_collection(coll)
else:
raise TypeError(f"Could not plot geometry of type '{geom_type}'")
@staticmethod
def draw_legend(
data: pd.Series[Any], da: DrawingArea, lyr: Layer
) -> DrawingArea:
"""
Draw a rectangle in the box
Parameters
----------
data : Series
Data Row
da : DrawingArea
Canvas
lyr : layer
Layer
Returns
-------
out : DrawingArea
"""
data["size"] = data["stroke"]
del data["stroke"]
return geom_polygon.draw_legend(data, da, lyr)
def PolygonPatch(
obj: Polygon,
) -> PathPatch:
"""
Return a Matplotlib patch from a Polygon/MultiPolygon Geometry
Parameters
----------
    obj : shapely.geometry.Polygon | shapely.geometry.MultiPolygon
        A Polygon or MultiPolygon to create a patch for
Returns
-------
result : matplotlib.patches.PathPatch
A patch representing the shapely geometry
Notes
-----
This functionality was originally provided by the descartes package
by Sean Gillies (BSD license, https://pypi.org/project/descartes)
    which is no longer being maintained.
"""
from matplotlib.patches import PathPatch
from matplotlib.path import Path
def cw_coords(ring: LinearRing) -> npt.NDArray[Any]:
"""
Return Clockwise array coordinates
Parameters
----------
ring: shapely.geometry.polygon.LinearRing
LinearRing
Returns
-------
out: ndarray
(n x 2) array of coordinate points.
"""
if ring.is_ccw:
return np.asarray(ring.coords)[:, :2][::-1]
return np.asarray(ring.coords)[:, :2]
def ccw_coords(ring: LinearRing) -> npt.NDArray[Any]:
"""
Return Counter Clockwise array coordinates
Parameters
----------
ring: shapely.geometry.polygon.LinearRing
LinearRing
Returns
-------
out: ndarray
(n x 2) array of coordinate points.
"""
if ring.is_ccw:
return np.asarray(ring.coords)[:, :2]
return np.asarray(ring.coords)[:, :2][::-1]
# The interiors are holes in the Polygon
# MPL draws a hole if the vertex points are specified
# in an opposite direction. So we use Clockwise for
# the exterior/shell and Counter-Clockwise for any
# interiors/holes
if obj.geom_type == "Polygon":
_exterior = [Path(cw_coords(obj.exterior))]
_interior = [Path(ccw_coords(ring)) for ring in obj.interiors]
else:
# A MultiPolygon has one or more Polygon geoms.
# Concatenate the exterior of all the Polygons
# and the interiors
_exterior = []
_interior = []
for p in obj.geoms:
_exterior.append(Path(cw_coords(p.exterior)))
_interior.extend([Path(ccw_coords(ring)) for ring in p.interiors])
path = Path.make_compound_path(*_exterior, *_interior)
return PathPatch(path)
def check_geopandas():
try:
import geopandas # noqa: F401
except ImportError:
raise PlotnineError(
"geom_map requires geopandas. Please install geopandas."
)
# ===== common-workflow-language/cwltool :: tests/test_deps_env/random-lines/1.0/scripts/random-lines (Apache-2.0) =====
#!/usr/bin/env python
# Dan Blankenberg
# Selects N random lines from a file and outputs to another file, maintaining original line order
# allows specifying a seed
# does two passes to determine line offsets/count, and then to output contents
import optparse
import random
import sys
def get_random_by_subtraction( line_offsets, num_lines ):
while len( line_offsets ) > num_lines:
del line_offsets[ random.randint( 0, len( line_offsets ) - 1 ) ]
return line_offsets
def get_random_by_sample( line_offsets, num_lines ):
line_offsets = random.sample( line_offsets, num_lines )
line_offsets.sort()
return line_offsets
def get_random( line_offsets, num_lines ):
if num_lines > ( len( line_offsets ) / 2 ):
return get_random_by_subtraction( line_offsets, num_lines )
else:
return get_random_by_sample( line_offsets, num_lines )
def __main__():
parser = optparse.OptionParser()
parser.add_option( '-s', '--seed', dest='seed', action='store', type="string", default=None, help='Set the random seed.' )
(options, args) = parser.parse_args()
input = open( args[0], 'rb' )
output = sys.stdout
num_lines = int( args[1] )
assert num_lines > 0, "You must select at least one line."
if options.seed is not None:
random.seed( options.seed )
# get line offsets
line_offsets = []
teller = input.tell
readliner = input.readline
appender = line_offsets.append
while True:
offset = teller()
if readliner():
appender( offset )
else:
break
total_lines = len( line_offsets )
assert num_lines <= total_lines, "Error: asked to select more lines (%i) than there were in the file (%i)." % ( num_lines, total_lines )
# get random line offsets
line_offsets = get_random( line_offsets, num_lines )
# write out random lines
seeker = input.seek
writer = output.write
for line_offset in line_offsets:
seeker( line_offset )
writer( readliner().decode("utf-8") )
input.close()
output.close()
#print("Kept %i of %i total lines." % ( num_lines, total_lines ))
#if options.seed is not None:
# print('Used random seed of "%s".' % options.seed)
if __name__ == "__main__":
__main__()
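# A quick standalone illustration of the selection helpers above: with a fixed
# seed, pick 3 of 10 pretend byte offsets. Purely illustrative; not part of the
# original script.
def _demo_selection():
    random.seed('42')
    offsets = list(range(0, 100, 10))
    return get_random(offsets, 3)  # three of the offsets, still in ascending order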
# ===== django/django :: tests/forms_tests/field_tests/test_filepathfield.py (BSD-3-Clause) =====
import os.path
from django.core.exceptions import ValidationError
from django.forms import FilePathField
from django.test import SimpleTestCase
PATH = os.path.dirname(os.path.abspath(__file__))
def fix_os_paths(x):
if isinstance(x, str):
return x.removeprefix(PATH).replace("\\", "/")
elif isinstance(x, tuple):
return tuple(fix_os_paths(list(x)))
elif isinstance(x, list):
return [fix_os_paths(y) for y in x]
else:
return x
class FilePathFieldTest(SimpleTestCase):
expected_choices = [
("/filepathfield_test_dir/__init__.py", "__init__.py"),
("/filepathfield_test_dir/a.py", "a.py"),
("/filepathfield_test_dir/ab.py", "ab.py"),
("/filepathfield_test_dir/b.py", "b.py"),
("/filepathfield_test_dir/c/__init__.py", "__init__.py"),
("/filepathfield_test_dir/c/d.py", "d.py"),
("/filepathfield_test_dir/c/e.py", "e.py"),
("/filepathfield_test_dir/c/f/__init__.py", "__init__.py"),
("/filepathfield_test_dir/c/f/g.py", "g.py"),
("/filepathfield_test_dir/h/__init__.py", "__init__.py"),
("/filepathfield_test_dir/j/__init__.py", "__init__.py"),
]
path = os.path.join(PATH, "filepathfield_test_dir") + "/"
def assertChoices(self, field, expected_choices):
self.assertEqual(fix_os_paths(field.choices), expected_choices)
def test_fix_os_paths(self):
self.assertEqual(fix_os_paths(self.path), ("/filepathfield_test_dir/"))
def test_nonexistent_path(self):
with self.assertRaisesMessage(FileNotFoundError, "nonexistent"):
FilePathField(path="nonexistent")
def test_no_options(self):
f = FilePathField(path=self.path)
expected = [
("/filepathfield_test_dir/README", "README"),
] + self.expected_choices[:4]
self.assertChoices(f, expected)
def test_clean(self):
f = FilePathField(path=self.path)
msg = "'Select a valid choice. a.py is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean("a.py")
self.assertEqual(
fix_os_paths(f.clean(self.path + "a.py")), "/filepathfield_test_dir/a.py"
)
def test_match(self):
f = FilePathField(path=self.path, match=r"^.*?\.py$")
self.assertChoices(f, self.expected_choices[:4])
def test_recursive(self):
f = FilePathField(path=self.path, recursive=True, match=r"^.*?\.py$")
expected = [
("/filepathfield_test_dir/__init__.py", "__init__.py"),
("/filepathfield_test_dir/a.py", "a.py"),
("/filepathfield_test_dir/ab.py", "ab.py"),
("/filepathfield_test_dir/b.py", "b.py"),
("/filepathfield_test_dir/c/__init__.py", "c/__init__.py"),
("/filepathfield_test_dir/c/d.py", "c/d.py"),
("/filepathfield_test_dir/c/e.py", "c/e.py"),
("/filepathfield_test_dir/c/f/__init__.py", "c/f/__init__.py"),
("/filepathfield_test_dir/c/f/g.py", "c/f/g.py"),
("/filepathfield_test_dir/h/__init__.py", "h/__init__.py"),
("/filepathfield_test_dir/j/__init__.py", "j/__init__.py"),
]
self.assertChoices(f, expected)
def test_allow_folders(self):
f = FilePathField(path=self.path, allow_folders=True, allow_files=False)
self.assertChoices(
f,
[
("/filepathfield_test_dir/c", "c"),
("/filepathfield_test_dir/h", "h"),
("/filepathfield_test_dir/j", "j"),
],
)
def test_recursive_no_folders_or_files(self):
f = FilePathField(
path=self.path, recursive=True, allow_folders=False, allow_files=False
)
self.assertChoices(f, [])
def test_recursive_folders_without_files(self):
f = FilePathField(
path=self.path, recursive=True, allow_folders=True, allow_files=False
)
self.assertChoices(
f,
[
("/filepathfield_test_dir/c", "c"),
("/filepathfield_test_dir/h", "h"),
("/filepathfield_test_dir/j", "j"),
("/filepathfield_test_dir/c/f", "c/f"),
],
)
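# A minimal usage sketch, not part of the test module above; the form and path
# names are hypothetical. FilePathField exposes files under `path` that match the
# regex as form choices:
#     from django import forms
#     class ScriptForm(forms.Form):
#         script = forms.FilePathField(path="/opt/scripts", match=r".*\.py$", recursive=True)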
|
3f6314435de251a37a5db56e4d91c0b73cbf7019
|
1adebf72de7aa7147b1148ba35280645fbe5bbd3
|
/supriya/ugens/mac.py
|
fab397e9724be8404863680765fee490119090ea
|
[
"MIT"
] |
permissive
|
josiah-wolf-oberholtzer/supriya
|
d0c4f921a06e3f9df40f91a226a1c038d3ef84d5
|
2ebf835ce9bbfca19e4220628a32c30fa66e04f7
|
refs/heads/main
| 2023-07-20T00:06:23.955530
| 2023-07-18T03:02:14
| 2023-07-18T03:02:14
| 17,463,359
| 227
| 28
|
MIT
| 2023-07-18T03:02:15
| 2014-03-06T02:27:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
mac.py
|
from ..enums import SignalRange
from .bases import UGen, param, ugen
@ugen(kr=True, signal_range=SignalRange.UNIPOLAR)
class KeyState(UGen):
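    # Control-rate tracker for the state of a single keyboard key, selected by keycode.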
keycode = param(0.0)
minimum = param(0.0)
maximum = param(1.0)
lag = param(0.2)
@ugen(kr=True, signal_range=SignalRange.UNIPOLAR)
class MouseButton(UGen):
"""
A mouse-button tracker.
::
>>> supriya.ugens.MouseButton.kr()
MouseButton.kr()
"""
minimum = param(0.0)
maximum = param(1.0)
lag = param(0.2)
@ugen(kr=True, signal_range=SignalRange.UNIPOLAR)
class MouseX(UGen):
"""
A mouse cursor tracker.
    MouseX tracks the x-axis of the mouse cursor position.
::
>>> supriya.ugens.MouseX.kr()
MouseX.kr()
"""
minimum = param(0.0)
maximum = param(1.0)
warp = param(0.0)
lag = param(0.2)
@ugen(kr=True, signal_range=SignalRange.UNIPOLAR)
class MouseY(UGen):
"""
A mouse cursor tracker.
MouseY tracks the y-axis of the mouse cursor position.
::
>>> supriya.ugens.MouseY.kr()
MouseY.kr()
"""
minimum = param(0.0)
maximum = param(1.0)
warp = param(0.0)
lag = param(0.2)
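# A hedged usage note (keyword names assumed from the params above, not shown in the
# original module): the control-rate output of a tracker can modulate other UGens, e.g.
#     frequency = MouseX.kr(minimum=200.0, maximum=2000.0)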
|