Dataset schema (one row per field; ranges are observed min and max, "values" marks a categorical column):

field                                      dtype           range
hexsha                                     stringlengths   40 – 40
size                                       int64           1 – 1.03M
ext                                        stringclasses   10 values
lang                                       stringclasses   1 value
max_stars_repo_path                        stringlengths   3 – 239
max_stars_repo_name                        stringlengths   5 – 130
max_stars_repo_head_hexsha                 stringlengths   40 – 78
max_stars_repo_licenses                    listlengths     1 – 10
max_stars_count                            int64           1 – 191k
max_stars_repo_stars_event_min_datetime    stringlengths   24 – 24
max_stars_repo_stars_event_max_datetime    stringlengths   24 – 24
max_issues_repo_path                       stringlengths   3 – 239
max_issues_repo_name                       stringlengths   5 – 130
max_issues_repo_head_hexsha                stringlengths   40 – 78
max_issues_repo_licenses                   listlengths     1 – 10
max_issues_count                           int64           1 – 67k
max_issues_repo_issues_event_min_datetime  stringlengths   24 – 24
max_issues_repo_issues_event_max_datetime  stringlengths   24 – 24
max_forks_repo_path                        stringlengths   3 – 239
max_forks_repo_name                        stringlengths   5 – 130
max_forks_repo_head_hexsha                 stringlengths   40 – 78
max_forks_repo_licenses                    listlengths     1 – 10
max_forks_count                            int64           1 – 105k
max_forks_repo_forks_event_min_datetime    stringlengths   24 – 24
max_forks_repo_forks_event_max_datetime    stringlengths   24 – 24
content                                    stringlengths   1 – 1.03M
avg_line_length                            float64         1 – 958k
max_line_length                            int64           1 – 1.03M
alphanum_fraction                          float64         0 – 1
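The records below follow this schema, one field at a time. As a rough sketch of how such a shard could be inspected (the file name here is hypothetical; this dump does not say how the data is stored):

import pandas as pd

# Hypothetical shard file; the dump above does not name the actual file.
df = pd.read_parquet("shard-00000.parquet")

# Each schema field above is a column; e.g. list the largest Python files.
cols = ["max_stars_repo_name", "max_stars_repo_path", "size", "max_stars_count"]
print(df.sort_values("size", ascending=False)[cols].head())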
hexsha: acef8a878317bc2a83dd395e55febc19abfea4f5
size: 4,492
ext: py
lang: Python
max_stars_repo_path: userbot/plugins/markdown.py
max_stars_repo_name: azizkziba/FridayUserbot
max_stars_repo_head_hexsha: f77a692f74d1c2da2b6e7ee47f5eee7a9dd9e138
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 67
max_stars_repo_stars_event_min_datetime: 2020-03-04T09:34:55.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-04T11:09:10.000Z
max_issues_repo_path: userbot/plugins/markdown.py
max_issues_repo_name: azizkziba/FridayUserbot
max_issues_repo_head_hexsha: f77a692f74d1c2da2b6e7ee47f5eee7a9dd9e138
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 5
max_issues_repo_issues_event_min_datetime: 2020-08-25T15:58:13.000Z
max_issues_repo_issues_event_max_datetime: 2021-02-09T09:57:57.000Z
max_forks_repo_path: userbot/plugins/markdown.py
max_forks_repo_name: azizkziba/FridayUserbot
max_forks_repo_head_hexsha: f77a692f74d1c2da2b6e7ee47f5eee7a9dd9e138
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 450
max_forks_repo_forks_event_min_datetime: 2019-07-12T13:18:41.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-29T18:47:42.000Z
content:

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import re
from functools import partial

from telethon import events
from telethon.tl.functions.messages import EditMessageRequest
from telethon.extensions.markdown import DEFAULT_URL_RE
from telethon.utils import add_surrogate, del_surrogate
from telethon.tl.types import (
    MessageEntityBold, MessageEntityItalic, MessageEntityCode,
    MessageEntityPre, MessageEntityTextUrl
)


def parse_url_match(m):
    entity = MessageEntityTextUrl(
        offset=m.start(),
        length=len(m.group(1)),
        url=del_surrogate(m.group(2))
    )
    return m.group(1), entity


def get_tag_parser(tag, entity):
    # TODO unescape escaped tags?
    def tag_parser(m):
        return m.group(1), entity(offset=m.start(), length=len(m.group(1)))
    tag = re.escape(tag)
    return re.compile(tag + r'(.+?)' + tag, re.DOTALL), tag_parser


PRINTABLE_ASCII = range(0x21, 0x7f)


def parse_aesthetics(m):
    def aesthetify(string):
        for c in string:
            c = ord(c)
            if c in PRINTABLE_ASCII:
                c += 0xFF00 - 0x20
            elif c == ord(" "):
                c = 0x3000
            yield chr(c)
    return "".join(aesthetify(m[1])), None


def parse_subreddit(m):
    text = '/' + m.group(3)
    entity = MessageEntityTextUrl(
        offset=m.start(2),
        length=len(text),
        url=f'reddit.com{text}'
    )
    return m.group(1) + text, entity


def parse_strikethrough(m):
    text = m.group(2)
    text = "\u0336".join(text) + "\u0336 "
    return text, None


PARSED_ENTITIES = (
    MessageEntityBold, MessageEntityItalic, MessageEntityCode,
    MessageEntityPre, MessageEntityTextUrl
)
# A matcher is a tuple of (regex pattern, parse function)
# where the parse function takes the match and returns (text, entity)
MATCHERS = [
    (DEFAULT_URL_RE, parse_url_match),
    (get_tag_parser('**', MessageEntityBold)),
    (get_tag_parser('__', MessageEntityItalic)),
    (get_tag_parser('```', partial(MessageEntityPre, language=''))),
    (get_tag_parser('`', MessageEntityCode)),
    (re.compile(r'\+\+(.+?)\+\+'), parse_aesthetics),
    (re.compile(r'([^/\w]|^)(/?(r/\w+))'), parse_subreddit),
    (re.compile(r"(?<!\w)(~{2})(?!~~)(.+?)(?<!~)\1(?!\w)"), parse_strikethrough)
]


def parse(message, old_entities=None):
    entities = []
    old_entities = sorted(old_entities or [], key=lambda e: e.offset)

    i = 0
    after = 0
    message = add_surrogate(message)
    while i < len(message):
        for after, e in enumerate(old_entities[after:], start=after):
            # If the next entity is strictly to our right, we're done here
            if i < e.offset:
                break
            # Skip already existing entities if we're at one
            if i == e.offset:
                i += e.length
        # Find the first pattern that matches
        for pattern, parser in MATCHERS:
            match = pattern.match(message, pos=i)
            if match:
                break
        else:
            i += 1
            continue

        text, entity = parser(match)

        # Shift old entities after our current position (so they stay in place)
        shift = len(text) - len(match[0])
        if shift:
            for e in old_entities[after:]:
                e.offset += shift

        # Replace whole match with text from parser
        message = ''.join((
            message[:match.start()],
            text,
            message[match.end():]
        ))

        # Append entity if we got one
        if entity:
            entities.append(entity)

        # Skip past the match
        i += len(text)

    return del_surrogate(message), entities + old_entities


@borg.on(events.MessageEdited(outgoing=True))
@borg.on(events.NewMessage(outgoing=True))
async def reparse(event):
    old_entities = event.message.entities or []
    parser = partial(parse, old_entities=old_entities)
    message, msg_entities = await borg._parse_message_text(event.raw_text, parser)
    if len(old_entities) >= len(msg_entities) and event.raw_text == message:
        return

    await borg(EditMessageRequest(
        peer=await event.get_input_chat(),
        id=event.message.id,
        message=message,
        no_webpage=not bool(event.message.media),
        entities=msg_entities
    ))
    raise events.StopPropagation
avg_line_length: 30.351351
max_line_length: 82
alphanum_fraction: 0.620882
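The plugin in the record above replaces Telethon's default markdown parser: parse() walks the message, rewrites each matched tag (bold, italic, code, ++aesthetics++, /r/subreddit, ~~strikethrough~~) into plain text plus a Telegram entity, and shifts any pre-existing entities so their offsets stay correct. A minimal sketch of calling it directly, assuming the file above is importable as a module (outside the userbot, borg is undefined, so only parse() is usable):

# Hypothetical direct use of parse() from the plugin above.
text, entities = parse("some **bold** and `mono` text")
print(text)  # "some bold and mono text" - tags stripped from the text
for e in entities:
    print(type(e).__name__, e.offset, e.length)
# Expected: MessageEntityBold at offset 5, MessageEntityCode at offset 14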
hexsha: acef8adb79f8b0fda48b887b9aed31ceea246ddd
size: 2,742
ext: py
lang: Python
max_stars_repo_path: version.py
max_stars_repo_name: mwillsey/incubator-tvm
max_stars_repo_head_hexsha: e02dc69fef294eb73dd65d18949ed9e108f60cda
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2020-04-17T02:25:16.000Z
max_stars_repo_stars_event_max_datetime: 2020-11-25T11:39:43.000Z
max_issues_repo_path: version.py
max_issues_repo_name: mwillsey/incubator-tvm
max_issues_repo_head_hexsha: e02dc69fef294eb73dd65d18949ed9e108f60cda
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 3
max_issues_repo_issues_event_min_datetime: 2020-04-20T15:37:55.000Z
max_issues_repo_issues_event_max_datetime: 2020-05-13T05:34:28.000Z
max_forks_repo_path: version.py
max_forks_repo_name: mwillsey/incubator-tvm
max_forks_repo_head_hexsha: e02dc69fef294eb73dd65d18949ed9e108f60cda
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2020-04-08T07:08:04.000Z
max_forks_repo_forks_event_max_datetime: 2020-04-08T07:08:04.000Z
content:

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is the global script that set the version information of TVM.
This script runs and update all the locations that related to versions

List of affected files:
- tvm-root/python/tvm/_ffi/libinfo.py
- tvm-root/include/tvm/runtime/c_runtime_api.h
- tvm-root/conda/tvm/meta.yaml
- tvm-root/conda/tvm-libs/meta.yaml
"""
import os
import re

# current version
# We use the version of the incoming release for code
# that is under development
__version__ = "0.7.dev1"


# Implementations
def update(file_name, pattern, repl):
    update = []
    hit_counter = 0
    need_update = False
    for l in open(file_name):
        result = re.findall(pattern, l)
        if result:
            assert len(result) == 1
            hit_counter += 1
            if result[0] != repl:
                l = re.sub(pattern, repl, l)
                need_update = True
                print("%s: %s->%s" % (file_name, result[0], repl))
            else:
                print("%s: version is already %s" % (file_name, repl))
        update.append(l)
    if hit_counter != 1:
        raise RuntimeError("Cannot find version in %s" % file_name)

    if need_update:
        with open(file_name, "w") as output_file:
            for l in update:
                output_file.write(l)


def main():
    proj_root = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    # python path
    update(
        os.path.join(proj_root, "python", "tvm", "_ffi", "libinfo.py"),
        r"(?<=__version__ = \")[.0-9a-z]+",
        __version__,
    )
    # C++ header
    update(
        os.path.join(proj_root, "include", "tvm", "runtime", "c_runtime_api.h"),
        '(?<=TVM_VERSION ")[.0-9a-z]+',
        __version__,
    )
    # conda
    for path in ["tvm", "tvm-libs"]:
        update(
            os.path.join(proj_root, "conda", path, "meta.yaml"),
            '(?<=version = ")[.0-9a-z]+',
            __version__,
        )


if __name__ == "__main__":
    main()
avg_line_length: 31.159091
max_line_length: 80
alphanum_fraction: 0.631656
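The update() helper above works because each pattern is a lookbehind regex: the prefix (__version__ = " or TVM_VERSION ") must be present but is not part of the match, so re.sub replaces only the version literal itself. A standalone demonstration of the same pattern:

import re

line = '__version__ = "0.6.0"\n'
# Same lookbehind pattern main() passes for libinfo.py above.
pattern = r"(?<=__version__ = \")[.0-9a-z]+"
print(re.sub(pattern, "0.7.dev1", line))  # -> __version__ = "0.7.dev1"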
hexsha: acef8bab4c1794c189543beeb557d2c2d27a26c1
size: 1,400
ext: py
lang: Python
max_stars_repo_path: tools/checkLinks.py
max_stars_repo_name: airladon/figureone
max_stars_repo_head_hexsha: 108a3b2041fa9663c25adc14876240e1baa4ee11
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 31
max_stars_repo_stars_event_min_datetime: 2020-06-19T22:09:39.000Z
max_stars_repo_stars_event_max_datetime: 2022-02-06T17:11:38.000Z
max_issues_repo_path: tools/checkLinks.py
max_issues_repo_name: airladon/figureone
max_issues_repo_head_hexsha: 108a3b2041fa9663c25adc14876240e1baa4ee11
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 2
max_issues_repo_issues_event_min_datetime: 2021-09-02T04:21:43.000Z
max_issues_repo_issues_event_max_datetime: 2022-02-15T19:47:34.000Z
max_forks_repo_path: tools/checkLinks.py
max_forks_repo_name: airladon/figureone
max_forks_repo_head_hexsha: 108a3b2041fa9663c25adc14876240e1baa4ee11
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2021-07-20T08:57:41.000Z
max_forks_repo_forks_event_max_datetime: 2021-08-20T05:20:51.000Z
content:

import requests
import re
from requests_html import HTMLSession

session = HTMLSession()

paths = [
    # 'https://github.com/airladon/FigureOne/tree/main',
    'https://github.com/airladon/FigureOne/tree/main/docs/examples',
    'https://github.com/airladon/FigureOne/tree/main/docs/tutorials',
]

api_reference = session.get('https://airladon.github.io/FigureOne/api/')


def check_api(api_path: str):
    api_id = re.search('#(.*)$', api_path).group(1)
    check = api_reference.html.find(f'#{api_id}')
    if (check):
        return True
    return False


for path in paths:
    r = session.get(path)
    links = r.html.find('#readme', first=True).absolute_links
    failed = []
    print()
    print(f'Testing: {path}')
    for link in links:
        link_r = requests.get(link)
        if link_r.status_code != 200:
            failed.append(link)
            print(f' FAILED: {link}')
        else:
            print(f' OK: {link}')
    if len(failed) > 0:
        print()
        print(f'Links failed at: {path}')
        for failed_link in failed:
            print(f' {failed_link}')

folders = r.html.find('.js-active-navigation-container', first=True).absolute_links
test_r = requests.get("https://airladon.github.io/FigureOne/api/#obj_rectangle")
r = session.get('https://github.com/airladon/FigureOne/tree/main/docs/tutorials')
folders = r.html.find('.js-details-container')
avg_line_length: 26.923077
max_line_length: 87
alphanum_fraction: 0.642143
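The script's core check is simple: pull every link out of a page's README block and treat anything that does not come back as HTTP 200 as broken. The same check, reduced to plain requests with illustrative URLs (these are not taken from the script):

import requests

links = ["https://example.com/", "https://example.com/does-not-exist"]  # illustrative
failed = [link for link in links if requests.get(link).status_code != 200]
for link in failed:
    print(f' FAILED: {link}')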
hexsha: acef8bca1585e4bcf0e473a3dad372aa10793d23
size: 429
ext: py
lang: Python
max_stars_repo_path: scripts/sensitivity_analysis/sensitivity_erp_cfm.py
max_stars_repo_name: NeLy-EPFL/NeuroMechFly
max_stars_repo_head_hexsha: 69f9e2d86caac561a50e3e060d007dd50a20d481
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 12
max_stars_repo_stars_event_min_datetime: 2021-05-07T15:27:11.000Z
max_stars_repo_stars_event_max_datetime: 2022-01-29T04:26:36.000Z
max_issues_repo_path: scripts/sensitivity_analysis/sensitivity_erp_cfm.py
max_issues_repo_name: NeLy-EPFL/NeuroMechFly
max_issues_repo_head_hexsha: 69f9e2d86caac561a50e3e060d007dd50a20d481
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 15
max_issues_repo_issues_event_min_datetime: 2021-05-07T14:58:04.000Z
max_issues_repo_issues_event_max_datetime: 2021-11-10T21:30:58.000Z
max_forks_repo_path: scripts/sensitivity_analysis/sensitivity_erp_cfm.py
max_forks_repo_name: NeLy-EPFL/NeuroMechFly
max_forks_repo_head_hexsha: 69f9e2d86caac561a50e3e060d007dd50a20d481
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2022-01-13T16:08:49.000Z
max_forks_repo_forks_event_max_datetime: 2022-01-13T16:08:49.000Z
content:

#!/usr/bin/env python
""" Script to run kinematic replay. """
import os


def main():
    for erp in range(11):
        erp_val = float(erp/10)
        for cfm in range(11):
            cfm_val = float(cfm)
            command = f"run_kinematic_replay -erp {erp_val} -cfm {cfm_val}"
            print("Running: " + command)
            os.system(command)


if __name__ == "__main__":
    """ Main """
    main()
avg_line_length: 21.45
max_line_length: 75
alphanum_fraction: 0.526807
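The loop above sweeps a full 11 x 11 grid: ERP from 0.0 to 1.0 in steps of 0.1 and CFM from 0.0 to 10.0 in integer steps, 121 replays in total. A variant of the same sweep using subprocess.run (an alternative to the script's os.system, not what the script itself does) would stop on the first failed replay:

import subprocess

for erp in range(11):
    for cfm in range(11):
        cmd = ["run_kinematic_replay", "-erp", str(erp / 10), "-cfm", str(float(cfm))]
        print("Running: " + " ".join(cmd))
        subprocess.run(cmd, check=True)  # raises CalledProcessError on failure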
hexsha: acef8d4a5b10f8a7376c87c9bc52a3cab65bd428
size: 49,036
ext: py
lang: Python
max_stars_repo_path: tests/test_project.py
max_stars_repo_name: PatMyron/cloudformation-cli
max_stars_repo_head_hexsha: 168c28980efc273e2e1a57cbc9a667f2c04ab204
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/test_project.py
max_issues_repo_name: PatMyron/cloudformation-cli
max_issues_repo_head_hexsha: 168c28980efc273e2e1a57cbc9a667f2c04ab204
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/test_project.py
max_forks_repo_name: PatMyron/cloudformation-cli
max_forks_repo_head_hexsha: 168c28980efc273e2e1a57cbc9a667f2c04ab204
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:

# fixture and parameter have the same name
# pylint: disable=redefined-outer-name,useless-super-delegation,protected-access
# pylint: disable=too-many-lines
import json
import logging
import os
import random
import string
import zipfile
from contextlib import contextmanager
from io import StringIO
from pathlib import Path
from shutil import copyfile
from unittest.mock import ANY, MagicMock, patch

import pytest
import yaml
from botocore.exceptions import ClientError, WaiterError

from rpdk.core.data_loaders import resource_json, resource_stream
from rpdk.core.exceptions import (
    DownstreamError,
    FragmentValidationError,
    InternalError,
    InvalidProjectError,
    SpecValidationError,
)
from rpdk.core.plugin_base import LanguagePlugin
from rpdk.core.project import (
    CFN_METADATA_FILENAME,
    LAMBDA_RUNTIMES,
    OVERRIDES_FILENAME,
    SCHEMA_UPLOAD_FILENAME,
    SETTINGS_FILENAME,
    Project,
    escape_markdown,
)
from rpdk.core.test import empty_override
from rpdk.core.upload import Uploader

from .utils import CONTENTS_UTF8, UnclosingBytesIO

ARTIFACT_TYPE_RESOURCE = "RESOURCE"
ARTIFACT_TYPE_MODULE = "MODULE"
LANGUAGE = "BQHDBC"
TYPE_NAME = "AWS::Color::Red"
MODULE_TYPE_NAME = "AWS::Color::Red::MODULE"
REGION = "us-east-1"
ENDPOINT = "cloudformation.beta.com"
RUNTIME = random.choice(list(LAMBDA_RUNTIMES))
BLANK_CLIENT_ERROR = {"Error": {"Code": "", "Message": ""}}
LOG = logging.getLogger(__name__)
REGISTRATION_TOKEN = "foo"
TYPE_ARN = "arn:aws:cloudformation:us-east-1:123456789012:type/resource/Foo-Bar-Foo"
TYPE_VERSION_ARN = (
    "arn:aws:cloudformation:us-east-1:123456789012:type/resource/Foo-Bar-Foo/00000001"
)
DESCRIBE_TYPE_COMPLETE_RETURN = {
    "TypeArn": TYPE_ARN,
    "TypeVersionArn": TYPE_VERSION_ARN,
    "Description": "Some detailed progress message.",
    "ProgressStatus": "COMPLETE",
}
DESCRIBE_TYPE_FAILED_RETURN = {
    "Description": "Some detailed progress message.",
    "ProgressStatus": "FAILED",
}
CREATE_INPUTS_FILE = "inputs/inputs_1_create.json"
UPDATE_INPUTS_FILE = "inputs/inputs_1_update.json"
INVALID_INPUTS_FILE = "inputs/inputs_1_invalid.json"
PLUGIN_INFORMATION = {
    "plugin-version": "2.1.3",
    "plugin-tool-version": "2.0.8",
    "plugin-name": "java",
}


@pytest.mark.parametrize("string", ["^[a-z]$", "([a-z])", ".*", "*."])
def test_escape_markdown_with_regex_names(string):
    assert escape_markdown(string).startswith("\\")


def test_escape_markdown_with_empty_string():
    assert escape_markdown("") == ""
    assert escape_markdown(None) is None


@pytest.mark.parametrize("string", ["Hello", "SomeProperty"])
def test_escape_markdown(string):
    assert escape_markdown(string) == string


@pytest.fixture
def project(tmpdir):
    unique_dir = "".join(random.choices(string.ascii_uppercase, k=12))
    return Project(root=tmpdir.mkdir(unique_dir))


@contextmanager
def patch_settings(project, data):
    with patch.object(project, "settings_path", autospec=True) as mock_path:
        mock_path.open.return_value.__enter__.return_value = StringIO(data)
        yield mock_path.open


def test_load_settings_invalid_json(project):
    with patch_settings(project, "") as mock_open:
        with pytest.raises(InvalidProjectError):
            project.load_settings()
    mock_open.assert_called_once_with("r", encoding="utf-8")


def test_load_settings_invalid_settings(project):
    with patch_settings(project, "{}") as mock_open:
        with pytest.raises(InvalidProjectError):
            project.load_settings()
    mock_open.assert_called_once_with("r", encoding="utf-8")


def test_load_settings_invalid_modules_settings(project):
    with patch_settings(project, '{"artifact_type": "MODULE"}') as mock_open:
        with pytest.raises(InvalidProjectError):
            project.load_settings()
    mock_open.assert_called_once_with("r", encoding="utf-8")


def test_load_settings_valid_json_for_resource(project):
    plugin = object()
    data = json.dumps(
        {
            "artifact_type": "RESOURCE",
            "typeName": TYPE_NAME,
            "language": LANGUAGE,
            "runtime": RUNTIME,
            "entrypoint": None,
            "testEntrypoint": None,
            "futureProperty": "value",
        }
    )
    patch_load = patch(
        "rpdk.core.project.load_plugin", autospec=True, return_value=plugin
    )

    with patch_settings(project, data) as mock_open, patch_load as mock_load:
        project.load_settings()

    mock_open.assert_called_once_with("r", encoding="utf-8")
    mock_load.assert_called_once_with(LANGUAGE)
    assert project.type_info == ("AWS", "Color", "Red")
    assert project.type_name == TYPE_NAME
    assert project.language == LANGUAGE
    assert project.artifact_type == ARTIFACT_TYPE_RESOURCE
    assert project._plugin is plugin
    assert project.settings == {}


def test_load_settings_valid_json_for_resource_backward_compatible(project):
    plugin = object()
    data = json.dumps(
        {
            "typeName": TYPE_NAME,
            "language": LANGUAGE,
            "runtime": RUNTIME,
            "entrypoint": None,
            "testEntrypoint": None,
        }
    )
    patch_load = patch(
        "rpdk.core.project.load_plugin", autospec=True, return_value=plugin
    )

    with patch_settings(project, data) as mock_open, patch_load as mock_load:
        project.load_settings()

    mock_open.assert_called_once_with("r", encoding="utf-8")
    mock_load.assert_called_once_with(LANGUAGE)
    assert project.type_info == ("AWS", "Color", "Red")
    assert project.type_name == TYPE_NAME
    assert project.language == LANGUAGE
    assert project.artifact_type == ARTIFACT_TYPE_RESOURCE
    assert project._plugin is plugin
    assert project.settings == {}


def test_load_settings_valid_json_for_module(project):
    plugin = object()
    data = json.dumps(
        {
            "artifact_type": "MODULE",
            "typeName": MODULE_TYPE_NAME,
        }
    )
    patch_load = patch(
        "rpdk.core.project.load_plugin", autospec=True, return_value=plugin
    )

    with patch_settings(project, data) as mock_open, patch_load as mock_load:
        project.load_settings()

    mock_open.assert_called_once_with("r", encoding="utf-8")
    mock_load.assert_not_called()
    assert project.type_info == ("AWS", "Color", "Red", "MODULE")
    assert project.type_name == MODULE_TYPE_NAME
    assert project.language is None
    assert project.artifact_type == ARTIFACT_TYPE_MODULE
    assert project._plugin is None
    assert project.settings == {}


def test_generate_for_modules_succeeds(project):
    project.type_info = ("AWS", "Color", "Red", "MODULE")
    project.artifact_type = ARTIFACT_TYPE_MODULE
    project.generate()
    project.generate_docs()


def test_load_schema_settings_not_loaded(project):
    with pytest.raises(InternalError):
        project.load_schema()


def test_load_schema_example(project):
    project.type_name = "AWS::Color::Blue"
    project._write_example_schema()
    project.load_schema()


def test_overwrite():
    mock_path = MagicMock(spec=Path)
    Project.overwrite(mock_path, LANGUAGE)

    mock_path.open.assert_called_once_with("w", encoding="utf-8")
    mock_f = mock_path.open.return_value.__enter__.return_value
    mock_f.write.assert_called_once_with(LANGUAGE)


def test_safewrite_overwrite(project):
    path = object()
    contents = object()

    patch_attr = patch.object(project, "overwrite_enabled", True)
    patch_meth = patch.object(project, "overwrite", autospec=True)
    with patch_attr, patch_meth as mock_overwrite:
        project.safewrite(path, contents)

    mock_overwrite.assert_called_once_with(path, contents)


def test_safewrite_doesnt_exist(project, tmpdir):
    path = Path(tmpdir.join("test")).resolve()

    with patch.object(project, "overwrite_enabled", False):
        project.safewrite(path, CONTENTS_UTF8)

    with path.open("r", encoding="utf-8") as f:
        assert f.read() == CONTENTS_UTF8


def test_safewrite_exists(project, tmpdir, caplog):
    caplog.set_level(logging.INFO)
    path = Path(tmpdir.join("test")).resolve()

    with path.open("w", encoding="utf-8") as f:
        f.write(CONTENTS_UTF8)

    with patch.object(project, "overwrite_enabled", False):
        project.safewrite(path, CONTENTS_UTF8)

    last_record = caplog.records[-1]
    assert last_record.levelname == "INFO"
    assert str(path) in last_record.message


def test_generate_no_handlers(project):
    project.schema = {}
    mock_plugin = MagicMock(spec=["generate"])
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()
        project.generate_docs()

    mock_plugin.generate.assert_called_once_with(project)


@pytest.mark.parametrize(
    "schema_path,path",
    [
        ("data/schema/valid/valid_no_type.json", "generate_with_no_type_defined"),
        (
            "data/schema/valid/valid_type_complex.json",
            "generate_with_docs_type_complex",
        ),
        (
            "data/schema/valid/valid_pattern_properties.json",
            "generate_with_docs_pattern_properties",
        ),
        (
            "data/schema/valid/valid_no_properties.json",
            "generate_with_docs_no_properties",
        ),
        (
            "data/schema/valid/valid_nested_property_object.json",
            "generate_with_docs_nested_object",
        ),
        (
            "data/schema/valid/valid_type_composite_primary_identifier.json",
            "generate_with_docs_composite_primary_identifier",
        ),
    ],
)
def test_generate_with_docs(project, tmp_path_factory, schema_path, path):
    project.schema = resource_json(__name__, schema_path)
    project.type_name = "AWS::Color::Red"
    # tmpdir conflicts with other tests, make a unique one
    project.root = tmp_path_factory.mktemp(path)
    mock_plugin = MagicMock(spec=["generate"])
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()
        project.generate_docs()
    mock_plugin.generate.assert_called_once_with(project)

    docs_dir = project.root / "docs"
    readme_file = project.root / "docs" / "README.md"

    assert docs_dir.is_dir()
    assert readme_file.is_file()
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()

    readme_contents = readme_file.read_text(encoding="utf-8")
    assert project.type_name in readme_contents


def test_generate_docs_with_multityped_property(project, tmp_path_factory):
    project.schema = resource_json(
        __name__, "data/schema/valid/valid_multityped_property.json"
    )
    project.type_name = "AWS::Color::Red"
    # tmpdir conflicts with other tests, make a unique one
    project.root = tmp_path_factory.mktemp("generate_with_docs_type_complex")
    mock_plugin = MagicMock(spec=["generate"])
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()
        project.generate_docs()
    mock_plugin.generate.assert_called_once_with(project)

    docs_dir = project.root / "docs"
    readme_file = project.root / "docs" / "README.md"

    assert docs_dir.is_dir()
    assert readme_file.is_file()
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()

    readme_contents = readme_file.read_text(encoding="utf-8")
    readme_contents_target = resource_stream(
        __name__, "data/schema/target_output/multityped.md"
    )
    read_me_stripped = readme_contents.strip().replace(" ", "")
    read_me_target_stripped = readme_contents_target.read().strip().replace(" ", "")

    LOG.debug("read_me_stripped %s", read_me_stripped)
    LOG.debug("read_me_target_stripped %s", read_me_target_stripped)

    assert project.type_name in readme_contents
    assert read_me_stripped == read_me_target_stripped


def test_generate_docs_with_multiref_property(project, tmp_path_factory):
    project.schema = resource_json(
        __name__, "data/schema/valid/valid_multiref_property.json"
    )
    project.type_name = "AWS::Color::Red"
    # tmpdir conflicts with other tests, make a unique one
    project.root = tmp_path_factory.mktemp("generate_with_docs_type_complex")
    mock_plugin = MagicMock(spec=["generate"])
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()
        project.generate_docs()
    mock_plugin.generate.assert_called_once_with(project)

    docs_dir = project.root / "docs"
    readme_file = project.root / "docs" / "README.md"

    assert docs_dir.is_dir()
    assert readme_file.is_file()
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()

    readme_contents = readme_file.read_text(encoding="utf-8")
    readme_contents_target = resource_stream(
        __name__, "data/schema/target_output/multiref.md"
    )
    read_me_stripped = readme_contents.strip().replace(" ", "")
    read_me_target_stripped = readme_contents_target.read().strip().replace(" ", "")

    LOG.debug("read_me_stripped %s", read_me_stripped)
    LOG.debug("read_me_target_stripped %s", read_me_target_stripped)

    assert project.type_name in readme_contents
    assert read_me_stripped == read_me_target_stripped


def test_generate_with_docs_invalid_property_type(project, tmp_path_factory):
    project.schema = resource_json(
        __name__, "data/schema/invalid/invalid_property_type_invalid.json"
    )
    project.type_name = "AWS::Color::Red"
    # tmpdir conflicts with other tests, make a unique one
    project.root = tmp_path_factory.mktemp("generate_with_docs_invalid_property_type")
    mock_plugin = MagicMock(spec=["generate"])
    with patch.object(project, "_plugin", mock_plugin):
        # skip actual generation
        project.generate_docs()

    docs_dir = project.root / "docs"
    readme_file = project.root / "docs" / "README.md"

    assert docs_dir.is_dir()
    assert readme_file.is_file()
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()
        project.generate_docs()

    readme_contents = readme_file.read_text(encoding="utf-8")
    assert project.type_name in readme_contents


def test_generate_with_docs_no_type(project, tmp_path_factory):
    project.schema = {"properties": {}}
    # tmpdir conflicts with other tests, make a unique one
    project.root = tmp_path_factory.mktemp("generate_with_docs_no_type")
    mock_plugin = MagicMock(spec=["generate"])
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()
        project.generate_docs()
    mock_plugin.generate.assert_called_once_with(project)

    docs_dir = project.root / "docs"
    assert not docs_dir.is_dir()


def test_generate_with_docs_twice(project, tmp_path_factory):
    project.schema = {"properties": {}}
    project.type_name = "AWS::Color::Red"
    # tmpdir conflicts with other tests, make a unique one
    project.root = tmp_path_factory.mktemp("generate_with_docs_twice")
    mock_plugin = MagicMock(spec=["generate"])
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()
        project.generate_docs()
    mock_plugin.generate.assert_called_once_with(project)

    docs_dir = project.root / "docs"
    readme_file = docs_dir / "README.md"

    assert docs_dir.is_dir()
    assert readme_file.is_file()

    with patch.object(project, "_plugin", mock_plugin):
        project.generate()
        project.generate_docs()

    assert docs_dir.is_dir()
    assert readme_file.is_file()
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()
        project.generate_docs()

    readme_contents = readme_file.read_text(encoding="utf-8")
    assert project.type_name in readme_contents


def test_generate_handlers(project, tmpdir):
    project.type_name = "Test::Handler::Test"
    expected_actions = {"createAction", "readAction"}
    project.schema = {
        "handlers": {
            "create": {"permissions": ["createAction", "readAction"]},
            "read": {"permissions": ["readAction", ""]},
        }
    }
    project.root = tmpdir
    mock_plugin = MagicMock(spec=["generate"])
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()

    role_path = project.root / "resource-role.yaml"
    with role_path.open("r", encoding="utf-8") as f:
        template = yaml.safe_load(f.read())

    action_list = template["Resources"]["ExecutionRole"]["Properties"]["Policies"][0][
        "PolicyDocument"
    ]["Statement"][0]["Action"]

    assert all(action in expected_actions for action in action_list)
    assert len(action_list) == len(expected_actions)
    assert template["Outputs"]["ExecutionRoleArn"]
    mock_plugin.generate.assert_called_once_with(project)


@pytest.mark.parametrize(
    "schema",
    ({"handlers": {"create": {"permissions": [""]}}}, {"handlers": {"create": {}}}),
)
def test_generate_handlers_deny_all(project, tmpdir, schema):
    project.type_name = "Test::Handler::Test"
    project.schema = schema
    project.root = tmpdir
    mock_plugin = MagicMock(spec=["generate"])
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()

    role_path = project.root / "resource-role.yaml"
    with role_path.open("r", encoding="utf-8") as f:
        template = yaml.safe_load(f.read())

    statement = template["Resources"]["ExecutionRole"]["Properties"]["Policies"][0][
        "PolicyDocument"
    ]["Statement"][0]
    assert statement["Effect"] == "Deny"
    assert statement["Action"][0] == "*"
    mock_plugin.generate.assert_called_once_with(project)


@pytest.mark.parametrize(
    "schema,result",
    (
        ({"handlers": {"create": {"timeoutInMinutes": 720}}}, 43200),
        ({"handlers": {"create": {"timeoutInMinutes": 2}}}, 3600),
        ({"handlers": {"create": {"timeoutInMinutes": 90}}}, 6300),
        (
            {
                "handlers": {
                    "create": {"timeoutInMinutes": 70},
                    "update": {"timeoutInMinutes": 90},
                }
            },
            6300,
        ),
        ({"handlers": {"create": {}}}, 8400),
        ({"handlers": {"create": {"timeoutInMinutes": 90}, "read": {}}}, 8400),
    ),
)
def test_generate_handlers_role_session_timeout(project, tmpdir, schema, result):
    project.type_name = "Test::Handler::Test"
    project.schema = schema
    project.root = tmpdir
    mock_plugin = MagicMock(spec=["generate"])
    with patch.object(project, "_plugin", mock_plugin):
        project.generate()

    role_path = project.root / "resource-role.yaml"
    with role_path.open("r", encoding="utf-8") as f:
        template = yaml.safe_load(f.read())

    max_session_timeout = template["Resources"]["ExecutionRole"]["Properties"][
        "MaxSessionDuration"
    ]
    assert max_session_timeout == result

    mock_plugin.generate.assert_called_once_with(project)


def test_init_resource(project):
    type_name = "AWS::Color::Red"

    mock_plugin = MagicMock(spec=["init"])
    patch_load_plugin = patch(
        "rpdk.core.project.load_plugin", autospec=True, return_value=mock_plugin
    )

    with patch_load_plugin as mock_load_plugin:
        project.init(type_name, LANGUAGE)

    mock_load_plugin.assert_called_once_with(LANGUAGE)
    mock_plugin.init.assert_called_once_with(project)

    assert project.type_info == ("AWS", "Color", "Red")
    assert project.type_name == type_name
    assert project.language == LANGUAGE
    assert project.artifact_type == ARTIFACT_TYPE_RESOURCE
    assert project._plugin is mock_plugin
    assert project.settings == {}

    with project.settings_path.open("r", encoding="utf-8") as f:
        assert json.load(f)

    # ends with newline
    with project.settings_path.open("rb") as f:
        f.seek(-1, os.SEEK_END)
        assert f.read() == b"\n"

    with project.schema_path.open("r", encoding="utf-8") as f:
        assert json.load(f)

    for file_inputs in (
        "inputs_1_create.json",
        "inputs_1_update.json",
        "inputs_1_invalid.json",
    ):
        path_file = project.example_inputs_path / file_inputs
        with path_file.open("r", encoding="utf-8") as f:
            assert json.load(f)

    # ends with newline
    with project.schema_path.open("rb") as f:
        f.seek(-1, os.SEEK_END)
        assert f.read() == b"\n"


def test_init_module(project):
    type_name = "AWS::Color::Red"

    mock_plugin = MagicMock(spec=["init"])
    patch_load_plugin = patch(
        "rpdk.core.project.load_plugin", autospec=True, return_value=mock_plugin
    )

    with patch_load_plugin as mock_load_plugin:
        project.init_module(type_name)

    mock_load_plugin.assert_not_called()
    mock_plugin.init.assert_not_called()

    assert project.type_info == ("AWS", "Color", "Red")
    assert project.type_name == type_name
    assert project.language is None
    assert project.artifact_type == ARTIFACT_TYPE_MODULE
    assert project._plugin is None
    assert project.settings == {}

    with project.settings_path.open("r", encoding="utf-8") as f:
        assert json.load(f)

    # ends with newline
    with project.settings_path.open("rb") as f:
        f.seek(-1, os.SEEK_END)
        assert f.read() == b"\n"


def test_load_invalid_schema(project):
    patch_settings = patch.object(project, "load_settings")
    patch_schema = patch.object(
        project, "load_schema", side_effect=SpecValidationError("")
    )
    with patch_settings as mock_settings, patch_schema as mock_schema, pytest.raises(
        InvalidProjectError
    ) as excinfo:
        project.load()

    mock_settings.assert_called_once_with()
    mock_schema.assert_called_once_with()

    assert "invalid" in str(excinfo.value)


def test_load_module_project_succeeds(project, tmp_path_factory):
    project.artifact_type = "MODULE"
    project.type_name = "Unit::Test::Malik::MODULE"
    project.root = tmp_path_factory.mktemp("load_module_test")
    os.mkdir(os.path.join(project.root, "fragments"))
    copyfile(
        os.path.join(
            os.path.dirname(__file__),
            "data/sample_fragments/fragments/valid_fragment.json",
        ),
        os.path.join(project.root, "fragments/valid_fragment.json"),
    )
    patch_load_settings = patch.object(
        project, "load_settings", return_value={"artifact_type": "MODULE"}
    )
    assert not os.path.exists(os.path.join(project.root, "schema.json"))
    with patch_load_settings:
        project.load()
    assert os.path.exists(os.path.join(project.root, "schema.json"))


def test_load_resource_succeeds(project):
    project.artifact_type = "Resource"
    project.type_name = "Unit::Test::Resource"
    patch_load_settings = patch.object(
        project, "load_settings", return_value={"artifact_type": "RESOURCE"}
    )
    project._write_example_schema()
    with patch_load_settings:
        project.load()


def test_load_module_project_with_invalid_fragments(project):
    project.artifact_type = "MODULE"
    project.type_name = "Unit::Test::Malik::MODULE"
    patch_load_settings = patch.object(
        project, "load_settings", return_value={"artifact_type": "MODULE"}
    )
    patch_validate = patch.object(
        project, "_validate_fragments", side_effect=FragmentValidationError
    )
    with patch_load_settings, patch_validate, pytest.raises(InvalidProjectError):
        project.load()


def test_schema_not_found(project):
    patch_settings = patch.object(project, "load_settings")
    patch_schema = patch.object(project, "load_schema", side_effect=FileNotFoundError)
    with patch_settings as mock_settings, patch_schema as mock_schema, pytest.raises(
        InvalidProjectError
    ) as excinfo:
        project.load()

    mock_settings.assert_called_once_with()
    mock_schema.assert_called_once_with()

    assert "not found" in str(excinfo.value)


def test_settings_not_found(project):
    patch_settings = patch.object(
        project, "load_settings", side_effect=FileNotFoundError
    )
    patch_schema = patch.object(project, "load_schema")

    with patch_settings as mock_settings, patch_schema as mock_schema, pytest.raises(
        InvalidProjectError
    ) as excinfo:
        project.load()

    mock_settings.assert_called_once_with()
    mock_schema.assert_not_called()

    assert "not found" in str(excinfo.value)
    assert "init" in str(excinfo.value)


def create_input_file(base):
    path = base / "inputs"
    os.mkdir(path, mode=0o777)

    path_create = base / CREATE_INPUTS_FILE
    with path_create.open("w", encoding="utf-8") as f:
        f.write("{}")

    path_update = base / UPDATE_INPUTS_FILE
    with path_update.open("w", encoding="utf-8") as f:
        f.write("{}")

    path_invalid = base / INVALID_INPUTS_FILE
    with path_invalid.open("w", encoding="utf-8") as f:
        f.write("{}")


# pylint: disable=too-many-arguments, too-many-locals
def test_submit_dry_run(project):
    project.type_name = TYPE_NAME
    project.runtime = RUNTIME
    project.language = LANGUAGE
    project.artifact_type = ARTIFACT_TYPE_RESOURCE
    zip_path = project.root / "test.zip"

    with project.schema_path.open("w", encoding="utf-8") as f:
        f.write(CONTENTS_UTF8)

    with project.overrides_path.open("w", encoding="utf-8") as f:
        f.write(json.dumps(empty_override()))

    create_input_file(project.root)

    project.write_settings()

    patch_plugin = patch.object(project, "_plugin", spec=LanguagePlugin)
    patch_upload = patch.object(project, "_upload", autospec=True)
    patch_path = patch("rpdk.core.project.Path", return_value=zip_path)
    patch_temp = patch("rpdk.core.project.TemporaryFile", autospec=True)

    # fmt: off
    # these context managers can't be wrapped by black, but it removes the \
    with patch_plugin as mock_plugin, patch_path as mock_path, \
            patch_temp as mock_temp, patch_upload as mock_upload:
        mock_plugin.get_plugin_information = MagicMock(return_value=PLUGIN_INFORMATION)
        project.submit(
            True,
            endpoint_url=ENDPOINT,
            region_name=REGION,
            role_arn=None,
            use_role=True,
            set_default=False
        )
    # fmt: on

    mock_temp.assert_not_called()
    mock_path.assert_called_once_with("{}.zip".format(project.hypenated_name))
    mock_plugin.package.assert_called_once_with(project, ANY)
    mock_upload.assert_not_called()

    with zipfile.ZipFile(zip_path, mode="r") as zip_file:
        assert set(zip_file.namelist()) == {
            SCHEMA_UPLOAD_FILENAME,
            SETTINGS_FILENAME,
            OVERRIDES_FILENAME,
            CREATE_INPUTS_FILE,
            INVALID_INPUTS_FILE,
            UPDATE_INPUTS_FILE,
            CFN_METADATA_FILENAME,
        }
        schema_contents = zip_file.read(SCHEMA_UPLOAD_FILENAME).decode("utf-8")
        assert schema_contents == CONTENTS_UTF8
        settings = json.loads(zip_file.read(SETTINGS_FILENAME).decode("utf-8"))
        assert settings["runtime"] == RUNTIME
        overrides = json.loads(zip_file.read(OVERRIDES_FILENAME).decode("utf-8"))
        assert "CREATE" in overrides
        # https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile.testzip
        input_create = json.loads(zip_file.read(CREATE_INPUTS_FILE).decode("utf-8"))
        assert input_create == {}
        input_invalid = json.loads(zip_file.read(UPDATE_INPUTS_FILE).decode("utf-8"))
        assert input_invalid == {}
        input_update = json.loads(zip_file.read(INVALID_INPUTS_FILE).decode("utf-8"))
        assert input_update == {}
        assert zip_file.testzip() is None
        metadata_info = json.loads(zip_file.read(CFN_METADATA_FILENAME).decode("utf-8"))
        assert "cli-version" in metadata_info
        assert "plugin-version" in metadata_info
        assert "plugin-tool-version" in metadata_info


def test_submit_dry_run_modules(project):
    project.type_name = MODULE_TYPE_NAME
    project.runtime = RUNTIME
    project.language = LANGUAGE
    project.artifact_type = ARTIFACT_TYPE_MODULE
    project.fragment_dir = project.root / "fragments"
    zip_path = project.root / "test.zip"
    schema_path = project.root / "schema.json"
    fragment_path = project.root / "fragments" / "fragment.json"

    with project.schema_path.open("w", encoding="utf-8") as f:
        f.write(CONTENTS_UTF8)

    with schema_path.open("w", encoding="utf-8") as f:
        f.write(CONTENTS_UTF8)

    if not os.path.exists(project.root / "fragments"):
        os.mkdir(project.root / "fragments")

    with fragment_path.open("w", encoding="utf-8") as f:
        f.write(CONTENTS_UTF8)

    with project.overrides_path.open("w", encoding="utf-8") as f:
        f.write(json.dumps(empty_override()))

    project.write_settings()

    patch_plugin = patch.object(project, "_plugin", spec=LanguagePlugin)
    patch_upload = patch.object(project, "_upload", autospec=True)
    patch_path = patch("rpdk.core.project.Path", return_value=zip_path)
    patch_temp = patch("rpdk.core.project.TemporaryFile", autospec=True)

    # fmt: off
    # these context managers can't be wrapped by black, but it removes the \
    with patch_plugin as mock_plugin, patch_path as mock_path, \
            patch_temp as mock_temp, patch_upload as mock_upload:
        project.submit(
            True,
            endpoint_url=ENDPOINT,
            region_name=REGION,
            role_arn=None,
            use_role=True,
            set_default=False
        )
    # fmt: on

    mock_temp.assert_not_called()
    mock_path.assert_called_once_with("{}.zip".format(project.hypenated_name))
    mock_plugin.package.assert_not_called()
    mock_upload.assert_not_called()

    fragment_file_name = "fragments/fragment.json"
    with zipfile.ZipFile(zip_path, mode="r") as zip_file:
        assert set(zip_file.namelist()) == {
            fragment_file_name,
            SCHEMA_UPLOAD_FILENAME,
            SETTINGS_FILENAME,
            OVERRIDES_FILENAME,
        }
        schema_contents = zip_file.read(SCHEMA_UPLOAD_FILENAME).decode("utf-8")
        assert schema_contents == CONTENTS_UTF8
        overrides = json.loads(zip_file.read(OVERRIDES_FILENAME).decode("utf-8"))
        assert "CREATE" in overrides
        # https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile.testzip
        assert zip_file.testzip() is None


def test_submit_live_run(project):
    project.type_name = TYPE_NAME
    project.runtime = RUNTIME
    project.language = LANGUAGE
    project.artifact_type = ARTIFACT_TYPE_RESOURCE

    with project.schema_path.open("w", encoding="utf-8") as f:
        f.write(CONTENTS_UTF8)

    project.write_settings()

    temp_file = UnclosingBytesIO()

    patch_plugin = patch.object(project, "_plugin", spec=LanguagePlugin)
    patch_upload = patch.object(project, "_upload", autospec=True)
    patch_path = patch("rpdk.core.project.Path", autospec=True)
    patch_temp = patch("rpdk.core.project.TemporaryFile", return_value=temp_file)

    # fmt: off
    # these context managers can't be wrapped by black, but it removes the \
    with patch_plugin as mock_plugin, patch_path as mock_path, \
            patch_temp as mock_temp, patch_upload as mock_upload:
        project.submit(
            False,
            endpoint_url=ENDPOINT,
            region_name=REGION,
            role_arn=None,
            use_role=True,
            set_default=True
        )
    # fmt: on

    mock_path.assert_not_called()
    mock_temp.assert_called_once_with("w+b")
    mock_plugin.package.assert_called_once_with(project, ANY)

    # zip file construction is tested by the dry-run test

    assert temp_file.tell() == 0  # file was rewound before upload
    mock_upload.assert_called_once_with(
        temp_file,
        region_name=REGION,
        endpoint_url=ENDPOINT,
        role_arn=None,
        use_role=True,
        set_default=True,
    )

    assert temp_file._was_closed
    temp_file._close()


def test_submit_live_run_for_module(project):
    project.type_name = MODULE_TYPE_NAME
    project.runtime = RUNTIME
    project.language = LANGUAGE
    project.artifact_type = ARTIFACT_TYPE_MODULE

    with project.schema_path.open("w", encoding="utf-8") as f:
        f.write(CONTENTS_UTF8)

    project.write_settings()

    temp_file = UnclosingBytesIO()

    patch_plugin = patch.object(project, "_plugin", spec=LanguagePlugin)
    patch_path = patch("rpdk.core.project.Path", autospec=True)
    patch_temp = patch("rpdk.core.project.TemporaryFile", return_value=temp_file)

    # fmt: off
    # these context managers can't be wrapped by black, but it removes the \
    with patch_plugin as mock_plugin, patch_path as mock_path, \
            patch_temp as mock_temp, \
            pytest.raises(InternalError):
        project.submit(
            False,
            endpoint_url=ENDPOINT,
            region_name=REGION,
            role_arn=None,
            use_role=True,
            set_default=True
        )
    # fmt: on

    mock_path.assert_not_called()
    mock_temp.assert_called_once_with("w+b")
    mock_plugin.package.assert_not_called()
    temp_file._close()


def test__upload_good_path_create_role_and_set_default(project):
    project.type_name = TYPE_NAME
    project.artifact_type = ARTIFACT_TYPE_RESOURCE
    project.schema = {"handlers": {}}

    mock_cfn_client = MagicMock(spec=["register_type"])
    mock_cfn_client.register_type.return_value = {"RegistrationToken": "foo"}
    fileobj = object()

    patch_sdk = patch("rpdk.core.project.create_sdk_session", autospec=True)
    patch_uploader = patch.object(Uploader, "upload", return_value="url")
    patch_exec_role_arn = patch.object(
        Uploader, "create_or_update_role", return_value="some-execution-role-arn"
    )
    patch_logging_role_arn = patch.object(
        Uploader, "get_log_delivery_role_arn", return_value="some-log-role-arn"
    )
    patch_uuid = patch("rpdk.core.project.uuid4", autospec=True, return_value="foo")
    patch_wait = patch.object(project, "_wait_for_registration", autospec=True)

    with patch_sdk as mock_sdk, patch_uploader as mock_upload_method, patch_logging_role_arn as mock_role_arn_method, patch_exec_role_arn as mock_exec_role_method:  # noqa: B950 as it conflicts with formatting rules # pylint: disable=C0301
        mock_sdk.return_value.client.side_effect = [mock_cfn_client, MagicMock()]
        with patch_uuid as mock_uuid, patch_wait as mock_wait:
            project._upload(
                fileobj,
                endpoint_url=None,
                region_name=None,
                role_arn=None,
                use_role=True,
                set_default=True,
            )

    mock_sdk.assert_called_once_with(None)
    mock_exec_role_method.assert_called_once_with(
        project.root / "resource-role.yaml", project.hypenated_name
    )
    mock_upload_method.assert_called_once_with(project.hypenated_name, fileobj)
    mock_role_arn_method.assert_called_once_with()
    mock_uuid.assert_called_once_with()
    mock_cfn_client.register_type.assert_called_once_with(
        Type="RESOURCE",
        TypeName=project.type_name,
        SchemaHandlerPackage="url",
        ClientRequestToken=mock_uuid.return_value,
        LoggingConfig={
            "LogRoleArn": "some-log-role-arn",
            "LogGroupName": "aws-color-red-logs",
        },
        ExecutionRoleArn="some-execution-role-arn",
    )
    mock_wait.assert_called_once_with(mock_cfn_client, "foo", True)


@pytest.mark.parametrize(
    ("use_role,expected_additional_args"),
    [(True, {"ExecutionRoleArn": "someArn"}), (False, {})],
)
def test__upload_good_path_skip_role_creation(
    project, use_role, expected_additional_args
):
    project.type_name = TYPE_NAME
    project.artifact_type = ARTIFACT_TYPE_RESOURCE
    project.schema = {"handlers": {}}

    mock_cfn_client = MagicMock(spec=["register_type"])
    fileobj = object()
    mock_cfn_client.register_type.return_value = {"RegistrationToken": "foo"}

    patch_sdk = patch("rpdk.core.project.create_sdk_session", autospec=True)
    patch_uploader = patch.object(Uploader, "upload", return_value="url")
    patch_logging_role_arn = patch.object(
        Uploader, "get_log_delivery_role_arn", return_value="some-log-role-arn"
    )
    patch_uuid = patch("rpdk.core.project.uuid4", autospec=True, return_value="foo")
    patch_wait = patch.object(project, "_wait_for_registration", autospec=True)

    with patch_sdk as mock_sdk, patch_uploader as mock_upload_method, patch_logging_role_arn as mock_role_arn_method:  # noqa: B950 as it conflicts with formatting rules # pylint: disable=C0301
        mock_sdk.return_value.client.side_effect = [mock_cfn_client, MagicMock()]
        with patch_uuid as mock_uuid, patch_wait as mock_wait:
            project._upload(
                fileobj,
                endpoint_url=None,
                region_name=None,
                role_arn="someArn",
                use_role=use_role,
                set_default=True,
            )

    mock_sdk.assert_called_once_with(None)
    mock_upload_method.assert_called_once_with(project.hypenated_name, fileobj)
    mock_role_arn_method.assert_called_once_with()
    mock_uuid.assert_called_once_with()
    mock_wait.assert_called_once_with(mock_cfn_client, "foo", True)

    mock_cfn_client.register_type.assert_called_once_with(
        Type="RESOURCE",
        TypeName=project.type_name,
        SchemaHandlerPackage="url",
        ClientRequestToken=mock_uuid.return_value,
        LoggingConfig={
            "LogRoleArn": "some-log-role-arn",
            "LogGroupName": "aws-color-red-logs",
        },
        **expected_additional_args,
    )


def test__upload_clienterror(project):
    project.type_name = TYPE_NAME
    project.artifact_type = ARTIFACT_TYPE_RESOURCE
    project.schema = {}

    mock_cfn_client = MagicMock(spec=["register_type"])
    mock_cfn_client.register_type.side_effect = ClientError(
        BLANK_CLIENT_ERROR, "RegisterType"
    )
    fileobj = object()

    patch_sdk = patch("rpdk.core.project.create_sdk_session", autospec=True)
    patch_uploader = patch.object(Uploader, "upload", return_value="url")
    patch_role_arn = patch.object(
        Uploader, "get_log_delivery_role_arn", return_value="some-log-role-arn"
    )
    patch_uuid = patch("rpdk.core.project.uuid4", autospec=True, return_value="foo")

    with patch_sdk as mock_sdk, patch_uploader as mock_upload_method, patch_role_arn as mock_role_arn_method:  # noqa: B950 as it conflicts with formatting rules # pylint: disable=C0301
        mock_session = mock_sdk.return_value
        mock_session.client.side_effect = [mock_cfn_client, MagicMock()]
        with patch_uuid as mock_uuid, pytest.raises(DownstreamError):
            project._upload(
                fileobj,
                endpoint_url=None,
                region_name=None,
                role_arn=None,
                use_role=False,
                set_default=True,
            )

    mock_sdk.assert_called_once_with(None)
    mock_upload_method.assert_called_once_with(project.hypenated_name, fileobj)
    mock_role_arn_method.assert_called_once_with()
    mock_uuid.assert_called_once_with()
    mock_cfn_client.register_type.assert_called_once_with(
        Type="RESOURCE",
        TypeName=project.type_name,
        SchemaHandlerPackage="url",
        ClientRequestToken=mock_uuid.return_value,
        LoggingConfig={
            "LogRoleArn": "some-log-role-arn",
            "LogGroupName": "aws-color-red-logs",
        },
    )


def test__upload_clienterror_module(project):
    project.type_name = MODULE_TYPE_NAME
    project.artifact_type = ARTIFACT_TYPE_MODULE
    project.schema = {}

    mock_cfn_client = MagicMock(spec=["register_type"])
    mock_cfn_client.register_type.side_effect = ClientError(
        BLANK_CLIENT_ERROR, "RegisterType"
    )
    fileobj = object()

    patch_sdk = patch("rpdk.core.project.create_sdk_session", autospec=True)
    patch_uploader = patch.object(Uploader, "upload", return_value="url")
    patch_role_arn = patch.object(
        Uploader, "get_log_delivery_role_arn", return_value="some-log-role-arn"
    )
    patch_uuid = patch("rpdk.core.project.uuid4", autospec=True, return_value="foo")

    with patch_sdk as mock_sdk, patch_uploader as mock_upload_method, patch_role_arn as mock_role_arn_method:  # noqa: B950 as it conflicts with formatting rules # pylint: disable=C0301
        mock_session = mock_sdk.return_value
        mock_session.client.side_effect = [mock_cfn_client, MagicMock()]
        with patch_uuid as mock_uuid, pytest.raises(DownstreamError):
            project._upload(
                fileobj,
                endpoint_url=None,
                region_name=None,
                role_arn=None,
                use_role=False,
                set_default=True,
            )

    mock_sdk.assert_called_once_with(None)
    mock_upload_method.assert_called_once_with(project.hypenated_name, fileobj)
    mock_role_arn_method.assert_called_once_with()
    mock_uuid.assert_called_once_with()
    mock_cfn_client.register_type.assert_called_once_with(
        Type="MODULE",
        TypeName=project.type_name,
        SchemaHandlerPackage="url",
        ClientRequestToken=mock_uuid.return_value,
        LoggingConfig={
            "LogRoleArn": "some-log-role-arn",
            "LogGroupName": "aws-color-red-module-logs",
        },
    )


def test__wait_for_registration_set_default(project):
    mock_cfn_client = MagicMock(
        spec=["describe_type_registration", "set_type_default_version", "get_waiter"]
    )
    mock_cfn_client.describe_type_registration.return_value = (
        DESCRIBE_TYPE_COMPLETE_RETURN
    )
    mock_waiter = MagicMock(spec=["wait"])
    mock_cfn_client.get_waiter.return_value = mock_waiter

    project._wait_for_registration(mock_cfn_client, REGISTRATION_TOKEN, True)

    mock_cfn_client.describe_type_registration.assert_called_once_with(
        RegistrationToken=REGISTRATION_TOKEN
    )
    mock_cfn_client.set_type_default_version.assert_called_once_with(
        Arn=TYPE_VERSION_ARN
    )
    mock_waiter.wait.assert_called_once_with(RegistrationToken=REGISTRATION_TOKEN)


def test__wait_for_registration_set_default_fails(project):
    mock_cfn_client = MagicMock(
        spec=["describe_type_registration", "set_type_default_version", "get_waiter"]
    )
    mock_cfn_client.describe_type_registration.return_value = (
        DESCRIBE_TYPE_COMPLETE_RETURN
    )
    mock_cfn_client.set_type_default_version.side_effect = ClientError(
        BLANK_CLIENT_ERROR, "SetTypeDefaultVersion"
    )
    mock_waiter = MagicMock(spec=["wait"])
    mock_cfn_client.get_waiter.return_value = mock_waiter

    with pytest.raises(DownstreamError):
        project._wait_for_registration(mock_cfn_client, REGISTRATION_TOKEN, True)

    mock_cfn_client.describe_type_registration.assert_called_once_with(
        RegistrationToken=REGISTRATION_TOKEN
    )
    mock_cfn_client.set_type_default_version.assert_called_once_with(
        Arn=TYPE_VERSION_ARN
    )
    mock_waiter.wait.assert_called_once_with(RegistrationToken=REGISTRATION_TOKEN)


def test__wait_for_registration_no_set_default(project):
    mock_cfn_client = MagicMock(
        spec=["describe_type_registration", "set_type_default_version", "get_waiter"]
    )
    mock_cfn_client.describe_type_registration.return_value = (
        DESCRIBE_TYPE_COMPLETE_RETURN
    )
    mock_waiter = MagicMock(spec=["wait"])
    mock_cfn_client.get_waiter.return_value = mock_waiter

    project._wait_for_registration(mock_cfn_client, REGISTRATION_TOKEN, False)

    mock_cfn_client.describe_type_registration.assert_called_once_with(
        RegistrationToken=REGISTRATION_TOKEN
    )
    mock_cfn_client.set_type_default_version.assert_not_called()
    mock_waiter.wait.assert_called_once_with(RegistrationToken=REGISTRATION_TOKEN)


def test__wait_for_registration_waiter_fails(project):
    mock_cfn_client = MagicMock(
        spec=["describe_type_registration", "set_type_default_version", "get_waiter"]
    )
    mock_cfn_client.describe_type_registration.return_value = (
        DESCRIBE_TYPE_FAILED_RETURN
    )
    mock_waiter = MagicMock(spec=["wait"])
    mock_waiter.wait.side_effect = WaiterError(
        "TypeRegistrationComplete",
        "Waiter encountered a terminal failure state",
        DESCRIBE_TYPE_FAILED_RETURN,
    )
    mock_cfn_client.get_waiter.return_value = mock_waiter

    with pytest.raises(DownstreamError):
        project._wait_for_registration(mock_cfn_client, REGISTRATION_TOKEN, True)

    mock_cfn_client.describe_type_registration.assert_called_once_with(
        RegistrationToken=REGISTRATION_TOKEN
    )
    mock_cfn_client.set_type_default_version.assert_not_called()
    mock_waiter.wait.assert_called_once_with(RegistrationToken=REGISTRATION_TOKEN)


def test__wait_for_registration_waiter_fails_describe_fails(project):
    mock_cfn_client = MagicMock(
        spec=["describe_type_registration", "set_type_default_version", "get_waiter"]
    )
    mock_cfn_client.describe_type_registration.side_effect = ClientError(
        BLANK_CLIENT_ERROR, "DescribeTypeRegistration"
    )
    mock_waiter = MagicMock(spec=["wait"])
    mock_waiter.wait.side_effect = WaiterError(
        "TypeRegistrationComplete",
        "Waiter encountered a terminal failure state",
        DESCRIBE_TYPE_FAILED_RETURN,
    )
    mock_cfn_client.get_waiter.return_value = mock_waiter

    with pytest.raises(DownstreamError):
        project._wait_for_registration(mock_cfn_client, REGISTRATION_TOKEN, False)

    mock_cfn_client.describe_type_registration.assert_called_once_with(
        RegistrationToken=REGISTRATION_TOKEN
    )
    mock_cfn_client.set_type_default_version.assert_not_called()
    mock_waiter.wait.assert_called_once_with(RegistrationToken=REGISTRATION_TOKEN)


def test__write_settings_invalid_runtime(project):
    project.runtime = "foo"
    project.language = LANGUAGE

    with pytest.raises(InternalError):
        project.write_settings()


@pytest.mark.parametrize(
    "docs_schema",
    (
        {},
        {"primaryIdentifier": ["/properties/Id1", "/properties/Id1"]},
        {"primaryIdentifier": ["/properties/Nested/Id1"]},
    ),
)
def test__get_docs_primary_identifier_bad_path(docs_schema):
    ref = Project._get_docs_primary_identifier(docs_schema)
    assert ref is None


def test__get_docs_primary_identifier_good_path():
    ref = Project._get_docs_primary_identifier(
        {"primaryIdentifier": ["/properties/Id1"]}
    )
    assert ref == "Id1"


def test__get_docs_gettable_atts_empty():
    getatt = Project._get_docs_gettable_atts({})
    assert getatt == []


@pytest.mark.parametrize(
    "docs_schema",
    (
        {"readOnlyProperties": ["/properties/Id2"]},
        {"properties": {}, "readOnlyProperties": ["/properties/Id2"]},
        {"properties": {"Id2": {}}, "readOnlyProperties": ["/properties/Id2"]},
    ),
)
def test__get_docs_gettable_atts_bad_path(docs_schema):
    getatt = Project._get_docs_gettable_atts(docs_schema)
    assert getatt == [
        {"name": "Id2", "description": "Returns the <code>Id2</code> value."}
    ]


def test__get_docs_gettable_atts_good_path():
    getatt = Project._get_docs_gettable_atts(
        {
            "properties": {"Id2": {"description": "foo"}},
            "readOnlyProperties": ["/properties/Id2"],
        }
    )
    assert getatt == [{"name": "Id2", "description": "foo"}]


def test_generate_image_build_config(project):
    project.schema = {}
    mock_plugin = MagicMock(spec=["generate_image_build_config"])
    with patch.object(project, "_plugin", mock_plugin):
        project.generate_image_build_config()

    mock_plugin.generate_image_build_config.assert_called_once()


def test_generate_image_build_config_plugin_not_supported(project):
    project.schema = {}
    mock_plugin = MagicMock(spec=[])
    with patch.object(project, "_plugin", mock_plugin):
        try:
            project.generate_image_build_config()
        except InvalidProjectError:
            pass


def test__write_settings_null_executable_entrypoint(project):
    project.type_name = TYPE_NAME
    project.artifact_type = ARTIFACT_TYPE_RESOURCE
    project.runtime = RUNTIME
    project.language = LANGUAGE
    project.executable_entrypoint = None
    project.write_settings()
    with project.settings_path.open("r", encoding="utf-8") as f:
        settings = json.load(f)
        assert "executableEntrypoint" not in settings


def test__write_settings_nonnull_executable_entrypoint(project):
    project.type_name = TYPE_NAME
    project.artifact_type = ARTIFACT_TYPE_RESOURCE
    project.runtime = RUNTIME
    project.language = LANGUAGE
    project.executable_entrypoint = "executable_entrypoint"
    project.write_settings()
    with project.settings_path.open("r", encoding="utf-8") as f:
        settings = json.load(f)
        assert "executableEntrypoint" in settings
        assert settings["executableEntrypoint"] == "executable_entrypoint"
avg_line_length: 35.025714
max_line_length: 239
alphanum_fraction: 0.698079
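Nearly every test in the file above follows the same shape: build a patch/patch.object, enter it as a context manager around the Project call under test, then assert on the mock after the block exits. A self-contained sketch of that pattern on a toy class (illustrative, not from the suite):

from unittest.mock import MagicMock, patch


class Greeter:  # toy stand-in for Project
    def greet(self):
        return self.backend()  # collaborator the tests replace with a mock


g = Greeter()
mock_backend = MagicMock(return_value="hi")
# The patch lives only for the block, like patch.object(project, "_plugin", ...) above.
with patch.object(g, "backend", mock_backend, create=True):
    assert g.greet() == "hi"
mock_backend.assert_called_once_with()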
hexsha: acef8d6d386e75db41f3ee416cb155fd1a6446e0
size: 970
ext: py
lang: Python
max_stars_repo_path: midonet/neutron/db/migration/alembic_migration/versions/mitaka/expand/4f3b347ea1c2_revert_dynamic_routing.py
max_stars_repo_name: NeCTAR-RC/networking-midonet
max_stars_repo_head_hexsha: 7a69af3eab25f57e77738fd8398b6f4854346fd9
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: midonet/neutron/db/migration/alembic_migration/versions/mitaka/expand/4f3b347ea1c2_revert_dynamic_routing.py
max_issues_repo_name: NeCTAR-RC/networking-midonet
max_issues_repo_head_hexsha: 7a69af3eab25f57e77738fd8398b6f4854346fd9
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: midonet/neutron/db/migration/alembic_migration/versions/mitaka/expand/4f3b347ea1c2_revert_dynamic_routing.py
max_forks_repo_name: NeCTAR-RC/networking-midonet
max_forks_repo_head_hexsha: 7a69af3eab25f57e77738fd8398b6f4854346fd9
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:

# Copyright 2015 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Revert dynamic routing

Revision ID: 4f3b347ea1c2
Revises: 29cbb88b092
Create Date: 2015-12-04 15:01:34.026502

"""

# revision identifiers, used by Alembic.
revision = '4f3b347ea1c2'
down_revision = '29cbb88b092'

from alembic import op


def upgrade():
    op.drop_table('midonet_advertise_route')
    op.drop_table('midonet_routing_peers')
    op.drop_table('midonet_routing_instances')
avg_line_length: 28.529412
max_line_length: 74
alphanum_fraction: 0.763918
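Note that the migration above is one-way: it defines upgrade() to drop the three dynamic-routing tables but no downgrade(), so Alembic cannot recreate them. Applying it programmatically would look like this sketch (the alembic.ini path is an assumption about the repo layout, not stated in the record):

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")           # assumed config location
command.upgrade(cfg, "4f3b347ea1c2")  # apply up to this revision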
hexsha: acef8df5084a95b739b242fd71c2d8d00d310574
size: 11,365
ext: py
lang: Python
max_stars_repo_path: tickeys/kivy/uix/image.py
max_stars_repo_name: BillBillBillBill/Tickeys-linux
max_stars_repo_head_hexsha: 2df31b8665004c58a5d4ab05277f245267d96364
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 317
max_stars_repo_stars_event_min_datetime: 2015-07-30T07:47:41.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-30T00:38:39.000Z
max_issues_repo_path: tickeys/kivy/uix/image.py
max_issues_repo_name: BillBillBillBill/Tickeys-linux
max_issues_repo_head_hexsha: 2df31b8665004c58a5d4ab05277f245267d96364
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 42
max_issues_repo_issues_event_min_datetime: 2015-07-30T10:32:46.000Z
max_issues_repo_issues_event_max_datetime: 2021-11-23T02:44:47.000Z
max_forks_repo_path: tickeys/kivy/uix/image.py
max_forks_repo_name: BillBillBillBill/Tickeys-linux
max_forks_repo_head_hexsha: 2df31b8665004c58a5d4ab05277f245267d96364
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 69
max_forks_repo_forks_event_min_datetime: 2015-08-04T03:27:30.000Z
max_forks_repo_forks_event_max_datetime: 2021-12-27T09:53:26.000Z
''' Image ===== The :class:`Image` widget is used to display an image:: wimg = Image(source='mylogo.png') Asynchronous Loading -------------------- To load an image asynchronously (for example from an external webserver), use the :class:`AsyncImage` subclass:: aimg = AsyncImage(source='http://mywebsite.com/logo.png') This can be useful as it prevents your application from waiting until the image is loaded. If you want to display large images or retrieve them from URL's, using :class:`AsyncImage` will allow these resources to be retrieved on a background thread without blocking your application. Alignment --------- By default, the image is centered and fits inside the widget bounding box. If you don't want that, you can set `allow_stretch` to True and `keep_ratio` to False. You can also inherit from Image and create your own style. For example, if you want your image to be greater than,the size of your widget, you could do:: class FullImage(Image): pass And in your kivy language file:: <-FullImage>: canvas: Color: rgb: (1, 1, 1) Rectangle: texture: self.texture size: self.width + 20, self.height + 20 pos: self.x - 10, self.y - 10 ''' __all__ = ('Image', 'AsyncImage') from kivy.uix.widget import Widget from kivy.core.image import Image as CoreImage from kivy.resources import resource_find from kivy.properties import StringProperty, ObjectProperty, ListProperty, \ AliasProperty, BooleanProperty, NumericProperty from kivy.logger import Logger # delayed imports Loader = None class Image(Widget): '''Image class, see module documentation for more information. ''' source = StringProperty(None) '''Filename / source of your image. :attr:`source` is a :class:`~kivy.properties.StringProperty` and defaults to None. ''' texture = ObjectProperty(None, allownone=True) '''Texture object of the image. Depending of the texture creation, the value will be a :class:`~kivy.graphics.texture.Texture` or a :class:`~kivy.graphics.texture.TextureRegion` object. :attr:`texture` is a :class:`~kivy.properties.ObjectProperty` and defaults to None. ''' texture_size = ListProperty([0, 0]) '''Texture size of the image. .. warning:: The texture size is set after the texture property. So if you listen to the change on :attr:`texture`, the property texture_size will not be up-to-date. Use self.texture.size instead. ''' def get_image_ratio(self): if self.texture: return self.texture.width / float(self.texture.height) return 1. mipmap = BooleanProperty(False) '''Indicate if you want OpenGL mipmapping to be applied to the texture. Read :ref:`mipmap` for more information. .. versionadded:: 1.0.7 :attr:`mipmap` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' image_ratio = AliasProperty(get_image_ratio, None, bind=('texture', )) '''Ratio of the image (width / float(height). :attr:`image_ratio` is a :class:`~kivy.properties.AliasProperty` and is read-only. ''' color = ListProperty([1, 1, 1, 1]) '''Image color, in the format (r, g, b, a). This attribute can be used to 'tint' an image. Be careful: if the source image is not gray/white, the color will not really work as expected. .. versionadded:: 1.0.6 :attr:`color` is a :class:`~kivy.properties.ListProperty` and defaults to [1, 1, 1, 1]. ''' allow_stretch = BooleanProperty(False) '''If True, the normalized image size will be maximized to fit in the image box. Otherwise, if the box is too tall, the image will not be stretched more than 1:1 pixels. .. 
versionadded:: 1.0.7 :attr:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' keep_ratio = BooleanProperty(True) '''If False along with allow_stretch being True, the normalized image size will be maximized to fit in the image box and ignores the aspect ratio of the image. Otherwise, if the box is too tall, the image will not be stretched more than 1:1 pixels. .. versionadded:: 1.0.8 :attr:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty` and defaults to True. ''' keep_data = BooleanProperty(False) '''If True, the underlying _coreimage will store the raw image data. This is useful when performing pixel based collision detection. .. versionadded:: 1.3.0 :attr:`keep_data` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' anim_delay = NumericProperty(.25) '''Delay the animation if the image is sequenced (like an animated gif). If anim_delay is set to -1, the animation will be stopped. .. versionadded:: 1.0.8 :attr:`anim_delay` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.25 (4 FPS). ''' anim_loop = NumericProperty(0) '''Number of loops to play then stop animating. 0 means keep animating. .. versionadded:: 1.9.0 :attr:`anim_loop` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' nocache = BooleanProperty(False) '''If this property is set True, the image will not be added to the internal cache. The cache will simply ignore any calls trying to append the core image. .. versionadded:: 1.6.0 :attr:`nocache` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' def get_norm_image_size(self): if not self.texture: return self.size ratio = self.image_ratio w, h = self.size tw, th = self.texture.size # ensure that the width is always maximized to the container width if self.allow_stretch: if not self.keep_ratio: return w, h iw = w else: iw = min(w, tw) # calculate the appropriate height ih = iw / ratio # if the height is too high, take the height of the container # and calculate appropriate width. no need to test further. :) if ih > h: if self.allow_stretch: ih = h else: ih = min(h, th) iw = ih * ratio return iw, ih norm_image_size = AliasProperty(get_norm_image_size, None, bind=( 'texture', 'size', 'image_ratio', 'allow_stretch')) '''Normalized image size within the widget box. This size will always fit the widget size and will preserve the image ratio. :attr:`norm_image_size` is a :class:`~kivy.properties.AliasProperty` and is read-only. ''' def __init__(self, **kwargs): self._coreimage = None self._loops = 0 super(Image, self).__init__(**kwargs) self.bind(source=self.texture_update, mipmap=self.texture_update) if self.source: self.texture_update() def texture_update(self, *largs): if not self.source: self.texture = None else: filename = resource_find(self.source) self._loops = 0 if filename is None: return Logger.error('Image: Error reading file {filename}'.
format(filename=self.source)) mipmap = self.mipmap if self._coreimage is not None: self._coreimage.unbind(on_texture=self._on_tex_change) try: self._coreimage = ci = CoreImage(filename, mipmap=mipmap, anim_delay=self.anim_delay, keep_data=self.keep_data, nocache=self.nocache) except Exception: self._coreimage = ci = None if ci: ci.bind(on_texture=self._on_tex_change) self.texture = ci.texture def on_anim_delay(self, instance, value): self._loops = 0 if self._coreimage is None: return self._coreimage.anim_delay = value if value < 0: self._coreimage.anim_reset(False) def on_texture(self, instance, value): if value is not None: self.texture_size = list(value.size) def _on_tex_change(self, *largs): # update texture from core image self.texture = self._coreimage.texture ci = self._coreimage if self.anim_loop and ci._anim_index == len(ci._image.textures) - 1: self._loops += 1 if self.anim_loop == self._loops: ci.anim_reset(False) self._loops = 0 def reload(self): '''Reload image from disk. This facilitates re-loading of images from disk in case the image content changes. .. versionadded:: 1.3.0 Usage:: im = Image(source = '1.jpg') # -- do something -- im.reload() # image will be re-loaded from disk ''' self._coreimage.remove_from_cache() olsource = self.source self.source = '' self.source = olsource def on_nocache(self, *args): if self.nocache and self._coreimage: self._coreimage.remove_from_cache() self._coreimage._nocache = True class AsyncImage(Image): '''Asynchronous Image class. See the module documentation for more information. .. note:: The AsyncImage is a specialized form of the Image class. You may want to refer to the :mod:`~kivy.loader` documentation and in particular, the :class:`~kivy.loader.ProxyImage` for more detail on how to handle events around asynchronous image loading. ''' def __init__(self, **kwargs): self._coreimage = None super(AsyncImage, self).__init__(**kwargs) global Loader if not Loader: from kivy.loader import Loader self.bind(source=self._load_source) if self.source: self._load_source() def _load_source(self, *args): source = self.source if not source: if self._coreimage is not None: self._coreimage.unbind(on_texture=self._on_tex_change) self.texture = None self._coreimage = None else: if not self.is_uri(source): source = resource_find(source) self._coreimage = image = Loader.image(source, nocache=self.nocache, mipmap=self.mipmap, anim_delay=self.anim_delay) image.bind(on_load=self._on_source_load) image.bind(on_texture=self._on_tex_change) self.texture = image.texture def _on_source_load(self, value): image = self._coreimage.image if not image: return self.texture = image.texture def is_uri(self, filename): proto = filename.split('://', 1)[0] return proto in ('http', 'https', 'ftp', 'smb') def _on_tex_change(self, *largs): if self._coreimage: self.texture = self._coreimage.texture def texture_update(self, *largs): pass
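For reference, a minimal usage sketch of the widgets defined above; the file name and app class are hypothetical, and `allow_stretch`/`keep_ratio` are set per the alignment notes in the module docstring:

from kivy.app import App
from kivy.uix.image import Image

class DemoApp(App):
    def build(self):
        # Fill the whole widget, ignoring the picture's aspect ratio.
        return Image(source='mylogo.png', allow_stretch=True, keep_ratio=False)

DemoApp().run()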
31.051913
79
0.616894
acef8e6532ab3d0c5b8ec8a51ea7415029ba17a7
1,242
py
Python
test/test_dashboard_pagination_response.py
JeremyTangCD/lm-sdk-python
2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983
[ "Apache-2.0" ]
null
null
null
test/test_dashboard_pagination_response.py
JeremyTangCD/lm-sdk-python
2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983
[ "Apache-2.0" ]
null
null
null
test/test_dashboard_pagination_response.py
JeremyTangCD/lm-sdk-python
2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ LogicMonitor REST API LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import logicmonitor_sdk from logicmonitor_sdk.models.dashboard_pagination_response import DashboardPaginationResponse # noqa: E501 from logicmonitor_sdk.rest import ApiException class TestDashboardPaginationResponse(unittest.TestCase): """DashboardPaginationResponse unit test stubs""" def setUp(self): pass def tearDown(self): pass def testDashboardPaginationResponse(self): """Test DashboardPaginationResponse""" # FIXME: construct object with mandatory attributes with example values # model = logicmonitor_sdk.models.dashboard_pagination_response.DashboardPaginationResponse() # noqa: E501 pass if __name__ == '__main__': unittest.main()
30.292683
304
0.76409
acef8f18479ec3d4fca06ac153dedd2798f89bfa
200
py
Python
eventex/subscriptions/urls.py
gustavo7lagoas/eventex_wttd
96ef7111341af391c8d97e4e0bc54fa4f668657c
[ "MIT" ]
null
null
null
eventex/subscriptions/urls.py
gustavo7lagoas/eventex_wttd
96ef7111341af391c8d97e4e0bc54fa4f668657c
[ "MIT" ]
null
null
null
eventex/subscriptions/urls.py
gustavo7lagoas/eventex_wttd
96ef7111341af391c8d97e4e0bc54fa4f668657c
[ "MIT" ]
null
null
null
from django.conf.urls import url from eventex.subscriptions.views import new, detail urlpatterns = [ url(r'^(?P<uid>[0-9A-Fa-f-]+)/$', detail, name='detail'), url(r'^$', new, name='new'), ]
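For illustration, the named routes above can be resolved with Django's `reverse`; a small sketch, assuming this URLconf is included in the project's root URLconf (the resulting paths are relative to whatever prefix the `include()` uses, and the uid value is made up):

from django.urls import reverse

reverse('new')                                 # -> '<prefix>/'
reverse('detail', kwargs={'uid': '0f1e2d3c'})  # -> '<prefix>/0f1e2d3c/'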
22.222222
61
0.63
acef8fd52a4411251191679bb72d0fa3fd6d255e
1,689
py
Python
Sketches/MPS/BugReports/FixTests/Kamaelia/Examples/UsingChassis/PAR/par_slideshow.py
sparkslabs/kamaelia_orig
24b5f855a63421a1f7c6c7a35a7f4629ed955316
[ "Apache-2.0" ]
12
2015-10-20T10:22:01.000Z
2021-07-19T10:09:44.000Z
Sketches/MPS/BugReports/FixTests/Kamaelia/Examples/UsingChassis/PAR/par_slideshow.py
sparkslabs/kamaelia_orig
24b5f855a63421a1f7c6c7a35a7f4629ed955316
[ "Apache-2.0" ]
2
2015-10-20T10:22:55.000Z
2017-02-13T11:05:25.000Z
Sketches/MPS/BugReports/FixTests/Kamaelia/Examples/UsingChassis/PAR/par_slideshow.py
sparkslabs/kamaelia_orig
24b5f855a63421a1f7c6c7a35a7f4629ed955316
[ "Apache-2.0" ]
6
2015-03-09T12:51:59.000Z
2020-03-01T13:06:21.000Z
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------- # from Kamaelia.UI.Pygame.Button import Button from Kamaelia.UI.Pygame.Image import Image from Kamaelia.Util.Chooser import Chooser from Kamaelia.Chassis.Pipeline import Pipeline from Kamaelia.Chassis.PAR import PAR import os path = "Slides" extn = ".gif" allfiles = os.listdir(path) files = list() for fname in allfiles: if fname[-len(extn):]==extn: files.append(os.path.join(path,fname)) files.sort() Pipeline( PAR( Button(caption="Next", msg="NEXT", position=(72,8)), Button(caption="Previous", msg="PREV", position=(8,8)), Button(caption="First", msg="FIRST" ,position=(256,8)), Button(caption="Last", msg="LAST", position=(320,8)), ), Chooser(items = files), Image(size=(800,600), position=(8,48)), ).run()
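As a side note on the file-filtering loop above, the same sorted slide list can be built more compactly with a comprehension; a minimal equivalent sketch with identical behavior:

import os

path, extn = "Slides", ".gif"
files = sorted(
    os.path.join(path, fname)
    for fname in os.listdir(path)
    if fname.endswith(extn)
)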
32.480769
78
0.669035
acef90bb4f34a16ee5b8ef3b81a7610755b2e78d
948
py
Python
Tests/dynamic_sanity/mock.py
btddg28/ironpython
8006238c19d08db5db9bada39d765143e631059e
[ "Apache-2.0" ]
2
2019-09-21T22:22:30.000Z
2020-05-09T12:45:51.000Z
Tests/dynamic_sanity/mock.py
btddg28/ironpython
8006238c19d08db5db9bada39d765143e631059e
[ "Apache-2.0" ]
null
null
null
Tests/dynamic_sanity/mock.py
btddg28/ironpython
8006238c19d08db5db9bada39d765143e631059e
[ "Apache-2.0" ]
1
2019-09-18T05:37:46.000Z
2019-09-18T05:37:46.000Z
##################################################################################### # # Copyright (c) Microsoft Corporation. All rights reserved. # # This source code is subject to terms and conditions of the Apache License, Version 2.0. A # copy of the license can be found in the License.html file at the root of this distribution. If # you cannot locate the Apache License, Version 2.0, please send an email to # ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound # by the terms of the Apache License, Version 2.0. # # You must not remove this notice, or any other, from this software. # # ##################################################################################### import random, math class Mock(object): def __getattr__(self, key): """Mock objects of this type will dynamically implement any requested member""" return random.choice(["world", math.pi]) m = Mock()
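A short demonstration of the dynamic member lookup above; the attribute names are arbitrary by construction, since `__getattr__` answers any name that is not otherwise defined:

m = Mock()
print(m.greeting)       # -> 'world' or 3.141592653589793, chosen at random
print(m.anything_else)  # every lookup re-rolls the random choice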
41.217391
96
0.594937
acef90ce4584ea692ebbaa4b79bafa1a65cfafa8
10,694
py
Python
numpy/typing/__init__.py
sukritingupta/numpy
2c44c93164c274a5865799eefd3e401effa948a9
[ "BSD-3-Clause" ]
1
2022-02-24T10:16:43.000Z
2022-02-24T10:16:43.000Z
numpy/typing/__init__.py
sukritingupta/numpy
2c44c93164c274a5865799eefd3e401effa948a9
[ "BSD-3-Clause" ]
null
null
null
numpy/typing/__init__.py
sukritingupta/numpy
2c44c93164c274a5865799eefd3e401effa948a9
[ "BSD-3-Clause" ]
1
2022-03-22T11:47:01.000Z
2022-03-22T11:47:01.000Z
""" ============================ Typing (:mod:`numpy.typing`) ============================ .. versionadded:: 1.20 Large parts of the NumPy API have PEP-484-style type annotations. In addition a number of type aliases are available to users, most prominently the two below: - `ArrayLike`: objects that can be converted to arrays - `DTypeLike`: objects that can be converted to dtypes .. _typing-extensions: https://pypi.org/project/typing-extensions/ Mypy plugin ----------- .. versionadded:: 1.21 .. automodule:: numpy.typing.mypy_plugin .. currentmodule:: numpy.typing Differences from the runtime NumPy API -------------------------------------- NumPy is very flexible. Trying to describe the full range of possibilities statically would result in types that are not very helpful. For that reason, the typed NumPy API is often stricter than the runtime NumPy API. This section describes some notable differences. ArrayLike ~~~~~~~~~ The `ArrayLike` type tries to avoid creating object arrays. For example, .. code-block:: python >>> np.array(x**2 for x in range(10)) array(<generator object <genexpr> at ...>, dtype=object) is valid NumPy code which will create a 0-dimensional object array. Type checkers will complain about the above example when using the NumPy types however. If you really intended to do the above, then you can either use a ``# type: ignore`` comment: .. code-block:: python >>> np.array(x**2 for x in range(10)) # type: ignore or explicitly type the array like object as `~typing.Any`: .. code-block:: python >>> from typing import Any >>> array_like: Any = (x**2 for x in range(10)) >>> np.array(array_like) array(<generator object <genexpr> at ...>, dtype=object) ndarray ~~~~~~~ It's possible to mutate the dtype of an array at runtime. For example, the following code is valid: .. code-block:: python >>> x = np.array([1, 2]) >>> x.dtype = np.bool_ This sort of mutation is not allowed by the types. Users who want to write statically typed code should instead use the `numpy.ndarray.view` method to create a view of the array with a different dtype. DTypeLike ~~~~~~~~~ The `DTypeLike` type tries to avoid creation of dtype objects using dictionary of fields like below: .. code-block:: python >>> x = np.dtype({"field1": (float, 1), "field2": (int, 3)}) Although this is valid NumPy code, the type checker will complain about it, since its usage is discouraged. Please see : :ref:`Data type objects <arrays.dtypes>` Number precision ~~~~~~~~~~~~~~~~ The precision of `numpy.number` subclasses is treated as a covariant generic parameter (see :class:`~NBitBase`), simplifying the annotating of processes involving precision-based casting. .. code-block:: python >>> from typing import TypeVar >>> import numpy as np >>> import numpy.typing as npt >>> T = TypeVar("T", bound=npt.NBitBase) >>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]": ... ... Consequently, the likes of `~numpy.float16`, `~numpy.float32` and `~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to runtime, they're not necessarily considered as sub-classes. Timedelta64 ~~~~~~~~~~~ The `~numpy.timedelta64` class is not considered a subclass of `~numpy.signedinteger`, the former only inheriting from `~numpy.generic` while static type checking. 0D arrays ~~~~~~~~~ During runtime numpy aggressively casts any passed 0D arrays into their corresponding `~numpy.generic` instance. 
Until the introduction of shape typing (see :pep:`646`) it is unfortunately not possible to make the necessary distinction between 0D and >0D arrays. While thus not strictly correct, all operations that can potentially perform a 0D-array -> scalar cast are currently annotated as exclusively returning an `ndarray`. If it is known in advance that an operation _will_ perform a 0D-array -> scalar cast, then one can consider manually remedying the situation with either `typing.cast` or a ``# type: ignore`` comment. Record array dtypes ~~~~~~~~~~~~~~~~~~~ The dtype of `numpy.recarray`, and the `numpy.rec` functions in general, can be specified in one of two ways: * Directly via the ``dtype`` argument. * With up to five helper arguments that operate via `numpy.format_parser`: ``formats``, ``names``, ``titles``, ``aligned`` and ``byteorder``. These two approaches are currently typed as being mutually exclusive, *i.e.* if ``dtype`` is specified then one may not specify ``formats``. While this mutual exclusivity is not (strictly) enforced during runtime, combining both dtype specifiers can lead to unexpected or even downright buggy behavior. API --- """ # NOTE: The API section will be appended with additional entries # further down in this file from __future__ import annotations from numpy import ufunc from typing import TYPE_CHECKING, final if not TYPE_CHECKING: __all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] else: # Ensure that all objects within this module are accessible while # static type checking. This includes private ones, as we need them # for internal use. # # Declare to mypy that `__all__` is a list of strings without assigning # an explicit value __all__: list[str] __path__: list[str] @final  # Disallow the creation of arbitrary `NBitBase` subclasses class NBitBase: """ A type representing `numpy.number` precision during static type checking. Used exclusively for the purpose of static type checking, `NBitBase` represents the base of a hierarchical set of subclasses. Each subsequent subclass is herein used for representing a lower level of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. .. versionadded:: 1.20 Examples -------- Below is a typical usage example: `NBitBase` is herein used for annotating a function that takes a float and integer of arbitrary precision as arguments and returns a new float of whichever precision is largest (*e.g.* ``np.float16 + np.int64 -> np.float64``). .. code-block:: python >>> from __future__ import annotations >>> from typing import TypeVar, TYPE_CHECKING >>> import numpy as np >>> import numpy.typing as npt >>> T1 = TypeVar("T1", bound=npt.NBitBase) >>> T2 = TypeVar("T2", bound=npt.NBitBase) >>> def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... return a + b >>> a = np.float16() >>> b = np.int64() >>> out = add(a, b) >>> if TYPE_CHECKING: ... reveal_locals() ... # note: Revealed local types are: ... # note: a: numpy.floating[numpy.typing._16Bit*] ... # note: b: numpy.signedinteger[numpy.typing._64Bit*] ...
# note: out: numpy.floating[numpy.typing._64Bit*] """ def __init_subclass__(cls) -> None: allowed_names = { "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit", "_64Bit", "_32Bit", "_16Bit", "_8Bit", } if cls.__name__ not in allowed_names: raise TypeError('cannot inherit from final class "NBitBase"') super().__init_subclass__() # Silence errors about subclassing a `@final`-decorated class class _256Bit(NBitBase): # type: ignore[misc] pass class _128Bit(_256Bit): # type: ignore[misc] pass class _96Bit(_128Bit): # type: ignore[misc] pass class _80Bit(_96Bit): # type: ignore[misc] pass class _64Bit(_80Bit): # type: ignore[misc] pass class _32Bit(_64Bit): # type: ignore[misc] pass class _16Bit(_32Bit): # type: ignore[misc] pass class _8Bit(_16Bit): # type: ignore[misc] pass from ._nested_sequence import _NestedSequence from ._nbit import ( _NBitByte, _NBitShort, _NBitIntC, _NBitIntP, _NBitInt, _NBitLongLong, _NBitHalf, _NBitSingle, _NBitDouble, _NBitLongDouble, ) from ._char_codes import ( _BoolCodes, _UInt8Codes, _UInt16Codes, _UInt32Codes, _UInt64Codes, _Int8Codes, _Int16Codes, _Int32Codes, _Int64Codes, _Float16Codes, _Float32Codes, _Float64Codes, _Complex64Codes, _Complex128Codes, _ByteCodes, _ShortCodes, _IntCCodes, _IntPCodes, _IntCodes, _LongLongCodes, _UByteCodes, _UShortCodes, _UIntCCodes, _UIntPCodes, _UIntCodes, _ULongLongCodes, _HalfCodes, _SingleCodes, _DoubleCodes, _LongDoubleCodes, _CSingleCodes, _CDoubleCodes, _CLongDoubleCodes, _DT64Codes, _TD64Codes, _StrCodes, _BytesCodes, _VoidCodes, _ObjectCodes, ) from ._scalars import ( _CharLike_co, _BoolLike_co, _UIntLike_co, _IntLike_co, _FloatLike_co, _ComplexLike_co, _TD64Like_co, _NumberLike_co, _ScalarLike_co, _VoidLike_co, ) from ._shape import _Shape, _ShapeLike from ._dtype_like import ( DTypeLike as DTypeLike, _DTypeLike, _SupportsDType, _VoidDTypeLike, _DTypeLikeBool, _DTypeLikeUInt, _DTypeLikeInt, _DTypeLikeFloat, _DTypeLikeComplex, _DTypeLikeTD64, _DTypeLikeDT64, _DTypeLikeObject, _DTypeLikeVoid, _DTypeLikeStr, _DTypeLikeBytes, _DTypeLikeComplex_co, ) from ._array_like import ( ArrayLike as ArrayLike, _ArrayLike, _FiniteNestedSequence, _SupportsArray, _SupportsArrayFunc, _ArrayLikeInt, _ArrayLikeBool_co, _ArrayLikeUInt_co, _ArrayLikeInt_co, _ArrayLikeFloat_co, _ArrayLikeComplex_co, _ArrayLikeNumber_co, _ArrayLikeTD64_co, _ArrayLikeDT64_co, _ArrayLikeObject_co, _ArrayLikeVoid_co, _ArrayLikeStr_co, _ArrayLikeBytes_co, ) from ._generic_alias import ( NDArray as NDArray, _DType, _GenericAlias, ) if TYPE_CHECKING: from ._ufunc import ( _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1, _UFunc_Nin1_Nout2, _UFunc_Nin2_Nout2, _GUFunc_Nin2_Nout1, ) else: # Declare the (type-check-only) ufunc subclasses as ufunc aliases during # runtime; this helps autocompletion tools such as Jedi (numpy/numpy#19834) _UFunc_Nin1_Nout1 = ufunc _UFunc_Nin2_Nout1 = ufunc _UFunc_Nin1_Nout2 = ufunc _UFunc_Nin2_Nout2 = ufunc _GUFunc_Nin2_Nout1 = ufunc # Clean up the namespace del TYPE_CHECKING, final, ufunc if __doc__ is not None: from ._add_docstring import _docstrings __doc__ += _docstrings __doc__ += '\n.. autoclass:: numpy.typing.NBitBase\n' del _docstrings from numpy._pytesttester import PytestTester test = PytestTester(__name__) del PytestTester
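A short illustration of the two headline aliases exported above, `ArrayLike` and `NDArray`; the function name is made up for the example:

import numpy as np
import numpy.typing as npt

def as_float64(a: npt.ArrayLike) -> npt.NDArray[np.float64]:
    # Anything array-like (lists, tuples, scalars, ndarrays) is accepted
    # and converted to a concrete float64 array.
    return np.asarray(a, dtype=np.float64)

arr = as_float64([1, 2, 3])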
26.937028
83
0.682719
acef9173d90347845a8157bace85af7af576bbd0
12,416
py
Python
lib/services/autoscaling/ncloud_autoscaling/model/create_auto_scaling_group_request.py
NaverCloudPlatform/ncloud-sdk-python
5976dfabd205c615fcf57ac2f0ab67313ee6953c
[ "MIT" ]
12
2018-11-20T04:30:49.000Z
2021-11-09T12:34:26.000Z
lib/services/autoscaling/ncloud_autoscaling/model/create_auto_scaling_group_request.py
NaverCloudPlatform/ncloud-sdk-python
5976dfabd205c615fcf57ac2f0ab67313ee6953c
[ "MIT" ]
1
2019-01-24T15:56:15.000Z
2019-05-31T07:56:55.000Z
lib/services/autoscaling/ncloud_autoscaling/model/create_auto_scaling_group_request.py
NaverCloudPlatform/ncloud-sdk-python
5976dfabd205c615fcf57ac2f0ab67313ee6953c
[ "MIT" ]
6
2018-06-29T03:45:50.000Z
2022-03-18T01:51:45.000Z
# coding: utf-8 """ autoscaling Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class CreateAutoScalingGroupRequest(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'auto_scaling_group_name': 'str', 'launch_configuration_name': 'str', 'desired_capacity': 'int', 'min_size': 'int', 'max_size': 'int', 'default_cooldown': 'int', 'load_balancer_name_list': 'list[str]', 'health_check_grace_period': 'int', 'health_check_type_code': 'str', 'zone_no_list': 'list[str]' } attribute_map = { 'auto_scaling_group_name': 'autoScalingGroupName', 'launch_configuration_name': 'launchConfigurationName', 'desired_capacity': 'desiredCapacity', 'min_size': 'minSize', 'max_size': 'maxSize', 'default_cooldown': 'defaultCooldown', 'load_balancer_name_list': 'loadBalancerNameList', 'health_check_grace_period': 'healthCheckGracePeriod', 'health_check_type_code': 'healthCheckTypeCode', 'zone_no_list': 'zoneNoList' } def __init__(self, auto_scaling_group_name=None, launch_configuration_name=None, desired_capacity=None, min_size=None, max_size=None, default_cooldown=None, load_balancer_name_list=None, health_check_grace_period=None, health_check_type_code=None, zone_no_list=None): # noqa: E501 """CreateAutoScalingGroupRequest - a model defined in Swagger""" # noqa: E501 self._auto_scaling_group_name = None self._launch_configuration_name = None self._desired_capacity = None self._min_size = None self._max_size = None self._default_cooldown = None self._load_balancer_name_list = None self._health_check_grace_period = None self._health_check_type_code = None self._zone_no_list = None self.discriminator = None if auto_scaling_group_name is not None: self.auto_scaling_group_name = auto_scaling_group_name self.launch_configuration_name = launch_configuration_name if desired_capacity is not None: self.desired_capacity = desired_capacity self.min_size = min_size self.max_size = max_size if default_cooldown is not None: self.default_cooldown = default_cooldown if load_balancer_name_list is not None: self.load_balancer_name_list = load_balancer_name_list if health_check_grace_period is not None: self.health_check_grace_period = health_check_grace_period if health_check_type_code is not None: self.health_check_type_code = health_check_type_code self.zone_no_list = zone_no_list @property def auto_scaling_group_name(self): """Gets the auto_scaling_group_name of this CreateAutoScalingGroupRequest. # noqa: E501 Auto Scaling group name # noqa: E501 :return: The auto_scaling_group_name of this CreateAutoScalingGroupRequest. # noqa: E501 :rtype: str """ return self._auto_scaling_group_name @auto_scaling_group_name.setter def auto_scaling_group_name(self, auto_scaling_group_name): """Sets the auto_scaling_group_name of this CreateAutoScalingGroupRequest. Auto Scaling group name # noqa: E501 :param auto_scaling_group_name: The auto_scaling_group_name of this CreateAutoScalingGroupRequest. # noqa: E501 :type: str """ self._auto_scaling_group_name = auto_scaling_group_name @property def launch_configuration_name(self): """Gets the launch_configuration_name of this CreateAutoScalingGroupRequest. # noqa: E501 Launch configuration name # noqa: E501 :return: The launch_configuration_name of this CreateAutoScalingGroupRequest.
# noqa: E501 :rtype: str """ return self._launch_configuration_name @launch_configuration_name.setter def launch_configuration_name(self, launch_configuration_name): """Sets the launch_configuration_name of this CreateAutoScalingGroupRequest. Launch configuration name # noqa: E501 :param launch_configuration_name: The launch_configuration_name of this CreateAutoScalingGroupRequest. # noqa: E501 :type: str """ if launch_configuration_name is None: raise ValueError("Invalid value for `launch_configuration_name`, must not be `None`") # noqa: E501 self._launch_configuration_name = launch_configuration_name @property def desired_capacity(self): """Gets the desired_capacity of this CreateAutoScalingGroupRequest. # noqa: E501 Desired capacity # noqa: E501 :return: The desired_capacity of this CreateAutoScalingGroupRequest. # noqa: E501 :rtype: int """ return self._desired_capacity @desired_capacity.setter def desired_capacity(self, desired_capacity): """Sets the desired_capacity of this CreateAutoScalingGroupRequest. Desired capacity # noqa: E501 :param desired_capacity: The desired_capacity of this CreateAutoScalingGroupRequest. # noqa: E501 :type: int """ self._desired_capacity = desired_capacity @property def min_size(self): """Gets the min_size of this CreateAutoScalingGroupRequest. # noqa: E501 Minimum size # noqa: E501 :return: The min_size of this CreateAutoScalingGroupRequest. # noqa: E501 :rtype: int """ return self._min_size @min_size.setter def min_size(self, min_size): """Sets the min_size of this CreateAutoScalingGroupRequest. Minimum size # noqa: E501 :param min_size: The min_size of this CreateAutoScalingGroupRequest. # noqa: E501 :type: int """ if min_size is None: raise ValueError("Invalid value for `min_size`, must not be `None`") # noqa: E501 self._min_size = min_size @property def max_size(self): """Gets the max_size of this CreateAutoScalingGroupRequest. # noqa: E501 Maximum size # noqa: E501 :return: The max_size of this CreateAutoScalingGroupRequest. # noqa: E501 :rtype: int """ return self._max_size @max_size.setter def max_size(self, max_size): """Sets the max_size of this CreateAutoScalingGroupRequest. Maximum size # noqa: E501 :param max_size: The max_size of this CreateAutoScalingGroupRequest. # noqa: E501 :type: int """ if max_size is None: raise ValueError("Invalid value for `max_size`, must not be `None`") # noqa: E501 self._max_size = max_size @property def default_cooldown(self): """Gets the default_cooldown of this CreateAutoScalingGroupRequest. # noqa: E501 Default cooldown time # noqa: E501 :return: The default_cooldown of this CreateAutoScalingGroupRequest. # noqa: E501 :rtype: int """ return self._default_cooldown @default_cooldown.setter def default_cooldown(self, default_cooldown): """Sets the default_cooldown of this CreateAutoScalingGroupRequest. Default cooldown time # noqa: E501 :param default_cooldown: The default_cooldown of this CreateAutoScalingGroupRequest. # noqa: E501 :type: int """ self._default_cooldown = default_cooldown @property def load_balancer_name_list(self): """Gets the load_balancer_name_list of this CreateAutoScalingGroupRequest. # noqa: E501 Load balancer name list # noqa: E501 :return: The load_balancer_name_list of this CreateAutoScalingGroupRequest. # noqa: E501 :rtype: list[str] """ return self._load_balancer_name_list @load_balancer_name_list.setter def load_balancer_name_list(self, load_balancer_name_list): """Sets the load_balancer_name_list of this CreateAutoScalingGroupRequest. Load balancer name list # noqa: E501 :param load_balancer_name_list: The load_balancer_name_list of this CreateAutoScalingGroupRequest.
# noqa: E501 :type: list[str] """ self._load_balancer_name_list = load_balancer_name_list @property def health_check_grace_period(self): """Gets the health_check_grace_period of this CreateAutoScalingGroupRequest. # noqa: E501 Health check grace period # noqa: E501 :return: The health_check_grace_period of this CreateAutoScalingGroupRequest. # noqa: E501 :rtype: int """ return self._health_check_grace_period @health_check_grace_period.setter def health_check_grace_period(self, health_check_grace_period): """Sets the health_check_grace_period of this CreateAutoScalingGroupRequest. Health check grace period # noqa: E501 :param health_check_grace_period: The health_check_grace_period of this CreateAutoScalingGroupRequest. # noqa: E501 :type: int """ self._health_check_grace_period = health_check_grace_period @property def health_check_type_code(self): """Gets the health_check_type_code of this CreateAutoScalingGroupRequest. # noqa: E501 Health check type code # noqa: E501 :return: The health_check_type_code of this CreateAutoScalingGroupRequest. # noqa: E501 :rtype: str """ return self._health_check_type_code @health_check_type_code.setter def health_check_type_code(self, health_check_type_code): """Sets the health_check_type_code of this CreateAutoScalingGroupRequest. Health check type code # noqa: E501 :param health_check_type_code: The health_check_type_code of this CreateAutoScalingGroupRequest. # noqa: E501 :type: str """ self._health_check_type_code = health_check_type_code @property def zone_no_list(self): """Gets the zone_no_list of this CreateAutoScalingGroupRequest. # noqa: E501 Zone number list # noqa: E501 :return: The zone_no_list of this CreateAutoScalingGroupRequest. # noqa: E501 :rtype: list[str] """ return self._zone_no_list @zone_no_list.setter def zone_no_list(self, zone_no_list): """Sets the zone_no_list of this CreateAutoScalingGroupRequest. Zone number list # noqa: E501 :param zone_no_list: The zone_no_list of this CreateAutoScalingGroupRequest. # noqa: E501 :type: list[str] """ if zone_no_list is None: raise ValueError("Invalid value for `zone_no_list`, must not be `None`") # noqa: E501 self._zone_no_list = zone_no_list def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, CreateAutoScalingGroupRequest): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
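A hedged construction sketch for the generated model above; the field values are hypothetical, the import path is inferred from the file's location in the repository, and `launch_configuration_name`, `min_size`, `max_size` and `zone_no_list` are the arguments whose setters reject `None`:

from ncloud_autoscaling.model.create_auto_scaling_group_request import CreateAutoScalingGroupRequest

req = CreateAutoScalingGroupRequest(
    launch_configuration_name='my-launch-config',  # hypothetical name
    min_size=1,
    max_size=3,
    zone_no_list=['2', '3'],                       # hypothetical zone numbers
)
print(req.to_dict())  # serialized using the snake_case attribute names above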
33.73913
285
0.65923
acef9277a33df07d3cac88b3a5ec9c7c2099096d
485
py
Python
system/test.py
xoosite/ido.py
811d6477560fc5f635a4d1c8817f4a4667e6c251
[ "MIT" ]
null
null
null
system/test.py
xoosite/ido.py
811d6477560fc5f635a4d1c8817f4a4667e6c251
[ "MIT" ]
null
null
null
system/test.py
xoosite/ido.py
811d6477560fc5f635a4d1c8817f4a4667e6c251
[ "MIT" ]
null
null
null
# coding=utf-8 """ __purpose__ = ... __author__ = JeeysheLu [Jeeyshe@gmail.com] [https://www.lujianxin.com/] [2020/10/14 09:46] Copyright (c) 2020 JeeysheLu This software is licensed to you under the MIT License. Looking forward to making it better. """ if __name__ == '__main__': ok = False for i in range(10): try: b = 1 / 0 except Exception: pass else: ok = True break if not ok: print("xx")
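The success flag fixed above (the original set `ok = True` before the failing statement, so the failure branch still reported success) can be avoided entirely with Python's for/else clause; a minimal sketch, where `attempt()` is a placeholder for whatever operation is being retried:

def attempt():
    return 1 / 0  # placeholder for the real operation

for i in range(10):
    try:
        attempt()
    except Exception:
        continue
    else:
        break
else:
    # only reached when no iteration hit `break`, i.e. all ten attempts failed
    print("xx")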
22.045455
92
0.562887
acef93f9d6984ffdb1c95e7bc1c7e49a43e3d063
4,532
py
Python
Mask/Attention.py
hengwei-chan/simpletransformer_keras
286633d3f2286868ecedb647f9d564c4b21eebf7
[ "MIT" ]
13
2019-04-04T09:50:25.000Z
2022-03-23T14:23:15.000Z
Mask/Attention.py
hengwei-chan/simpletransformer_keras
286633d3f2286868ecedb647f9d564c4b21eebf7
[ "MIT" ]
1
2020-03-30T03:05:13.000Z
2020-03-30T03:05:13.000Z
Mask/Attention.py
hengwei-chan/simpletransformer_keras
286633d3f2286868ecedb647f9d564c4b21eebf7
[ "MIT" ]
3
2019-11-10T13:29:07.000Z
2021-03-26T17:18:34.000Z
from keras import backend as K from keras.engine.topology import Layer from keras.layers import Permute,TimeDistributed,Dense,Reshape,Activation,Lambda,Dropout,RepeatVector import math import numpy as np import tensorflow as tf class RepeatVector4D(Layer): """ Repeats a 3D vector and outputs a 4D vector Source : https://www.reddit.com/r/learnmachinelearning/comments/5ye98e/keras_is_there_a_layer_to_go_from_3d_to_4d_tensor/ """ def __init__(self, n, **kwargs): self.n = n super(RepeatVector4D, self).__init__(**kwargs) def get_output_shape_for(self, input_shape): return (input_shape[0], self.n, input_shape[1], input_shape[2]) def call(self, x, mask=None): x = K.expand_dims(x, 1) pattern = K.stack([1, self.n, 1, 1]) return K.tile(x, pattern) class MultiHeadedAttention(): def __init__(self,d_model ,heads,dim_q,dim_v,dropout_rate,name, **kwargs): self.heads=heads self.dim_q=dim_q self.dim_v=dim_v self.query_layer = TimeDistributed(Dense(self.heads*self.dim_q,use_bias = False),name = name+'_QueryLayer') self.key_layer = TimeDistributed(Dense(self.heads*self.dim_q,use_bias = False),name = name+'_KeyLayer') self.value_layer = TimeDistributed(Dense(self.heads*self.dim_v,use_bias = False),name = name+'_ValueLayer') self.dropout_layer = Dropout(dropout_rate) self.output_layer = TimeDistributed(Dense(d_model,use_bias = False),name = name+'_AttentionOutputLayer') def shift_timestep_ahead(self,vec,last_dim,time_steps,transpose=False): vec = Reshape([-1,self.heads,last_dim])(vec) if transpose: vec = Permute([2,3,1])(vec) vec = Lambda(lambda x:K.reshape(x, [-1, last_dim, time_steps ]))(vec) else: vec = Permute([2,1,3])(vec) vec = Lambda(lambda x:K.reshape(x, [-1, time_steps, last_dim ]))(vec) return vec def shift_timestep_behind(self,vec,last_dim,time_steps): vec = Lambda(lambda x:K.reshape(x, [-1,self.heads, time_steps, last_dim ]))(vec) vec = Permute([2,1,3])(vec) vec = Lambda(lambda x:K.reshape(x, [-1,time_steps,self.heads*last_dim ]))(vec) return vec def scaledDotAttention(self,mask_layer,time_steps): attention_score = Lambda( lambda x: K.batch_dot(x[0],x[1]) / np.sqrt(self.dim_q))([self.query_vec,self.key_vec]) attention_weights = Activation('softmax')(attention_score) mask_layer_heads = RepeatVector4D(self.heads)(mask_layer) mask_layer_heads = Lambda(lambda x:K.reshape(x, [-1 ,1,time_steps]))(mask_layer_heads) attention_weights = Lambda(lambda x: x[0]*x[1])([attention_weights, mask_layer_heads]) masked_atten = attention_weights attention_weights = Lambda(lambda x : x / K.sum(x,axis=-1,keepdims=True))(attention_weights) attention_vector = Lambda( lambda x: K.batch_dot(x[0],x[1]))([attention_weights,self.value_vec]) return attention_vector, attention_weights def __call__(self, x): a = x[0] # print(x,x.get_shape()) time_steps = int(a.get_shape()[1]) # print("time steps = ",time_steps) self.query_vec = self.query_layer(a) self.key_vec = self.key_layer(a) self.value_vec = self.value_layer(a) # Reshape self.query_vec = self.shift_timestep_ahead(self.query_vec,self.dim_q,time_steps,transpose = False) self.key_vec = self.shift_timestep_ahead(self.key_vec,self.dim_q,time_steps,transpose = True) self.value_vec = self.shift_timestep_ahead(self.value_vec,self.dim_v,time_steps,transpose= False) # Attention Weights self.attention_vec,self.attention_weights = self.scaledDotAttention(x[1],time_steps) # print(self.attention_weights,self.attention_vec) self.attention_weights = Lambda( lambda x: K.reshape(x,[-1,self.heads,time_steps,time_steps]))(self.attention_weights) self.attention_vec = Lambda( lambda x:
K.reshape(x,[-1,time_steps,self.dim_v]))(self.attention_vec) #Output Fully Connected Dense temp_out = self.shift_timestep_behind(self.attention_vec,self.dim_v,time_steps) temp_out = self.dropout_layer(temp_out) self.out_vec = self.output_layer(temp_out) return [self.out_vec,self.attention_weights]
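Stripped of the Keras plumbing (and ignoring the mask handling), the scaled dot-product attention computed above reduces to a few lines of array math; a minimal NumPy sketch of that core computation:

import numpy as np

def scaled_dot_attention(q, k, v):
    # q, k: (batch, steps, d_q); v: (batch, steps, d_v)
    scores = q @ k.transpose(0, 2, 1) / np.sqrt(q.shape[-1])
    scores -= scores.max(axis=-1, keepdims=True)    # numerical stability
    weights = np.exp(scores)
    weights /= weights.sum(axis=-1, keepdims=True)  # softmax over the keys
    return weights @ v, weights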
40.464286
127
0.661518
acef93fc1a5c77a164cac6e5f8fa71cefd52c500
10,559
py
Python
paramz/optimization/verbose_optimization.py
mzwiessele/mzparam
897fb8b92f51c17e1dd92c60f69df718a3899d1b
[ "BSD-3-Clause" ]
49
2015-11-07T16:31:25.000Z
2022-03-17T07:40:02.000Z
paramz/optimization/verbose_optimization.py
mzwiessele/mzparam
897fb8b92f51c17e1dd92c60f69df718a3899d1b
[ "BSD-3-Clause" ]
32
2015-12-02T11:27:25.000Z
2020-09-09T06:01:03.000Z
paramz/optimization/verbose_optimization.py
mzwiessele/mzparam
897fb8b92f51c17e1dd92c60f69df718a3899d1b
[ "BSD-3-Clause" ]
27
2015-11-05T10:17:02.000Z
2021-04-23T13:46:21.000Z
# =============================================================================== # Copyright (c) 2015, Max Zwiessele # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of paramax nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # =============================================================================== from __future__ import print_function import numpy as np import sys import time def exponents(fnow, current_grad): exps = [np.abs(np.float(fnow)), 1 if current_grad is np.nan else current_grad] return np.sign(exps) * np.log10(exps).astype(int) class VerboseOptimization(object): def __init__(self, model, opt, maxiters, verbose=False, current_iteration=0, ipython_notebook=True, clear_after_finish=False): self.verbose = verbose if self.verbose: self.model = model self.iteration = current_iteration self.p_iter = self.iteration self.maxiters = maxiters self.len_maxiters = len(str(int(maxiters))) self.opt_name = opt.opt_name self.opt = opt self.model.add_observer(self, self.print_status) self.status = 'running' self.clear = clear_after_finish self.update() try: # pragma: no cover from IPython.display import display from ipywidgets import IntProgress, HTML, Box, VBox, HBox self.text = HTML(width='100%') self.progress = IntProgress(min=0, max=maxiters) #self.progresstext = Text(width='100%', disabled=True, value='0/{}'.format(maxiters)) self.model_show = HTML() self.ipython_notebook = ipython_notebook get_ipython except: # Not in Ipython notebook self.ipython_notebook = False if self.ipython_notebook: # pragma: no cover left_col = VBox( children=[self.progress, self.text], padding=2, width='40%') right_col = Box( children=[self.model_show], padding=2, width='60%') self.hor_align = HBox( children=[left_col, right_col], width='100%', orientation='horizontal') display(self.hor_align) try: self.text.set_css('width', '100%') left_col.set_css({ 'padding': '2px', 'width': "100%", }) right_col.set_css({ 'padding': '2px', }) self.hor_align.set_css({ 'width': "100%", }) self.hor_align.remove_class('vbox') self.hor_align.add_class('hbox') left_col.add_class("box-flex1") right_col.add_class('box-flex0') except: pass # self.text.add_class('box-flex2') # 
self.progress.add_class('box-flex1') else: self.exps = exponents(self.fnow, self.current_gradient) print('Running {} Code:'.format(self.opt_name)) print(' {3:7s} {0:{mi}s} {1:11s} {2:11s}'.format( "i", "f", "|g|", "runtime", mi=self.len_maxiters)) def __enter__(self): self.start = time.time() self._time = self.start return self def print_out(self, seconds): if seconds < 60: ms = (seconds % 1)*100 self.timestring = "{s:0>2d}s{ms:0>2d}".format( s=int(seconds), ms=int(ms)) else: m, s = divmod(seconds, 60) if m > 59: h, m = divmod(m, 60) if h > 23: d, h = divmod(h, 24) self.timestring = '{d:0>2d}d{h:0>2d}h{m:0>2d}'.format( m=int(m), h=int(h), d=int(d)) else: self.timestring = '{h:0>2d}h{m:0>2d}m{s:0>2d}'.format( m=int(m), s=int(s), h=int(h)) else: ms = (seconds % 1)*100 self.timestring = '{m:0>2d}m{s:0>2d}s{ms:0>2d}'.format( m=int(m), s=int(s), ms=int(ms)) if self.ipython_notebook: # pragma: no cover names_vals = [['optimizer', "{:s}".format(self.opt_name)], ['runtime', "{:>s}".format(self.timestring)], ['evaluation', "{:>0{l}}".format( self.iteration, l=self.len_maxiters)], ['objective', "{: > 12.3E}".format(self.fnow)], ['||gradient||', "{: >+12.3E}".format(float(self.current_gradient))], ['status', "{:s}".format(self.status)], ] #message = "Lik:{:5.3E} Grad:{:5.3E} Lik:{:5.3E} Len:{!s}".format(float(m.log_likelihood()), np.einsum('i,i->', grads, grads), float(m.likelihood.variance), " ".join(["{:3.2E}".format(l) for l in m.kern.lengthscale.values])) html_begin = """<style type="text/css"> .tg-opt {font-family:"Courier New", Courier, monospace !important;padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;} .tg-opt td{font-family:"Courier New", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;} .tg-opt th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;} .tg-opt .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;} .tg-opt .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;} </style> <table class="tg-opt">""" html_end = "</table>" html_body = "" for name, val in names_vals: html_body += "<tr>" html_body += "<td class='tg-left'>{}</td>".format(name) html_body += "<td class='tg-right'>{}</td>".format(val) html_body += "</tr>" self.text.value = html_begin + html_body + html_end self.progress.value = (self.iteration+1) #self.progresstext.value = '0/{}'.format((self.iteration+1)) self.model_show.value = self.model._repr_html_() else: n_exps = exponents(self.fnow, self.current_gradient) if self.iteration - self.p_iter >= 20 * np.random.rand(): a = self.iteration >= self.p_iter * 2.78 b = np.any(n_exps < self.exps) if a or b: self.p_iter = self.iteration print('') if b: self.exps = n_exps print('\r', end=' ') print('{3:} {0:>0{mi}g} {1:> 12e} {2:> 12e}'.format(self.iteration, float(self.fnow), float(self.current_gradient), "{:>8s}".format( self.timestring), mi=self.len_maxiters), end=' ') # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r', sys.stdout.flush() def print_status(self, me, which=None): self.update() t = time.time() seconds = t-self.start #sys.stdout.write(" "*len(self.message)) if t-self._time > 1. 
or seconds < .2: self.print_out(seconds) self._time = t self.iteration += 1 def update(self): self.fnow = self.model.objective_function() if self.model.obj_grads is not None: grad = self.model.obj_grads self.current_gradient = np.dot(grad, grad) else: self.current_gradient = np.nan def finish(self, opt): # pragma: no cover import warnings warnings.warn('Finish now automatic, deprecating', DeprecationWarning) def __exit__(self, type, value, traceback): if self.verbose: self.status = self.opt.status self.stop = time.time() self.model.remove_observer(self) self.print_out(self.stop - self.start) if not self.ipython_notebook: print() print('Runtime: {}'.format("{:>9s}".format(self.timestring))) print('Optimization status: {0}'.format(self.status)) print() elif self.clear: # pragma: no cover self.hor_align.close() else: # pragma: no cover if 'conv' in self.status.lower(): self.progress.bar_style = 'success' elif self.iteration >= self.maxiters: self.progress.bar_style = 'warning' else: self.progress.bar_style = 'danger'
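Since the class above is a context manager that registers itself as a model observer, its intended use (normally inside paramz's own optimize machinery) follows this shape; a rough sketch, where `model` is a paramz model and `opt` is an optimizer object assumed to expose `opt_name` and `status`:

with VerboseOptimization(model, opt, maxiters=1000, verbose=True):
    ...  # run the optimizer here; each model update fires print_status via the observer
# on exit the total runtime and the final optimizer status are printed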
45.709957
236
0.54901
acef95191d5a7f7e06e4a83b82ada8b7f5ca0bef
4,674
py
Python
experiments/baselines/train_bert2gpt2.py
manzar96/emp_chat
2ac08ba9c45d3959816f899a2638dc0953798e10
[ "MIT" ]
null
null
null
experiments/baselines/train_bert2gpt2.py
manzar96/emp_chat
2ac08ba9c45d3959816f899a2638dc0953798e10
[ "MIT" ]
1
2020-12-07T14:57:23.000Z
2020-12-07T14:57:23.000Z
experiments/baselines/train_bert2gpt2.py
manzar96/emp_chat
2ac08ba9c45d3959816f899a2638dc0953798e10
[ "MIT" ]
1
2021-06-21T11:11:13.000Z
2021-06-21T11:11:13.000Z
import math import torch from tqdm import tqdm from torch.optim import Adam from torch.utils.data import DataLoader from transformers import BertTokenizer, GPT2Tokenizer, EncoderDecoderModel,\ EncoderDecoderConfig from core.utils.parser import get_train_parser from core.data.empdataset import EmpatheticDataset from core.data.persona import PersonaChatDataset from core.data.collators import EncoderDecoderTransformerCollatorEmpChat, \ EncoderDecoderTransformerCollatorPersChat from core.utils.transforms import ToTensor from core.trainers import EncoderDecoderTransformerTrainer DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu' print(DEVICE) # get args from cmdline parser = get_train_parser() options = parser.parse_args() # load dataset if options.dataset_name == "empchat": train_dataset = EmpatheticDataset("train", options.max_hist_len) val_dataset = EmpatheticDataset("valid", options.max_hist_len) elif options.dataset_name == "persona": train_dataset = PersonaChatDataset("train", options.max_hist_len) val_dataset = PersonaChatDataset("valid", options.max_hist_len) else: raise NotImplementedError # make transforms using only bert tokenizer! bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') # CLS token will work as BOS token bert_tokenizer.bos_token = bert_tokenizer.cls_token # SEP token will work as EOS token bert_tokenizer.eos_token = bert_tokenizer.sep_token # make sure GPT2 appends EOS in begin and end def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): outputs = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] return outputs GPT2Tokenizer.build_inputs_with_special_tokens = \ build_inputs_with_special_tokens gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2") # set pad_token_id to unk_token_id -> be careful here as unk_token_id == eos_token_id == bos_token_id gpt2_tokenizer.pad_token = gpt2_tokenizer.unk_token # TODO: here pad_token_id == unk_token_id == eos_token_id == bos_token_id, is # this correct??? # we don't use map on the dataset! so transforms will be [] and HuggingFace # tokenizers will be applied train_dataset.tokenizer_hist = bert_tokenizer train_dataset.tokenizer_ans = gpt2_tokenizer val_dataset.tokenizer_hist = bert_tokenizer val_dataset.tokenizer_ans = gpt2_tokenizer # load data if options.dataset_name == "empchat": collator_fn = EncoderDecoderTransformerCollatorEmpChat(device='cpu') elif options.dataset_name == "persona": collator_fn = EncoderDecoderTransformerCollatorPersChat(device='cpu') train_loader = DataLoader(train_dataset, batch_size=options.batch_size, drop_last=False, shuffle=True, collate_fn=collator_fn) val_loader = DataLoader(val_dataset, batch_size=options.batch_size, drop_last=False, shuffle=True, collate_fn=collator_fn) # create model # so here I should probably do it via the config in order to add cross_attention, # and save the model with: save_pretrained("mymodel") and then load it!!!
if options.modelckpt is not None: model = EncoderDecoderModel.from_pretrained(options.modelckpt) else: model = EncoderDecoderModel.from_encoder_decoder_pretrained( 'bert-base-uncased', 'gpt2') model.to(DEVICE) model.config.decoder_start_token_id = gpt2_tokenizer.bos_token_id model.config.eos_token_id = gpt2_tokenizer.eos_token_id model.config.max_length = 142 model.config.min_length = 56 # #freeze some layers: # for i in range(0,12): # for p in model.encoder.encoder.layer[i].parameters(): # if p.requires_grad: # p.requires_grad = False # params and optimizer numparams = sum([p.numel() for p in model.parameters()]) train_numparams = sum([p.numel() for p in model.parameters() if p.requires_grad]) print('Total Parameters: {}'.format(numparams)) print('Trainable Parameters: {}'.format(train_numparams)) optimizer = Adam( [p for p in model.parameters() if p.requires_grad], lr=options.lr, weight_decay=1e-6) if options.optimckpt is not None: state_dict = torch.load(options.optimckpt, map_location='cpu') optimizer.load_state_dict(state_dict) # create trainer trainer = EncoderDecoderTransformerTrainer(model=model, optimizer=optimizer, patience=5, scheduler=None, checkpoint_dir=options.ckpt, device=DEVICE) # train model trainer.fit(train_loader, val_loader, epochs=options.epochs)
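Once trained, inference with this BERT-encoder/GPT2-decoder pairing follows the usual Hugging Face pattern; a hedged sketch (the prompt text and decoding parameters are hypothetical):

input_ids = bert_tokenizer("How are you feeling today?", return_tensors="pt").input_ids.to(DEVICE)
output_ids = model.generate(input_ids, max_length=64, num_beams=4)  # beam search is one reasonable choice
print(gpt2_tokenizer.decode(output_ids[0], skip_special_tokens=True))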
37.693548
101
0.735131
acef969114c31054c1d3d3716a45830ff961580d
3,081
py
Python
datadog_lambda/tags.py
demolitionmode/datadog-lambda-python
6a236e2e1686b4f8ed35b15c8cddb315b62143b2
[ "Apache-2.0" ]
null
null
null
datadog_lambda/tags.py
demolitionmode/datadog-lambda-python
6a236e2e1686b4f8ed35b15c8cddb315b62143b2
[ "Apache-2.0" ]
null
null
null
datadog_lambda/tags.py
demolitionmode/datadog-lambda-python
6a236e2e1686b4f8ed35b15c8cddb315b62143b2
[ "Apache-2.0" ]
null
null
null
import sys from platform import python_version_tuple from datadog_lambda import __version__ from datadog_lambda.cold_start import get_cold_start_tag def _format_dd_lambda_layer_tag(): """ Formats the dd_lambda_layer tag, e.g., 'dd_lambda_layer:datadog-python27_1' """ runtime = "python{}{}".format(sys.version_info[0], sys.version_info[1]) return "dd_lambda_layer:datadog-{}_{}".format(runtime, __version__) def tag_dd_lambda_layer(tags): """ Used by lambda_metric to insert the dd_lambda_layer tag """ dd_lambda_layer_tag = _format_dd_lambda_layer_tag() if tags: return tags + [dd_lambda_layer_tag] else: return [dd_lambda_layer_tag] def parse_lambda_tags_from_arn(lambda_context): """Generate the list of lambda tags based on the data in the arn Args: lambda_context: Aws lambda context object ex: lambda_context.arn = arn:aws:lambda:us-east-1:123597598159:function:my-lambda:1 """ # Set up flag for extra testing to distinguish between a version or alias hasAlias = False # Cap the number of times to split split_arn = lambda_context.invoked_function_arn.split(":") if len(split_arn) > 7: hasAlias = True _, _, _, region, account_id, _, function_name, alias = split_arn else: _, _, _, region, account_id, _, function_name = split_arn # Add the standard tags to a list tags = [ "region:{}".format(region), "account_id:{}".format(account_id), "functionname:{}".format(function_name), ] # Check if we have a version or alias if hasAlias: # If $Latest, drop the $ for datadog tag convention. A lambda alias can't start with $ if alias.startswith("$"): alias = alias[1:] # Versions are numeric. Aliases need the executed version tag elif not check_if_number(alias): tags.append("executedversion:{}".format(lambda_context.function_version)) # create resource tag with function name and alias/version resource = "resource:{}:{}".format(function_name, alias) else: # Resource is only the function name otherwise resource = "resource:{}".format(function_name) tags.append(resource) return tags def get_runtime_tag(): """Get the runtime tag from the current Python version """ major_version, minor_version, _ = python_version_tuple() return "runtime:python{major}.{minor}".format( major=major_version, minor=minor_version ) def get_enhanced_metrics_tags(lambda_context): """Get the list of tags to apply to enhanced metrics """ return parse_lambda_tags_from_arn(lambda_context) + [ get_cold_start_tag(), "memorysize:{}".format(lambda_context.memory_limit_in_mb), get_runtime_tag(), ] def check_if_number(alias): """ Check if the alias is a version or number. Python 2 has no easy way to test this like Python 3 """ try: float(alias) return True except ValueError: return False
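A quick illustration of the ARN parsing above, using a stand-in context object (the ARN, version and memory values are made up):

class FakeContext:
    invoked_function_arn = "arn:aws:lambda:us-east-1:123456789012:function:my-lambda:my-alias"
    function_version = "7"
    memory_limit_in_mb = 256

print(parse_lambda_tags_from_arn(FakeContext()))
# -> ['region:us-east-1', 'account_id:123456789012', 'functionname:my-lambda',
#     'executedversion:7', 'resource:my-lambda:my-alias']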
31.121212
102
0.673807
acef96c5cb6991a7a7dacffb703e1d249c0149c1
1,642
py
Python
components/collector/src/source_collectors/api_source_collectors/cobertura_jenkins_plugin.py
dicksnel/quality-time
4c04f8852aa97175f2bca2b5c5391b3e09b657af
[ "Apache-2.0" ]
1
2021-02-22T07:53:36.000Z
2021-02-22T07:53:36.000Z
components/collector/src/source_collectors/api_source_collectors/cobertura_jenkins_plugin.py
greckko/quality-time
f5f0b86505b49adf1b5f3b6bf61cbe33d78d3dca
[ "Apache-2.0" ]
338
2020-10-29T04:28:09.000Z
2022-02-22T04:09:33.000Z
components/collector/src/source_collectors/api_source_collectors/cobertura_jenkins_plugin.py
dicksnel/quality-time
4c04f8852aa97175f2bca2b5c5391b3e09b657af
[ "Apache-2.0" ]
1
2022-01-06T04:07:03.000Z
2022-01-06T04:07:03.000Z
"""Cobertura Jenkins plugin coverage report collector.""" from abc import ABC from base_collectors import JenkinsPluginCollector, JenkinsPluginSourceUpToDatenessCollector from source_model import SourceMeasurement, SourceResponses class CoberturaJenkinsPluginBaseClass(JenkinsPluginCollector, ABC): # skipcq: PYL-W0223 """Base class for Cobertura Jenkins plugin collectors.""" plugin = "cobertura" depth = 2 class CoberturaJenkinsPluginCoverageBaseClass(CoberturaJenkinsPluginBaseClass): """Base class for Cobertura Jenkins plugin coverage collectors.""" coverage_type = "subclass responsibility" async def _parse_source_responses(self, responses: SourceResponses) -> SourceMeasurement: elements = (await responses[0].json())["results"]["elements"] coverage = [element for element in elements if element["name"].lower() == self.coverage_type][0] total = int(coverage["denominator"]) return SourceMeasurement(value=str(total - int(coverage["numerator"])), total=str(total)) class CoberturaJenkinsPluginUncoveredLines(CoberturaJenkinsPluginCoverageBaseClass): """Collector for Cobertura Jenkins plugin uncovered lines.""" coverage_type = "lines" class CoberturaJenkinsPluginUncoveredBranches(CoberturaJenkinsPluginCoverageBaseClass): """Collector for Cobertura Jenkins plugin uncovered branches.""" coverage_type = "conditionals" class CoberturaJenkinsPluginSourceUpToDateness( CoberturaJenkinsPluginBaseClass, JenkinsPluginSourceUpToDatenessCollector): """Collector for the up to dateness of the Cobertura Jenkins plugin coverage report."""
38.186047
104
0.778319
acef974dd872d509c0d7f48fe147407c9ede488e
913
py
Python
tests/test_team_test_creator.py
enterstudio/the-blue-alliance
b53f752fe1f059b4b6f91c841e1865a6c6b81268
[ "MIT" ]
null
null
null
tests/test_team_test_creator.py
enterstudio/the-blue-alliance
b53f752fe1f059b4b6f91c841e1865a6c6b81268
[ "MIT" ]
null
null
null
tests/test_team_test_creator.py
enterstudio/the-blue-alliance
b53f752fe1f059b4b6f91c841e1865a6c6b81268
[ "MIT" ]
null
null
null
import datetime

import unittest2

from google.appengine.ext import ndb
from google.appengine.ext import testbed

from helpers.team.team_test_creator import TeamTestCreator
from models.team import Team


class TestEventTeamCreator(unittest2.TestCase):
    def setUp(self):
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        ndb.get_context().clear_cache()  # Prevent data from leaking between tests

        self.testbed.init_taskqueue_stub(root_path=".")

        self.teams = []

    def tearDown(self):
        for team in self.teams:
            team.key.delete()

        self.testbed.deactivate()

    def test_creates(self):
        self.teams.extend(TeamTestCreator.createSixTeams())
        teams = Team.query().order(Team.team_number).fetch(60)
        self.assertEqual(len(teams), 6)
26.852941
82
0.694414
acef9776d364965a86cbf64384cd391b41646089
2,301
py
Python
data/extract_points.py
Alex-Girard/D3MapOverlays
c3228545951af0e49745e884e21a18a304bb9676
[ "MIT" ]
null
null
null
data/extract_points.py
Alex-Girard/D3MapOverlays
c3228545951af0e49745e884e21a18a304bb9676
[ "MIT" ]
null
null
null
data/extract_points.py
Alex-Girard/D3MapOverlays
c3228545951af0e49745e884e21a18a304bb9676
[ "MIT" ]
null
null
null
#!/usr/bin/env python

from os import listdir
from os.path import isfile, join
import sys, getopt
import json
import pandas as pd


def extractProperties(result, properties):
    for key in properties.keys():
        result[key] = properties[key]
    return result


def extractGeometry(geometry):
    result = {}
    for key in geometry.keys():
        if key != 'properties':
            result[key] = geometry[key]
        else:
            extractProperties(result, geometry[key])
    return result


def extractDataframe(inputfile):
    df = pd.DataFrame()
    with open(inputfile, "r") as f:
        data = json.loads(f.read())
        objects = data['objects']
        for key in objects.keys():
            if 'geometries' in objects[key]:
                for geometry in objects[key]['geometries']:
                    row = pd.DataFrame([extractGeometry(geometry)])
                    df = df.append(row)
            else:
                print(f.name + ' does not have geometries')
    return df


def extractToCSV(inputDir, outputDir, cols):
    for f in listdir(inputDir):
        fullFile = join(inputDir, f)
        if isfile(fullFile) and not isfile(join(outputDir, f)):
            print('Extracting data from "' + f + '" ...')
            df = extractDataframe(fullFile)
            if len(cols) > 0:
                if all(x in df for x in cols):
                    df.to_csv(join(outputDir, f), columns=cols, index=False)
                else:
                    print('Columns ' + str(cols) + ' were not found in "' + f + '" ...')
            else:
                df.to_csv(join(outputDir, f), index=False)
        else:
            print('Skipping "' + f + '" ...')


def main(argv):
    usage = 'extract_points.py -i <inputDir> -o <outputDir> [-d] <column_1> <column_2> ... <column_n>'
    inputDir = ''
    outputDir = ''
    cols = None
    try:
        opts, args = getopt.getopt(argv, "dhi:o:", ["inputDir=", "outputDir="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt in ("-i", "--inputDir"):
            inputDir = arg
        elif opt in ("-o", "--outputDir"):
            outputDir = arg
        elif opt in ("-d", "--default"):
            cols = ['Latitude', 'Longitude', 'FunctDay1']
    # Use the -d default columns when given; otherwise fall back to the
    # positional column arguments.
    extractToCSV(inputDir, outputDir, cols if cols is not None else args)


if __name__ == "__main__":
    main(sys.argv[1:])
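# Example invocations (editor's sketch; directory names are hypothetical):
#   ./extract_points.py -i topojson_in -o csv_out Latitude Longitude
#   ./extract_points.py -i topojson_in -o csv_out -d    # use the built-in default columns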
29.5
102
0.581052
acef97b8bcd6d27317d3d0956750713243f31b1c
6,155
py
Python
ofxToolkit.py
jscherer26/Icarra
5bc8b298ae21dcde7e8e2253b9ed9db95fd0d164
[ "BSD-3-Clause" ]
1
2021-11-09T04:36:57.000Z
2021-11-09T04:36:57.000Z
ofxToolkit.py
jscherer26/Icarra
5bc8b298ae21dcde7e8e2253b9ed9db95fd0d164
[ "BSD-3-Clause" ]
null
null
null
ofxToolkit.py
jscherer26/Icarra
5bc8b298ae21dcde7e8e2253b9ed9db95fd0d164
[ "BSD-3-Clause" ]
2
2020-03-28T02:55:19.000Z
2021-11-09T04:37:08.000Z
# Copyright (c) 2006-2010, Jesse Liesch
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the author nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL JESSE LIESCH BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import datetime
import httplib, urllib2, re

ofxErrors = {
    2000: "General error",
    2003: "Account not found",
    15500: "Signon invalid"
}

from brokerage import *
import appGlobal


def logOfx(ofx, input):
    # Remove userid, userpass
    p = re.compile("<USERID>\\s*([^< ]*)\\s*<")
    ofx = p.sub("<USERID>x\n<", ofx)
    p = re.compile("<USERPASS>\\s*([^< ]*)\\s*<")
    ofx = p.sub("<USERPASS>x\n<", ofx)
    p = re.compile("<ACCTID>\\s*([^< ]*)\\s*<")
    ofx = p.sub("<ACCTID>x\n<", ofx)
    ofx = ofx.replace("\r\n", "\n")

    # Log
    app = appGlobal.getApp()
    if app.ofxDebugFrame:
        app.ofxDebugFrame.add(ofx, input)


def generateOfxHeader():
    return "OFXHEADER:100\nDATA:OFXSGML\nVERSION:102\nSECURITY:NONE\nENCODING:USASCII\nCHARSET:1252\nCOMPRESSION:NONE\nOLDFILEUID:NONE\nNEWFILEUID:0F7418F4-27DD-4ED1-968E-60D792642CA9\n\n<OFX>\n"


def generateDate():
    # TODO: check timezone...
    # TODO: is [-8:PST] necessary?
    #return "20070101"
    now = datetime.datetime.utcnow()
    return now.strftime("%Y%m%d%H%M%S.000")


def generateInvestRequest(accountId, b, dtstart=False, dtend=False):
    ret = "<INVSTMTMSGSRQV1>\n<INVSTMTTRNRQ>\n<TRNUID>8CCCCD65-13AF-4464-8990-5A0E108ACA3E\n<CLTCOOKIE>4\n<INVSTMTRQ>\n<INVACCTFROM>\n<BROKERID>" + b.getBrokerId() + "\n"
    if accountId:
        ret += "<ACCTID>" + str(accountId) + "\n"
    ret += "</INVACCTFROM>\n<INCTRAN>\n"
    if dtstart:
        #ret += "<DTSTART>" + dtstart.strftime("%Y%m%d%H%M%S") + "\n"
        ret += "<DTEND>" + dtend.strftime("%Y%m%d%H%M%S") + "\n"
    else:
        ret += "<DTSTART>19000101\n"
    ret += "<INCLUDE>Y\n</INCTRAN>\n<INCOO>Y\n<INCPOS>\n<DTASOF>" + generateDate() + "\n"
    ret += "<INCLUDE>Y\n</INCPOS>\n<INCBAL>Y\n</INVSTMTRQ>\n</INVSTMTTRNRQ>\n</INVSTMTMSGSRQV1>\n"
    return ret


def generateAccountRequest():
    return "<SIGNUPMSGSRQV1>\n<ACCTINFOTRNRQ>\n<TRNUID>C0A84BC5-6332-4674-ACEF-6149F15423B5\n<CLTCOOKIE>4\n<ACCTINFORQ>\n<DTACCTUP>19700101000000\n</ACCTINFORQ>\n</ACCTINFOTRNRQ>\n</SIGNUPMSGSRQV1>\n"


def generateOfxFooter():
    return "</OFX>"


def generateSignon(userId, password, b):
    ret = "<SIGNONMSGSRQV1>\n<SONRQ>\n<DTCLIENT>" + generateDate() + "\n"
    ret += "<USERID>" + str(userId) + "\n"
    ret += "<USERPASS>" + str(password) + "\n"
    ret += "<LANGUAGE>ENG\n"
    if b.getOrg() != "":
        ret += "<FI>\n<ORG>" + b.getOrg() + "\n"
        if b.getFid() != "":
            ret += "<FID>" + b.getFid() + "\n"
        ret += "</FI>\n"
    #ret += "<APPID>QWIN\n<APPVER>1500\n</SONRQ>\n</SIGNONMSGSRQV1>\n"
    ret += "<APPID>QWIN\n<APPVER>1900\n</SONRQ>\n</SIGNONMSGSRQV1>\n"
    return ret


def queryServer(url, query):
    #print "QUERY"
    #print query

    # If not windows, replace \n with \r\n
    if not appGlobal.getApp().isWindows:
        query = query.replace("\n", "\r\n")

    logOfx(query, input=False)

    request = urllib2.Request(url, query,
                              {"Content-type": "application/x-ofx",
                               "Accept": "*/*, application/x-ofx"})

    try:
        f = urllib2.urlopen(request)
    except urllib2.HTTPError, e:
        logOfx(e.read(), input=True)
        if hasattr(e, "reason"):
            return "" + e.reason
        else:
            return ""
    except urllib2.URLError, e:
        if e.reason[0] == 8:
            return "could not connect"
        else:
            return ""

    data = f.read()
    logOfx(data, input=True)

    # Check for error code
    p = re.compile("<CODE>\\s*([0-9]*)\\s*<")
    m = p.search(data)
    if m:
        code = int(m.group(1))
        for error in ofxErrors:
            if code == error:
                return "Invalid login"
    else:
        return ""

    return data


def getAccount(username, password, b):
    # First send an OFX query to get account id
    query = generateOfxHeader() + generateSignon(username, password, b) + generateAccountRequest() + generateOfxFooter()
    #print "QUERY"
    #print query
    result = queryServer(b.getUrl(), query)
    #print "RESULT"
    #print result

    if result == "Invalid login":
        print "INVALID RESULT"
        print result
        return (False, "Invalid login")

    # Next regex to find acctid
    p = re.compile("<ACCTID>\\s*([^< ]*)\\s*<")
    m = p.findall(result)

    # If only one account, use it
    if len(m) == 1:
        return (True, m[0])

    if m:
        return (True, m)
    else:
        print "INVALID RESULT"
        print result
        return (False, "Account not found")


def getOfx(username, password, brokerage, account, status=False):
    if account == "" or not account:
        return ""

    if status:
        status.setStatus("Downloading transaction history", 20)

    # Download for every account specified
    accounts = account.split(",")
    response = ""
    for a in accounts:
        a = a.strip()
        query = generateOfxHeader() + generateSignon(username, password, brokerage) + generateInvestRequest(a, brokerage) + generateOfxFooter()
        response += queryServer(brokerage.getUrl(), query)

    return response
31.726804
197
0.68026
acef97e285b6b2dbfb5b3a7331e2844d9a3a8f65
2,238
py
Python
utils/preprocess_binary_word_vectors.py
Fantoni0/nmt-keras
1d19444d175a7761fdb5f4b4802bdec75b7ab984
[ "MIT" ]
16
2019-03-07T19:33:09.000Z
2021-01-07T02:10:09.000Z
utils/preprocess_binary_word_vectors.py
Fantoni0/nmt-keras
1d19444d175a7761fdb5f4b4802bdec75b7ab984
[ "MIT" ]
null
null
null
utils/preprocess_binary_word_vectors.py
Fantoni0/nmt-keras
1d19444d175a7761fdb5f4b4802bdec75b7ab984
[ "MIT" ]
8
2019-05-15T10:49:18.000Z
2021-04-12T03:28:47.000Z
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import argparse
from os.path import basename, dirname

# Preprocess pretrained binary vectors
# and stores them in a suitable format (.npy)


def word2vec2npy(v_path, base_path_save, dest_filename):
    """
    Preprocess pretrained binary vectors and stores them in a suitable format.
    :param v_path: Path to the binary vectors file.
    :param base_path_save: Path where the formatted vectors will be stored.
    :param dest_filename: Filename of the formatted vectors.
    """
    word_vecs = dict()
    print("Loading vectors from %s" % v_path)

    with open(v_path, "rb") as f:
        header = f.readline()
        vocab_size, layer1_size = map(int, header.split())
        binary_len = np.dtype('float32').itemsize * layer1_size
        i = 0
        print("Vector length:", layer1_size)
        for _ in range(vocab_size):
            word = []
            while True:
                ch = f.read(1)
                if ch == ' ':
                    word = ''.join(word)
                    break
                if ch != '\n':
                    word.append(ch)
            word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32')
            i += 1
            if i % 1000 == 0:
                print("Processed %d vectors (%.2f %%)\r" % (i, 100 * float(i) / vocab_size),)

    # Store dict
    print("")
    print("Saving word vectors in %s" % (base_path_save + '/' + dest_filename + '.npy'))
    np.save(base_path_save + '/' + dest_filename + '.npy', word_vecs)
    print("")


def parse_args():
    parser = argparse.ArgumentParser("Preprocess pre-trained word embeddings.")
    parser.add_argument("-v", "--vectors", required=True,
                        help="Pre-trained word embeddings file.",
                        default="GoogleNews-vectors-negative300.bin")
    parser.add_argument("-d", "--destination", required=True,
                        help="Destination file.", default='word2vec.en')
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    dest_file = basename(args.destination)
    base_path = dirname(args.destination)
    word2vec2npy(args.vectors, base_path, dest_file)
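# Editor's illustrative round trip (file names are hypothetical):
#   python preprocess_binary_word_vectors.py -v GoogleNews-vectors-negative300.bin -d word2vec.en
# np.save wraps the dict in a 0-d object array, so it can be recovered with
# (allow_pickle is required on newer NumPy versions):
#   word_vecs = np.load("word2vec.en.npy", allow_pickle=True).item()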
36.096774
110
0.597408
acef97f6d7716ea722e4fc58a3101fa61a6b1939
499
py
Python
dashboard/migrations/0015_auto_20200415_0855.py
LoganRamsey/project-101-node
0cb88da5c0e67d4677910bbb2971baf374a32672
[ "Unlicense" ]
null
null
null
dashboard/migrations/0015_auto_20200415_0855.py
LoganRamsey/project-101-node
0cb88da5c0e67d4677910bbb2971baf374a32672
[ "Unlicense" ]
6
2021-03-19T03:15:09.000Z
2021-06-10T19:11:10.000Z
dashboard/migrations/0015_auto_20200415_0855.py
LoganRamsey/Online-Personal-Dashboard
0cb88da5c0e67d4677910bbb2971baf374a32672
[ "Unlicense" ]
null
null
null
# Generated by Django 3.0.3 on 2020-04-15 12:55

import datetime
from django.db import migrations, models
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('dashboard', '0014_merge_20200415_0855'),
    ]

    operations = [
        migrations.AlterField(
            model_name='todo',
            name='due',
            field=models.DateTimeField(default=datetime.datetime(2020, 4, 22, 12, 55, 41, 235224, tzinfo=utc)),
        ),
    ]
23.761905
111
0.637275
acef98a5f37dacb4695a0f452561459b53641ca3
1,046
py
Python
setup.py
chalupaul/octavia_chicken_checker
b749107664cb5ed407963d35a99c28fda7544fd4
[ "MIT" ]
null
null
null
setup.py
chalupaul/octavia_chicken_checker
b749107664cb5ed407963d35a99c28fda7544fd4
[ "MIT" ]
null
null
null
setup.py
chalupaul/octavia_chicken_checker
b749107664cb5ed407963d35a99c28fda7544fd4
[ "MIT" ]
1
2019-09-04T22:43:14.000Z
2019-09-04T22:43:14.000Z
from setuptools import setup, find_packages
from occ.core import version

VERSION = version.get_version()

f = open('README.md', 'r')
LONG_DESCRIPTION = f.read()
f.close()

setup(
    name='occ',
    version=VERSION,
    description='Octavia Chicken Checker looks for abandoned Octavia load balancer artifacts, amphoras, and so forth. Optionally it will clean them up as well.',
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    author='Cody Bunch',
    author_email='cody.bunch@rackspace.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 3',
    ],
    url='http://github.com/bunchc/octvia_chicken_checker/',
    packages=find_packages(exclude=['ez_setup', 'tests*']),
    package_data={'occ': ['templates/*']},
    include_package_data=True,
    install_requires=[
        'cement==3.0.4',
        'jinja2',
        'pyyaml',
        'colorlog',
    ],
    entry_points="""
        [console_scripts]
        occ = occ.main:main
    """,
)
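# Editor's illustrative install-and-run sketch (run from the project root;
# assumes the package layout this setup.py describes):
#   pip install .
#   occ --help    # the console script declared under entry_points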
27.526316
161
0.652008
acef995f3f05a0c32733547a3be617b29ce2cfcc
13,041
py
Python
venv/Lib/site-packages/numpy/core/tests/test_longdouble.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
603
2020-12-23T13:49:32.000Z
2022-03-31T23:38:03.000Z
venv/Lib/site-packages/numpy/core/tests/test_longdouble.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
387
2020-12-15T14:54:04.000Z
2022-03-31T07:00:21.000Z
bot/lib/python3.7/site-packages/numpy/core/tests/test_longdouble.py
carlosrh18/DavinciBot
d73a6b7f68d7bab25d134d3f85c6b63a86c206c5
[ "MIT" ]
60
2020-07-22T14:53:10.000Z
2022-03-23T10:17:59.000Z
import warnings
import pytest

import numpy as np
from numpy.testing import (
    assert_, assert_equal, assert_raises, assert_warns, assert_array_equal,
    temppath,
    )
from numpy.core.tests._locales import CommaDecimalPointLocale

LD_INFO = np.finfo(np.longdouble)
longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)


_o = 1 + LD_INFO.eps
string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o)))
del _o


def test_scalar_extraction():
    """Confirm that extracting a value doesn't convert to python float"""
    o = 1 + LD_INFO.eps
    a = np.array([o, o, o])
    assert_equal(a[1], o)


# Conversions string -> long double

# 0.1 not exactly representable in base 2 floating point.
repr_precision = len(repr(np.longdouble(0.1)))
# +2 from macro block starting around line 842 in scalartypes.c.src.
@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision,
                    reason="repr precision not enough to show eps")
def test_repr_roundtrip():
    # We will only see eps in repr if within printing precision.
    o = 1 + LD_INFO.eps
    assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o))


@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_repr_roundtrip_bytes():
    o = 1 + LD_INFO.eps
    assert_equal(np.longdouble(repr(o).encode("ascii")), o)


@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes))
def test_array_and_stringlike_roundtrip(strtype):
    """
    Test that string representations of long-double roundtrip both
    for array casting and scalar coercion, see also gh-15608.
    """
    o = 1 + LD_INFO.eps

    if strtype in (np.bytes_, bytes):
        o_str = strtype(repr(o).encode("ascii"))
    else:
        o_str = strtype(repr(o))

    # Test that `o` is correctly coerced from the string-like
    assert o == np.longdouble(o_str)

    # Test that arrays also roundtrip correctly:
    o_strarr = np.asarray([o] * 3, dtype=strtype)
    assert (o == o_strarr.astype(np.longdouble)).all()

    # And array coercion and casting to string give the same as scalar repr:
    assert (o_strarr == o_str).all()
    assert (np.asarray([o] * 3).astype(strtype) == o_str).all()


def test_bogus_string():
    assert_raises(ValueError, np.longdouble, "spam")
    assert_raises(ValueError, np.longdouble, "1.0 flub")


@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_fromstring():
    o = 1 + LD_INFO.eps
    s = (" " + repr(o))*5
    a = np.array([o]*5)
    assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a,
                 err_msg="reading '%s'" % s)


def test_fromstring_complex():
    for ctype in ["complex", "cdouble", "cfloat"]:
        # Check spacing between separator
        assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype),
                     np.array([1., 2., 3., 4.]))
        # Real component not specified
        assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype),
                     np.array([1.j, -2.j, 3.j, 40.j]))
        # Both components specified
        assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype),
                     np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
        # Spaces at wrong places
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","),
                         np.array([1.]))
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","),
                         np.array([1.]))
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","),
                         np.array([1.]))
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1+j", dtype=ctype, sep=","),
                         np.array([1.]))
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1+", dtype=ctype, sep=","),
                         np.array([1.]))
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","),
                         np.array([1j]))


def test_fromstring_bogus():
    with assert_warns(DeprecationWarning):
        assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
                     np.array([1., 2., 3.]))


def test_fromstring_empty():
    with assert_warns(DeprecationWarning):
        assert_equal(np.fromstring("xxxxx", sep="x"),
                     np.array([]))


def test_fromstring_missing():
    with assert_warns(DeprecationWarning):
        assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
                     np.array([1]))


class TestFileBased:

    ldbl = 1 + LD_INFO.eps
    tgt = np.array([ldbl]*5)
    out = ''.join([repr(t) + '\n' for t in tgt])

    def test_fromfile_bogus(self):
        with temppath() as path:
            with open(path, 'wt') as f:
                f.write("1. 2. 3. flop 4.\n")

            with assert_warns(DeprecationWarning):
                res = np.fromfile(path, dtype=float, sep=" ")
        assert_equal(res, np.array([1., 2., 3.]))

    def test_fromfile_complex(self):
        for ctype in ["complex", "cdouble", "cfloat"]:
            # Check spacing between separator and only real component specified
            with temppath() as path:
                with open(path, 'wt') as f:
                    f.write("1, 2 , 3 ,4\n")

                res = np.fromfile(path, dtype=ctype, sep=",")
            assert_equal(res, np.array([1., 2., 3., 4.]))

            # Real component not specified
            with temppath() as path:
                with open(path, 'wt') as f:
                    f.write("1j, -2j, 3j, 4e1j\n")

                res = np.fromfile(path, dtype=ctype, sep=",")
            assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j]))

            # Both components specified
            with temppath() as path:
                with open(path, 'wt') as f:
                    f.write("1+1j,2-2j, -3+3j, -4e1+4j\n")

                res = np.fromfile(path, dtype=ctype, sep=",")
            assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))

            # Spaces at wrong places
            with temppath() as path:
                with open(path, 'wt') as f:
                    f.write("1+2 j,3\n")

                with assert_warns(DeprecationWarning):
                    res = np.fromfile(path, dtype=ctype, sep=",")
            assert_equal(res, np.array([1.]))

            # Spaces at wrong places
            with temppath() as path:
                with open(path, 'wt') as f:
                    f.write("1+ 2j,3\n")

                with assert_warns(DeprecationWarning):
                    res = np.fromfile(path, dtype=ctype, sep=",")
            assert_equal(res, np.array([1.]))

            # Spaces at wrong places
            with temppath() as path:
                with open(path, 'wt') as f:
                    f.write("1 +2j,3\n")

                with assert_warns(DeprecationWarning):
                    res = np.fromfile(path, dtype=ctype, sep=",")
            assert_equal(res, np.array([1.]))

            # Spaces at wrong places
            with temppath() as path:
                with open(path, 'wt') as f:
                    f.write("1+j\n")

                with assert_warns(DeprecationWarning):
                    res = np.fromfile(path, dtype=ctype, sep=",")
            assert_equal(res, np.array([1.]))

            # Spaces at wrong places
            with temppath() as path:
                with open(path, 'wt') as f:
                    f.write("1+\n")

                with assert_warns(DeprecationWarning):
                    res = np.fromfile(path, dtype=ctype, sep=",")
            assert_equal(res, np.array([1.]))

            # Spaces at wrong places
            with temppath() as path:
                with open(path, 'wt') as f:
                    f.write("1j+1\n")

                with assert_warns(DeprecationWarning):
                    res = np.fromfile(path, dtype=ctype, sep=",")
            assert_equal(res, np.array([1.j]))

    @pytest.mark.skipif(string_to_longdouble_inaccurate,
                        reason="Need strtold_l")
    def test_fromfile(self):
        with temppath() as path:
            with open(path, 'wt') as f:
                f.write(self.out)
            res = np.fromfile(path, dtype=np.longdouble, sep="\n")
        assert_equal(res, self.tgt)

    @pytest.mark.skipif(string_to_longdouble_inaccurate,
                        reason="Need strtold_l")
    def test_genfromtxt(self):
        with temppath() as path:
            with open(path, 'wt') as f:
                f.write(self.out)
            res = np.genfromtxt(path, dtype=np.longdouble)
        assert_equal(res, self.tgt)

    @pytest.mark.skipif(string_to_longdouble_inaccurate,
                        reason="Need strtold_l")
    def test_loadtxt(self):
        with temppath() as path:
            with open(path, 'wt') as f:
                f.write(self.out)
            res = np.loadtxt(path, dtype=np.longdouble)
        assert_equal(res, self.tgt)

    @pytest.mark.skipif(string_to_longdouble_inaccurate,
                        reason="Need strtold_l")
    def test_tofile_roundtrip(self):
        with temppath() as path:
            self.tgt.tofile(path, sep=" ")
            res = np.fromfile(path, dtype=np.longdouble, sep=" ")
        assert_equal(res, self.tgt)


# Conversions long double -> string


def test_repr_exact():
    o = 1 + LD_INFO.eps
    assert_(repr(o) != '1')


@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
                    reason="Need strtold_l")
def test_format():
    o = 1 + LD_INFO.eps
    assert_("{0:.40g}".format(o) != '1')


@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
                    reason="Need strtold_l")
def test_percent():
    o = 1 + LD_INFO.eps
    assert_("%.40g" % o != '1')


@pytest.mark.skipif(longdouble_longer_than_double,
                    reason="array repr problem")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
                    reason="Need strtold_l")
def test_array_repr():
    o = 1 + LD_INFO.eps
    a = np.array([o])
    b = np.array([1], dtype=np.longdouble)
    if not np.all(a != b):
        raise ValueError("precision loss creating arrays")
    assert_(repr(a) != repr(b))


#
# Locale tests: scalar types formatting should be independent of the locale
#

class TestCommaDecimalPointLocale(CommaDecimalPointLocale):

    def test_repr_roundtrip_foreign(self):
        o = 1.5
        assert_equal(o, np.longdouble(repr(o)))

    def test_fromstring_foreign_repr(self):
        f = 1.234
        a = np.fromstring(repr(f), dtype=float, sep=" ")
        assert_equal(a[0], f)

    def test_fromstring_best_effort_float(self):
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
                         np.array([1.]))

    def test_fromstring_best_effort(self):
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
                         np.array([1.]))

    def test_fromstring_foreign(self):
        s = "1.234"
        a = np.fromstring(s, dtype=np.longdouble, sep=" ")
        assert_equal(a[0], np.longdouble(s))

    def test_fromstring_foreign_sep(self):
        a = np.array([1, 2, 3, 4])
        b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",")
        assert_array_equal(a, b)

    def test_fromstring_foreign_value(self):
        with assert_warns(DeprecationWarning):
            b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
            assert_array_equal(b[0], 1)


@pytest.mark.parametrize("int_val", [
    # cases discussed in gh-10723
    # and gh-9968
    2 ** 1024, 0])
def test_longdouble_from_int(int_val):
    # for issue gh-9968
    str_val = str(int_val)
    # we'll expect a RuntimeWarning on platforms
    # with np.longdouble equivalent to np.double
    # for large integer input
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', RuntimeWarning)
        # can be inf==inf on some platforms
        assert np.longdouble(int_val) == np.longdouble(str_val)
        # we can't directly compare the int and
        # max longdouble value on all platforms
        if np.allclose(np.finfo(np.longdouble).max,
                       np.finfo(np.double).max) and w:
            assert w[0].category is RuntimeWarning


@pytest.mark.parametrize("bool_val", [
    True, False])
def test_longdouble_from_bool(bool_val):
    assert np.longdouble(bool_val) == np.longdouble(int(bool_val))
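# Editor's note (illustrative): this suite is collected by pytest, e.g.
#   pytest numpy/core/tests/test_longdouble.py -k "fromstring"
# (the path is the in-tree location recorded above; adjust for an installed NumPy).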
35.245946
87
0.582547
acef997b701633920fac5a32f32efd4a6a0b551a
10,942
py
Python
dist/weewx-4.6.2/examples/lowBattery.py
Zilched/docker-weewx
406b32c3b40563a99a09a76b9b091700c7e46910
[ "Apache-2.0" ]
1
2016-03-17T11:39:45.000Z
2016-03-17T11:39:45.000Z
dist/weewx-4.6.2/examples/lowBattery.py
mitct02/docker-weewx
76f4e3dd9af88ba521f431b07a3762ecb2820464
[ "Apache-2.0" ]
null
null
null
dist/weewx-4.6.2/examples/lowBattery.py
mitct02/docker-weewx
76f4e3dd9af88ba521f431b07a3762ecb2820464
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2009-2021 Tom Keffer <tkeffer@gmail.com>
# See the file LICENSE.txt for your rights.

"""Example of how to implement a low battery alarm in WeeWX.

*******************************************************************************

To use this alarm, add the following somewhere in your configuration file
weewx.conf:

[Alarm]
    time_wait = 3600
    count_threshold = 10
    smtp_host = smtp.example.com
    smtp_user = myusername
    smtp_password = mypassword
    from = sally@example.com
    mailto = jane@example.com, bob@example.com
    subject = "Time to change the battery!"

An email will be sent to each address in the comma separated list of
recipients.

The example assumes an SMTP email server at smtp.example.com that requires
login. If the SMTP server does not require login, leave out the lines for
smtp_user and smtp_password.

Setting an email "from" is optional. If not supplied, one will be filled in,
but your SMTP server may or may not accept it.

Setting an email "subject" is optional. If not supplied, one will be filled
in.

To avoid a flood of emails, one will only be sent every 3600 seconds (one
hour).

It will also not send an email unless the low battery indicator has been on
greater than or equal to count_threshold times in an archive period. This
avoids sending out an alarm if the battery is only occasionally being
signaled as bad.

*******************************************************************************

To enable this service:

1) Copy this file to your user directory. See https://bit.ly/33YHsqX for
where your user directory is located.

2) Modify the weewx configuration file by adding this service to the option
"report_services", located in section [Engine][[Services]].

[Engine]
  [[Services]]
    ...
    report_services = weewx.engine.StdPrint, weewx.engine.StdReport, user.lowBattery.BatteryAlarm

*******************************************************************************

If you wish to use both this example and the alarm.py example, simply merge
the two configuration options together under [Alarm] and add both services
to report_services.

*******************************************************************************
"""

import logging
import time
import smtplib
from email.mime.text import MIMEText
import threading

import weewx
from weewx.engine import StdService
from weeutil.weeutil import timestamp_to_string, option_as_list

log = logging.getLogger(__name__)


# Inherit from the base class StdService:
class BatteryAlarm(StdService):
    """Service that sends email if one of the batteries is low"""

    battery_flags = ['txBatteryStatus', 'windBatteryStatus',
                     'rainBatteryStatus', 'inTempBatteryStatus',
                     'outTempBatteryStatus']

    def __init__(self, engine, config_dict):
        # Pass the initialization information on to my superclass:
        super(BatteryAlarm, self).__init__(engine, config_dict)

        # This will hold the time when the last alarm message went out:
        self.last_msg_ts = 0
        # This will hold the count of the number of times the VP2 has signaled
        # a low battery alarm this archive period
        self.alarm_count = 0

        try:
            # Dig the needed options out of the configuration dictionary.
            # If a critical option is missing, an exception will be thrown and
            # the alarm will not be set.
            self.time_wait = int(config_dict['Alarm'].get('time_wait', 3600))
            self.count_threshold = int(config_dict['Alarm'].get('count_threshold', 10))
            self.smtp_host = config_dict['Alarm']['smtp_host']
            self.smtp_user = config_dict['Alarm'].get('smtp_user')
            self.smtp_password = config_dict['Alarm'].get('smtp_password')
            self.SUBJECT = config_dict['Alarm'].get('subject', "Low battery alarm message from weewx")
            self.FROM = config_dict['Alarm'].get('from', 'alarm@example.com')
            self.TO = option_as_list(config_dict['Alarm']['mailto'])
        except KeyError as e:
            log.info("No alarm set. Missing parameter: %s", e)
        else:
            # If we got this far, it's ok to start intercepting events:
            self.bind(weewx.NEW_LOOP_PACKET, self.new_loop_packet)
            self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)
            log.info("LowBattery alarm enabled. Count threshold is %d", self.count_threshold)

    def new_loop_packet(self, event):
        """This function is called on each new LOOP packet."""
        packet = event.packet
        # If any battery status flag is non-zero, a battery is low. Use dictionary comprehension
        # to build a new dictionary that just holds the non-zero values.
        low_batteries = {k: packet[k]
                         for k in BatteryAlarm.battery_flags if k in packet and packet[k]}

        # If there are any low batteries, see if we need to send an alarm
        if low_batteries:
            self.alarm_count += 1
            # Don't panic on the first occurrence. We must see the alarm at
            # least count_threshold times before sounding the alarm.
            if self.alarm_count >= self.count_threshold:
                # We've hit the threshold. However, to avoid a flood of nearly
                # identical emails, send a new one only if it's been a long
                # time since we sent the last one:
                if abs(time.time() - self.last_msg_ts) >= self.time_wait:
                    # Sound the alarm!
                    timestamp = event.packet['dateTime']
                    # Launch in a separate thread so it does not block the
                    # main LOOP thread:
                    t = threading.Thread(target=BatteryAlarm.sound_the_alarm,
                                         args=(self, timestamp, low_batteries, self.alarm_count))
                    t.start()
                    # Record when the message went out:
                    self.last_msg_ts = time.time()

    def new_archive_record(self, event):
        """This function is called on each new archive record."""

        # Reset the alarm counter
        self.alarm_count = 0

    def sound_the_alarm(self, timestamp, battery_flags, alarm_count):
        """This function is called when the alarm has been triggered."""

        # Get the time and convert to a string:
        t_str = timestamp_to_string(timestamp)

        # Log it in the system log:
        log.info("Low battery status sounded at %s: %s" % (t_str, battery_flags))

        # Form the message text:
        indicator_strings = []
        for bat in battery_flags:
            indicator_strings.append("%s: %04x" % (bat, int(battery_flags[bat])))
        msg_text = """
The low battery indicator has been seen %d times since the last archive period.

Alarm sounded at %s

Low battery indicators:
%s

""" % (alarm_count, t_str, '\n'.join(indicator_strings))
        # Convert to MIME:
        msg = MIMEText(msg_text)

        # Fill in MIME headers:
        msg['Subject'] = self.SUBJECT
        msg['From'] = self.FROM
        msg['To'] = ','.join(self.TO)

        try:
            # First try end-to-end encryption
            s = smtplib.SMTP_SSL(self.smtp_host)
            log.debug("Using SMTP_SSL")
        except AttributeError:
            # If that doesn't work, try creating an insecure host, then upgrading
            s = smtplib.SMTP(self.smtp_host)
            try:
                # Be prepared to catch an exception if the server
                # does not support encrypted transport.
                s.ehlo()
                s.starttls()
                s.ehlo()
                log.debug("Using SMTP encrypted transport")
            except smtplib.SMTPException:
                log.debug("Using SMTP unencrypted transport")

        try:
            # If a username has been given, assume that login is required
            # for this host:
            if self.smtp_user:
                s.login(self.smtp_user, self.smtp_password)
                log.debug("Logged in as %s", self.smtp_user)

            # Send the email:
            s.sendmail(msg['From'], self.TO, msg.as_string())
            # Log out of the server:
            s.quit()
        except Exception as e:
            log.error("Send email failed: %s", e)
            raise

        # Log sending the email:
        log.info("Email sent to: %s", self.TO)


if __name__ == '__main__':
    """This section is used to test lowBattery.py. It uses a record that is
    guaranteed to sound a battery alert.

    You will need a valid weewx.conf configuration file with an [Alarm]
    section that has been set up as illustrated at the top of this file."""

    from optparse import OptionParser
    import weecfg
    import weeutil.logger

    usage = """Usage: python lowBattery.py --help
       python lowBattery.py [CONFIG_FILE|--config=CONFIG_FILE]

Arguments:

      CONFIG_PATH: Path to weewx.conf
"""

    epilog = """You must be sure the WeeWX modules are in your PYTHONPATH.
    For example:

    PYTHONPATH=/home/weewx/bin python lowBattery.py --help"""

    # Force debug:
    weewx.debug = 1

    # Create a command line parser:
    parser = OptionParser(usage=usage, epilog=epilog)
    parser.add_option("--config", dest="config_path", metavar="CONFIG_FILE",
                      help="Use configuration file CONFIG_FILE.")
    # Parse the arguments and options
    (options, args) = parser.parse_args()

    try:
        config_path, config_dict = weecfg.read_config(options.config_path, args)
    except IOError as e:
        exit("Unable to open configuration file: %s" % e)

    print("Using configuration file %s" % config_path)

    # Set logging configuration:
    weeutil.logger.setup('lowBattery', config_dict)

    if 'Alarm' not in config_dict:
        exit("No [Alarm] section in the configuration file %s" % config_path)

    # This is the fake packet that we'll use
    pack = {'txBatteryStatus': 1.0,
            'dateTime': int(time.time())}

    # We need the main WeeWX engine in order to bind to the event, but we
    # don't need for it to completely start up. So get rid of all services:
    config_dict['Engine']['Services'] = {}
    # Now we can instantiate our slim engine, using the DummyEngine class...
    engine = weewx.engine.DummyEngine(config_dict)
    # ... and set the alarm using it.
    alarm = BatteryAlarm(engine, config_dict)

    # Create a NEW_LOOP_PACKET event
    event = weewx.Event(weewx.NEW_LOOP_PACKET, packet=pack)

    # Trigger the alarm enough that we reach the threshold
    for count in range(alarm.count_threshold):
        alarm.new_loop_packet(event)
38.528169
110
0.614239
acef99fced273e9f18b6a429c99f220a8c49f8db
16,479
py
Python
trainval_net.py
emptyewer/R-FCN.pytorch
2498881b2ba7a1b511fe82b935ec422ca5fabe55
[ "MIT" ]
71
2018-08-22T02:26:42.000Z
2022-02-26T18:32:32.000Z
trainval_net.py
princewang1994/faster-rcnn.pytorch
0c8da30bfd23e61f4c7fd1299626b9d82cf8a164
[ "MIT" ]
14
2018-11-18T05:46:59.000Z
2021-04-07T06:45:33.000Z
trainval_net.py
princewang1994/faster-rcnn.pytorch
0c8da30bfd23e61f4c7fd1299626b9d82cf8a164
[ "MIT" ]
23
2018-08-22T02:26:43.000Z
2022-01-26T15:45:56.000Z
# --------------------------------------------------------
# Pytorch multi-GPU Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import pdb
import pprint
import time

import _init_paths
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data.sampler import Sampler

from model.utils.config import cfg, cfg_from_file, cfg_from_list
from model.utils.net_utils import adjust_learning_rate, save_checkpoint, clip_gradient
from roi_data_layer.roibatchLoader import roibatchLoader
from roi_data_layer.roidb import combined_roidb


def parse_args():
    """
    Parse input arguments
    """
    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    parser.add_argument('--dataset', dest='dataset',
                        help='training dataset',
                        default='pascal_voc', type=str)
    parser.add_argument('--arch', dest='arch',
                        default='rcnn', choices=['rcnn', 'rfcn', 'couplenet'])
    parser.add_argument('--net', dest='net',
                        help='vgg16, res101',
                        default='vgg16', type=str)
    parser.add_argument('--start_epoch', dest='start_epoch',
                        help='starting epoch',
                        default=1, type=int)
    parser.add_argument('--epochs', dest='max_epochs',
                        help='number of epochs to train',
                        default=20, type=int)
    parser.add_argument('--disp_interval', dest='disp_interval',
                        help='number of iterations to display',
                        default=100, type=int)
    parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',
                        help='number of iterations to display',
                        default=10000, type=int)
    parser.add_argument('--save_dir', dest='save_dir',
                        help='directory to save models',
                        default="save", type=str)
    parser.add_argument('--nw', dest='num_workers',
                        help='number of workers to load data',
                        default=4, type=int)
    parser.add_argument('--cuda', dest='cuda',
                        help='whether use CUDA',
                        action='store_true')
    parser.add_argument('--ls', dest='large_scale',
                        help='whether use large image scale',
                        action='store_true')
    parser.add_argument('--mGPUs', dest='mGPUs',
                        help='whether use multiple GPUs',
                        action='store_true')
    parser.add_argument('--ohem', dest='ohem',
                        help='Use online hard example mining for training',
                        action='store_true')
    parser.add_argument('--bs', dest='batch_size',
                        help='batch_size',
                        default=1, type=int)
    parser.add_argument('--cag', dest='class_agnostic',
                        help='whether perform class_agnostic bbox regression',
                        action='store_true')

    # config optimization
    parser.add_argument('--o', dest='optimizer',
                        help='training optimizer',
                        default="sgd", type=str)
    parser.add_argument('--lr', dest='lr',
                        help='starting learning rate',
                        default=0.001, type=float)
    parser.add_argument('--lr_decay_step', dest='lr_decay_step',
                        help='step to do learning rate decay, unit is epoch',
                        default=5, type=int)
    parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
                        help='learning rate decay ratio',
                        default=0.1, type=float)

    # set training session
    parser.add_argument('--s', dest='session',
                        help='training session',
                        default=1, type=int)

    # resume trained model
    parser.add_argument('--r', dest='resume',
                        help='resume checkpoint or not',
                        default=False, type=bool)
    parser.add_argument('--checksession', dest='checksession',
                        help='checksession to load model',
                        default=1, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch',
                        help='checkepoch to load model',
                        default=1, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint',
                        help='checkpoint to load model',
                        default=0, type=int)
    # log and display
    parser.add_argument('--use_tfboard', dest='use_tfboard',
                        help='whether use tensorflow tensorboard',
                        default=False, type=bool)

    args = parser.parse_args()
    return args


class sampler(Sampler):
    def __init__(self, train_size, batch_size):
        self.num_data = train_size
        self.num_per_batch = int(train_size / batch_size)
        self.batch_size = batch_size
        self.range = torch.arange(0, batch_size).view(1, batch_size).long()
        self.leftover_flag = False
        if train_size % batch_size:
            self.leftover = torch.arange(self.num_per_batch * batch_size, train_size).long()
            self.leftover_flag = True

    def __iter__(self):
        rand_num = torch.randperm(self.num_per_batch).view(-1, 1) * self.batch_size
        self.rand_num = rand_num.expand(self.num_per_batch, self.batch_size) + self.range

        self.rand_num_view = self.rand_num.view(-1)

        if self.leftover_flag:
            self.rand_num_view = torch.cat((self.rand_num_view, self.leftover), 0)

        return iter(self.rand_num_view)

    def __len__(self):
        return self.num_data


if __name__ == '__main__':

    args = parse_args()

    if args.arch == 'rcnn':
        from model.faster_rcnn.vgg16 import vgg16
        from model.faster_rcnn.resnet import resnet
    elif args.arch == 'rfcn':
        from model.rfcn.resnet_atrous import resnet
    elif args.arch == 'couplenet':
        from model.couplenet.resnet_atrous import resnet

    print('Called with args:')
    print(args)

    if args.use_tfboard:
        from model.utils.logger import Logger
        # Set the logger
        logger = Logger('./logs')

    if args.dataset == "pascal_voc":
        args.imdb_name = "voc_2007_trainval"
        args.imdbval_name = "voc_2007_test"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
    elif args.dataset == "pascal_voc_0712":
        args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
        args.imdbval_name = "voc_2007_test"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
    elif args.dataset == "coco":
        args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
        args.imdbval_name = "coco_2014_minival"
        args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
    elif args.dataset == "imagenet":
        args.imdb_name = "imagenet_train"
        args.imdbval_name = "imagenet_val"
        args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '30']
    elif args.dataset == "vg":
        # train sizes: train, smalltrain, minitrain
        # train scale: ['150-50-20', '150-50-50', '500-150-80', '750-250-150', '1750-700-450', '1600-400-20']
        args.imdb_name = "vg_150-50-50_minitrain"
        args.imdbval_name = "vg_150-50-50_minival"
        args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']

    args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)
    np.random.seed(cfg.RNG_SEED)

    #torch.backends.cudnn.benchmark = True
    if torch.cuda.is_available() and not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    # train set
    # -- Note: Use validation set and disable the flipped to enable faster loading.
    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = args.cuda
    imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)
    train_size = len(roidb)

    print('{:d} roidb entries'.format(len(roidb)))

    output_dir = os.path.join(args.save_dir, args.arch, args.net, args.dataset)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    sampler_batch = sampler(train_size, args.batch_size)

    dataset = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
                             imdb.num_classes, training=True)

    dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                             sampler=sampler_batch, num_workers=args.num_workers)

    # initialize the tensor holder here.
    im_data = torch.FloatTensor(1)
    im_info = torch.FloatTensor(1)
    num_boxes = torch.LongTensor(1)
    gt_boxes = torch.FloatTensor(1)

    # ship to cuda
    if args.cuda:
        im_data = im_data.cuda()
        im_info = im_info.cuda()
        num_boxes = num_boxes.cuda()
        gt_boxes = gt_boxes.cuda()

    # make variable
    im_data = Variable(im_data)
    im_info = Variable(im_info)
    num_boxes = Variable(num_boxes)
    gt_boxes = Variable(gt_boxes)

    if args.cuda:
        cfg.CUDA = True

    # initialize the network here.
    if args.net == 'vgg16':
        model = vgg16(imdb.classes, pretrained=True, class_agnostic=args.class_agnostic)
    elif args.net == 'res101':
        model = resnet(imdb.classes, 101, pretrained=True, class_agnostic=args.class_agnostic)
    elif args.net == 'res50':
        model = resnet(imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic)
    elif args.net == 'res152':
        model = resnet(imdb.classes, 152, pretrained=True, class_agnostic=args.class_agnostic)
    else:
        print("network is not defined")
        pdb.set_trace()

    model.create_architecture()

    lr = cfg.TRAIN.LEARNING_RATE
    lr = args.lr
    #tr_momentum = cfg.TRAIN.MOMENTUM
    #tr_momentum = args.momentum

    params = []
    for key, value in dict(model.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                params += [{'params': [value], 'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1), \
                            'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
            else:
                params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]

    if args.optimizer == "adam":
        lr = lr * 0.1
        optimizer = torch.optim.Adam(params)
    elif args.optimizer == "sgd":
        optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)

    if args.resume:
        load_name = os.path.join(output_dir,
                                 'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
        print("loading checkpoint %s" % (load_name))
        checkpoint = torch.load(load_name)
        args.session = checkpoint['session']
        args.start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr = optimizer.param_groups[0]['lr']
        if 'pooling_mode' in checkpoint.keys():
            cfg.POOLING_MODE = checkpoint['pooling_mode']
        print("loaded checkpoint %s" % (load_name))

    if args.mGPUs:
        model = nn.DataParallel(model)

    if args.cuda:
        model.cuda()

    iters_per_epoch = int(train_size / args.batch_size)

    for epoch in range(args.start_epoch, args.max_epochs + 1):

        dataset.resize_batch()

        # setting to train mode
        model.train()
        loss_temp = 0
        start = time.time()

        if epoch % (args.lr_decay_step + 1) == 0:
            adjust_learning_rate(optimizer, args.lr_decay_gamma)
            lr *= args.lr_decay_gamma

        data_iter = iter(dataloader)
        for step in range(iters_per_epoch):
            data = next(data_iter)
            im_data.data.resize_(data[0].size()).copy_(data[0])
            im_info.data.resize_(data[1].size()).copy_(data[1])
            gt_boxes.data.resize_(data[2].size()).copy_(data[2])
            num_boxes.data.resize_(data[3].size()).copy_(data[3])

            model.zero_grad()
            rois, cls_prob, bbox_pred, \
            rpn_loss_cls, rpn_loss_box, \
            RCNN_loss_cls, RCNN_loss_bbox, \
            rois_label = model(im_data, im_info, gt_boxes, num_boxes)

            loss = rpn_loss_cls.mean() + rpn_loss_box.mean() \
                   + RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
            loss_temp += loss.data[0]

            # backward
            optimizer.zero_grad()
            loss.backward()
            if args.net == "vgg16":
                clip_gradient(model, 10.)
            optimizer.step()

            if step % args.disp_interval == 0:
                end = time.time()
                if step > 0:
                    loss_temp /= args.disp_interval

                if args.mGPUs:
                    loss_rpn_cls = rpn_loss_cls.mean().data[0]
                    loss_rpn_box = rpn_loss_box.mean().data[0]
                    loss_rcnn_cls = RCNN_loss_cls.mean().data[0]
                    loss_rcnn_box = RCNN_loss_bbox.mean().data[0]
                    fg_cnt = torch.sum(rois_label.data.ne(0))
                    bg_cnt = rois_label.data.numel() - fg_cnt
                else:
                    loss_rpn_cls = rpn_loss_cls.data[0]
                    loss_rpn_box = rpn_loss_box.data[0]
                    loss_rcnn_cls = RCNN_loss_cls.data[0]
                    loss_rcnn_box = RCNN_loss_bbox.data[0]
                    fg_cnt = torch.sum(rois_label.data.ne(0))
                    bg_cnt = rois_label.data.numel() - fg_cnt

                print("[session %d][epoch %2d][iter %4d/%4d] loss: %.4f, lr: %.2e" \
                      % (args.session, epoch, step, iters_per_epoch, loss_temp, lr))
                print("\t\t\tfg/bg=(%d/%d), time cost: %f" % (fg_cnt, bg_cnt, end - start))
                print("\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f" \
                      % (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box))
                if args.use_tfboard:
                    info = {
                        'loss': loss_temp,
                        'loss_rpn_cls': loss_rpn_cls,
                        'loss_rpn_box': loss_rpn_box,
                        'loss_rcnn_cls': loss_rcnn_cls,
                        'loss_rcnn_box': loss_rcnn_box
                    }
                    for tag, value in info.items():
                        logger.scalar_summary(tag, value, step)

                loss_temp = 0
                start = time.time()

        if args.mGPUs:
            save_name = os.path.join(output_dir, 'faster_rcnn_{}_{}_{}.pth'.format(args.session, epoch, step))
            save_checkpoint({
                'session': args.session,
                'epoch': epoch + 1,
                'model': model.module.state_dict(),
                'optimizer': optimizer.state_dict(),
                'pooling_mode': cfg.POOLING_MODE,
                'class_agnostic': args.class_agnostic,
            }, save_name)
        else:
            save_name = os.path.join(output_dir, 'faster_rcnn_{}_{}_{}.pth'.format(args.session, epoch, step))
            save_checkpoint({
                'session': args.session,
                'epoch': epoch + 1,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'pooling_mode': cfg.POOLING_MODE,
                'class_agnostic': args.class_agnostic,
            }, save_name)
        print('save model: {}'.format(save_name))

        end = time.time()
        print(end - start)
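# Editor's illustrative launch command (derived from the argparse flags above;
# assumes the dataset has been prepared as the repository expects):
#   python trainval_net.py --dataset pascal_voc --arch rfcn --net res101 \
#       --bs 1 --nw 4 --lr 1e-3 --lr_decay_step 5 --cuda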
40.890819
120
0.578069
acef9b2416ebbf73217181ca1cd0b104d403111d
3,721
py
Python
h2o-py/h2o/model/multinomial.py
My-Technical-Architect/h2o-3
d383802fb7f9c3ec9c72b7869fe636059a333d88
[ "Apache-2.0" ]
null
null
null
h2o-py/h2o/model/multinomial.py
My-Technical-Architect/h2o-3
d383802fb7f9c3ec9c72b7869fe636059a333d88
[ "Apache-2.0" ]
null
null
null
h2o-py/h2o/model/multinomial.py
My-Technical-Architect/h2o-3
d383802fb7f9c3ec9c72b7869fe636059a333d88
[ "Apache-2.0" ]
null
null
null
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals

from h2o.utils.compatibility import *  # NOQA

from ..frame import H2OFrame
import h2o
from .model_base import ModelBase
from h2o.utils.typechecks import assert_is_type


class H2OMultinomialModel(ModelBase):

    def _make_model(self):
        return H2OMultinomialModel()

    def confusion_matrix(self, data):
        """
        Returns a confusion matrix based on H2O's default prediction threshold for a dataset.

        :param H2OFrame data: the frame with the prediction results for which the confusion matrix should be
            extracted.
        """
        assert_is_type(data, H2OFrame)
        j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self._id, data.frame_id))
        return j["model_metrics"][0]["cm"]["table"]

    def hit_ratio_table(self, train=False, valid=False, xval=False):
        """
        Retrieve the Hit Ratios.

        If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are
        "train", "valid", and "xval".

        :param train: If train is True, then return the hit ratio value for the training data.
        :param valid: If valid is True, then return the hit ratio value for the validation data.
        :param xval: If xval is True, then return the hit ratio value for the cross validation data.

        :return: The hit ratio for this multinomial model.
        """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        for k, v in zip(list(tm.keys()), list(tm.values())):
            m[k] = None if v is None else v.hit_ratio_table()
        return list(m.values())[0] if len(m) == 1 else m

    def mean_per_class_error(self, train=False, valid=False, xval=False):
        """
        Retrieve the mean per class error across all classes.

        If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are
        "train", "valid", and "xval".

        :param bool train: If True, return the mean_per_class_error value for the training data.
        :param bool valid: If True, return the mean_per_class_error value for the validation data.
        :param bool xval: If True, return the mean_per_class_error value for each of the cross-validated splits.

        :returns: The mean_per_class_error values for the specified key(s).
        """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        for k, v in zip(list(tm.keys()), list(tm.values())):
            m[k] = None if v is None else v.mean_per_class_error()
        return list(m.values())[0] if len(m) == 1 else m

    def plot(self, timestep="AUTO", metric="AUTO", **kwargs):
        """
        Plots training set (and validation set if available) scoring history for an H2OMultinomialModel.

        The timestep and metric arguments are restricted to what is available in its scoring history.

        :param timestep: A unit of measurement for the x-axis.
        :param metric: A unit of measurement for the y-axis.

        :returns: A scoring history plot.
        """
        if self._model_json["algo"] in ("deeplearning", "deepwater", "drf", "gbm"):
            if metric == "AUTO":
                metric = "classification_error"
            elif metric not in ("logloss", "classification_error", "rmse"):
                raise ValueError(
                    "metric for H2OMultinomialModel must be one of: AUTO, logloss, classification_error, rmse")

        self._plot(timestep=timestep, metric=metric, **kwargs)
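# --- Editor's illustrative usage sketch (commented out; not part of the H2O
# source). Assumes a reachable H2O cluster and a local iris.csv file with a
# "class" column; names are hypothetical. ---
# import h2o
# from h2o.estimators.gbm import H2OGradientBoostingEstimator
#
# h2o.init()
# frame = h2o.import_file("iris.csv")          # hypothetical path
# frame["class"] = frame["class"].asfactor()   # multinomial target
# model = H2OGradientBoostingEstimator()
# model.train(x=frame.columns[:-1], y="class", training_frame=frame)
# model.hit_ratio_table()        # per-k hit ratios from the training metrics
# model.mean_per_class_error()   # scalar mean per-class error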
42.770115
119
0.657888
acef9b87f51a7f65f478854f59ee1b6c48e23f24
8,820
py
Python
MA3C-push.py
whongyu/MA3C
d3b38cf42a909c0938624ba853119804efaf47eb
[ "MIT" ]
null
null
null
MA3C-push.py
whongyu/MA3C
d3b38cf42a909c0938624ba853119804efaf47eb
[ "MIT" ]
null
null
null
MA3C-push.py
whongyu/MA3C
d3b38cf42a909c0938624ba853119804efaf47eb
[ "MIT" ]
null
null
null
from copy import deepcopy
import os
import random

from make_env import make_env
import networkx as nx
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

SAMPLE_NUMS = 10
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class ActorNetwork(nn.Module):
    def __init__(self, input_size, hidden_size, action_size):
        super(ActorNetwork, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, action_size)

    def forward(self, x):
        out = F.relu(self.fc1(x))
        out = F.relu(self.fc2(out))
        out = F.log_softmax(self.fc3(out), dim=-1)
        return out


class ValueNetwork(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(ValueNetwork, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out = F.relu(self.fc1(x))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        return out


class Agent:
    def __init__(self):
        self.value_network = ValueNetwork(input_size=STATE_DIM, hidden_size=24, output_size=1).to(device)
        self.actor_network = ActorNetwork(STATE_DIM, 24, ACTION_DIM).to(device)
        self.actor_network_optim = torch.optim.Adam(self.actor_network.parameters(), lr=0.0001)
        # tao is a working copy of the value network used for consensus averaging
        self.tao = deepcopy(self.value_network).to(device)
        self.y = 1
        self.reward = None
        # remember each parameter tensor's shape so the flattened consensus
        # parameters can be reshaped back in consensus()
        self.shape = []
        for name, param in self.value_network.named_parameters():
            self.shape.append(deepcopy(param.cpu().data).numpy().shape)

    def update_tao(self, consensus_tao, qs):
        # NOTE: reads the module-level states_var set in the training loop below
        target_values = qs
        values = self.value_network(states_var.to(device))
        criterion = nn.MSELoss()
        value_network_loss = criterion(values, target_values)
        value_network_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.value_network.parameters(), 0.5)
        temp = []
        for name, param in self.value_network.named_parameters():
            temp.append(deepcopy(param.grad))
            param.grad.zero_()
        i = 0
        for name, param in self.tao.named_parameters():
            # take one gradient step away from the consensus parameters
            param.data = torch.from_numpy(consensus_tao[i]).to(device) - (temp[i].to(device) * 0.1)
            i += 1


def consensus(agents, adjacent_matrix):
    n_agent = len(agents)
    y_list = [[] for _ in range(n_agent)]
    tao_list = [[] for _ in range(n_agent)]
    reward_list = [[] for _ in range(n_agent)]
    for i, agent in enumerate(agents):
        for j, _agent in enumerate(agents):
            if i == j or adjacent_matrix[i, j] > 0.0:
                y_list[i].append(_agent.y * adjacent_matrix[i, j])
                reward_list[i].append(np.array(_agent.reward) * adjacent_matrix[i, j])
                tao = []
                for name, param in _agent.tao.named_parameters():
                    tao.append(deepcopy(param.cpu().data).numpy().reshape(1, -1).squeeze(0))
                tao_list[i].append(np.array(tao) * adjacent_matrix[i, j])
    consensus_y = [np.sum(np.asarray(x), axis=0) for x in y_list]
    consensus_tao = []
    for i in range(n_agent):
        tao = tao_list[i]
        temp = [[] for _ in range(len(tao[0]))]
        for j in range(len(tao)):
            for k in range(len(tao[0])):
                temp[k].append(tao[j][k])
        consensus_tao.append(np.sum(temp, axis=1))
        for k in range(len(consensus_tao[i])):
            consensus_tao[i][k] = consensus_tao[i][k].reshape(agents[0].shape[k])
    consensus_reward = [np.sum(np.asarray(x), axis=0) for x in reward_list]
    return consensus_tao, consensus_y, consensus_reward


def learn(agent_list, states_var, next_states_var, rewards_var, actions_var, rewards, adjacent_matrix):
    for i in range(N_agent):
        agent_list[i].reward = rewards[i]
    consensus_tao, consensus_y, consensus_rewards = consensus(agent_list, adjacent_matrix)
    # critic update: each agent takes a TD step away from the consensus parameters
    for i in range(N_agent):
        target = 0.99 * agent_list[i].value_network(next_states_var.to(device)).detach() + rewards_var[i].to(device)
        agent_list[i].update_tao(consensus_tao[i], target)
    # copy the rescaled tao parameters back into each value network
    for j in range(N_agent):
        temp = []
        for name, param in agent_list[j].tao.named_parameters():
            temp.append(param)
        i = 0
        for name, param in agent_list[j].value_network.named_parameters():
            param.data = deepcopy(temp[i]) / consensus_y[j]
            i += 1
        agent_list[j].y = consensus_y[j]
    # actor update: policy gradient weighted by a TD-error advantage
    for i in range(N_agent):
        agent_list[i].actor_network_optim.zero_grad()
        log_softmax_actions = agent_list[i].actor_network(states_var.to(device))
        consensus_reward = Variable(torch.Tensor(consensus_rewards[i]).view(-1, 1))
        with torch.no_grad():
            sigma = consensus_reward.to(device) - agent_list[i].value_network(states_var.to(device)) + 0.99 * agent_list[i].value_network(next_states_var.to(device))
        actor_network_loss = - torch.mean(torch.sum(log_softmax_actions * actions_var[i].to(device), 1) * sigma.squeeze(1))
        actor_network_loss.backward()
        torch.nn.utils.clip_grad_norm_(agent_list[i].actor_network.parameters(), 0.5)
        agent_list[i].actor_network_optim.step()


def roll_out(agent_list, task, sample_nums, init_state):
    states = []
    next_states = []
    actions = [[] for _ in range(N_agent)]
    rewards = [[] for _ in range(N_agent)]
    state = init_state
    total_reward = 0
    for j in range(sample_nums):
        states.append(state)
        acts = []
        for i in range(N_agent):
            log_softmax_action = agent_list[i].actor_network(Variable(torch.Tensor([state])).to(device))
            softmax_action = torch.exp(log_softmax_action)
            action = np.random.choice(ACTION_DIM, p=softmax_action.cpu().data.numpy()[0])
            # epsilon-greedy exploration: act uniformly at random 5% of the time
            if np.random.rand(1) >= 0.95:
                action = np.random.choice(ACTION_DIM)
            acts.append(action)
            one_hot_action = [int(k == action) for k in range(ACTION_DIM)]
            actions[i].append(one_hot_action)
        next_state, reward, done, _ = task.step(acts)
        next_states.append(next_state)
        for i in range(N_agent):
            rewards[i].append(reward[i] * coe[i])
        r = np.mean(reward)
        total_reward += r
        next_state = np.squeeze(np.array(next_state).reshape((1, STATE_DIM)))
        state = next_state
    return states, next_states, actions, rewards, state, total_reward


env = make_env('simple_spread_custom', benchmark=True)
STATE_DIM = env.observation_space[0].shape[0]
N_agent = env.n
STATE_DIM *= N_agent
ACTION_DIM = env.action_space[0].n
seed = 270
coe = [random.uniform(0, 2) for i in range(N_agent)]

for x in range(10):
    agent_list = [Agent() for i in range(N_agent)]
    for i_episode in range(210):
        init_state = env.reset()
        init_state = np.squeeze(np.array(init_state).reshape((1, STATE_DIM)))
        reward = 0
        for step in range(100):
            # draw a fresh random communication graph and make it column-stochastic
            while True:
                er = nx.gnm_random_graph(N_agent, 2 * N_agent, directed=True)
                adjacent_matrix = np.asarray(nx.to_numpy_matrix(er))
                for i in range(adjacent_matrix.shape[0]):
                    if adjacent_matrix[i][i] == 0:
                        adjacent_matrix[i][i] = 1
                col = np.sum(adjacent_matrix, axis=0)
                for i in range(adjacent_matrix.shape[1]):
                    for j in range(adjacent_matrix.shape[0]):
                        if adjacent_matrix[j][i] != 0:
                            adjacent_matrix[j][i] = 1 / col[i]
                break
            states, next_states, actions, rewards, current_state, total_reward = roll_out(agent_list, env, SAMPLE_NUMS, init_state)
            reward += total_reward
            init_state = current_state
            states_var = Variable(torch.Tensor(states).view(-1, STATE_DIM))
            next_states_var = Variable(torch.Tensor(next_states).view(-1, STATE_DIM))
            actions_var = []
            rewards_var = []
            for i in range(N_agent):
                actions_var.append(Variable(torch.Tensor(actions[i]).view(-1, ACTION_DIM)))
                rewards_var.append(Variable(torch.Tensor(rewards[i]).view(-1, 1)))
            learn(agent_list, states_var, next_states_var, rewards_var, actions_var, rewards, adjacent_matrix)
        print("Epoch: %s, Reward: %s " % (i_episode, reward / 1000))
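The training loop above redraws a random directed communication graph every step, adds self-loops, and normalizes each column so the consensus weights are column-stochastic. A minimal standalone sketch of just that construction (illustrative function name; nx.to_numpy_array is used here since to_numpy_matrix was removed in networkx 3.x):

import networkx as nx
import numpy as np

def consensus_weights(n_agents, n_edges, seed=None):
    """Random directed communication graph with self-loops, normalized
    so every column sums to 1 (column-stochastic), as in the loop above."""
    g = nx.gnm_random_graph(n_agents, n_edges, seed=seed, directed=True)
    w = nx.to_numpy_array(g)      # dense adjacency matrix
    np.fill_diagonal(w, 1.0)      # every agent always listens to itself
    return w / w.sum(axis=0)      # divide each column by its sum (>= 1 here)

if __name__ == "__main__":
    w = consensus_weights(4, 8, seed=0)
    print(np.allclose(w.sum(axis=0), 1.0))  # True: column-stochastic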
43.448276
138
0.623129
acef9b9335992c74a497bc60cac4f4759673d0d3
17,358
py
Python
irctest/server_tests/labeled_responses.py
FiskFan1999/ergochat_irctest
da005d7d2492bf31c4bdeb46108240766c69d0ad
[ "MIT" ]
16
2015-12-20T16:24:54.000Z
2021-06-03T18:00:03.000Z
irctest/server_tests/labeled_responses.py
FiskFan1999/ergochat_irctest
da005d7d2492bf31c4bdeb46108240766c69d0ad
[ "MIT" ]
66
2015-12-20T00:23:25.000Z
2021-08-14T09:57:04.000Z
irctest/server_tests/labeled_responses.py
FiskFan1999/ergochat_irctest
da005d7d2492bf31c4bdeb46108240766c69d0ad
[ "MIT" ]
3
2021-12-04T21:18:41.000Z
2022-03-22T01:42:36.000Z
""" This specification is a little hard to test because all labels are optional; so there may be many false positives. <https://ircv3.net/specs/extensions/labeled-response.html> """ import re import pytest from irctest import cases from irctest.numerics import ERR_UNKNOWNCOMMAND from irctest.patma import ANYDICT, AnyOptStr, NotStrRe, RemainingKeys, StrRe class LabeledResponsesTestCase(cases.BaseServerTestCase, cases.OptionalityHelper): @cases.mark_capabilities("echo-message", "batch", "labeled-response") def testLabeledPrivmsgResponsesToMultipleClients(self): self.connectClient( "foo", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(1) self.connectClient( "bar", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(2) self.connectClient( "carl", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(3) self.connectClient( "alice", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(4) self.sendLine(1, "@label=12345 PRIVMSG bar,carl,alice :hi") m = self.getMessage(1) m2 = self.getMessage(2) m3 = self.getMessage(3) m4 = self.getMessage(4) # ensure the label isn't sent to recipients self.assertMessageMatch(m2, command="PRIVMSG", tags={}) self.assertMessageMatch( m3, command="PRIVMSG", tags={}, ) self.assertMessageMatch(m4, command="PRIVMSG", tags={}) self.assertMessageMatch( m, command="BATCH", fail_msg="No BATCH echo received after sending one out" ) @cases.mark_capabilities("echo-message", "batch", "labeled-response") def testLabeledPrivmsgResponsesToClient(self): self.connectClient( "foo", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(1) self.connectClient( "bar", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(2) self.sendLine(1, "@label=12345 PRIVMSG bar :hi") m = self.getMessage(1) m2 = self.getMessage(2) # ensure the label isn't sent to recipient self.assertMessageMatch(m2, command="PRIVMSG", tags={}) self.assertMessageMatch(m, command="PRIVMSG", tags={"label": "12345"}) @pytest.mark.react_tag @cases.mark_capabilities("echo-message", "batch", "labeled-response") def testLabeledPrivmsgResponsesToChannel(self): self.connectClient( "foo", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(1) self.connectClient( "bar", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(2) # join channels self.sendLine(1, "JOIN #test") self.getMessages(1) self.sendLine(2, "JOIN #test") self.getMessages(2) self.getMessages(1) self.sendLine( 1, "@label=12345;+draft/reply=123;+draft/react=l😃l PRIVMSG #test :hi" ) ms = self.getMessage(1) mt = self.getMessage(2) # ensure the label isn't sent to recipient self.assertMessageMatch(mt, command="PRIVMSG", tags={}) # ensure sender correctly receives msg self.assertMessageMatch(ms, command="PRIVMSG", tags={"label": "12345"}) @cases.mark_capabilities("echo-message", "batch", "labeled-response") def testLabeledPrivmsgResponsesToSelf(self): self.connectClient( "foo", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(1) self.sendLine(1, "@label=12345 PRIVMSG foo :hi") m1 = self.getMessage(1) m2 = self.getMessage(1) number_of_labels = 0 for m in [m1, m2]: self.assertMessageMatch( m, command="PRIVMSG", fail_msg="Got a message 
back that wasn't a PRIVMSG", ) if "label" in m.tags: number_of_labels += 1 self.assertEqual( m.tags["label"], "12345", m, fail_msg=( "Echo'd label doesn't match the label we sent " "(should be '12345'): {msg}" ), ) self.assertEqual( number_of_labels, 1, m1, fail_msg=( "When sending a PRIVMSG to self with echo-message, " "we only expect one message to contain the label. " "Instead, {} messages had the label" ).format(number_of_labels), ) @cases.mark_capabilities("echo-message", "batch", "labeled-response") def testLabeledNoticeResponsesToClient(self): self.connectClient( "foo", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(1) self.connectClient( "bar", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(2) self.sendLine(1, "@label=12345 NOTICE bar :hi") m = self.getMessage(1) m2 = self.getMessage(2) # ensure the label isn't sent to recipient self.assertMessageMatch(m2, command="NOTICE", tags={}) self.assertMessageMatch(m, command="NOTICE", tags={"label": "12345"}) @pytest.mark.react_tag @cases.mark_capabilities("echo-message", "batch", "labeled-response") def testLabeledNoticeResponsesToChannel(self): self.connectClient( "foo", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(1) self.connectClient( "bar", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(2) # join channels self.sendLine(1, "JOIN #test") self.getMessages(1) self.sendLine(2, "JOIN #test") self.getMessages(2) self.getMessages(1) self.sendLine( 1, "@label=12345;+draft/reply=123;+draft/react=l😃l NOTICE #test :hi" ) ms = self.getMessage(1) mt = self.getMessage(2) # ensure the label isn't sent to recipient self.assertMessageMatch(mt, command="NOTICE", tags={}) # ensure sender correctly receives msg self.assertMessageMatch(ms, command="NOTICE", tags={"label": "12345"}) @cases.mark_capabilities("echo-message", "batch", "labeled-response") def testLabeledNoticeResponsesToSelf(self): self.connectClient( "foo", capabilities=["echo-message", "batch", "labeled-response"], skip_if_cap_nak=True, ) self.getMessages(1) self.sendLine(1, "@label=12345 NOTICE foo :hi") m1 = self.getMessage(1) m2 = self.getMessage(1) number_of_labels = 0 for m in [m1, m2]: self.assertMessageMatch( m, command="NOTICE", fail_msg="Got a message back that wasn't a NOTICE" ) if "label" in m.tags: number_of_labels += 1 self.assertEqual( m.tags["label"], "12345", m, fail_msg=( "Echo'd label doesn't match the label we sent " "(should be '12345'): {msg}" ), ) self.assertEqual( number_of_labels, 1, m1, fail_msg=( "When sending a NOTICE to self with echo-message, " "we only expect one message to contain the label. 
" "Instead, {} messages had the label" ).format(number_of_labels), ) @pytest.mark.react_tag @cases.mark_capabilities( "echo-message", "batch", "labeled-response", "message-tags" ) def testLabeledTagMsgResponsesToClient(self): self.connectClient( "foo", capabilities=["echo-message", "batch", "labeled-response", "message-tags"], skip_if_cap_nak=True, ) self.getMessages(1) self.connectClient( "bar", capabilities=["echo-message", "batch", "labeled-response", "message-tags"], skip_if_cap_nak=True, ) self.getMessages(2) # Need to get a valid msgid because Unreal validates them self.sendLine(1, "PRIVMSG bar :hi") msgid = self.getMessage(1).tags["msgid"] assert msgid == self.getMessage(2).tags["msgid"] self.sendLine( 1, f"@label=12345;+draft/reply={msgid};+draft/react=l😃l TAGMSG bar" ) m = self.getMessage(1) m2 = self.getMessage(2) # ensure the label isn't sent to recipient self.assertMessageMatch( m2, command="TAGMSG", tags={ "+draft/reply": msgid, "+draft/react": "l😃l", RemainingKeys(NotStrRe("label")): AnyOptStr(), }, ) self.assertNotIn( "label", m2.tags, m2, fail_msg=( "When sending a TAGMSG with a label, " "the target user shouldn't receive the label " "(only the sending user should): {msg}" ), ) self.assertMessageMatch( m, command="TAGMSG", tags={ "label": "12345", "+draft/reply": msgid, "+draft/react": "l😃l", **ANYDICT, }, ) @pytest.mark.react_tag @cases.mark_capabilities( "echo-message", "batch", "labeled-response", "message-tags" ) def testLabeledTagMsgResponsesToChannel(self): self.connectClient( "foo", capabilities=["echo-message", "batch", "labeled-response", "message-tags"], skip_if_cap_nak=True, ) self.getMessages(1) self.connectClient( "bar", capabilities=["echo-message", "batch", "labeled-response", "message-tags"], skip_if_cap_nak=True, ) self.getMessages(2) # join channels self.sendLine(1, "JOIN #test") self.getMessages(1) self.sendLine(2, "JOIN #test") self.getMessages(2) self.getMessages(1) # Need to get a valid msgid because Unreal validates them self.sendLine(1, "PRIVMSG #test :hi") msgid = self.getMessage(1).tags["msgid"] assert msgid == self.getMessage(2).tags["msgid"] self.sendLine( 1, f"@label=12345;+draft/reply={msgid};+draft/react=l😃l TAGMSG #test" ) ms = self.getMessage(1) mt = self.getMessage(2) # ensure the label isn't sent to recipient self.assertMessageMatch( mt, command="TAGMSG", tags={ "+draft/reply": msgid, "+draft/react": "l😃l", RemainingKeys(NotStrRe("label")): AnyOptStr(), }, fail_msg="No TAGMSG received by the target after sending one out", ) self.assertNotIn( "label", mt.tags, mt, fail_msg=( "When sending a TAGMSG with a label, " "the target user shouldn't receive the label " "(only the sending user should): {msg}" ), ) # ensure sender correctly receives msg self.assertMessageMatch( ms, command="TAGMSG", tags={"label": "12345", "+draft/reply": msgid, **ANYDICT}, ) @pytest.mark.react_tag @cases.mark_capabilities( "echo-message", "batch", "labeled-response", "message-tags" ) def testLabeledTagMsgResponsesToSelf(self): self.connectClient( "foo", capabilities=["echo-message", "batch", "labeled-response", "message-tags"], skip_if_cap_nak=True, ) self.getMessages(1) self.sendLine(1, "@label=12345;+draft/reply=123;+draft/react=l😃l TAGMSG foo") m1 = self.getMessage(1) m2 = self.getMessage(1) number_of_labels = 0 for m in [m1, m2]: self.assertMessageMatch( m, command="TAGMSG", fail_msg="Got a message back that wasn't a TAGMSG" ) if "label" in m.tags: number_of_labels += 1 self.assertEqual( m.tags["label"], "12345", m, fail_msg=( "Echo'd label doesn't match the label 
we sent " "(should be '12345'): {msg}" ), ) self.assertEqual( number_of_labels, 1, m1, fail_msg=( "When sending a TAGMSG to self with echo-message, " "we only expect one message to contain the label. " "Instead, {} messages had the label" ).format(number_of_labels), ) @cases.mark_capabilities("batch", "labeled-response", "message-tags", "server-time") def testBatchedJoinMessages(self): self.connectClient( "bar", capabilities=["batch", "labeled-response", "message-tags", "server-time"], skip_if_cap_nak=True, ) self.getMessages(1) self.sendLine(1, "@label=12345 JOIN #xyz") m = self.getMessages(1) # we expect at least join and names lines, which must be batched self.assertGreaterEqual(len(m), 3) # valid BATCH start line: batch_start = m[0] self.assertMessageMatch( batch_start, command="BATCH", params=[StrRe(r"\+.*"), "labeled-response"], ) batch_id = batch_start.params[0][1:] # batch id MUST be alphanumerics and hyphens self.assertTrue( re.match(r"^[A-Za-z0-9\-]+$", batch_id) is not None, "batch id must be alphanumerics and hyphens, got %r" % (batch_id,), ) self.assertEqual(batch_start.tags.get("label"), "12345") # valid BATCH end line batch_end = m[-1] self.assertMessageMatch(batch_end, command="BATCH", params=["-" + batch_id]) # messages must have the BATCH tag for message in m[1:-1]: self.assertEqual(message.tags.get("batch"), batch_id) @cases.mark_capabilities("labeled-response") def testNoBatchForSingleMessage(self): self.connectClient( "bar", capabilities=["batch", "labeled-response"], skip_if_cap_nak=True ) self.getMessages(1) self.sendLine(1, "@label=98765 PING adhoctestline") # no BATCH should be initiated for a one-line response, # it should just be labeled m = self.getMessage(1) self.assertMessageMatch(m, command="PONG", tags={"label": "98765"}) self.assertEqual(m.params[-1], "adhoctestline") @cases.mark_capabilities("labeled-response") def testEmptyBatchForNoResponse(self): self.connectClient( "bar", capabilities=["batch", "labeled-response"], skip_if_cap_nak=True ) self.getMessages(1) # PONG never receives a response self.sendLine(1, "@label=98765 PONG adhoctestline") # labeled-response: "Servers MUST respond with a labeled # `ACK` message when a client sends a labeled command that normally # produces no response." ms = self.getMessages(1) self.assertEqual(len(ms), 1) ack = ms[0] self.assertMessageMatch(ack, command="ACK", tags={"label": "98765"}) @cases.mark_capabilities("labeled-response") def testUnknownCommand(self): self.connectClient( "bar", capabilities=["batch", "labeled-response"], skip_if_cap_nak=True ) # this command doesn't exist, but the error response should still # be labeled: self.sendLine(1, "@label=deadbeef NONEXISTENT_COMMAND") ms = self.getMessages(1) self.assertEqual(len(ms), 1) unknowncommand = ms[0] self.assertMessageMatch( unknowncommand, command=ERR_UNKNOWNCOMMAND, tags={"label": "deadbeef"} )
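These tests repeatedly assert one property: the label tag round-trips to the sender but is stripped for every other recipient. For readers unfamiliar with the wire format, here is a minimal, illustrative parser for the IRCv3 message-tags prefix the tests exercise (parse_tags is a hypothetical helper, not part of irctest; tag-value unescaping is omitted for brevity):

def parse_tags(line):
    """Split the IRCv3 tags prefix off a raw IRC line.

    '@label=12345;+draft/react=l😃l PRIVMSG #test :hi'
    -> ({'label': '12345', '+draft/react': 'l😃l'}, 'PRIVMSG #test :hi')
    """
    if not line.startswith("@"):
        return {}, line
    prefix, _, rest = line[1:].partition(" ")
    tags = {}
    for item in prefix.split(";"):
        key, sep, value = item.partition("=")
        tags[key] = value if sep else None  # value-less tags are allowed
    return tags, rest

tags, rest = parse_tags("@label=12345 PRIVMSG bar :hi")
assert tags["label"] == "12345" and rest == "PRIVMSG bar :hi"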
33.252874
88
0.547586
acef9bd2707e6ac3fce5af6e4120e1abf2b96429
52,517
py
Python
python/ccxt/async_support/kraken.py
Darkbladecr/ccxt
6d0f856a1833209b2d2bd14ca87916a561077a0f
[ "MIT" ]
5
2019-04-14T12:50:46.000Z
2020-11-16T13:18:56.000Z
python/ccxt/async_support/kraken.py
Darkbladecr/ccxt
6d0f856a1833209b2d2bd14ca87916a561077a0f
[ "MIT" ]
null
null
null
python/ccxt/async_support/kraken.py
Darkbladecr/ccxt
6d0f856a1833209b2d2bd14ca87916a561077a0f
[ "MIT" ]
3
2019-04-10T23:51:01.000Z
2021-08-30T02:40:24.000Z
# -*- coding: utf-8 -*- # PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: # https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code from ccxt.async_support.base.exchange import Exchange # ----------------------------------------------------------------------------- try: basestring # Python 3 except NameError: basestring = str # Python 2 import base64 import hashlib import math from ccxt.base.errors import ExchangeError from ccxt.base.errors import AuthenticationError from ccxt.base.errors import PermissionDenied from ccxt.base.errors import ArgumentsRequired from ccxt.base.errors import InsufficientFunds from ccxt.base.errors import InvalidAddress from ccxt.base.errors import InvalidOrder from ccxt.base.errors import OrderNotFound from ccxt.base.errors import CancelPending from ccxt.base.errors import DDoSProtection from ccxt.base.errors import ExchangeNotAvailable from ccxt.base.errors import InvalidNonce from ccxt.base.decimal_to_precision import TRUNCATE from ccxt.base.decimal_to_precision import DECIMAL_PLACES class kraken (Exchange): def describe(self): return self.deep_extend(super(kraken, self).describe(), { 'id': 'kraken', 'name': 'Kraken', 'countries': ['US'], 'version': '0', 'rateLimit': 3000, 'certified': True, 'has': { 'createDepositAddress': True, 'fetchDepositAddress': True, 'fetchTradingFee': True, 'fetchTradingFees': True, 'CORS': False, 'fetchCurrencies': True, 'fetchTickers': True, 'fetchOHLCV': True, 'fetchOrder': True, 'fetchOpenOrders': True, 'fetchClosedOrders': True, 'fetchMyTrades': True, 'fetchWithdrawals': True, 'fetchDeposits': True, 'withdraw': True, 'fetchLedgerEntry': True, 'fetchLedger': True, }, 'marketsByAltname': {}, 'timeframes': { '1m': '1', '5m': '5', '15m': '15', '30m': '30', '1h': '60', '4h': '240', '1d': '1440', '1w': '10080', '2w': '21600', }, 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/27766599-22709304-5ede-11e7-9de1-9f33732e1509.jpg', 'api': { 'public': 'https://api.kraken.com', 'private': 'https://api.kraken.com', 'zendesk': 'https://support.kraken.com/hc/en-us/articles/', }, 'www': 'https://www.kraken.com', 'doc': [ 'https://www.kraken.com/en-us/help/api', 'https://github.com/nothingisdead/npm-kraken-api', ], 'fees': 'https://www.kraken.com/en-us/help/fees', }, 'fees': { 'trading': { 'tierBased': True, 'percentage': True, 'taker': 0.26 / 100, 'maker': 0.16 / 100, 'tiers': { 'taker': [ [0, 0.0026], [50000, 0.0024], [100000, 0.0022], [250000, 0.0020], [500000, 0.0018], [1000000, 0.0016], [2500000, 0.0014], [5000000, 0.0012], [10000000, 0.0001], ], 'maker': [ [0, 0.0016], [50000, 0.0014], [100000, 0.0012], [250000, 0.0010], [500000, 0.0008], [1000000, 0.0006], [2500000, 0.0004], [5000000, 0.0002], [10000000, 0.0], ], }, }, # self is a bad way of hardcoding fees that change on daily basis # hardcoding is now considered obsolete, we will remove all of it eventually 'funding': { 'tierBased': False, 'percentage': False, 'withdraw': { 'BTC': 0.001, 'ETH': 0.005, 'XRP': 0.02, 'XLM': 0.00002, 'LTC': 0.02, 'DOGE': 2, 'ZEC': 0.00010, 'ICN': 0.02, 'REP': 0.01, 'ETC': 0.005, 'MLN': 0.003, 'XMR': 0.05, 'DASH': 0.005, 'GNO': 0.01, 'EOS': 0.5, 'BCH': 0.001, 'XTZ': 0.05, 'USD': 5, # if domestic wire 'EUR': 5, # if domestic wire 'CAD': 10, # CAD EFT Withdrawal 'JPY': 300, # if domestic wire }, 'deposit': { 'BTC': 0, 'ETH': 0, 'XRP': 0, 'XLM': 0, 'LTC': 0, 'DOGE': 0, 'ZEC': 0, 'ICN': 0, 'REP': 0, 'ETC': 0, 'MLN': 0, 'XMR': 0, 'DASH': 0, 'GNO': 0, 'EOS': 0, 'BCH': 0, 'XTZ': 0.05, 'USD': 5, # if 
domestic wire 'EUR': 0, # free deposit if EUR SEPA Deposit 'CAD': 5, # if domestic wire 'JPY': 0, # Domestic Deposit(Free, ¥5,000 deposit minimum) }, }, }, 'api': { 'zendesk': { 'get': [ # we should really refrain from putting fixed fee numbers and stop hardcoding # we will be using their web APIs to scrape all numbers from these articles '205893708-What-is-the-minimum-order-size-', '201396777-What-are-the-deposit-fees-', '201893608-What-are-the-withdrawal-fees-', ], }, 'public': { 'get': [ 'Assets', 'AssetPairs', 'Depth', 'OHLC', 'Spread', 'Ticker', 'Time', 'Trades', ], }, 'private': { 'post': [ 'AddOrder', 'Balance', 'CancelOrder', 'ClosedOrders', 'DepositAddresses', 'DepositMethods', 'DepositStatus', 'Ledgers', 'OpenOrders', 'OpenPositions', 'QueryLedgers', 'QueryOrders', 'QueryTrades', 'TradeBalance', 'TradesHistory', 'TradeVolume', 'Withdraw', 'WithdrawCancel', 'WithdrawInfo', 'WithdrawStatus', ], }, }, 'commonCurrencies': { 'XDG': 'DOGE', }, 'options': { 'cacheDepositMethodsOnFetchDepositAddress': True, # will issue up to two calls in fetchDepositAddress 'depositMethods': {}, 'delistedMarketsById': {}, # cannot withdraw/deposit these 'inactiveCurrencies': ['CAD', 'USD', 'JPY', 'GBP'], }, 'exceptions': { 'EAPI:Invalid key': AuthenticationError, 'EFunding:Unknown withdraw key': ExchangeError, 'EFunding:Invalid amount': InsufficientFunds, 'EService:Unavailable': ExchangeNotAvailable, 'EDatabase:Internal error': ExchangeNotAvailable, 'EService:Busy': ExchangeNotAvailable, 'EQuery:Unknown asset': ExchangeError, 'EAPI:Rate limit exceeded': DDoSProtection, 'EOrder:Rate limit exceeded': DDoSProtection, 'EGeneral:Internal error': ExchangeNotAvailable, 'EGeneral:Temporary lockout': DDoSProtection, 'EGeneral:Permission denied': PermissionDenied, }, }) def cost_to_precision(self, symbol, cost): return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['price'], DECIMAL_PLACES) def fee_to_precision(self, symbol, fee): return self.decimal_to_precision(fee, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES) async def fetch_min_order_amounts(self): html = await self.zendeskGet205893708WhatIsTheMinimumOrderSize() parts = html.split('<td class="wysiwyg-text-align-right">') numParts = len(parts) if numParts < 3: raise ExchangeError(self.id + ' fetchMinOrderAmounts HTML page markup has changed: https://support.kraken.com/hc/en-us/articles/205893708-What-is-the-minimum-order-size-') result = {} # skip the part before the header and the header itself for i in range(2, len(parts)): part = parts[i] chunks = part.split('</td>') amountAndCode = chunks[0] if amountAndCode != 'To Be Announced': pieces = amountAndCode.split(' ') numPieces = len(pieces) if numPieces == 2: amount = float(pieces[0]) code = self.common_currency_code(pieces[1]) result[code] = amount return result async def fetch_markets(self, params={}): markets = await self.publicGetAssetPairs() limits = await self.fetch_min_order_amounts() keys = list(markets['result'].keys()) result = [] for i in range(0, len(keys)): id = keys[i] market = markets['result'][id] baseId = market['base'] quoteId = market['quote'] base = baseId quote = quoteId if len(base) > 3: if (base[0] == 'X') or (base[0] == 'Z'): base = base[1:] if len(quote) > 3: if (quote[0] == 'X') or (quote[0] == 'Z'): quote = quote[1:] base = self.common_currency_code(base) quote = self.common_currency_code(quote) darkpool = id.find('.d') >= 0 symbol = market['altname'] if darkpool else (base + '/' + quote) maker = None if 'fees_maker' in market: 
maker = float(market['fees_maker'][0][1]) / 100 precision = { 'amount': market['lot_decimals'], 'price': market['pair_decimals'], } minAmount = math.pow(10, -precision['amount']) if base in limits: minAmount = limits[base] result.append({ 'id': id, 'symbol': symbol, 'base': base, 'quote': quote, 'baseId': baseId, 'quoteId': quoteId, 'darkpool': darkpool, 'info': market, 'altname': market['altname'], 'maker': maker, 'taker': float(market['fees'][0][1]) / 100, 'active': True, 'precision': precision, 'limits': { 'amount': { 'min': minAmount, 'max': math.pow(10, precision['amount']), }, 'price': { 'min': math.pow(10, -precision['price']), 'max': None, }, 'cost': { 'min': 0, 'max': None, }, }, }) result = self.append_inactive_markets(result) self.marketsByAltname = self.index_by(result, 'altname') return result def append_inactive_markets(self, result): # result should be an array to append to precision = {'amount': 8, 'price': 8} costLimits = {'min': 0, 'max': None} priceLimits = {'min': math.pow(10, -precision['price']), 'max': None} amountLimits = {'min': math.pow(10, -precision['amount']), 'max': math.pow(10, precision['amount'])} limits = {'amount': amountLimits, 'price': priceLimits, 'cost': costLimits} defaults = { 'darkpool': False, 'info': None, 'maker': None, 'taker': None, 'active': False, 'precision': precision, 'limits': limits, } markets = [ # {'id': 'XXLMZEUR', 'symbol': 'XLM/EUR', 'base': 'XLM', 'quote': 'EUR', 'altname': 'XLMEUR'}, ] for i in range(0, len(markets)): result.append(self.extend(defaults, markets[i])) return result async def fetch_currencies(self, params={}): response = await self.publicGetAssets(params) # # { # "error": [], # "result": { # "ADA": {"aclass": "currency", "altname": "ADA", "decimals": 8, "display_decimals": 6}, # "BCH": {"aclass": "currency", "altname": "BCH", "decimals": 10, "display_decimals": 5}, # ... 
# }, # } # currencies = self.safe_value(response, 'result') ids = list(currencies.keys()) result = {} for i in range(0, len(ids)): id = ids[i] currency = currencies[id] # todo: will need to rethink the fees # see: https://support.kraken.com/hc/en-us/articles/201893608-What-are-the-withdrawal-fees- # to add support for multiple withdrawal/deposit methods and # differentiated fees for each particular method code = self.common_currency_code(self.safe_string(currency, 'altname')) precision = self.safe_integer(currency, 'decimals') # assumes all currencies are active except those listed above active = not self.in_array(code, self.options['inactiveCurrencies']) result[code] = { 'id': id, 'code': code, 'info': currency, 'name': code, 'active': active, 'fee': None, 'precision': precision, 'limits': { 'amount': { 'min': math.pow(10, -precision), 'max': math.pow(10, precision), }, 'price': { 'min': math.pow(10, -precision), 'max': math.pow(10, precision), }, 'cost': { 'min': None, 'max': None, }, 'withdraw': { 'min': None, 'max': math.pow(10, precision), }, }, } return result async def fetch_trading_fees(self, params={}): await self.load_markets() self.check_required_credentials() response = await self.privatePostTradeVolume(params) tradedVolume = self.safe_float(response['result'], 'volume') tiers = self.fees['trading']['tiers'] taker = tiers['taker'][1] maker = tiers['maker'][1] for i in range(0, len(tiers['taker'])): if tradedVolume >= tiers['taker'][i][0]: taker = tiers['taker'][i][1] for i in range(0, len(tiers['maker'])): if tradedVolume >= tiers['maker'][i][0]: maker = tiers['maker'][i][1] return { 'info': response, 'maker': maker, 'taker': taker, } async def fetch_order_book(self, symbol, limit=None, params={}): await self.load_markets() market = self.market(symbol) if market['darkpool']: raise ExchangeError(self.id + ' does not provide an order book for darkpool symbol ' + symbol) request = { 'pair': market['id'], } if limit is not None: request['count'] = limit # 100 response = await self.publicGetDepth(self.extend(request, params)) orderbook = response['result'][market['id']] return self.parse_order_book(orderbook) def parse_ticker(self, ticker, market=None): timestamp = self.milliseconds() symbol = None if market: symbol = market['symbol'] baseVolume = float(ticker['v'][1]) vwap = float(ticker['p'][1]) quoteVolume = None if baseVolume is not None and vwap is not None: quoteVolume = baseVolume * vwap last = float(ticker['c'][0]) return { 'symbol': symbol, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'high': float(ticker['h'][1]), 'low': float(ticker['l'][1]), 'bid': float(ticker['b'][0]), 'bidVolume': None, 'ask': float(ticker['a'][0]), 'askVolume': None, 'vwap': vwap, 'open': self.safe_float(ticker, 'o'), 'close': last, 'last': last, 'previousClose': None, 'change': None, 'percentage': None, 'average': None, 'baseVolume': baseVolume, 'quoteVolume': quoteVolume, 'info': ticker, } async def fetch_tickers(self, symbols=None, params={}): await self.load_markets() pairs = [] for s in range(0, len(self.symbols)): symbol = self.symbols[s] market = self.markets[symbol] if market['active']: if not market['darkpool']: pairs.append(market['id']) filter = ','.join(pairs) response = await self.publicGetTicker(self.extend({ 'pair': filter, }, params)) tickers = response['result'] ids = list(tickers.keys()) result = {} for i in range(0, len(ids)): id = ids[i] market = self.markets_by_id[id] symbol = market['symbol'] ticker = tickers[id] result[symbol] = self.parse_ticker(ticker, market) 
return result async def fetch_ticker(self, symbol, params={}): await self.load_markets() darkpool = symbol.find('.d') >= 0 if darkpool: raise ExchangeError(self.id + ' does not provide a ticker for darkpool symbol ' + symbol) market = self.market(symbol) response = await self.publicGetTicker(self.extend({ 'pair': market['id'], }, params)) ticker = response['result'][market['id']] return self.parse_ticker(ticker, market) def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None): return [ ohlcv[0] * 1000, float(ohlcv[1]), float(ohlcv[2]), float(ohlcv[3]), float(ohlcv[4]), float(ohlcv[6]), ] async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}): await self.load_markets() market = self.market(symbol) request = { 'pair': market['id'], 'interval': self.timeframes[timeframe], } if since is not None: request['since'] = int((since - 1) / 1000) response = await self.publicGetOHLC(self.extend(request, params)) ohlcvs = response['result'][market['id']] return self.parse_ohlcvs(ohlcvs, market, timeframe, since, limit) def parse_ledger_entry_type(self, type): types = { 'trade': 'trade', 'withdrawal': 'transaction', 'deposit': 'transaction', 'transfer': 'transfer', 'margin': 'margin', } return self.safe_string(types, type, type) def parse_ledger_entry(self, item, currency=None): # {'LTFK7F-N2CUX-PNY4SX': { refid: "TSJTGT-DT7WN-GPPQMJ", # time: 1520102320.555, # type: "trade", # aclass: "currency", # asset: "XETH", # amount: "0.1087194600", # fee: "0.0000000000", # balance: "0.2855851000" }, ...} id = self.safe_string(item, 'id') direction = None account = None referenceId = self.safe_string(item, 'refid') referenceAccount = None type = self.parse_ledger_entry_type(self.safe_string(item, 'type')) code = self.safeCurrencyCode(item, 'asset', currency) amount = self.safe_float(item, 'amount') if amount < 0: direction = 'out' amount = abs(amount) else: direction = 'in' time = self.safe_float(item, 'time') timestamp = None datetime = None if time is not None: timestamp = int(time * 1000) datetime = self.iso8601(timestamp) fee = { 'cost': self.safe_float(item, 'fee'), 'currency': code, } before = None after = self.safe_float(item, 'balance') return { 'info': item, 'id': id, 'direction': direction, 'account': account, 'referenceId': referenceId, 'referenceAccount': referenceAccount, 'type': type, 'currency': code, 'amount': amount, 'before': before, 'after': after, 'timestamp': timestamp, 'datetime': datetime, 'fee': fee, } async def fetch_ledger(self, code=None, since=None, limit=None, params={}): # https://www.kraken.com/features/api#get-ledgers-info await self.load_markets() request = {} currency = None if code is not None: currency = self.currency(code) request['asset'] = currency['id'] if since is not None: request['start'] = int(since / 1000) response = await self.privatePostLedgers(self.extend(request, params)) # { error: [], # result: {ledger: {'LPUAIB-TS774-UKHP7X': { refid: "A2B4HBV-L4MDIE-JU4N3N", # time: 1520103488.314, # type: "withdrawal", # aclass: "currency", # asset: "XETH", # amount: "-0.2805800000", # fee: "0.0050000000", # balance: "0.0000051000" }, result = self.safe_value(response, 'result', {}) ledger = self.safe_value(result, 'ledger', {}) keys = list(ledger.keys()) items = [] for i in range(0, len(keys)): key = keys[i] value = ledger[key] value['id'] = key items.append(value) return self.parse_ledger(items, currency, since, limit) async def fetch_ledger_entries_by_ids(self, ids, code=None, params={}): # 
https://www.kraken.com/features/api#query-ledgers await self.load_markets() ids = ','.join(ids) request = self.extend({ 'id': ids, }, params) response = await self.privatePostQueryLedgers(request) # { error: [], # result: {'LPUAIB-TS774-UKHP7X': { refid: "A2B4HBV-L4MDIE-JU4N3N", # time: 1520103488.314, # type: "withdrawal", # aclass: "currency", # asset: "XETH", # amount: "-0.2805800000", # fee: "0.0050000000", # balance: "0.0000051000" }} } result = response['result'] keys = list(result.keys()) items = [] for i in range(0, len(keys)): key = keys[i] value = result[key] value['id'] = key items.append(value) return self.parse_ledger(items) async def fetch_ledger_entry(self, id, code=None, params={}): items = await self.fetchLedgerEntrysByIds([id], code, params) return items[0] def parse_trade(self, trade, market=None): timestamp = None side = None type = None price = None amount = None id = None order = None fee = None marketId = self.safe_string(trade, 'pair') foundMarket = self.find_market_by_altname_or_id(marketId) symbol = None if foundMarket is not None: market = foundMarket elif marketId is not None: # delisted market ids go here market = self.get_delisted_market_by_id(marketId) if market is not None: symbol = market['symbol'] if 'ordertxid' in trade: order = trade['ordertxid'] id = self.safe_string_2(trade, 'id', 'postxid') timestamp = int(trade['time'] * 1000) side = trade['type'] type = trade['ordertype'] price = self.safe_float(trade, 'price') amount = self.safe_float(trade, 'vol') if 'fee' in trade: currency = None if market: currency = market['quote'] fee = { 'cost': self.safe_float(trade, 'fee'), 'currency': currency, } else: timestamp = int(trade[2] * 1000) side = 'sell' if (trade[3] == 's') else 'buy' type = 'limit' if (trade[4] == 'l') else 'market' price = float(trade[0]) amount = float(trade[1]) tradeLength = len(trade) if tradeLength > 6: id = trade[6] # artificially added as per #1794 return { 'id': id, 'order': order, 'info': trade, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'symbol': symbol, 'type': type, 'side': side, 'price': price, 'amount': amount, 'cost': price * amount, 'fee': fee, } async def fetch_trades(self, symbol, since=None, limit=None, params={}): await self.load_markets() market = self.market(symbol) id = market['id'] response = await self.publicGetTrades(self.extend({ 'pair': id, }, params)) # # { # "error": [], # "result": { # "XETHXXBT": [ # ["0.032310","4.28169434",1541390792.763,"s","l",""] # ], # "last": "1541439421200678657" # } # } # result = response['result'] trades = result[id] # trades is a sorted array: last(most recent trade) goes last length = len(trades) if length <= 0: return [] lastTrade = trades[length - 1] lastTradeId = self.safe_string(result, 'last') lastTrade.append(lastTradeId) return self.parse_trades(trades, market, since, limit) async def fetch_balance(self, params={}): await self.load_markets() response = await self.privatePostBalance(params) balances = self.safe_value(response, 'result') if balances is None: raise ExchangeNotAvailable(self.id + ' fetchBalance failed due to a malformed response ' + self.json(response)) result = {'info': balances} currencies = list(balances.keys()) for c in range(0, len(currencies)): currency = currencies[c] code = currency if code in self.currencies_by_id: code = self.currencies_by_id[code]['code'] else: # X-ISO4217-A3 standard currency codes if code[0] == 'X': code = code[1:] elif code[0] == 'Z': code = code[1:] code = self.common_currency_code(code) balance = 
float(balances[currency]) account = { 'free': balance, 'used': 0.0, 'total': balance, } result[code] = account return self.parse_balance(result) async def create_order(self, symbol, type, side, amount, price=None, params={}): await self.load_markets() market = self.market(symbol) order = { 'pair': market['id'], 'type': side, 'ordertype': type, 'volume': self.amount_to_precision(symbol, amount), } priceIsDefined = (price is not None) marketOrder = (type == 'market') limitOrder = (type == 'limit') shouldIncludePrice = limitOrder or (not marketOrder and priceIsDefined) if shouldIncludePrice: order['price'] = self.price_to_precision(symbol, price) response = await self.privatePostAddOrder(self.extend(order, params)) id = self.safe_value(response['result'], 'txid') if id is not None: if isinstance(id, list): length = len(id) id = id if (length > 1) else id[0] return { 'info': response, 'id': id, } def find_market_by_altname_or_id(self, id): if id in self.marketsByAltname: return self.marketsByAltname[id] elif id in self.markets_by_id: return self.markets_by_id[id] return None def get_delisted_market_by_id(self, id): if id is None: return id market = self.safe_value(self.options['delistedMarketsById'], id) if market is not None: return market baseIdStart = 0 baseIdEnd = 3 quoteIdStart = 3 quoteIdEnd = 6 if len(id) == 8: baseIdEnd = 4 quoteIdStart = 4 quoteIdEnd = 8 elif len(id) == 7: baseIdEnd = 4 quoteIdStart = 4 quoteIdEnd = 7 baseId = id[baseIdStart:baseIdEnd] quoteId = id[quoteIdStart:quoteIdEnd] base = baseId quote = quoteId if len(base) > 3: if (base[0] == 'X') or (base[0] == 'Z'): base = base[1:] if len(quote) > 3: if (quote[0] == 'X') or (quote[0] == 'Z'): quote = quote[1:] base = self.common_currency_code(base) quote = self.common_currency_code(quote) symbol = base + '/' + quote market = { 'symbol': symbol, 'base': base, 'quote': quote, 'baseId': baseId, 'quoteId': quoteId, } self.options['delistedMarketsById'][id] = market return market def parse_order_status(self, status): statuses = { 'pending': 'open', # order pending book entry 'open': 'open', 'closed': 'closed', 'canceled': 'canceled', 'expired': 'expired', } return self.safe_string(statuses, status, status) def parse_order(self, order, market=None): description = order['descr'] side = description['type'] type = description['ordertype'] marketId = self.safe_string(description, 'pair') foundMarket = self.find_market_by_altname_or_id(marketId) symbol = None if foundMarket is not None: market = foundMarket elif marketId is not None: # delisted market ids go here market = self.get_delisted_market_by_id(marketId) timestamp = int(order['opentm'] * 1000) amount = self.safe_float(order, 'vol') filled = self.safe_float(order, 'vol_exec') remaining = amount - filled fee = None cost = self.safe_float(order, 'cost') price = self.safe_float(description, 'price') if (price is None) or (price == 0): price = self.safe_float(description, 'price2') if (price is None) or (price == 0): price = self.safe_float(order, 'price', price) average = self.safe_float(order, 'price') if market is not None: symbol = market['symbol'] if 'fee' in order: flags = order['oflags'] feeCost = self.safe_float(order, 'fee') fee = { 'cost': feeCost, 'rate': None, } if flags.find('fciq') >= 0: fee['currency'] = market['quote'] elif flags.find('fcib') >= 0: fee['currency'] = market['base'] status = self.parse_order_status(self.safe_string(order, 'status')) return { 'id': order['id'], 'info': order, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 
'lastTradeTimestamp': None, 'status': status, 'symbol': symbol, 'type': type, 'side': side, 'price': price, 'cost': cost, 'amount': amount, 'filled': filled, 'average': average, 'remaining': remaining, 'fee': fee, # 'trades': self.parse_trades(order['trades'], market), } def parse_orders(self, orders, market=None, since=None, limit=None): result = [] ids = list(orders.keys()) for i in range(0, len(ids)): id = ids[i] order = self.extend({'id': id}, orders[id]) result.append(self.parse_order(order, market)) return self.filter_by_since_limit(result, since, limit) async def fetch_order(self, id, symbol=None, params={}): await self.load_markets() response = await self.privatePostQueryOrders(self.extend({ 'trades': True, # whether or not to include trades in output(optional, default False) 'txid': id, # do not comma separate a list of ids - use fetchOrdersByIds instead # 'userref': 'optional', # restrict results to given user reference id(optional) }, params)) orders = response['result'] order = self.parse_order(self.extend({'id': id}, orders[id])) return self.extend({'info': response}, order) async def fetch_orders_by_ids(self, ids, symbol=None, params={}): await self.load_markets() response = await self.privatePostQueryOrders(self.extend({ 'trades': True, # whether or not to include trades in output(optional, default False) 'txid': ','.join(ids), # comma delimited list of transaction ids to query info about(20 maximum) }, params)) result = self.safe_value(response, 'result', {}) orders = [] orderIds = list(result.keys()) for i in range(0, len(orderIds)): id = orderIds[i] item = result[id] order = self.parse_order(self.extend({'id': id}, item)) orders.append(order) return orders async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}): await self.load_markets() request = { # 'type': 'all', # any position, closed position, closing position, no position # 'trades': False, # whether or not to include trades related to position in output # 'start': 1234567890, # starting unix timestamp or trade tx id of results(exclusive) # 'end': 1234567890, # ending unix timestamp or trade tx id of results(inclusive) # 'ofs' = result offset } if since is not None: request['start'] = int(since / 1000) response = await self.privatePostTradesHistory(self.extend(request, params)) # # { # "error": [], # "result": { # "trades": { # "GJ3NYQ-XJRTF-THZABF": { # "ordertxid": "TKH2SE-ZIF5E-CFI7LT", # "postxid": "OEN3VX-M7IF5-JNBJAM", # "pair": "XICNXETH", # "time": 1527213229.4491, # "type": "sell", # "ordertype": "limit", # "price": "0.001612", # "cost": "0.025792", # "fee": "0.000026", # "vol": "16.00000000", # "margin": "0.000000", # "misc": "" # }, # ... 
# }, # "count": 9760, # }, # } # trades = response['result']['trades'] ids = list(trades.keys()) for i in range(0, len(ids)): trades[ids[i]]['id'] = ids[i] result = self.parse_trades(trades, None, since, limit) if symbol is None: return result return self.filter_by_symbol(result, symbol) async def cancel_order(self, id, symbol=None, params={}): await self.load_markets() response = None try: response = await self.privatePostCancelOrder(self.extend({ 'txid': id, }, params)) except Exception as e: if self.last_http_response: if self.last_http_response.find('EOrder:Unknown order') >= 0: raise OrderNotFound(self.id + ' cancelOrder() error ' + self.last_http_response) raise e return response async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}): await self.load_markets() request = {} if since is not None: request['start'] = int(since / 1000) response = await self.privatePostOpenOrders(self.extend(request, params)) orders = self.parse_orders(response['result']['open'], None, since, limit) if symbol is None: return orders return self.filter_by_symbol(orders, symbol) async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}): await self.load_markets() request = {} if since is not None: request['start'] = int(since / 1000) response = await self.privatePostClosedOrders(self.extend(request, params)) orders = self.parse_orders(response['result']['closed'], None, since, limit) if symbol is None: return orders return self.filter_by_symbol(orders, symbol) async def fetch_deposit_methods(self, code, params={}): await self.load_markets() currency = self.currency(code) response = await self.privatePostDepositMethods(self.extend({ 'asset': currency['id'], }, params)) return response['result'] def parse_transaction_status(self, status): # IFEX transaction states statuses = { 'Initial': 'pending', 'Pending': 'pending', 'Success': 'ok', 'Settled': 'ok', 'Failure': 'failed', 'Partial': 'ok', } return self.safe_string(statuses, status, status) def parse_transaction(self, transaction, currency=None): # # fetchDeposits # # {method: "Ether(Hex)", # aclass: "currency", # asset: "XETH", # refid: "Q2CANKL-LBFVEE-U4Y2WQ", # txid: "0x57fd704dab1a73c20e24c8696099b695d596924b401b261513cfdab23…", # info: "0x615f9ba7a9575b0ab4d571b2b36b1b324bd83290", # amount: "7.9999257900", # fee: "0.0000000000", # time: 1529223212, # status: "Success" } # # fetchWithdrawals # # {method: "Ether", # aclass: "currency", # asset: "XETH", # refid: "A2BF34S-O7LBNQ-UE4Y4O", # txid: "0x288b83c6b0904d8400ef44e1c9e2187b5c8f7ea3d838222d53f701a15b5c274d", # info: "0x7cb275a5e07ba943fee972e165d80daa67cb2dd0", # amount: "9.9950000000", # fee: "0.0050000000", # time: 1530481750, # status: "Success" } # id = self.safe_string(transaction, 'refid') txid = self.safe_string(transaction, 'txid') timestamp = self.safe_integer(transaction, 'time') if timestamp is not None: timestamp = timestamp * 1000 code = None currencyId = self.safe_string(transaction, 'asset') currency = self.safe_value(self.currencies_by_id, currencyId) if currency is not None: code = currency['code'] else: code = self.common_currency_code(currencyId) address = self.safe_string(transaction, 'info') amount = self.safe_float(transaction, 'amount') status = self.parse_transaction_status(self.safe_string(transaction, 'status')) type = self.safe_string(transaction, 'type') # injected from the outside feeCost = self.safe_float(transaction, 'fee') if feeCost is None: if type == 'deposit': feeCost = 0 return { 'info': transaction, 'id': id, 
'currency': code, 'amount': amount, 'address': address, 'tag': None, 'status': status, 'type': type, 'updated': None, 'txid': txid, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'fee': { 'currency': code, 'cost': feeCost, }, } def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None): result = [] for i in range(0, len(transactions)): transaction = self.parse_transaction(self.extend({ 'type': type, }, transactions[i])) result.append(transaction) return self.filterByCurrencySinceLimit(result, code, since, limit) async def fetch_deposits(self, code=None, since=None, limit=None, params={}): await self.load_markets() # https://www.kraken.com/en-us/help/api#deposit-status if code is None: raise ArgumentsRequired(self.id + ' fetchDeposits requires a currency code argument') currency = self.currency(code) request = { 'asset': currency['id'], } response = await self.privatePostDepositStatus(self.extend(request, params)) # # { error: [], # result: [{method: "Ether(Hex)", # aclass: "currency", # asset: "XETH", # refid: "Q2CANKL-LBFVEE-U4Y2WQ", # txid: "0x57fd704dab1a73c20e24c8696099b695d596924b401b261513cfdab23…", # info: "0x615f9ba7a9575b0ab4d571b2b36b1b324bd83290", # amount: "7.9999257900", # fee: "0.0000000000", # time: 1529223212, # status: "Success" }]} # return self.parse_transactions_by_type('deposit', response['result'], code, since, limit) async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}): await self.load_markets() # https://www.kraken.com/en-us/help/api#withdraw-status if code is None: raise ArgumentsRequired(self.id + ' fetchWithdrawals requires a currency code argument') currency = self.currency(code) request = { 'asset': currency['id'], } response = await self.privatePostWithdrawStatus(self.extend(request, params)) # # { error: [], # result: [{method: "Ether", # aclass: "currency", # asset: "XETH", # refid: "A2BF34S-O7LBNQ-UE4Y4O", # txid: "0x298c83c7b0904d8400ef43e1c9e2287b518f7ea3d838822d53f704a1565c274d", # info: "0x7cb275a5e07ba943fee972e165d80daa67cb2dd0", # amount: "9.9950000000", # fee: "0.0050000000", # time: 1530481750, # status: "Success" }]} # return self.parse_transactions_by_type('withdrawal', response['result'], code, since, limit) async def create_deposit_address(self, code, params={}): request = { 'new': 'true', } response = await self.fetch_deposit_address(code, self.extend(request, params)) address = self.safe_string(response, 'address') self.check_address(address) return { 'currency': code, 'address': address, 'info': response, } async def fetch_deposit_address(self, code, params={}): await self.load_markets() currency = self.currency(code) # eslint-disable-next-line quotes method = self.safe_string(params, 'method') if method is None: if self.options['cacheDepositMethodsOnFetchDepositAddress']: # cache depositMethods if not(code in list(self.options['depositMethods'].keys())): self.options['depositMethods'][code] = await self.fetch_deposit_methods(code) method = self.options['depositMethods'][code][0]['method'] else: raise ExchangeError(self.id + ' fetchDepositAddress() requires an extra `method` parameter. 
Use fetchDepositMethods("' + code + '") to get a list of available deposit methods or enable the exchange property .options["cacheDepositMethodsOnFetchDepositAddress"] = True') request = { 'asset': currency['id'], 'method': method, } response = await self.privatePostDepositAddresses(self.extend(request, params)) # overwrite methods result = response['result'] numResults = len(result) if numResults < 1: raise InvalidAddress(self.id + ' privatePostDepositAddresses() returned no addresses') address = self.safe_string(result[0], 'address') tag = self.safe_string_2(result[0], 'tag', 'memo') self.check_address(address) return { 'currency': code, 'address': address, 'tag': tag, 'info': response, } async def withdraw(self, code, amount, address, tag=None, params={}): self.check_address(address) if 'key' in params: await self.load_markets() currency = self.currency(code) response = await self.privatePostWithdraw(self.extend({ 'asset': currency['id'], 'amount': amount, # 'address': address, # they don't allow withdrawals to direct addresses }, params)) return { 'info': response, 'id': response['result'], } raise ExchangeError(self.id + " withdraw requires a 'key' parameter(withdrawal key name, as set up on your account)") def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): url = '/' + self.version + '/' + api + '/' + path if api == 'public': if params: url += '?' + self.urlencode(params) elif api == 'private': self.check_required_credentials() nonce = str(self.nonce()) body = self.urlencode(self.extend({'nonce': nonce}, params)) auth = self.encode(nonce + body) hash = self.hash(auth, 'sha256', 'binary') binary = self.encode(url) binhash = self.binary_concat(binary, hash) secret = base64.b64decode(self.secret) signature = self.hmac(binhash, secret, hashlib.sha512, 'base64') headers = { 'API-Key': self.apiKey, 'API-Sign': self.decode(signature), 'Content-Type': 'application/x-www-form-urlencoded', } else: url = '/' + path url = self.urls['api'][api] + url return {'url': url, 'method': method, 'body': body, 'headers': headers} def nonce(self): return self.milliseconds() def handle_errors(self, code, reason, url, method, headers, body, response): if code == 520: raise ExchangeNotAvailable(self.id + ' ' + str(code) + ' ' + reason) if body.find('Invalid order') >= 0: raise InvalidOrder(self.id + ' ' + body) if body.find('Invalid nonce') >= 0: raise InvalidNonce(self.id + ' ' + body) if body.find('Insufficient funds') >= 0: raise InsufficientFunds(self.id + ' ' + body) if body.find('Cancel pending') >= 0: raise CancelPending(self.id + ' ' + body) if body.find('Invalid arguments:volume') >= 0: raise InvalidOrder(self.id + ' ' + body) if body[0] == '{': if not isinstance(response, basestring): if 'error' in response: numErrors = len(response['error']) if numErrors: message = self.id + ' ' + self.json(response) for i in range(0, len(response['error'])): if response['error'][i] in self.exceptions: raise self.exceptions[response['error'][i]](message) raise ExchangeError(message)
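The sign() method above implements Kraken's documented private-endpoint authentication: API-Sign is an HMAC-SHA512 over the URI path concatenated with SHA256(nonce + urlencoded POST body), keyed with the base64-decoded API secret. A standalone sketch of the same computation outside ccxt (illustrative function name; the secret here is a placeholder):

import base64
import hashlib
import hmac
import time
import urllib.parse

def kraken_sign(urlpath, data, api_secret_b64):
    """Return the API-Sign header value for a Kraken private request."""
    postdata = urllib.parse.urlencode(data)
    sha = hashlib.sha256((str(data["nonce"]) + postdata).encode()).digest()
    mac = hmac.new(base64.b64decode(api_secret_b64),
                   urlpath.encode() + sha, hashlib.sha512)
    return base64.b64encode(mac.digest()).decode()

data = {"nonce": int(time.time() * 1000)}
sig = kraken_sign("/0/private/Balance", data,
                  base64.b64encode(b"dummy-secret").decode())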
40.397692
284
0.478969
acef9c8688eab69d071dffbecb027c3555dc9e1b
1,552
py
Python
setup.py
hbcarlos/lsp_parser
b9549f74a2d74a1fea43a5c4895ab2bde560381b
[ "BSD-3-Clause" ]
null
null
null
setup.py
hbcarlos/lsp_parser
b9549f74a2d74a1fea43a5c4895ab2bde560381b
[ "BSD-3-Clause" ]
null
null
null
setup.py
hbcarlos/lsp_parser
b9549f74a2d74a1fea43a5c4895ab2bde560381b
[ "BSD-3-Clause" ]
null
null
null
""" lsp_parser setup """ import setuptools from pathlib import Path from jupyter_packaging import ( get_version, get_data_files ) HERE = Path(__file__).parent.resolve() name = "lsp_parser" version = get_version((HERE / name / '_version.py')) license = (HERE / "LICENSE").read_text() long_description = (HERE / "README.md").read_text() data_files_spec = [ ("etc/jupyter/jupyter_server_config.d", "jupyter-config/server-config", "lsp_parser.json"), # For backward compatibility with notebook server ("etc/jupyter/jupyter_notebook_config.d", "jupyter-config/nb-config", "lsp_parser.json"), ] setup_args = dict( name=name, version=version, license=license, long_description=long_description, long_description_content_type="text/markdown", packages=setuptools.find_packages(), data_files=get_data_files(data_files_spec), install_requires=["jupyter_server>=1.6,<2"], zip_safe=False, include_package_data=True, python_requires=">=3.6", platforms="Linux, Mac OS X, Windows", keywords=["Jupyter", "JupyterLab", "JupyterLab3"], classifiers=[ "License :: OSI Approved :: BSD License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Framework :: Jupyter", ], ) if __name__ == "__main__": setuptools.setup(**setup_args)
29.283019
95
0.67268
acef9cc78f112e80f65353c3df1e7e085373df91
1,645
py
Python
setup.py
tg-dyna/sphinx-docxbuilder
9a0521b68ed587bc90b589b443239729ee7b9afa
[ "MIT" ]
3
2020-09-03T18:04:31.000Z
2022-02-14T12:33:52.000Z
setup.py
tg-dyna/sphinx-docxbuilder
9a0521b68ed587bc90b589b443239729ee7b9afa
[ "MIT" ]
7
2019-07-08T13:03:59.000Z
2020-06-08T11:55:34.000Z
setup.py
tg-dyna/sphinx-docxbuilder
9a0521b68ed587bc90b589b443239729ee7b9afa
[ "MIT" ]
1
2020-06-02T11:17:58.000Z
2020-06-02T11:17:58.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import re
import datetime

try:
    from setuptools import find_packages, setup
except ImportError:
    from distutils.core import setup

    def find_packages(path='.', **kwargs):
        ret = []
        for root, dirs, files in os.walk(path):
            if '__init__.py' in files:
                # strip leading separators/dots and convert the path to a dotted name
                ret.append(re.sub('^[^A-Za-z0-9_]+', '', root.replace(os.sep, '.')))
        return ret

# Package meta-data.
DESCRIPTION = ('An extension for docx file generation with Sphinx '
               '(Fork of https://bitbucket.org/haraisao/sphinx-docxbuilder)')

# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
with io.open(os.path.join('README'), encoding='utf-8') as f:
    long_description = '\n' + f.read()

setup(
    name='sphinx-docxbuilder',
    version=datetime.date.today().strftime(r'%Y.%m.%d'),
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Isao Hara',
    author_email='isao-hara@aist.go.jp',
    maintainer='Liam Deacon',
    maintainer_email='liam.m.deacon@gmail.com',
    python_requires='>=2.7',  # 'Python>=2.7' is not a valid version specifier
    url='https://github.com/Lightslayer/sphinx-docxbuilder',
    packages=find_packages(exclude=['tests']),
    license='MIT License',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Programming Language :: Python',
        'Framework :: Sphinx :: Extension',
        'Topic :: Documentation :: Sphinx'
    ]
)
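The distutils fallback above reimplements find_packages by walking the tree for __init__.py files and turning paths into dotted names. A slightly more defensive variant of the same idea, using os.path.relpath so it behaves identically regardless of platform path separators (a sketch under those assumptions, not a drop-in replacement for setuptools):

import os

def find_packages_fallback(path='.'):
    """Collect dotted package names under `path` by looking for __init__.py."""
    packages = []
    for root, dirs, files in os.walk(path):
        if '__init__.py' in files:
            rel = os.path.relpath(root, path)  # e.g. 'pkg/sub' or '.'
            if rel == '.':
                continue  # the search root itself has no dotted name
            packages.append(rel.replace(os.sep, '.'))
    return packages

print(find_packages_fallback('.'))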
31.634615
79
0.651672
acef9d224ab75cb5062c645856b4217f707c7ae2
2,996
py
Python
demo/image-classification/img_classifier.py
Austendeng/PaddleHub
b363eaedaf77d21152920cce652c719278ec809d
[ "Apache-2.0" ]
null
null
null
demo/image-classification/img_classifier.py
Austendeng/PaddleHub
b363eaedaf77d21152920cce652c719278ec809d
[ "Apache-2.0" ]
null
null
null
demo/image-classification/img_classifier.py
Austendeng/PaddleHub
b363eaedaf77d21152920cce652c719278ec809d
[ "Apache-2.0" ]
null
null
null
import argparse
import os

import paddle.fluid as fluid
import paddlehub as hub
import numpy as np

# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--num_epoch", type=int, default=1, help="Number of epoches for fine-tuning.")
parser.add_argument("--use_gpu", type=bool, default=False, help="Whether use GPU for fine-tuning.")
parser.add_argument("--checkpoint_dir", type=str, default="paddlehub_finetune_ckpt", help="Path to save log data.")
parser.add_argument("--batch_size", type=int, default=16, help="Total examples' number in batch for training.")
parser.add_argument("--module", type=str, default="resnet50", help="Module used as feature extractor.")
parser.add_argument("--dataset", type=str, default="flowers", help="Dataset to finetune.")
# yapf: enable.

module_map = {
    "resnet50": "resnet_v2_50_imagenet",
    "resnet101": "resnet_v2_101_imagenet",
    "resnet152": "resnet_v2_152_imagenet",
    "mobilenet": "mobilenet_v2_imagenet",
    "nasnet": "nasnet_imagenet",
    "pnasnet": "pnasnet_imagenet"
}


def finetune(args):
    module = hub.Module(name=args.module)
    input_dict, output_dict, program = module.context(trainable=True)

    if args.dataset.lower() == "flowers":
        dataset = hub.dataset.Flowers()
    elif args.dataset.lower() == "dogcat":
        dataset = hub.dataset.DogCat()
    elif args.dataset.lower() == "indoor67":
        dataset = hub.dataset.Indoor67()
    elif args.dataset.lower() == "food101":
        dataset = hub.dataset.Food101()
    elif args.dataset.lower() == "stanforddogs":
        dataset = hub.dataset.StanfordDogs()
    else:
        raise ValueError("%s dataset is not defined" % args.dataset)

    data_reader = hub.reader.ImageClassificationReader(
        image_width=module.get_expected_image_width(),
        image_height=module.get_expected_image_height(),
        images_mean=module.get_pretrained_images_mean(),
        images_std=module.get_pretrained_images_std(),
        dataset=dataset)

    feature_map = output_dict["feature_map"]
    task = hub.create_img_cls_task(
        feature=feature_map, num_classes=dataset.num_labels)
    img = input_dict["image"]
    feed_list = [img.name, task.variable('label').name]

    config = hub.RunConfig(
        use_cuda=args.use_gpu,
        num_epoch=args.num_epoch,
        batch_size=args.batch_size,
        enable_memory_optim=False,
        checkpoint_dir=args.checkpoint_dir,
        strategy=hub.finetune.strategy.DefaultFinetuneStrategy())

    hub.finetune_and_eval(
        task, feed_list=feed_list, data_reader=data_reader, config=config)


if __name__ == "__main__":
    args = parser.parse_args()
    if args.module not in module_map:
        hub.logger.error("module should be in %s" % module_map.keys())
        exit(1)
    args.module = module_map[args.module]
    finetune(args)
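One pitfall worth flagging in the flag definitions above: type=bool does not parse booleans, since bool("False") is True, so passing --use_gpu False on the command line still enables the GPU path. A common workaround is a small parser function (a sketch; str2bool is an illustrative helper, not part of this script):

import argparse

def str2bool(v):
    """Parse common textual booleans for argparse ('true'/'false', '1'/'0', ...)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "1"):
        return True
    if v.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError("boolean value expected, got %r" % v)

parser = argparse.ArgumentParser()
parser.add_argument("--use_gpu", type=str2bool, default=False)
print(parser.parse_args(["--use_gpu", "False"]).use_gpu)  # False, as intended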
37.924051
141
0.667223
acef9d5a7cf7afdc48037975e3edfc9ce11d0a2c
1,985
py
Python
server.py
mischmit/pyframe
a0bdd0da242d0164e6525dbba4e72d1c59cda109
[ "MIT" ]
null
null
null
server.py
mischmit/pyframe
a0bdd0da242d0164e6525dbba4e72d1c59cda109
[ "MIT" ]
null
null
null
server.py
mischmit/pyframe
a0bdd0da242d0164e6525dbba4e72d1c59cda109
[ "MIT" ]
null
null
null
import glob
import fnmatch
import os
import sys
import random
import SimpleHTTPServer
from BaseHTTPServer import HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler
import datetime
import json
import time

allowedExtensions = ('.jpg', '.jpeg', '.JPEG', '.JPG')
ignore_dir = "@eaDir"
start_time = datetime.datetime.now()
interval = 10


def get_metadata(file):
    return {"url": file, "time": time.ctime(os.path.getctime(file))}


def get_current_image():
    curIndex = (datetime.datetime.now() - start_time).seconds / interval % len(files)
    return files[curIndex]


# Scans recursively for .jp?g files and collects them
def get_files(path):
    matches = []
    for root, dirnames, filenames in os.walk(path):
        try:
            dirnames.remove(ignore_dir)
        except ValueError:
            pass
        matches = matches + [os.path.join(root, filename)
                             for filename in filenames
                             if filename.endswith(allowedExtensions)]
    return [m.replace("\\", "/") for m in matches]


if len(sys.argv) == 2:
    path = sys.argv[1]
else:
    path = "example_images"

print os.path.join(os.getcwd(), path)
files = get_files(path)
print "Everyday im shuffling ..."
random.shuffle(files)
print len(files), "Files"


class TimedHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):

    def do_GET(self):
        if self.path == "/metadata.json":
            self.send_response(200)
            self.end_headers()
            current_image = get_current_image()
            metadata = get_metadata(current_image)
            metadata["duration"] = interval
            self.wfile.write(json.dumps(metadata))
        elif self.path == "/":
            self.send_response(200)
            self.end_headers()
            current_image = get_current_image()
            template = open("template.html").read()
            self.wfile.write(template)
        else:
            SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)


def serve_forever():
    PORT = 8001
    httpd = HTTPServer(("localhost", PORT), TimedHTTPRequestHandler)
    print "serving at port", PORT
    httpd.serve_forever()
    print start_time


def main():
    serve_forever()


if __name__ == '__main__':
    main()
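The slideshow logic boils down to one expression: elapsed seconds, integer-divided by the interval, modulo the number of files. The script above is Python 2; a sketch of the same computation in Python 3 (illustrative names), using total_seconds() because timedelta.seconds ignores whole days and would wrap the rotation after 24 hours:

import datetime

def current_index(start_time, interval, n_files):
    """Index of the image to show now, rotating every `interval` seconds."""
    elapsed = (datetime.datetime.now() - start_time).total_seconds()
    return int(elapsed // interval) % n_files

start = datetime.datetime.now()
print(current_index(start, 10, 25))  # 0 for the first 10 seconds, then 1, ...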
25.448718
118
0.736524
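A small client sketch for the photo-frame server above, assuming it is already running on localhost:8001 (its hard-coded bind address). It is written for Python 3 even though the server itself is Python 2.

import json
import urllib.request

# /metadata.json returns the current image plus the display duration.
with urllib.request.urlopen("http://localhost:8001/metadata.json") as resp:
    meta = json.loads(resp.read().decode("utf-8"))

# "url" and "time" come from get_metadata(); "duration" is added in do_GET().
print(meta["url"], meta["time"], meta["duration"])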
acef9daac4a7913491a1a1a4b6174ef8a8897243
45,571
py
Python
tencentcloud/vm/v20201229/models.py
PlasticMem/tencentcloud-sdk-python
666db85623d51d640a165907a19aef5fba53b38d
[ "Apache-2.0" ]
465
2018-04-27T09:54:59.000Z
2022-03-29T02:18:01.000Z
tencentcloud/vm/v20201229/models.py
PlasticMem/tencentcloud-sdk-python
666db85623d51d640a165907a19aef5fba53b38d
[ "Apache-2.0" ]
91
2018-04-27T09:48:11.000Z
2022-03-12T08:04:04.000Z
tencentcloud/vm/v20201229/models.py
PlasticMem/tencentcloud-sdk-python
666db85623d51d640a165907a19aef5fba53b38d
[ "Apache-2.0" ]
232
2018-05-02T08:02:46.000Z
2022-03-30T08:02:48.000Z
# -*- coding: utf8 -*- # Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from tencentcloud.common.abstract_model import AbstractModel class AudioResult(AbstractModel): """音频审核输出参数 """ def __init__(self): r""" :param HitFlag: 该字段用于返回审核内容是否命中审核模型;取值:0(**未命中**)、1(**命中**)。 注意:此字段可能返回 null,表示取不到有效值。 :type HitFlag: int :param Label: 该字段用于返回检测结果所对应的恶意标签。<br>返回值:**Normal**:正常,**Porn**:色情,**Abuse**:谩骂,**Ad**:广告,**Custom**:自定义违规;以及其他令人反感、不安全或不适宜的内容类型。 注意:此字段可能返回 null,表示取不到有效值。 :type Label: str :param Suggestion: 该字段用于返回后续操作建议。当您获取到判定结果后,返回值表示具体的后续建议操作。<br> 返回值:**Block**:建议屏蔽,**Review** :建议人工复审,**Pass**:建议通过 注意:此字段可能返回 null,表示取不到有效值。 :type Suggestion: str :param Score: 该字段用于返回当前标签下的置信度,取值范围:0(**置信度最低**)-100(**置信度最高** ),越高代表文本越有可能属于当前返回的标签;如:*色情 99*,则表明该文本非常有可能属于色情内容。 注意:此字段可能返回 null,表示取不到有效值。 :type Score: int :param Text: 该字段用于返回音频文件经ASR识别后的文本信息。最长可识别**5小时**的音频文件,若超出时长限制,接口将会报错。 注意:此字段可能返回 null,表示取不到有效值。 :type Text: str :param Url: 该字段用于返回音频片段存储的链接地址,该地址有效期为1天。 注意:此字段可能返回 null,表示取不到有效值。 :type Url: str :param Duration: 该字段用于返回音频文件的时长,单位为秒。 注意:此字段可能返回 null,表示取不到有效值。 :type Duration: str :param Extra: 该字段用于返回输入参数中的额外附加信息(Extra),如未配置则默认返回值为空。<br>备注:不同客户或Biztype下返回信息不同,如需配置该字段请提交工单咨询或联系售后专员处理。 注意:此字段可能返回 null,表示取不到有效值。 :type Extra: str :param TextResults: 该字段用于返回音频文件经ASR识别后产生的文本的详细审核结果。具体结果内容请参见AudioResultDetailLanguageResult数据结构的细节描述。 注意:此字段可能返回 null,表示取不到有效值。 :type TextResults: list of AudioResultDetailTextResult :param MoanResults: 该字段用于返回音频文件呻吟检测的详细审核结果。具体结果内容请参见AudioResultDetailMoanResult数据结构的细节描述。 注意:此字段可能返回 null,表示取不到有效值。 :type MoanResults: list of AudioResultDetailMoanResult :param LanguageResults: 该字段用于返回音频小语种检测的详细审核结果。具体结果内容请参见AudioResultDetailLanguageResult数据结构的细节描述。 注意:此字段可能返回 null,表示取不到有效值。 :type LanguageResults: list of AudioResultDetailLanguageResult """ self.HitFlag = None self.Label = None self.Suggestion = None self.Score = None self.Text = None self.Url = None self.Duration = None self.Extra = None self.TextResults = None self.MoanResults = None self.LanguageResults = None def _deserialize(self, params): self.HitFlag = params.get("HitFlag") self.Label = params.get("Label") self.Suggestion = params.get("Suggestion") self.Score = params.get("Score") self.Text = params.get("Text") self.Url = params.get("Url") self.Duration = params.get("Duration") self.Extra = params.get("Extra") if params.get("TextResults") is not None: self.TextResults = [] for item in params.get("TextResults"): obj = AudioResultDetailTextResult() obj._deserialize(item) self.TextResults.append(obj) if params.get("MoanResults") is not None: self.MoanResults = [] for item in params.get("MoanResults"): obj = AudioResultDetailMoanResult() obj._deserialize(item) self.MoanResults.append(obj) if params.get("LanguageResults") is not None: self.LanguageResults = [] for item in params.get("LanguageResults"): obj = AudioResultDetailLanguageResult() obj._deserialize(item) self.LanguageResults.append(obj) memeber_set = set(params.keys()) for name, value 
in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class AudioResultDetailLanguageResult(AbstractModel): """音频语言种类检测结果 """ def __init__(self): r""" :param Label: 该字段用于返回对应的语言种类信息。 注意:此字段可能返回 null,表示取不到有效值。 :type Label: str :param Score: 该参数用于返回当前标签下的置信度,取值范围:0(**置信度最低**)-100(**置信度最高**),越高代表音频越有可能属于当前返回的语种标签; 注意:此字段可能返回 null,表示取不到有效值。 :type Score: int :param StartTime: 该参数用于返回对应语种标签的片段在音频文件内的开始时间,单位为毫秒。 注意:此字段可能返回 null,表示取不到有效值。 :type StartTime: float :param EndTime: 该参数用于返回对应语种标签的片段在音频文件内的结束时间,单位为毫秒。 注意:此字段可能返回 null,表示取不到有效值。 :type EndTime: float :param SubLabelCode: *内测中,敬请期待* 注意:此字段可能返回 null,表示取不到有效值。 :type SubLabelCode: str """ self.Label = None self.Score = None self.StartTime = None self.EndTime = None self.SubLabelCode = None def _deserialize(self, params): self.Label = params.get("Label") self.Score = params.get("Score") self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") self.SubLabelCode = params.get("SubLabelCode") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class AudioResultDetailMoanResult(AbstractModel): """音频呻吟审核结果 """ def __init__(self): r""" :param Label: 该字段用于返回检测结果需要检测的内容类型,此处固定为**Moan**(呻吟)以调用呻吟检测功能。 注意:此字段可能返回 null,表示取不到有效值。 :type Label: str :param Score: 该字段用于返回呻吟检测的置信度,取值范围:0(**置信度最低**)-100(**置信度最高**),越高代表音频越有可能属于呻吟内容。 :type Score: int :param StartTime: 该字段用于返回对应呻吟标签的片段在音频文件内的开始时间,单位为毫秒。 :type StartTime: float :param EndTime: 该字段用于返回对应呻吟标签的片段在音频文件内的结束时间,单位为毫秒。 :type EndTime: float :param SubLabelCode: *内测中,敬请期待* :type SubLabelCode: str """ self.Label = None self.Score = None self.StartTime = None self.EndTime = None self.SubLabelCode = None def _deserialize(self, params): self.Label = params.get("Label") self.Score = params.get("Score") self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") self.SubLabelCode = params.get("SubLabelCode") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class AudioResultDetailTextResult(AbstractModel): """音频ASR文本审核结果 """ def __init__(self): r""" :param Label: 该字段用于返回检测结果所对应的恶意标签。<br>返回值:**Normal**:正常,**Porn**:色情,**Abuse**:谩骂,**Ad**:广告,**Custom**:自定义违规;以及其他令人反感、不安全或不适宜的内容类型。 注意:此字段可能返回 null,表示取不到有效值。 :type Label: str :param Keywords: 该字段用于返回ASR识别出的文本内容命中的关键词信息,用于标注内容违规的具体原因(如:加我微信)。该参数可能会有多个返回值,代表命中的多个关键词;若返回值为空,Score不为空,则代表识别结果所对应的恶意标签(Label)来自于语义模型判断的返回值。 注意:此字段可能返回 null,表示取不到有效值。 :type Keywords: list of str :param LibId: 该字段**仅当Label为Custom:自定义关键词时该参数有效**,用于返回自定义库的ID,以方便自定义库管理和配置。 注意:此字段可能返回 null,表示取不到有效值。 :type LibId: str :param LibName: 该字段**仅当Label为Custom:自定义关键词时该参数有效**,用于返回自定义库的名称,以方便自定义库管理和配置。 注意:此字段可能返回 null,表示取不到有效值。 :type LibName: str :param Score: 该字段用于返回当前标签下的置信度,取值范围:0(**置信度最低**)-100(**置信度最高**),越高代表文本越有可能属于当前返回的标签;如:*色情 99*,则表明该文本非常有可能属于色情内容。 注意:此字段可能返回 null,表示取不到有效值。 :type Score: int :param Suggestion: 该字段用于返回后续操作建议。当您获取到判定结果后,返回值表示具体的后续建议操作。<br> 返回值:**Block**:建议屏蔽,**Review** :建议人工复审,**Pass**:建议通过 注意:此字段可能返回 null,表示取不到有效值。 :type Suggestion: str :param LibType: 该字段用于返回自定义关键词对应的词库类型,取值为**1**(黑白库)和**2**(自定义关键词库),若未配置自定义关键词库,则默认值为1(黑白库匹配)。 注意:此字段可能返回 null,表示取不到有效值。 :type LibType: int """ self.Label = None self.Keywords = None self.LibId = None self.LibName = None self.Score = None self.Suggestion = None self.LibType = None def _deserialize(self, params): self.Label = params.get("Label") self.Keywords = params.get("Keywords") self.LibId = params.get("LibId") self.LibName = params.get("LibName") self.Score = params.get("Score") self.Suggestion = params.get("Suggestion") self.LibType = params.get("LibType") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class AudioSegments(AbstractModel): """用于返回音频片段的审核结果 """ def __init__(self): r""" :param OffsetTime: 该字段用于返回音频片段的开始时间,单位为秒。对于点播文件,该参数代表对应音频相对于完整音轨的偏移时间,如0(代表不偏移),5(音轨开始后5秒),10(音轨开始后10秒);对于直播文件,该参数则返回对应音频片段开始时的Unix时间戳,如:1594650717。 注意:此字段可能返回 null,表示取不到有效值。 :type OffsetTime: str :param Result: 该字段用于返回音频片段的具体审核结果,详细内容敬请参考AudioResult数据结构的描述。 注意:此字段可能返回 null,表示取不到有效值。 :type Result: :class:`tencentcloud.vm.v20201229.models.AudioResult` """ self.OffsetTime = None self.Result = None def _deserialize(self, params): self.OffsetTime = params.get("OffsetTime") if params.get("Result") is not None: self.Result = AudioResult() self.Result._deserialize(params.get("Result")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class BucketInfo(AbstractModel): """文件桶信息 参考腾讯云存储相关说明 https://cloud.tencent.com/document/product/436/44352 """ def __init__(self): r""" :param Bucket: 该字段用于标识腾讯云对象存储的存储桶名称,关于文件桶的详细信息敬请参考 [腾讯云存储相关说明](https://cloud.tencent.com/document/product/436/44352)。 :type Bucket: str :param Region: 该字段用于标识腾讯云对象存储的托管机房的分布地区,对象存储 COS 的数据存放在这些地域的存储桶中。 :type Region: str :param Object: 该字段用于标识腾讯云对象存储的对象Key,对象z作为基本单元被存放在存储桶中;用户可以通过腾讯云控制台、API、SDK 等多种方式管理对象。有关对象的详细描述敬请参阅相应 [产品文档](https://cloud.tencent.com/document/product/436/13324)。 :type Object: str """ self.Bucket = None self.Region = None self.Object = None def _deserialize(self, params): self.Bucket = params.get("Bucket") self.Region = params.get("Region") self.Object = params.get("Object") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CancelTaskRequest(AbstractModel): """CancelTask请求参数结构体 """ def __init__(self): r""" :param TaskId: 该字段表示创建视频审核任务后返回的任务ID(在Results参数中),用于标识需要取消的审核任务。 :type TaskId: str """ self.TaskId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CancelTaskResponse(AbstractModel): """CancelTask返回参数结构体 """ def __init__(self): r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.RequestId = None def _deserialize(self, params): self.RequestId = params.get("RequestId") class CreateVideoModerationTaskRequest(AbstractModel): """CreateVideoModerationTask请求参数结构体 """ def __init__(self): r""" :param Type: 该参数用于传入审核任务的任务类型,取值:**VIDEO**(点播视频),**LIVE_VIDEO**(直播视频)。 :type Type: str :param Tasks: 该字段表示输入的视频审核任务信息,具体输入内容请参见TaskInput数据结构的详细描述。<br> 备注:最多同时可创建**10个任务**。 :type Tasks: list of TaskInput :param BizType: 该字段表示策略的具体编号,用于接口调度,在内容安全控制台中可配置。若不传入Biztype参数(留空),则代表采用默认的识别策略;传入则会在审核时根据业务场景采取不同的审核策略。<br>备注:Biztype仅为数字、字母与下划线的组合,长度为3-32个字符;不同Biztype关联不同的业务场景与识别能力策略,调用前请确认正确的Biztype。 :type BizType: str :param Seed: 可选参数,该字段表示回调签名的key信息,用于保证数据的安全性。 签名方法为在返回的HTTP头部添加 X-Signature 的字段,值为: seed + body 的 SHA256 编码和Hex字符串,在收到回调数据后,可以根据返回的body,用 **sha256(seed + body)**, 计算出 `X-Signature` 进行验证。<br>具体使用实例可参考 [回调签名示例](https://cloud.tencent.com/document/product/1265/51885)。 :type Seed: str :param CallbackUrl: 可选参数,该字段表示接受审核信息回调的地址,格式为URL链接默认格式。配置成功后,审核过程中产生的违规音视频片段将通过此接口发送。回调返回内容格式请参考 [回调签名示例](https://cloud.tencent.com/document/product/1265/51879#.E7.A4.BA.E4.BE.8B2-.E5.9B.9E.E8.B0.83.E7.AD.BE.E5.90.8D.E7.A4.BA.E4.BE.8B) <br>备注:音频默认截取时长为**15秒**,视频截帧默认为**5秒**截取一张图片;若用户自行配置截取间隔,则按照用户配置返回相应片段。 :type CallbackUrl: str :param Priority: 可选参数,该参数用于传入审核任务的优先级。当您有多个视频审核任务排队时,可以根据这个参数控制排队优先级,用于处理插队等逻辑;该参数**默认值为0**。 :type Priority: int """ self.Type = None self.Tasks = None self.BizType = None self.Seed = None self.CallbackUrl = None self.Priority = None def _deserialize(self, params): self.Type = params.get("Type") if params.get("Tasks") is not None: self.Tasks = [] for item in params.get("Tasks"): obj = TaskInput() obj._deserialize(item) self.Tasks.append(obj) self.BizType = params.get("BizType") self.Seed = params.get("Seed") self.CallbackUrl = params.get("CallbackUrl") self.Priority = params.get("Priority") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: 
memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateVideoModerationTaskResponse(AbstractModel): """CreateVideoModerationTask返回参数结构体 """ def __init__(self): r""" :param Results: 该字段用于返回任务创建的结果,具体输出内容请参见TaskResult数据结构的详细描述。 注意:此字段可能返回 null,表示取不到有效值。 :type Results: list of TaskResult :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Results = None self.RequestId = None def _deserialize(self, params): if params.get("Results") is not None: self.Results = [] for item in params.get("Results"): obj = TaskResult() obj._deserialize(item) self.Results.append(obj) self.RequestId = params.get("RequestId") class DescribeTaskDetailRequest(AbstractModel): """DescribeTaskDetail请求参数结构体 """ def __init__(self): r""" :param TaskId: 该字段表示创建视频审核任务后返回的任务ID(在Results参数中),用于标识需要查询任务详情的审核任务。 <br>备注:查询接口单次最大查询量为**20条每次**。 :type TaskId: str :param ShowAllSegments: 该布尔字段表示是否展示全部的视频片段,取值:True(展示全部的视频分片)、False(只展示命中审核规则的视频分片);默认值为False。 :type ShowAllSegments: bool """ self.TaskId = None self.ShowAllSegments = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.ShowAllSegments = params.get("ShowAllSegments") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeTaskDetailResponse(AbstractModel): """DescribeTaskDetail返回参数结构体 """ def __init__(self): r""" :param TaskId: 该字段用于返回创建视频审核任务后返回的任务ID(在Results参数中),用于标识需要查询任务详情的审核任务。 注意:此字段可能返回 null,表示取不到有效值。 :type TaskId: str :param DataId: 该字段用于返回调用视频审核接口时传入的数据ID参数,方便数据的辨别和管理。 注意:此字段可能返回 null,表示取不到有效值。 :type DataId: str :param BizType: 该字段用于返回调用视频审核接口时传入的BizType参数,方便数据的辨别和管理。 注意:此字段可能返回 null,表示取不到有效值。 :type BizType: str :param Name: 该字段用于返回调用视频审核接口时传入的TaskInput参数中的任务名称,方便任务的识别与管理。 注意:此字段可能返回 null,表示取不到有效值。 :type Name: str :param Status: 该字段用于返回所查询内容的任务状态。 <br>取值:**FINISH**(任务已完成)、**PENDING** (任务等待中)、**RUNNING** (任务进行中)、**ERROR** (任务出错)、**CANCELLED** (任务已取消)。 注意:此字段可能返回 null,表示取不到有效值。 :type Status: str :param Type: 该字段用于返回调用视频审核接口时输入的视频审核类型,取值为:**VIDEO**(点播音频)和**LIVE_VIDEO**(直播音频),默认值为VIDEO。 注意:此字段可能返回 null,表示取不到有效值。 :type Type: str :param Suggestion: 该字段用于返回基于恶意标签的后续操作建议。当您获取到判定结果后,返回值表示系统推荐的后续操作;建议您按照业务所需,对不同违规类型与建议值进行处理。<br>返回值:**Block**:建议屏蔽,**Review** :建议人工复审,**Pass**:建议通过 注意:此字段可能返回 null,表示取不到有效值。 :type Suggestion: str :param Labels: 该字段用于返回检测结果所对应的恶意标签。<br>返回值:**Normal**:正常,**Porn**:色情,**Abuse**:谩骂,**Ad**:广告,**Custom**:自定义违规;以及其他令人反感、不安全或不适宜的内容类型。 注意:此字段可能返回 null,表示取不到有效值。 :type Labels: list of TaskLabel :param MediaInfo: 该字段用于返回输入媒体文件的详细信息,包括编解码格式、分片时长等信息。详细内容敬请参考MediaInfo数据结构的描述。 注意:此字段可能返回 null,表示取不到有效值。 :type MediaInfo: :class:`tencentcloud.vm.v20201229.models.MediaInfo` :param InputInfo: 该字段用于返回审核服务的媒体内容信息,主要包括传入文件类型和访问地址。 注意:此字段可能返回 null,表示取不到有效值。 :type InputInfo: :class:`tencentcloud.vm.v20201229.models.InputInfo` :param CreatedAt: 该字段用于返回被查询任务创建的时间,格式采用 ISO 8601标准。 注意:此字段可能返回 null,表示取不到有效值。 :type CreatedAt: str :param UpdatedAt: 该字段用于返回被查询任务最后更新时间,格式采用 ISO 8601标准。 注意:此字段可能返回 null,表示取不到有效值。 :type UpdatedAt: str :param ImageSegments: 该字段用于返回视频中截帧审核的结果,详细返回内容敬请参考ImageSegments数据结构的描述。<br>备注:数据有效期为24小时,如需要延长存储时间,请在已配置的COS储存桶中设置。 注意:此字段可能返回 null,表示取不到有效值。 :type ImageSegments: list of ImageSegments :param AudioSegments: 该字段用于返回视频中音频审核的结果,详细返回内容敬请参考AudioSegments数据结构的描述。<br>备注:数据有效期为24小时,如需要延长存储时间,请在已配置的COS储存桶中设置。 注意:此字段可能返回 null,表示取不到有效值。 :type AudioSegments: list of 
AudioSegments :param ErrorType: 当任务状态为Error时,返回对应错误的类型,取值:**DECODE_ERROR**: 解码失败。(输入资源中可能包含无法解码的视频) **URL_ERROR**:下载地址验证失败。 **TIMEOUT_ERROR**:处理超时。任务状态非Error时默认返回为空。 注意:此字段可能返回 null,表示取不到有效值。 :type ErrorType: str :param ErrorDescription: 当任务状态为Error时,该字段用于返回对应错误的详细描述,任务状态非Error时默认返回为空。 注意:此字段可能返回 null,表示取不到有效值。 :type ErrorDescription: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TaskId = None self.DataId = None self.BizType = None self.Name = None self.Status = None self.Type = None self.Suggestion = None self.Labels = None self.MediaInfo = None self.InputInfo = None self.CreatedAt = None self.UpdatedAt = None self.ImageSegments = None self.AudioSegments = None self.ErrorType = None self.ErrorDescription = None self.RequestId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.DataId = params.get("DataId") self.BizType = params.get("BizType") self.Name = params.get("Name") self.Status = params.get("Status") self.Type = params.get("Type") self.Suggestion = params.get("Suggestion") if params.get("Labels") is not None: self.Labels = [] for item in params.get("Labels"): obj = TaskLabel() obj._deserialize(item) self.Labels.append(obj) if params.get("MediaInfo") is not None: self.MediaInfo = MediaInfo() self.MediaInfo._deserialize(params.get("MediaInfo")) if params.get("InputInfo") is not None: self.InputInfo = InputInfo() self.InputInfo._deserialize(params.get("InputInfo")) self.CreatedAt = params.get("CreatedAt") self.UpdatedAt = params.get("UpdatedAt") if params.get("ImageSegments") is not None: self.ImageSegments = [] for item in params.get("ImageSegments"): obj = ImageSegments() obj._deserialize(item) self.ImageSegments.append(obj) if params.get("AudioSegments") is not None: self.AudioSegments = [] for item in params.get("AudioSegments"): obj = AudioSegments() obj._deserialize(item) self.AudioSegments.append(obj) self.ErrorType = params.get("ErrorType") self.ErrorDescription = params.get("ErrorDescription") self.RequestId = params.get("RequestId") class DescribeTasksRequest(AbstractModel): """DescribeTasks请求参数结构体 """ def __init__(self): r""" :param Limit: 该参数表示任务列表每页展示的任务条数,**默认值为10**(每页展示10条任务)。 :type Limit: int :param Filter: 该参数表示任务筛选器的输入参数,可根据业务类型、审核文件类型、处理建议及任务状态筛选想要查看的审核任务,具体参数内容请参见TaskFilter数据结构的详细描述。 :type Filter: :class:`tencentcloud.vm.v20201229.models.TaskFilter` :param PageToken: 该参数表示翻页时使用的Token信息,由系统自动生成,并在翻页时向下一个生成的页面传递此参数,以方便快速翻页功能的实现。当到最后一页时,该字段为空。 :type PageToken: str :param StartTime: 该参数表示任务列表的开始时间,格式为ISO8601标准的时间戳。**默认值为最近3天**,若传入该参数,则在这一时间到EndTime之间的任务将会被筛选出来。<br>备注:该参数与Filter共同起到任务筛选作用,二者作用无先后顺序。 :type StartTime: str :param EndTime: 该参数表示任务列表的结束时间,格式为ISO8601标准的时间戳。**默认值为空**,若传入该参数,则在这StartTime到这一时间之间的任务将会被筛选出来。<br>备注:该参数与Filter共同起到任务筛选作用,二者作用无先后顺序。 :type EndTime: str """ self.Limit = None self.Filter = None self.PageToken = None self.StartTime = None self.EndTime = None def _deserialize(self, params): self.Limit = params.get("Limit") if params.get("Filter") is not None: self.Filter = TaskFilter() self.Filter._deserialize(params.get("Filter")) self.PageToken = params.get("PageToken") self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeTasksResponse(AbstractModel): """DescribeTasks返回参数结构体 """ def __init__(self): r""" :param Total: 该字段用于返回当前查询的任务总量,格式为int字符串。 注意:此字段可能返回 null,表示取不到有效值。 :type Total: str :param Data: 该字段用于返回当前页的任务详细数据,具体输出内容请参见TaskData数据结构的详细描述。 注意:此字段可能返回 null,表示取不到有效值。 :type Data: list of TaskData :param PageToken: 该字段用于返回翻页时使用的Token信息,由系统自动生成,并在翻页时向下一个生成的页面传递此参数,以方便快速翻页功能的实现。当到最后一页时,该字段为空。 注意:此字段可能返回 null,表示取不到有效值。 :type PageToken: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Total = None self.Data = None self.PageToken = None self.RequestId = None def _deserialize(self, params): self.Total = params.get("Total") if params.get("Data") is not None: self.Data = [] for item in params.get("Data"): obj = TaskData() obj._deserialize(item) self.Data.append(obj) self.PageToken = params.get("PageToken") self.RequestId = params.get("RequestId") class ImageResult(AbstractModel): """Result结果详情 """ def __init__(self): r""" :param HitFlag: 该参数用于标识审核内容是否命中恶意标签,取值:0(**未命中**)和1(**命中**)。 注意:此字段可能返回 null,表示取不到有效值。 :type HitFlag: int :param Label: 该字段用于返回检测结果所对应的恶意标签。<br>返回值:**Normal**:正常,**Porn**:色情,**Abuse**:谩骂,**Ad**:广告,**Custom**:自定义违规;以及其他令人反感、不安全或不适宜的内容类型。 注意:此字段可能返回 null,表示取不到有效值。 :type Label: str :param Suggestion: 该字段用于返回后续操作建议。当您获取到判定结果后,返回值表示具体的后续建议操作。<br> 返回值:**Block**:建议屏蔽,**Review** :建议人工复审,**Pass**:建议通过 注意:此字段可能返回 null,表示取不到有效值。 :type Suggestion: str :param Score: 该字段用于返回当前标签下的置信度,取值范围:0(**置信度最低**)-100(**置信度最高** ),越高代表文本越有可能属于当前返回的标签;如:*色情 -性行为 99*,则表明该文本非常有可能属于色情性行为内容。 注意:此字段可能返回 null,表示取不到有效值。 :type Score: int :param Results: 该字段用于返回图像审核结果的子结果,详细内容敬请参考ImageResultResult数据结构的描述。 注意:此字段可能返回 null,表示取不到有效值。 :type Results: list of ImageResultResult :param Url: 该字段用于返回审核结果的访问链接(URL),图片支持PNG、JPG、JPEG、BMP、GIF、WEBP格式。<br>备注:数据**默认有效期为12小时**。如您需要更长时间的保存,请在数据储存的COS桶中配置对应的储存时长。 注意:此字段可能返回 null,表示取不到有效值。 :type Url: str :param Extra: 该字段用于返回输入参数中的额外附加信息(Extra),如未配置则默认返回值为空。<br>备注:不同客户或Biztype下返回信息不同,如需配置该字段请提交工单咨询或联系售后专员处理。 注意:此字段可能返回 null,表示取不到有效值。 :type Extra: str """ self.HitFlag = None self.Label = None self.Suggestion = None self.Score = None self.Results = None self.Url = None self.Extra = None def _deserialize(self, params): self.HitFlag = params.get("HitFlag") self.Label = params.get("Label") self.Suggestion = params.get("Suggestion") self.Score = params.get("Score") if params.get("Results") is not None: self.Results = [] for item in params.get("Results"): obj = ImageResultResult() obj._deserialize(item) self.Results.append(obj) self.Url = params.get("Url") self.Extra = params.get("Extra") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class ImageResultResult(AbstractModel): """图片输出结果的子结果 """ def __init__(self): r""" :param Scene: 该字段用于返回检测结果所对应的恶意场景。返回值:**Normal**:正常,**Porn**:色情,**Abuse**:谩骂,**AppLogo**:广告台标,**Custom**:自定义违规,以及其他令人反感、不安全或不适宜的内容类型。 注意:此字段可能返回 null,表示取不到有效值。 :type Scene: str :param HitFlag: 该参数用于标识审核内容是否命中恶意标签,取值:0(**未命中**)和1(**命中**)。 注意:此字段可能返回 null,表示取不到有效值。 :type HitFlag: int :param Suggestion: 该字段用于返回后续操作建议。当您获取到判定结果后,返回值表示具体的后续建议操作。<br> 返回值:**Block**:建议屏蔽,**Review** :建议人工复审,**Pass**:建议通过 注意:此字段可能返回 null,表示取不到有效值。 :type Suggestion: str :param Label: 该字段用于返回检测结果所对应的恶意标签。<br>返回值:**Normal**:正常,**Porn**:色情,**Abuse**:谩骂,**Ad**:广告,**Custom**:自定义违规;以及其他令人反感、不安全或不适宜的内容类型。 注意:此字段可能返回 null,表示取不到有效值。 :type Label: str :param SubLabel: 该字段用于返回恶意标签下对应的子标签的检测结果,如:*Porn-SexBehavior*等子标签。 注意:此字段可能返回 null,表示取不到有效值。 :type SubLabel: str :param Score: 该字段用于返回当前标签下的置信度,取值范围:0(**置信度最低**)-100(**置信度最高** ),越高代表文本越有可能属于当前返回的标签;如:*色情 -性行为 99*,则表明该文本非常有可能属于色情性行为内容。 注意:此字段可能返回 null,表示取不到有效值。 :type Score: int :param Names: 该字段用于返回审核图片在敏感场景下命中的特定对象名称列表。 注意:此字段可能返回 null,表示取不到有效值。 :type Names: list of str :param Text: 该字段用于返回图片OCR文本识别的检测结果,识别**上限在5000字节内**。 注意:此字段可能返回 null,表示取不到有效值。 :type Text: str :param Details: 该字段用于返回图像审核子结果的其他详细信息,如文本位置、自定义库等。详细返回内容敬请参考ImageResultsResultDetail数据结构的描述。 注意:此字段可能返回 null,表示取不到有效值。 :type Details: list of ImageResultsResultDetail """ self.Scene = None self.HitFlag = None self.Suggestion = None self.Label = None self.SubLabel = None self.Score = None self.Names = None self.Text = None self.Details = None def _deserialize(self, params): self.Scene = params.get("Scene") self.HitFlag = params.get("HitFlag") self.Suggestion = params.get("Suggestion") self.Label = params.get("Label") self.SubLabel = params.get("SubLabel") self.Score = params.get("Score") self.Names = params.get("Names") self.Text = params.get("Text") if params.get("Details") is not None: self.Details = [] for item in params.get("Details"): obj = ImageResultsResultDetail() obj._deserialize(item) self.Details.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class ImageResultsResultDetail(AbstractModel): """具体场景下的图片识别结果 """ def __init__(self): r""" :param Name: 该字段用于返回调用视频审核接口时传入的TaskInput参数中的任务名称,方便任务的识别与管理。 注意:此字段可能返回 null,表示取不到有效值。 :type Name: str :param Text: 该字段用于返回图片OCR文本识别的检测结果,识别**上限在5000字节内**。 注意:此字段可能返回 null,表示取不到有效值。 :type Text: str :param Location: 该字段用于返回图像审核子结果的详细位置信息,如坐标、大小、旋转角度等。详细返回内容敬请参考ImageResultsResultDetailLocation数据结构的描述。 注意:此字段可能返回 null,表示取不到有效值。 :type Location: :class:`tencentcloud.vm.v20201229.models.ImageResultsResultDetailLocation` :param Label: 该字段用于返回检测结果所对应的恶意标签。<br>返回值:**Normal**:正常,**Porn**:色情,**Abuse**:谩骂,**Ad**:广告,**Custom**:自定义违规;以及其他令人反感、不安全或不适宜的内容类型。 注意:此字段可能返回 null,表示取不到有效值。 :type Label: str :param LibId: 该字段**仅当Label为Custom:自定义关键词时该参数有效**,用于返回自定义库的ID,以方便自定义库管理和配置。 注意:此字段可能返回 null,表示取不到有效值。 :type LibId: str :param LibName: 该字段**仅当Label为Custom:自定义关键词时该参数有效**,用于返回自定义库的名称,以方便自定义库管理和配置。 注意:此字段可能返回 null,表示取不到有效值。 :type LibName: str :param Keywords: 该字段用于返回检测文本命中的关键词信息,用于标注文本违规的具体原因(如:*加我微信*)。该参数可能会有多个返回值,代表命中的多个关键词;如返回值为空且Score不为空,则代表识别结果所对应的恶意标签(Label)是来自于语义模型判断的返回值。 注意:此字段可能返回 null,表示取不到有效值。 :type Keywords: list of str :param Suggestion: 该字段用于返回后续操作建议。当您获取到判定结果后,返回值表示具体的后续建议操作。<br> 返回值:**Block**:建议屏蔽,**Review** :建议人工复审,**Pass**:建议通过 注意:此字段可能返回 null,表示取不到有效值。 :type Suggestion: str :param Score: 该字段用于返回当前标签下的置信度,取值范围:0(**置信度最低**)-100(**置信度最高** ),越高代表文本越有可能属于当前返回的标签;如:*色情 99*,则表明该文本非常有可能属于色情内容。 注意:此字段可能返回 null,表示取不到有效值。 :type Score: int :param SubLabelCode: 该字段用于返回恶意标签下对应的子标签的检测结果,如:*Porn-SexBehavior*等子标签。 注意:此字段可能返回 null,表示取不到有效值。 :type SubLabelCode: str """ self.Name = None self.Text = None self.Location = None self.Label = None self.LibId = None self.LibName = None self.Keywords = None self.Suggestion = None self.Score = None self.SubLabelCode = None def _deserialize(self, params): self.Name = params.get("Name") self.Text = params.get("Text") if params.get("Location") is not None: self.Location = ImageResultsResultDetailLocation() self.Location._deserialize(params.get("Location")) self.Label = params.get("Label") self.LibId = params.get("LibId") self.LibName = params.get("LibName") self.Keywords = params.get("Keywords") self.Suggestion = params.get("Suggestion") self.Score = params.get("Score") self.SubLabelCode = params.get("SubLabelCode") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class ImageResultsResultDetailLocation(AbstractModel): """图片详情位置信息 """ def __init__(self): r""" :param X: 该参数用于标识OCR检测框左上角位置的**横坐标**(x)所在的像素位置,结合剩余参数可唯一确定检测框的大小和位置。 注意:此字段可能返回 null,表示取不到有效值。 :type X: float :param Y: 该参数用于标识OCR检测框左上角位置的**纵坐标**(y)所在的像素位置,结合剩余参数可唯一确定检测框的大小和位置。 注意:此字段可能返回 null,表示取不到有效值。 :type Y: float :param Width: 该参数用于标识OCR检测框的宽度(**由左上角出发在x轴向右延伸的长度**)。结合剩余参数可唯一确定检测框的大小和位置。 注意:此字段可能返回 null,表示取不到有效值。 :type Width: int :param Height: 该参数用于标识OCR检测框的高度(**由左上角出发在y轴向下延伸的长度**)。结合剩余参数可唯一确定检测框的大小和位置。 注意:此字段可能返回 null,表示取不到有效值。 :type Height: int :param Rotate: 该参数用于标识OCR检测框的旋转角度,该参数结合X和Y两个坐标参数可唯一确定检测框的具体位置;取值:0-360(**角度制**),方向为**逆时针旋**转。 注意:此字段可能返回 null,表示取不到有效值。 :type Rotate: float """ self.X = None self.Y = None self.Width = None self.Height = None self.Rotate = None def _deserialize(self, params): self.X = params.get("X") self.Y = params.get("Y") self.Width = params.get("Width") self.Height = params.get("Height") self.Rotate = params.get("Rotate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ImageSegments(AbstractModel): """图片段信息 """ def __init__(self): r""" :param OffsetTime: 该字段用于返回视频片段的截帧时间,单位为秒。对于点播文件,该参数代表对应截取图片相对于视频的偏移时间,如0(代表不偏移),5(视频开始后5秒),10(视频开始后10秒);对于直播文件,该参数则返回对应图片的Unix时间戳,如:1594650717。 :type OffsetTime: str :param Result: 该字段用于返回视频片段的具体截帧审核结果,详细内容敬请参考ImageResult数据结构的描述。 :type Result: :class:`tencentcloud.vm.v20201229.models.ImageResult` """ self.OffsetTime = None self.Result = None def _deserialize(self, params): self.OffsetTime = params.get("OffsetTime") if params.get("Result") is not None: self.Result = ImageResult() self.Result._deserialize(params.get("Result")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class InputInfo(AbstractModel): """输入信息详情 """ def __init__(self): r""" :param Type: 该字段表示文件访问类型,取值为**URL**(资源链接)和**COS** (腾讯云对象存储)。 注意:此字段可能返回 null,表示取不到有效值。 :type Type: str :param Url: 该字段表示文件访问的链接地址,格式为标准URL格式。<br> 备注:当Type为URL时此字段不为空。 注意:此字段可能返回 null,表示取不到有效值。 :type Url: str :param BucketInfo: 该字段表示文件访问的腾讯云存储桶信息。<br> 备注:当Type为COS时此字段不为空。 注意:此字段可能返回 null,表示取不到有效值。 :type BucketInfo: str """ self.Type = None self.Url = None self.BucketInfo = None def _deserialize(self, params): self.Type = params.get("Type") self.Url = params.get("Url") self.BucketInfo = params.get("BucketInfo") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MediaInfo(AbstractModel): """媒体类型 """ def __init__(self): r""" :param Duration: 该字段用于返回对传入的视频流进行分片的片段时长,单位为秒。**默认值为5秒**,支持用户自定义配置。<br>备注:仅在审核文件为流媒体时生效;此字段返回0则代表未取到有效值。 :type Duration: int """ self.Duration = None def _deserialize(self, params): self.Duration = params.get("Duration") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class StorageInfo(AbstractModel): """数据存储信息 """ def __init__(self): r""" :param Type: 该字段表示文件访问类型,取值为**URL**(资源链接)和**COS** (腾讯云对象存储);该字段应当与传入的访问类型相对应,可用于强校验并方便系统快速识别访问地址;若不传入此参数,则默认值为URL,此时系统将自动判定访问地址类型。 :type Type: str :param Url: 该字段表示文件访问的链接地址,格式为标准URL格式。<br> 备注:当Type为URL时此字段不为空,该参数与BucketInfo参数须传入其中之一 :type Url: str :param BucketInfo: 该字段表示文件访问的腾讯云存储桶信息。<br> 备注:当Type为COS时此字段不为空,该参数与Url参数须传入其中之一。 :type BucketInfo: :class:`tencentcloud.vm.v20201229.models.BucketInfo` """ self.Type = None self.Url = None self.BucketInfo = None def _deserialize(self, params): self.Type = params.get("Type") self.Url = params.get("Url") if params.get("BucketInfo") is not None: self.BucketInfo = BucketInfo() self.BucketInfo._deserialize(params.get("BucketInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TaskData(AbstractModel): """任务数据 """ def __init__(self): r""" :param DataId: 该字段用于返回视频审核任务数据所对应的数据ID,方便后续查询和管理审核任务。 注意:此字段可能返回 null,表示取不到有效值。 :type DataId: str :param TaskId: 该字段用于返回视频审核任务所生成的任务ID,用于标识具体审核任务,方便后续查询和管理。 :type TaskId: str :param Status: 该字段用于返回所查询内容的任务状态。 <br>取值:**FINISH**(任务已完成)、**PENDING** (任务等待中)、**RUNNING** (任务进行中)、**ERROR** (任务出错)、**CANCELLED** (任务已取消)。 :type Status: str :param Name: 该字段用于返回视频审核任务所对应的任务名称,方便后续查询和管理审核任务。 注意:此字段可能返回 null,表示取不到有效值。 :type Name: str :param BizType: 该字段用于返回调用视频审核接口时传入的BizType参数,方便数据的辨别和管理。 注意:此字段可能返回 null,表示取不到有效值。 :type BizType: str :param Type: 该字段用于返回调用音频审核接口时输入的音频审核类型,取值为:**VIDEO**(点播视频)和**LIVE_VIDEO**(直播视频),默认值为VIDEO。 注意:此字段可能返回 null,表示取不到有效值。 :type Type: str :param Suggestion: 该字段用于返回基于恶意标签的后续操作建议。当您获取到判定结果后,返回值表示具体的后续建议操作。<br> 返回值:**Block**:建议屏蔽,**Review** :建议人工复审,**Pass**:建议通过 注意:此字段可能返回 null,表示取不到有效值。 :type Suggestion: str :param Labels: 该字段用于返回检测结果所对应的恶意标签。<br>返回值:**Normal**:正常,**Porn**:色情,**Abuse**:谩骂,**Ad**:广告,**Custom**:自定义违规;以及其他令人反感、不安全或不适宜的内容类型。 :type Labels: list of TaskLabel :param MediaInfo: 该字段用于返回输入媒体文件的详细信息,包括编码格式、分片时长等信息。详细内容敬请参考MediaInfo数据结构的描述。 注意:此字段可能返回 null,表示取不到有效值。 :type MediaInfo: :class:`tencentcloud.vm.v20201229.models.MediaInfo` :param CreatedAt: 该字段用于返回被查询任务创建的时间,格式采用 ISO 8601标准。 :type CreatedAt: str :param UpdatedAt: 该字段用于返回被查询任务最后更新时间,格式采用 ISO 8601标准。 注意:此字段可能返回 null,表示取不到有效值。 :type UpdatedAt: str """ self.DataId = None self.TaskId = None self.Status = None self.Name = None self.BizType = None self.Type = None self.Suggestion = None self.Labels = None self.MediaInfo = None self.CreatedAt = None self.UpdatedAt = None def _deserialize(self, params): self.DataId = params.get("DataId") self.TaskId = params.get("TaskId") self.Status = params.get("Status") self.Name = params.get("Name") self.BizType = params.get("BizType") self.Type = params.get("Type") self.Suggestion = params.get("Suggestion") if params.get("Labels") is not None: self.Labels = [] for item in params.get("Labels"): obj = TaskLabel() obj._deserialize(item) self.Labels.append(obj) if params.get("MediaInfo") is not None: self.MediaInfo = MediaInfo() self.MediaInfo._deserialize(params.get("MediaInfo")) self.CreatedAt = params.get("CreatedAt") self.UpdatedAt = params.get("UpdatedAt") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class TaskFilter(AbstractModel): """任务筛选器 """ def __init__(self): r""" :param BizType: 该字段用于传入任务对应的业务类型供筛选器进行筛选。Biztype为策略的具体的编号,用于接口调度,在内容安全控制台中可配置。不同Biztype关联不同的业务场景与审核策略,调用前请确认正确的Biztype。Biztype仅为**数字、字母与下划线的组合**,长度为3-32个字符。<br>备注:在不传入该参数时筛选器默认不筛选业务类型。 注意:此字段可能返回 null,表示取不到有效值。 :type BizType: list of str :param Type: 该字段用于传入视频审核对应的任务类型供筛选器进行筛选,取值为:**VIDEO**(点播视频审核),**AUDIO**(点播音频审核), **LIVE_VIDEO**(直播视频审核), **LIVE_AUDIO**(直播音频审核)。<br>备注:在不传入该参数时筛选器默认不筛选任务类型。 :type Type: str :param Suggestion: 该字段用于传入视频审核对应的建议操作供筛选器进行筛选,取值为:**Block**:建议屏蔽,**Review**:建议人工复审,**Pass**:建议通过。<br>备注:在不传入该参数时筛选器默认不筛选建议操作。 :type Suggestion: str :param TaskStatus: 该字段用于传入审核任务的任务状态供筛选器进行筛选,取值为:**FINISH**(任务已完成)、**PENDING** (任务等待中)、**RUNNING** (任务进行中)、**ERROR** (任务出错)、**CANCELLED** (任务已取消)。<br>备注:在不传入该参数时筛选器默认不筛选任务状态。 :type TaskStatus: str """ self.BizType = None self.Type = None self.Suggestion = None self.TaskStatus = None def _deserialize(self, params): self.BizType = params.get("BizType") self.Type = params.get("Type") self.Suggestion = params.get("Suggestion") self.TaskStatus = params.get("TaskStatus") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TaskInput(AbstractModel): """音视频任务结构 """ def __init__(self): r""" :param DataId: 选填参数,该字段表示您为待检测对象分配的数据ID,传入后可方便您对文件进行标识和管理。<br>取值:由英文字母(大小写均可)、数字及四个特殊符号(_,-,@,#)组成,**长度不超过64个字符**。 :type DataId: str :param Name: 选填参数,该字段表示审核任务所对应的任务名称,方便后续查询和管理审核任务。 :type Name: str :param Input: 必填参数,该字段表示审核文件的访问参数,用于获取审核媒体文件,该参数内包括访问类型和访问地址。 :type Input: :class:`tencentcloud.vm.v20201229.models.StorageInfo` """ self.DataId = None self.Name = None self.Input = None def _deserialize(self, params): self.DataId = params.get("DataId") self.Name = params.get("Name") if params.get("Input") is not None: self.Input = StorageInfo() self.Input._deserialize(params.get("Input")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TaskLabel(AbstractModel): """任务输出标签 """ def __init__(self): r""" :param Label: 该字段用于返回检测结果所对应的恶意标签。<br>返回值:**Normal**:正常,**Porn**:色情,**Abuse**:谩骂,**Ad**:广告,**Custom**:自定义违规;以及其他令人反感、不安全或不适宜的内容类型。 注意:此字段可能返回 null,表示取不到有效值。 :type Label: str :param Suggestion: 该字段用于返回当前标签(Label)下的后续操作建议。当您获取到判定结果后,返回值表示系统推荐的后续操作;建议您按照业务所需,对不同违规类型与建议值进行处理。<br>返回值:**Block**:建议屏蔽,**Review** :建议人工复审,**Pass**:建议通过 注意:此字段可能返回 null,表示取不到有效值。 :type Suggestion: str :param Score: 该字段用于返回当前标签(Label)下的置信度,取值范围:0(**置信度最低**)-100(**置信度最高** ),越高代表文本越有可能属于当前返回的标签;如:*色情 99*,则表明该文本非常有可能属于色情内容;*色情 0*,则表明该文本不属于色情内容。 注意:此字段可能返回 null,表示取不到有效值。 :type Score: int """ self.Label = None self.Suggestion = None self.Score = None def _deserialize(self, params): self.Label = params.get("Label") self.Suggestion = params.get("Suggestion") self.Score = params.get("Score") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class TaskResult(AbstractModel): """创建任务时的返回结果 """ def __init__(self): r""" :param DataId: 该字段用于返回创建视频审核任务时在TaskInput结构内传入的DataId,用于标识具体审核任务。 注意:此字段可能返回 null,表示取不到有效值。 :type DataId: str :param TaskId: 该字段用于返回视频审核任务所生成的任务ID,用于标识具体审核任务,方便后续查询和管理。 注意:此字段可能返回 null,表示取不到有效值。 :type TaskId: str :param Code: 该字段用于返回任务创建的状态,如返回OK则代表任务创建成功,其他返回值可参考公共错误码。 注意:此字段可能返回 null,表示取不到有效值。 :type Code: str :param Message: **仅在Code的返回值为错误码时生效**,用于返回错误的详情内容。 注意:此字段可能返回 null,表示取不到有效值。 :type Message: str """ self.DataId = None self.TaskId = None self.Code = None self.Message = None def _deserialize(self, params): self.DataId = params.get("DataId") self.TaskId = params.get("TaskId") self.Code = params.get("Code") self.Message = params.get("Message") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set))
36.196187
314
0.62937
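A minimal usage sketch for the model classes above: _deserialize() copies a plain dict (typically a parsed API response) onto the model's attributes and warns about any keys it does not recognize. The payload below is invented for illustration.

from tencentcloud.vm.v20201229.models import TaskLabel

payload = {"Label": "Porn", "Suggestion": "Block", "Score": 99}
label = TaskLabel()
label._deserialize(payload)  # every key matches an attribute, so no warning fires
print(label.Label, label.Suggestion, label.Score)  # -> Porn Block 99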
acef9dd6ce4ecf4978be45b5952b77ee12dba8bd
36
py
Python
c2cciutils/scripts/__init__.py
camptocamp/c2cciutils
665263c0b157daefa0386c7508b8b3156cd42fee
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
c2cciutils/scripts/__init__.py
camptocamp/c2cciutils
665263c0b157daefa0386c7508b8b3156cd42fee
[ "BSD-2-Clause-FreeBSD" ]
53
2020-12-01T10:13:19.000Z
2022-03-08T14:23:12.000Z
c2cciutils/scripts/__init__.py
camptocamp/c2cciutils
665263c0b157daefa0386c7508b8b3156cd42fee
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
""" The scripts main functions. """
9
27
0.638889
acef9e2e785180c0acdd21913a2fef860773aadb
2,808
py
Python
src/api/views/api_v1/bp_user.py
chinanala/liuli
cfa3b50aacdb14e4c913b296ac5f4fa0dd180578
[ "Apache-2.0" ]
null
null
null
src/api/views/api_v1/bp_user.py
chinanala/liuli
cfa3b50aacdb14e4c913b296ac5f4fa0dd180578
[ "Apache-2.0" ]
2
2021-12-29T03:41:00.000Z
2021-12-29T05:41:47.000Z
src/api/views/api_v1/bp_user.py
chinanala/liuli
cfa3b50aacdb14e4c913b296ac5f4fa0dd180578
[ "Apache-2.0" ]
null
null
null
""" Created by howie.hu at 2022-04-12. Description: 用户API Changelog: all notable changes to this file will be documented """ import datetime from flask import Blueprint, current_app, request from flask_jwt_extended import create_access_token from src.api.common import ( ResponseCode, ResponseField, ResponseReply, UniResponse, jwt_required, response_handle, ) from src.databases import MongodbBase, mongodb_find from src.utils import LOGGER, md5_encryption bp_user = Blueprint("user", __name__, url_prefix="/user") @bp_user.route("/token_valid", methods=["POST"], strict_slashes=False) @jwt_required() def token_valid(): """验证jwt是否有效 eg: { "username": "liuli" } Returns: Response: 响应类 """ return response_handle(request=request, dict_value=UniResponse.SUCCESS) @bp_user.route("/login", methods=["POST"], strict_slashes=False) def login(): """用户登录接口 eg: { "username": "liuli", "password": "liuli" } Token Demo: "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJmcmVzaCI6ZmFsc2UsImlhdCI6MTYyNzc1MDQ1OCwianRpIjoiNzJjZjZkYzYtZDE5NS00NGRhLTg2NWUtNmNhZmY3MTdkMjMwIiwidHlwZSI6ImFjY2VzcyIsInN1YiI6MTU3Njc5NTY4OTAsIm5iZiI6MTYyNzc1MDQ1OH0.xwUuyTYoXFIymE6RqnEuuteyFbYiMmY72YYtIUMfqNY" Returns: Response: Flask响应类 """ # 获取基本配置 mongodb_base: MongodbBase = current_app.config["mongodb_base"] app_logger: LOGGER = current_app.config["app_logger"] coll = mongodb_base.get_collection(coll_name="liuli_user") # 获取基础数据 post_data: dict = request.json username = post_data.get("username") or "" password = md5_encryption(f"{post_data.get('password')}") user_db_res = mongodb_find( coll_conn=coll, filter_dict={"username": username, "password": password}, return_dict={"_id": 0}, ) user_info_list = user_db_res["info"] if username and password and user_db_res["status"] and len(user_info_list) == 1: # 半年过期一次 259200 expires_delta = datetime.timedelta(minutes=259200) access_token = create_access_token( identity=username, expires_delta=expires_delta ) result = { ResponseField.DATA: {"token": access_token, "username": username}, ResponseField.MESSAGE: ResponseReply.SUCCESS, ResponseField.STATUS: ResponseCode.SUCCESS, } else: result = { ResponseField.DATA: {}, ResponseField.MESSAGE: ResponseReply.USER_LOGIN_ERROR, ResponseField.STATUS: ResponseCode.USER_LOGIN_ERROR, } err_info = f"login failed! request data -> {post_data}, db response info -> {user_db_res}" app_logger.error(err_info) return response_handle(request=request, dict_value=result)
32.275862
258
0.689815
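A hedged sketch of exercising the /user/login route above with requests. The host, port, and the /v1 prefix are assumptions (they depend on how the api_v1 blueprint is registered), as is the existence of a liuli/liuli user in the liuli_user collection.

import requests

resp = requests.post(
    "http://localhost:8765/v1/user/login",  # hypothetical base URL and prefix
    json={"username": "liuli", "password": "liuli"},
)
body = resp.json()
# On success the handler puts a JWT under body["data"]["token"].
print(body)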
acef9f0672c45393754c4467d893801c598d7a01
15,740
py
Python
grafeas/grafeas_v1/types/vulnerability.py
googleapis/python-grafeas
89d54545ff6c05e223e4b03d311df119da3d94c2
[ "Apache-2.0" ]
9
2020-09-19T17:50:00.000Z
2022-03-24T11:37:27.000Z
grafeas/grafeas_v1/types/vulnerability.py
googleapis/python-grafeas
89d54545ff6c05e223e4b03d311df119da3d94c2
[ "Apache-2.0" ]
58
2020-02-07T01:01:45.000Z
2022-03-22T16:52:19.000Z
grafeas/grafeas_v1/types/vulnerability.py
googleapis/python-grafeas
89d54545ff6c05e223e4b03d311df119da3d94c2
[ "Apache-2.0" ]
6
2020-02-07T00:38:27.000Z
2022-01-29T08:11:14.000Z
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from grafeas.grafeas_v1.types import common from grafeas.grafeas_v1.types import cvss from grafeas.grafeas_v1.types import package __protobuf__ = proto.module( package="grafeas.v1", manifest={"Severity", "VulnerabilityNote", "VulnerabilityOccurrence",}, ) class Severity(proto.Enum): r"""Note provider assigned severity/impact ranking.""" SEVERITY_UNSPECIFIED = 0 MINIMAL = 1 LOW = 2 MEDIUM = 3 HIGH = 4 CRITICAL = 5 class VulnerabilityNote(proto.Message): r"""A security vulnerability that can be found in resources. Attributes: cvss_score (float): The CVSS score of this vulnerability. CVSS score is on a scale of 0 - 10 where 0 indicates low severity and 10 indicates high severity. severity (grafeas.grafeas_v1.types.Severity): The note provider assigned severity of this vulnerability. details (Sequence[grafeas.grafeas_v1.types.VulnerabilityNote.Detail]): Details of all known distros and packages affected by this vulnerability. cvss_v3 (grafeas.grafeas_v1.types.CVSSv3): The full description of the CVSSv3 for this vulnerability. windows_details (Sequence[grafeas.grafeas_v1.types.VulnerabilityNote.WindowsDetail]): Windows details get their own format because the information format and model don't match a normal detail. Specifically Windows updates are done as patches, thus Windows vulnerabilities really are a missing package, rather than a package being at an incorrect version. source_update_time (google.protobuf.timestamp_pb2.Timestamp): The time this information was last changed at the source. This is an upstream timestamp from the underlying information source - e.g. Ubuntu security tracker. """ class Detail(proto.Message): r"""A detail for a distro and package affected by this vulnerability and its associated fix (if one is available). Attributes: severity_name (str): The distro assigned severity of this vulnerability. description (str): A vendor-specific description of this vulnerability. package_type (str): The type of package; whether native or non native (e.g., ruby gems, node.js packages, etc.). affected_cpe_uri (str): Required. The `CPE URI <https://cpe.mitre.org/specification/>`__ this vulnerability affects. affected_package (str): Required. The package this vulnerability affects. affected_version_start (grafeas.grafeas_v1.types.Version): The version number at the start of an interval in which this vulnerability exists. A vulnerability can affect a package between version numbers that are disjoint sets of intervals (example: [1.0.0-1.1.0], [2.4.6-2.4.8] and [4.5.6-4.6.8]) each of which will be represented in its own Detail. If a specific affected version is provided by a vulnerability database, affected_version_start and affected_version_end will be the same in that Detail. affected_version_end (grafeas.grafeas_v1.types.Version): The version number at the end of an interval in which this vulnerability exists. 
A vulnerability can affect a package between version numbers that are disjoint sets of intervals (example: [1.0.0-1.1.0], [2.4.6-2.4.8] and [4.5.6-4.6.8]) each of which will be represented in its own Detail. If a specific affected version is provided by a vulnerability database, affected_version_start and affected_version_end will be the same in that Detail. fixed_cpe_uri (str): The distro recommended `CPE URI <https://cpe.mitre.org/specification/>`__ to update to that contains a fix for this vulnerability. It is possible for this to be different from the affected_cpe_uri. fixed_package (str): The distro recommended package to update to that contains a fix for this vulnerability. It is possible for this to be different from the affected_package. fixed_version (grafeas.grafeas_v1.types.Version): The distro recommended version to update to that contains a fix for this vulnerability. Setting this to VersionKind.MAXIMUM means no such version is yet available. is_obsolete (bool): Whether this detail is obsolete. Occurrences are expected not to point to obsolete details. source_update_time (google.protobuf.timestamp_pb2.Timestamp): The time this information was last changed at the source. This is an upstream timestamp from the underlying information source - e.g. Ubuntu security tracker. source (str): The source from which the information in this Detail was obtained. vendor (str): The name of the vendor of the product. """ severity_name = proto.Field(proto.STRING, number=1,) description = proto.Field(proto.STRING, number=2,) package_type = proto.Field(proto.STRING, number=3,) affected_cpe_uri = proto.Field(proto.STRING, number=4,) affected_package = proto.Field(proto.STRING, number=5,) affected_version_start = proto.Field( proto.MESSAGE, number=6, message=package.Version, ) affected_version_end = proto.Field( proto.MESSAGE, number=7, message=package.Version, ) fixed_cpe_uri = proto.Field(proto.STRING, number=8,) fixed_package = proto.Field(proto.STRING, number=9,) fixed_version = proto.Field(proto.MESSAGE, number=10, message=package.Version,) is_obsolete = proto.Field(proto.BOOL, number=11,) source_update_time = proto.Field( proto.MESSAGE, number=12, message=timestamp_pb2.Timestamp, ) source = proto.Field(proto.STRING, number=13,) vendor = proto.Field(proto.STRING, number=14,) class WindowsDetail(proto.Message): r""" Attributes: cpe_uri (str): Required. The `CPE URI <https://cpe.mitre.org/specification/>`__ this vulnerability affects. name (str): Required. The name of this vulnerability. description (str): The description of this vulnerability. fixing_kbs (Sequence[grafeas.grafeas_v1.types.VulnerabilityNote.WindowsDetail.KnowledgeBase]): Required. The names of the KBs which have hotfixes to mitigate this vulnerability. Note that there may be multiple hotfixes (and thus multiple KBs) that mitigate a given vulnerability. Currently any listed KBs presence is considered a fix. """ class KnowledgeBase(proto.Message): r""" Attributes: name (str): The KB name (generally of the form KB[0-9]+ (e.g., KB123456)). url (str): A link to the KB in the [Windows update catalog] (https://www.catalog.update.microsoft.com/). 
""" name = proto.Field(proto.STRING, number=1,) url = proto.Field(proto.STRING, number=2,) cpe_uri = proto.Field(proto.STRING, number=1,) name = proto.Field(proto.STRING, number=2,) description = proto.Field(proto.STRING, number=3,) fixing_kbs = proto.RepeatedField( proto.MESSAGE, number=4, message="VulnerabilityNote.WindowsDetail.KnowledgeBase", ) cvss_score = proto.Field(proto.FLOAT, number=1,) severity = proto.Field(proto.ENUM, number=2, enum="Severity",) details = proto.RepeatedField(proto.MESSAGE, number=3, message=Detail,) cvss_v3 = proto.Field(proto.MESSAGE, number=4, message=cvss.CVSSv3,) windows_details = proto.RepeatedField( proto.MESSAGE, number=5, message=WindowsDetail, ) source_update_time = proto.Field( proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp, ) class VulnerabilityOccurrence(proto.Message): r"""An occurrence of a severity vulnerability on a resource. Attributes: type_ (str): The type of package; whether native or non native (e.g., ruby gems, node.js packages, etc.). severity (grafeas.grafeas_v1.types.Severity): Output only. The note provider assigned severity of this vulnerability. cvss_score (float): Output only. The CVSS score of this vulnerability. CVSS score is on a scale of 0 - 10 where 0 indicates low severity and 10 indicates high severity. cvssv3 (grafeas.grafeas_v1.types.VulnerabilityOccurrence.CVSSV3): The cvss v3 score for the vulnerability. package_issue (Sequence[grafeas.grafeas_v1.types.VulnerabilityOccurrence.PackageIssue]): Required. The set of affected locations and their fixes (if available) within the associated resource. short_description (str): Output only. A one sentence description of this vulnerability. long_description (str): Output only. A detailed description of this vulnerability. related_urls (Sequence[grafeas.grafeas_v1.types.RelatedUrl]): Output only. URLs related to this vulnerability. effective_severity (grafeas.grafeas_v1.types.Severity): The distro assigned severity for this vulnerability when it is available, otherwise this is the note provider assigned severity. When there are multiple PackageIssues for this vulnerability, they can have different effective severities because some might be provided by the distro while others are provided by the language ecosystem for a language pack. For this reason, it is advised to use the effective severity on the PackageIssue level. In the case where multiple PackageIssues have differing effective severities, this field should be the highest severity for any of the PackageIssues. fix_available (bool): Output only. Whether at least one of the affected packages has a fix available. """ class CVSSV3(proto.Message): r"""The CVSS v3 score for this vulnerability. Attributes: base_score (float): The base score for for this vulnerability according to cvss v3. severity (grafeas.grafeas_v1.types.Severity): The severity rating assigned to this vulnerability by vulnerability provider. """ base_score = proto.Field(proto.FLOAT, number=1,) severity = proto.Field(proto.ENUM, number=2, enum="Severity",) class PackageIssue(proto.Message): r"""A detail for a distro and package this vulnerability occurrence was found in and its associated fix (if one is available). Attributes: affected_cpe_uri (str): Required. The `CPE URI <https://cpe.mitre.org/specification/>`__ this vulnerability was found in. affected_package (str): Required. The package this vulnerability was found in. affected_version (grafeas.grafeas_v1.types.Version): Required. 
The version of the package that is installed on the resource affected by this vulnerability. fixed_cpe_uri (str): The `CPE URI <https://cpe.mitre.org/specification/>`__ this vulnerability was fixed in. It is possible for this to be different from the affected_cpe_uri. fixed_package (str): The package this vulnerability was fixed in. It is possible for this to be different from the affected_package. fixed_version (grafeas.grafeas_v1.types.Version): Required. The version of the package this vulnerability was fixed in. Setting this to VersionKind.MAXIMUM means no fix is yet available. fix_available (bool): Output only. Whether a fix is available for this package. package_type (str): The type of package (e.g. OS, MAVEN, GO). effective_severity (grafeas.grafeas_v1.types.Severity): The distro or language system assigned severity for this vulnerability when that is available and note provider assigned severity when it is not available. """ affected_cpe_uri = proto.Field(proto.STRING, number=1,) affected_package = proto.Field(proto.STRING, number=2,) affected_version = proto.Field( proto.MESSAGE, number=3, message=package.Version, ) fixed_cpe_uri = proto.Field(proto.STRING, number=4,) fixed_package = proto.Field(proto.STRING, number=5,) fixed_version = proto.Field(proto.MESSAGE, number=6, message=package.Version,) fix_available = proto.Field(proto.BOOL, number=7,) package_type = proto.Field(proto.STRING, number=8,) effective_severity = proto.Field(proto.ENUM, number=9, enum="Severity",) type_ = proto.Field(proto.STRING, number=1,) severity = proto.Field(proto.ENUM, number=2, enum="Severity",) cvss_score = proto.Field(proto.FLOAT, number=3,) cvssv3 = proto.Field(proto.MESSAGE, number=10, message=CVSSV3,) package_issue = proto.RepeatedField(proto.MESSAGE, number=4, message=PackageIssue,) short_description = proto.Field(proto.STRING, number=5,) long_description = proto.Field(proto.STRING, number=6,) related_urls = proto.RepeatedField( proto.MESSAGE, number=7, message=common.RelatedUrl, ) effective_severity = proto.Field(proto.ENUM, number=8, enum="Severity",) fix_available = proto.Field(proto.BOOL, number=9,) __all__ = tuple(sorted(__protobuf__.manifest))
44.843305
106
0.624333
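A minimal construction sketch for the proto-plus messages above; proto-plus message classes accept their fields as keyword arguments, and the field names below follow the class definitions shown.

from grafeas.grafeas_v1.types import vulnerability

occ = vulnerability.VulnerabilityOccurrence(
    type_="OS",  # trailing underscore because "type" is a reserved name
    severity=vulnerability.Severity.HIGH,
    cvss_score=8.1,
    fix_available=True,
)
print(occ.severity, occ.cvss_score)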
acef9fc5105d6423303f8ca32d72215f78241359
4,156
py
Python
python/ray/tune/examples/mnist_pytorch.py
alex-petrenko/ray
dfc94ce7bcd5d9d008822efdeec17c3f6bb9c606
[ "Apache-2.0" ]
1
2020-01-20T07:28:19.000Z
2020-01-20T07:28:19.000Z
python/ray/tune/examples/mnist_pytorch.py
ashuein/ray
bcc379556b135ee2e472b0e4b388c9e1f8274dc9
[ "Apache-2.0" ]
4
2019-03-04T13:03:24.000Z
2019-06-06T11:25:07.000Z
python/ray/tune/examples/mnist_pytorch.py
ashuein/ray
bcc379556b135ee2e472b0e4b388c9e1f8274dc9
[ "Apache-2.0" ]
1
2020-04-30T09:06:20.000Z
2020-04-30T09:06:20.000Z
# Original Code here:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

import ray
from ray import tune
from ray.tune import track
from ray.tune.schedulers import AsyncHyperBandScheduler

# Change these values if you want the training to run quicker or slower.
EPOCH_SIZE = 512
TEST_SIZE = 256


class Net(nn.Module):
    def __init__(self, config):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 3, kernel_size=3)
        self.fc = nn.Linear(192, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 3))
        x = x.view(-1, 192)
        x = self.fc(x)
        return F.log_softmax(x, dim=1)


def train(model, optimizer, train_loader, device):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if batch_idx * len(data) > EPOCH_SIZE:
            return
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()


def test(model, data_loader, device):
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(data_loader):
            if batch_idx * len(data) > TEST_SIZE:
                break
            data, target = data.to(device), target.to(device)
            outputs = model(data)
            _, predicted = torch.max(outputs.data, 1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
    return correct / total


def get_data_loaders():
    mnist_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "~/data", train=True, download=True, transform=mnist_transforms),
        batch_size=64,
        shuffle=True)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST("~/data", train=False, transform=mnist_transforms),
        batch_size=64,
        shuffle=True)
    return train_loader, test_loader


def train_mnist(config):
    use_cuda = config.get("use_gpu") and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    train_loader, test_loader = get_data_loaders()
    model = Net(config).to(device)
    optimizer = optim.SGD(
        model.parameters(), lr=config["lr"], momentum=config["momentum"])
    while True:
        train(model, optimizer, train_loader, device)
        acc = test(model, test_loader, device)
        track.log(mean_accuracy=acc)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
    parser.add_argument(
        "--cuda",
        action="store_true",
        default=False,
        help="Enables GPU training")
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    parser.add_argument(
        "--ray-redis-address",
        help="Address of Ray cluster for seamless distributed execution.")
    args = parser.parse_args()
    if args.ray_redis_address:
        ray.init(redis_address=args.ray_redis_address)
    sched = AsyncHyperBandScheduler(
        time_attr="training_iteration", metric="mean_accuracy")
    tune.run(
        train_mnist,
        name="exp",
        scheduler=sched,
        stop={
            "mean_accuracy": 0.98,
            "training_iteration": 5 if args.smoke_test else 20
        },
        resources_per_trial={
            "cpu": 2,
            "gpu": int(args.cuda)
        },
        num_samples=1 if args.smoke_test else 10,
        config={
            "lr": tune.sample_from(lambda spec: 10**(-10 * np.random.rand())),
            "momentum": tune.uniform(0.1, 0.9),
            "use_gpu": int(args.cuda)
        })
30.785185
79
0.63282
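A note on the Ray Tune record above: the search space `tune.sample_from(lambda spec: 10**(-10 * np.random.rand()))` draws the learning rate log-uniformly across ten decades. A minimal sketch of that sampling, assuming nothing beyond NumPy (the fixed seed and sample count here are illustrative, not from the record):

import numpy as np

rng = np.random.default_rng(0)  # illustrative fixed seed, not in the record

def sample_lr() -> float:
    # 10**(-10 * U) with U ~ Uniform[0, 1) places the exponent uniformly
    # in (-10, 0], i.e. the learning rate is log-uniform over (1e-10, 1.0].
    return float(10.0 ** (-10.0 * rng.random()))

print(sorted(sample_lr() for _ in range(5)))  # values spanning several orders of magnitude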
acefa12970b6b2770fb86b8fbccb1dbac538390b
2,664
py
Python
764 Largest Plus Sign.py
krishna13052001/LeetCode
cd6ec626bea61f0bd9e8493622074f9e69a7a1c3
[ "MIT" ]
872
2015-06-15T12:02:41.000Z
2022-03-30T08:44:35.000Z
764 Largest Plus Sign.py
nadeemshaikh-github/LeetCode
3fb14aeea62a960442e47dfde9f964c7ffce32be
[ "MIT" ]
8
2015-06-21T15:11:59.000Z
2022-02-01T11:22:34.000Z
764 Largest Plus Sign.py
nadeemshaikh-github/LeetCode
3fb14aeea62a960442e47dfde9f964c7ffce32be
[ "MIT" ]
328
2015-06-28T03:10:35.000Z
2022-03-29T11:05:28.000Z
#!/usr/bin/python3 """ In a 2D grid from (0, 0) to (N-1, N-1), every cell contains a 1, except those cells in the given list mines which are 0. What is the largest axis-aligned plus sign of 1s contained in the grid? Return the order of the plus sign. If there is none, return 0. An "axis-aligned plus sign of 1s of order k" has some center grid[x][y] = 1 along with 4 arms of length k-1 going up, down, left, and right, and made of 1s. This is demonstrated in the diagrams below. Note that there could be 0s or 1s beyond the arms of the plus sign, only the relevant area of the plus sign is checked for 1s. Examples of Axis-Aligned Plus Signs of Order k: Order 1: 000 010 000 Order 2: 00000 00100 01110 00100 00000 Order 3: 0000000 0001000 0001000 0111110 0001000 0001000 0000000 Example 1: Input: N = 5, mines = [[4, 2]] Output: 2 Explanation: 11111 11111 11111 11111 11011 In the above grid, the largest plus sign can only be order 2. One of them is marked in bold. Example 2: Input: N = 2, mines = [] Output: 1 Explanation: There is no plus sign of order 2, but there is of order 1. Example 3: Input: N = 1, mines = [[0, 0]] Output: 0 Explanation: There is no plus sign, so return 0. Note: N will be an integer in the range [1, 500]. mines will have length at most 5000. mines[i] will be length 2 and consist of integers in the range [0, N-1]. (Additionally, programs submitted in C, C++, or C# will be judged with a slightly smaller time limit.) """ from typing import List class Solution: def orderOfLargestPlusSign(self, N: int, mines: List[List[int]]) -> int: """ < ^ > V four directions Let F[i][j][k] be the number of consecutive 1s including G[i][j] itself """ G = [[1 for _ in range(N)] for _ in range(N)] for i, j in mines: G[i][j] = 0 F = [[[G[i][j] for _ in range(4)] for j in range(N)] for i in range(N)] for i in range(N): for j in range(N): if j - 1 >= 0 and G[i][j] == 1: F[i][j][0] = F[i][j-1][0] + 1 if i - 1 >= 0 and G[i][j] == 1: F[i][j][1] = F[i-1][j][1] + 1 for i in range(N-1, -1, -1): for j in range(N-1, -1, -1): if j + 1 < N and G[i][j] == 1: F[i][j][2] = F[i][j+1][2] + 1 if i + 1 < N and G[i][j] == 1: F[i][j][3] = F[i+1][j][3] + 1 ret = 0 for i in range(N): for j in range(N): ret = max(ret, min(F[i][j])) return ret if __name__ == "__main__": assert Solution().orderOfLargestPlusSign(5, [[4, 2]]) == 2
25.132075
80
0.579204
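The DP in the record above computes, for every cell, the four arm lengths in two sweeps and takes the minimum over the four directions. A brute-force cross-check of the same definition (a hypothetical standalone function, O(N^3) and only suitable for small N, validating the same cases the record asserts):

from typing import List

def order_brute(N: int, mines: List[List[int]]) -> int:
    banned = {tuple(m) for m in mines}

    def arm(i: int, j: int, di: int, dj: int) -> int:
        # Count consecutive 1-cells starting at (i, j), center included.
        k = 0
        while 0 <= i < N and 0 <= j < N and (i, j) not in banned:
            k += 1
            i, j = i + di, j + dj
        return k

    return max(
        (min(arm(i, j, -1, 0), arm(i, j, 1, 0),
             arm(i, j, 0, -1), arm(i, j, 0, 1))
         for i in range(N) for j in range(N)),
        default=0,
    )

assert order_brute(5, [[4, 2]]) == 2
assert order_brute(2, []) == 1
assert order_brute(1, [[0, 0]]) == 0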
acefa2017f1dcd5c020838b00cd61d3abad7a8aa
8,725
py
Python
aib/init/tables/in_ledger_params.py
FrankMillman/AccInABox
fc4cd26bf525c1bbe8e541d9339c69b0adbad546
[ "MIT" ]
3
2015-02-25T19:44:43.000Z
2020-12-18T05:49:09.000Z
aib/init/tables/in_ledger_params.py
FrankMillman/AccInABox
fc4cd26bf525c1bbe8e541d9339c69b0adbad546
[ "MIT" ]
1
2019-11-20T12:31:34.000Z
2019-11-20T12:31:35.000Z
aib/init/tables/in_ledger_params.py
FrankMillman/AccInABox
fc4cd26bf525c1bbe8e541d9339c69b0adbad546
[ "MIT" ]
1
2020-06-07T06:25:19.000Z
2020-06-07T06:25:19.000Z
# table definition table = { 'table_name' : 'in_ledger_params', 'module_id' : 'in', 'short_descr' : 'In warehouses', 'long_descr' : 'Inventory warehouses with parameters', 'sub_types' : None, 'sub_trans' : None, 'sequence' : None, 'tree_params' : None, 'roll_params' : None, 'indexes' : None, 'ledger_col' : None, 'defn_company' : None, 'data_company' : None, 'read_only' : False, } # column definitions cols = [] cols.append ({ 'col_name' : 'row_id', 'data_type' : 'AUTO', 'short_descr': 'Row id', 'long_descr' : 'Row id', 'col_head' : 'Row', 'key_field' : 'Y', 'data_source': 'gen', 'condition' : None, 'allow_null' : False, 'allow_amend': False, 'max_len' : 0, 'db_scale' : 0, 'scale_ptr' : None, 'dflt_val' : None, 'dflt_rule' : None, 'col_checks' : None, 'fkey' : None, 'choices' : None, }) cols.append ({ 'col_name' : 'created_id', 'data_type' : 'INT', 'short_descr': 'Created id', 'long_descr' : 'Created row id', 'col_head' : 'Created', 'key_field' : 'N', 'data_source': 'gen', 'condition' : None, 'allow_null' : False, 'allow_amend': False, 'max_len' : 0, 'db_scale' : 0, 'scale_ptr' : None, 'dflt_val' : '0', 'dflt_rule' : None, 'col_checks' : None, 'fkey' : None, 'choices' : None, }) cols.append ({ 'col_name' : 'deleted_id', 'data_type' : 'INT', 'short_descr': 'Deleted id', 'long_descr' : 'Deleted row id', 'col_head' : 'Deleted', 'key_field' : 'N', 'data_source': 'gen', 'condition' : None, 'allow_null' : False, 'allow_amend': False, 'max_len' : 0, 'db_scale' : 0, 'scale_ptr' : None, 'dflt_val' : '0', 'dflt_rule' : None, 'col_checks' : None, 'fkey' : None, 'choices' : None, }) cols.append ({ 'col_name' : 'ledger_id', 'data_type' : 'TEXT', # 'short_descr': 'Ledger id', # 'long_descr' : 'Ledger id', # 'col_head' : 'Ledger', 'short_descr': 'Warehouse code', 'long_descr' : 'Warehouse code', 'col_head' : 'Wh code', 'key_field' : 'A', 'data_source': 'input', 'condition' : None, 'allow_null' : False, 'allow_amend': False, 'max_len' : 20, 'db_scale' : 0, 'scale_ptr' : None, 'dflt_val' : None, 'dflt_rule' : None, 'col_checks' : None, 'fkey' : None, 'choices' : None, }) cols.append ({ 'col_name' : 'descr', 'data_type' : 'TEXT', # 'short_descr': 'Description', # 'long_descr' : 'Description', # 'col_head' : 'Description', 'short_descr': 'Warehouse name', 'long_descr' : 'Warehouse name', 'col_head' : 'Name', 'key_field' : 'N', 'data_source': 'input', 'condition' : None, 'allow_null' : False, 'allow_amend': True, 'max_len' : 30, 'db_scale' : 0, 'scale_ptr' : None, 'dflt_val' : None, 'dflt_rule' : None, 'col_checks' : None, 'fkey' : None, 'choices' : None, }) cols.append ({ 'col_name' : 'gl_code_id', 'data_type' : 'INT', 'short_descr': 'Gl control a/c', 'long_descr' : 'Gl control a/c', 'col_head' : 'Gl code', 'key_field' : 'N', 'data_source': 'null_if', 'condition' : [['where', '', '_param.gl_integration', 'is', '$False', '']], 'allow_null' : True, # null means 'not integrated to g/l' 'allow_amend': [['where', '', '$value', 'is', '$None', '']], 'max_len' : 0, 'db_scale' : 0, 'scale_ptr' : None, 'dflt_val' : None, 'dflt_rule' : None, 'col_checks' : None, 'fkey' : ['gl_codes', 'row_id', 'gl_code', 'gl_code', False, 'gl_codes'], 'choices' : None, }) cols.append ({ 'col_name' : 'location_row_id', 'data_type' : 'INT', 'short_descr': 'Location row id', 'long_descr' : 'Location row id', 'col_head' : 'Location', 'key_field' : 'N', 'data_source': 'dflt_if', 'condition' : [['where', '', '_param.location_row_id', 'is not', '$None', '']], 'allow_null' : False, 'allow_amend': False, 'max_len' : 0, 'db_scale' : 0, 'scale_ptr' 
: None, 'dflt_val' : None, 'dflt_rule' : ( '<case>' '<compare test="[[`if`, ``, `_param.location_row_id`, `is not`, `$None`, ``]]">' '<fld_val name="_param.location_row_id"/>' '</compare>' '<compare test="[[`if`, ``, `gl_code_id`, `is not`, `$None`, ``]]">' '<case>' '<compare test="[[`if`, ``, `gl_code_id>valid_loc_ids>is_leaf`, `is`, `$True`, ``]]">' '<fld_val name="gl_code_id>valid_loc_ids"/>' '</compare>' '</case>' '</compare>' '<default>' '<fld_val name="_param.dflt_loc_row_id"/>' '</default>' '</case>' ), 'col_checks' : [ [ 'location_code', 'Invalid location', [ ['check', '', '_param.gl_integration', 'is', '$False', ''], ['or', '', '$value', 'pyfunc', 'db.checks.valid_loc_id,"gl_code_id"', ''], ], ], ], 'fkey' : ['adm_locations', 'row_id', 'location_id', 'location_id', False, 'locs'], 'choices' : None, }) cols.append ({ 'col_name' : 'currency_id', 'data_type' : 'INT', 'short_descr': 'Currency id', 'long_descr' : 'Currency id - if specified, all transactions will share this currency', 'col_head' : 'Curr', 'key_field' : 'N', 'data_source': 'dflt_if', 'condition' : [['where', '', '_param.currency_id', 'is not', '$None', '']], 'allow_null' : False, 'allow_amend': False, 'max_len' : 0, 'db_scale' : 0, 'scale_ptr' : None, 'dflt_val' : '{_param.currency_id}', 'dflt_rule' : None, 'col_checks' : None, 'fkey' : ['adm_currencies', 'row_id', 'currency', 'currency', False, 'curr'], 'choices' : None, }) cols.append ({ 'col_name' : 'allow_neg_stock', 'data_type' : 'BOOL', 'short_descr': 'Allow negative stock?', 'long_descr' : 'Allow stock balance to go below 0?', 'col_head' : 'Neg?', 'key_field' : 'N', 'data_source': 'input', 'condition' : None, 'allow_null' : False, 'allow_amend': False, 'max_len' : 0, 'db_scale' : 0, 'scale_ptr' : None, 'dflt_val' : 'false', 'dflt_rule' : None, 'col_checks' : None, 'fkey' : None, 'choices' : None, }) # virtual column definitions virt = [] # virt.append ({ # 'col_name' : 'module_id', # 'data_type' : 'TEXT', # 'short_descr': 'Module', # 'long_descr' : 'Module id', # 'col_head' : '', # 'sql' : "'in'", # }) # virt.append ({ # 'col_name' : 'module_row_id', # 'data_type' : 'INT', # 'short_descr': 'Module row id', # 'long_descr' : 'Module row id', # 'col_head' : '', # 'sql' : "SELECT b.row_id FROM {company}.db_modules b WHERE b.module_id = 'in'", # }) """ virt.append ({ 'col_name' : 'wh_code', 'data_type' : 'TEXT', 'short_descr': 'Warehouse code', 'long_descr' : 'Warehouse code', 'col_head' : 'Wh code', 'sql' : 'SELECT a.ledger_id', }) virt.append ({ 'col_name' : 'name', 'data_type' : 'TEXT', 'short_descr': 'Warehouse name', 'long_descr' : 'Warehouse name', 'col_head' : 'Name', 'sql' : 'SELECT a.descr', }) """ # cursor definitions cursors = [] """ cursors.append({ 'cursor_name': 'in_ledg', 'descr': 'Warehouses', 'columns': [ ['ledger_id', 100, False, False], ['descr', 260, True, True], ], 'filter': [], 'sequence': [['ledger_id', False]], }) """ cursors.append({ 'cursor_name': 'whouse', 'title': 'Maintain warehouse params', 'columns': [ ['ledger_id', 100, False, False], ['descr', 240, True, False], ], 'filter': [], 'sequence': [['ledger_id', False]], 'formview_name': 'in_params', }) # actions actions = [] actions.append([ 'after_insert',( '<pyfunc name="db.cache.ledger_inserted"/>' '<case>' '<compare test="[[`check`, ``, `_param.gl_integration`, `is`, `$True`, ``]]">' '<pyfunc name="custom.gl_funcs.setup_ctrl"/>' '</compare>' '</case>' ) ]) actions.append([ 'after_commit', '<pyfunc name="db.cache.ledger_updated"/>' ])
28.054662
100
0.502693
acefa2bae636c45e2da12bbc63a32f9e9bfcb9b2
6,347
py
Python
core/confdb/syntax/media/sources/video/base.py
prorevizor/noc
37e44b8afc64318b10699c06a1138eee9e7d6a4e
[ "BSD-3-Clause" ]
84
2017-10-22T11:01:39.000Z
2022-02-27T03:43:48.000Z
core/confdb/syntax/media/sources/video/base.py
prorevizor/noc
37e44b8afc64318b10699c06a1138eee9e7d6a4e
[ "BSD-3-Clause" ]
22
2017-12-11T07:21:56.000Z
2021-09-23T02:53:50.000Z
core/confdb/syntax/media/sources/video/base.py
prorevizor/noc
37e44b8afc64318b10699c06a1138eee9e7d6a4e
[ "BSD-3-Clause" ]
23
2017-12-06T06:59:52.000Z
2022-02-24T00:02:25.000Z
# ---------------------------------------------------------------------- # ConfDB media sources video syntax # ---------------------------------------------------------------------- # Copyright (C) 2007-2019 The NOC Project # See LICENSE for details # ---------------------------------------------------------------------- # NOC modules from ....defs import DEF from ....patterns import ANY, INTEGER, BOOL MEDIA_SOURCES_VIDEO_SYNTAX = DEF( "video", [ DEF( ANY, [ DEF( "settings", [ DEF( "brightness", [ DEF( INTEGER, name="brightness", required=True, gen="make_video_brightness", ) ], ), DEF( "saturation", [ DEF( INTEGER, name="saturation", required=True, gen="make_video_saturation", ) ], ), DEF( "contrast", [ DEF( INTEGER, name="contrast", required=True, gen="make_video_contrast", ) ], ), DEF( "sharpness", [ DEF( INTEGER, name="sharpness", required=True, gen="make_video_sharpness", ) ], ), DEF( "white-balance", [ DEF( "admin-status", [ DEF( BOOL, required=True, name="admin_status", gen="make_video_white_balance_admin_status", ) ], ), DEF("auto", gen="make_video_white_balance_auto"), DEF( "cr-gain", [ DEF( INTEGER, name="cr_gain", required=True, gen="make_video_white_balance_cr_gain", ) ], ), DEF( "gb-gain", [ DEF( INTEGER, name="gb_gain", required=True, gen="make_video_white_balance_gb_gain", ) ], ), ], ), DEF( "black-light-compensation", [ DEF( "admin-status", [ DEF( BOOL, required=True, name="admin_status", gen="make_video_black_light_compensation_admin_status", ) ], ) ], ), DEF( "wide-dynamic-range", [ DEF( "admin-status", [ DEF( BOOL, required=True, name="admin_status", gen="make_video_wide_dynamic_range_admin_status", ) ], ), DEF( "level", [ DEF( INTEGER, name="level", required=True, gen="make_video_wide_dynamic_range_level", ) ], ), ], ), ], ) ], name="name", multi=True, ) ], )
40.948387
99
0.173625
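The record above builds a declarative syntax tree by nesting `DEF` nodes; each leaf that a normalizer can produce carries a `gen` name such as `make_video_brightness`. A self-contained sketch of walking such a tree to list generator names (the `Def` dataclass here is a hypothetical stand-in for NOC's `DEF`, limited to the attributes the record visibly uses):

from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple

@dataclass
class Def:
    token: Any
    children: List["Def"] = field(default_factory=list)
    gen: Optional[str] = None

def generators(node: Def, path: Tuple[str, ...] = ()):
    # Yield (path, gen) for every node that names a generator.
    path = path + (str(node.token),)
    if node.gen:
        yield path, node.gen
    for child in node.children:
        yield from generators(child, path)

tree = Def("video", [Def("settings", [Def("brightness", gen="make_video_brightness")])])
for p, g in generators(tree):
    print("/".join(p), "->", g)  # video/settings/brightness -> make_video_brightness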
acefa52848f0ccee227069928b58dc3f1d3d6ac3
143,621
py
Python
sdk/python/pulumi_azure/media/_inputs.py
henriktao/pulumi-azure
f1cbcf100b42b916da36d8fe28be3a159abaf022
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure/media/_inputs.py
henriktao/pulumi-azure
f1cbcf100b42b916da36d8fe28be3a159abaf022
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure/media/_inputs.py
henriktao/pulumi-azure
f1cbcf100b42b916da36d8fe28be3a159abaf022
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = [ 'AssetFilterPresentationTimeRangeArgs', 'AssetFilterTrackSelectionArgs', 'AssetFilterTrackSelectionConditionArgs', 'ContentKeyPolicyPolicyOptionArgs', 'ContentKeyPolicyPolicyOptionFairplayConfigurationArgs', 'ContentKeyPolicyPolicyOptionFairplayConfigurationOfflineRentalConfigurationArgs', 'ContentKeyPolicyPolicyOptionPlayreadyConfigurationLicenseArgs', 'ContentKeyPolicyPolicyOptionPlayreadyConfigurationLicensePlayRightArgs', 'ContentKeyPolicyPolicyOptionTokenRestrictionArgs', 'ContentKeyPolicyPolicyOptionTokenRestrictionRequiredClaimArgs', 'JobInputAssetArgs', 'JobOutputAssetArgs', 'LiveEventCrossSiteAccessPolicyArgs', 'LiveEventEncodingArgs', 'LiveEventInputArgs', 'LiveEventInputEndpointArgs', 'LiveEventInputIpAccessControlAllowArgs', 'LiveEventPreviewArgs', 'LiveEventPreviewEndpointArgs', 'LiveEventPreviewIpAccessControlAllowArgs', 'ServiceAccountIdentityArgs', 'ServiceAccountKeyDeliveryAccessControlArgs', 'ServiceAccountStorageAccountArgs', 'StreamingEndpointAccessControlArgs', 'StreamingEndpointAccessControlAkamaiSignatureHeaderAuthenticationKeyArgs', 'StreamingEndpointAccessControlIpAllowArgs', 'StreamingEndpointCrossSiteAccessPolicyArgs', 'StreamingLocatorContentKeyArgs', 'StreamingPolicyCommonEncryptionCbcsArgs', 'StreamingPolicyCommonEncryptionCbcsDefaultContentKeyArgs', 'StreamingPolicyCommonEncryptionCbcsDrmFairplayArgs', 'StreamingPolicyCommonEncryptionCbcsEnabledProtocolsArgs', 'StreamingPolicyCommonEncryptionCencArgs', 'StreamingPolicyCommonEncryptionCencDefaultContentKeyArgs', 'StreamingPolicyCommonEncryptionCencDrmPlayreadyArgs', 'StreamingPolicyCommonEncryptionCencEnabledProtocolsArgs', 'StreamingPolicyNoEncryptionEnabledProtocolsArgs', 'TransformOutputArgs', 'TransformOutputAudioAnalyzerPresetArgs', 'TransformOutputBuiltinPresetArgs', 'TransformOutputFaceDetectorPresetArgs', 'TransformOutputVideoAnalyzerPresetArgs', ] @pulumi.input_type class AssetFilterPresentationTimeRangeArgs: def __init__(__self__, *, end_in_units: Optional[pulumi.Input[int]] = None, force_end: Optional[pulumi.Input[bool]] = None, live_backoff_in_units: Optional[pulumi.Input[int]] = None, presentation_window_in_units: Optional[pulumi.Input[int]] = None, start_in_units: Optional[pulumi.Input[int]] = None, unit_timescale_in_miliseconds: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] end_in_units: The absolute end time boundary. Applies to Video on Demand (VoD). For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by `unit_timescale_in_miliseconds`, so an `end_in_units` of 180 would be for 3 minutes. Use `start_in_units` and `end_in_units` to trim the fragments that will be in the playlist (manifest). For example, `start_in_units` set to 20 and `end_in_units` set to 60 using `unit_timescale_in_miliseconds` in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. 
:param pulumi.Input[bool] force_end: Indicates whether the `end_in_units` property must be present. If true, `end_in_units` must be specified or a bad request code is returned. Applies to Live Streaming only. Allowed values: false, true. :param pulumi.Input[int] live_backoff_in_units: The relative to end right edge. Applies to Live Streaming only. This value defines the latest live position that a client can seek to. Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by `unit_timescale_in_miliseconds`. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. :param pulumi.Input[int] presentation_window_in_units: The relative to end sliding window. Applies to Live Streaming only. Use `presentation_window_in_units` to apply a sliding window of fragments to include in a playlist. The unit is defined by `unit_timescale_in_miliseconds`. For example, set `presentation_window_in_units` to 120 to apply a two-minute sliding window. Media within 2 minutes of the live edge will be included in the playlist. If a fragment straddles the boundary, the entire fragment will be included in the playlist. The minimum presentation window duration is 60 seconds. :param pulumi.Input[int] start_in_units: The absolute start time boundary. Applies to Video on Demand (VoD) or Live Streaming. This is a long value that represents an absolute start point of the stream. The value gets rounded to the closest next GOP start. The unit is defined by `unit_timescale_in_miliseconds`, so a `start_in_units` of 15 would be for 15 seconds. Use `start_in_units` and `end_in_units` to trim the fragments that will be in the playlist (manifest). For example, `start_in_units` set to 20 and `end_in_units` set to 60 using `unit_timescale_in_miliseconds` in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. :param pulumi.Input[int] unit_timescale_in_miliseconds: Specified as the number of miliseconds in one unit timescale. For example, if you want to set a `start_in_units` at 30 seconds, you would use a value of 30 when using the `unit_timescale_in_miliseconds` in 1000. Or if you want to set `start_in_units` in 30 miliseconds, you would use a value of 30 when using the `unit_timescale_in_miliseconds` in 1. Applies timescale to `start_in_units`, `start_timescale` and `presentation_window_in_timescale` and `live_backoff_in_timescale`. """ if end_in_units is not None: pulumi.set(__self__, "end_in_units", end_in_units) if force_end is not None: pulumi.set(__self__, "force_end", force_end) if live_backoff_in_units is not None: pulumi.set(__self__, "live_backoff_in_units", live_backoff_in_units) if presentation_window_in_units is not None: pulumi.set(__self__, "presentation_window_in_units", presentation_window_in_units) if start_in_units is not None: pulumi.set(__self__, "start_in_units", start_in_units) if unit_timescale_in_miliseconds is not None: pulumi.set(__self__, "unit_timescale_in_miliseconds", unit_timescale_in_miliseconds) @property @pulumi.getter(name="endInUnits") def end_in_units(self) -> Optional[pulumi.Input[int]]: """ The absolute end time boundary. Applies to Video on Demand (VoD). 
For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by `unit_timescale_in_miliseconds`, so an `end_in_units` of 180 would be for 3 minutes. Use `start_in_units` and `end_in_units` to trim the fragments that will be in the playlist (manifest). For example, `start_in_units` set to 20 and `end_in_units` set to 60 using `unit_timescale_in_miliseconds` in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. """ return pulumi.get(self, "end_in_units") @end_in_units.setter def end_in_units(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "end_in_units", value) @property @pulumi.getter(name="forceEnd") def force_end(self) -> Optional[pulumi.Input[bool]]: """ Indicates whether the `end_in_units` property must be present. If true, `end_in_units` must be specified or a bad request code is returned. Applies to Live Streaming only. Allowed values: false, true. """ return pulumi.get(self, "force_end") @force_end.setter def force_end(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "force_end", value) @property @pulumi.getter(name="liveBackoffInUnits") def live_backoff_in_units(self) -> Optional[pulumi.Input[int]]: """ The relative to end right edge. Applies to Live Streaming only. This value defines the latest live position that a client can seek to. Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by `unit_timescale_in_miliseconds`. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. """ return pulumi.get(self, "live_backoff_in_units") @live_backoff_in_units.setter def live_backoff_in_units(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "live_backoff_in_units", value) @property @pulumi.getter(name="presentationWindowInUnits") def presentation_window_in_units(self) -> Optional[pulumi.Input[int]]: """ The relative to end sliding window. Applies to Live Streaming only. Use `presentation_window_in_units` to apply a sliding window of fragments to include in a playlist. The unit is defined by `unit_timescale_in_miliseconds`. For example, set `presentation_window_in_units` to 120 to apply a two-minute sliding window. Media within 2 minutes of the live edge will be included in the playlist. If a fragment straddles the boundary, the entire fragment will be included in the playlist. The minimum presentation window duration is 60 seconds. """ return pulumi.get(self, "presentation_window_in_units") @presentation_window_in_units.setter def presentation_window_in_units(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "presentation_window_in_units", value) @property @pulumi.getter(name="startInUnits") def start_in_units(self) -> Optional[pulumi.Input[int]]: """ The absolute start time boundary. Applies to Video on Demand (VoD) or Live Streaming. This is a long value that represents an absolute start point of the stream. The value gets rounded to the closest next GOP start. The unit is defined by `unit_timescale_in_miliseconds`, so a `start_in_units` of 15 would be for 15 seconds. 
Use `start_in_units` and `end_in_units` to trim the fragments that will be in the playlist (manifest). For example, `start_in_units` set to 20 and `end_in_units` set to 60 using `unit_timescale_in_miliseconds` in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. """ return pulumi.get(self, "start_in_units") @start_in_units.setter def start_in_units(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "start_in_units", value) @property @pulumi.getter(name="unitTimescaleInMiliseconds") def unit_timescale_in_miliseconds(self) -> Optional[pulumi.Input[int]]: """ Specified as the number of miliseconds in one unit timescale. For example, if you want to set a `start_in_units` at 30 seconds, you would use a value of 30 when using the `unit_timescale_in_miliseconds` in 1000. Or if you want to set `start_in_units` in 30 miliseconds, you would use a value of 30 when using the `unit_timescale_in_miliseconds` in 1. Applies timescale to `start_in_units`, `start_timescale` and `presentation_window_in_timescale` and `live_backoff_in_timescale`. """ return pulumi.get(self, "unit_timescale_in_miliseconds") @unit_timescale_in_miliseconds.setter def unit_timescale_in_miliseconds(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "unit_timescale_in_miliseconds", value) @pulumi.input_type class AssetFilterTrackSelectionArgs: def __init__(__self__, *, conditions: pulumi.Input[Sequence[pulumi.Input['AssetFilterTrackSelectionConditionArgs']]]): """ :param pulumi.Input[Sequence[pulumi.Input['AssetFilterTrackSelectionConditionArgs']]] conditions: One or more `condition` blocks as defined above. """ pulumi.set(__self__, "conditions", conditions) @property @pulumi.getter def conditions(self) -> pulumi.Input[Sequence[pulumi.Input['AssetFilterTrackSelectionConditionArgs']]]: """ One or more `condition` blocks as defined above. """ return pulumi.get(self, "conditions") @conditions.setter def conditions(self, value: pulumi.Input[Sequence[pulumi.Input['AssetFilterTrackSelectionConditionArgs']]]): pulumi.set(self, "conditions", value) @pulumi.input_type class AssetFilterTrackSelectionConditionArgs: def __init__(__self__, *, operation: Optional[pulumi.Input[str]] = None, property: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] operation: The condition operation to test a track property against. Supported values are `Equal` and `NotEqual`. :param pulumi.Input[str] property: The track property to compare. Supported values are `Bitrate`, `FourCC`, `Language`, `Name` and `Type`. Check [documentation](https://docs.microsoft.com/en-us/azure/media-services/latest/filters-concept) for more details. :param pulumi.Input[str] value: The track property value to match or not match. """ if operation is not None: pulumi.set(__self__, "operation", operation) if property is not None: pulumi.set(__self__, "property", property) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def operation(self) -> Optional[pulumi.Input[str]]: """ The condition operation to test a track property against. Supported values are `Equal` and `NotEqual`. 
""" return pulumi.get(self, "operation") @operation.setter def operation(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "operation", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ The track property value to match or not match. """ return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @property @pulumi.getter def property(self) -> Optional[pulumi.Input[str]]: """ The track property to compare. Supported values are `Bitrate`, `FourCC`, `Language`, `Name` and `Type`. Check [documentation](https://docs.microsoft.com/en-us/azure/media-services/latest/filters-concept) for more details. """ return pulumi.get(self, "property") @property.setter def property(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "property", value) @pulumi.input_type class ContentKeyPolicyPolicyOptionArgs: def __init__(__self__, *, name: pulumi.Input[str], clear_key_configuration_enabled: Optional[pulumi.Input[bool]] = None, fairplay_configuration: Optional[pulumi.Input['ContentKeyPolicyPolicyOptionFairplayConfigurationArgs']] = None, open_restriction_enabled: Optional[pulumi.Input[bool]] = None, playready_configuration_licenses: Optional[pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyPolicyOptionPlayreadyConfigurationLicenseArgs']]]] = None, token_restriction: Optional[pulumi.Input['ContentKeyPolicyPolicyOptionTokenRestrictionArgs']] = None, widevine_configuration_template: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] name: The name which should be used for this Policy Option. :param pulumi.Input[bool] clear_key_configuration_enabled: Enable a configuration for non-DRM keys. :param pulumi.Input['ContentKeyPolicyPolicyOptionFairplayConfigurationArgs'] fairplay_configuration: A `fairplay_configuration` block as defined above. Check license requirements here https://docs.microsoft.com/en-us/azure/media-services/latest/fairplay-license-overview. :param pulumi.Input[bool] open_restriction_enabled: Enable an open restriction. License or key will be delivered on every request. :param pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyPolicyOptionPlayreadyConfigurationLicenseArgs']]] playready_configuration_licenses: One or more `playready_configuration_license` blocks as defined above. :param pulumi.Input['ContentKeyPolicyPolicyOptionTokenRestrictionArgs'] token_restriction: A `token_restriction` block as defined below. :param pulumi.Input[str] widevine_configuration_template: The Widevine template. """ pulumi.set(__self__, "name", name) if clear_key_configuration_enabled is not None: pulumi.set(__self__, "clear_key_configuration_enabled", clear_key_configuration_enabled) if fairplay_configuration is not None: pulumi.set(__self__, "fairplay_configuration", fairplay_configuration) if open_restriction_enabled is not None: pulumi.set(__self__, "open_restriction_enabled", open_restriction_enabled) if playready_configuration_licenses is not None: pulumi.set(__self__, "playready_configuration_licenses", playready_configuration_licenses) if token_restriction is not None: pulumi.set(__self__, "token_restriction", token_restriction) if widevine_configuration_template is not None: pulumi.set(__self__, "widevine_configuration_template", widevine_configuration_template) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ The name which should be used for this Policy Option. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter(name="clearKeyConfigurationEnabled") def clear_key_configuration_enabled(self) -> Optional[pulumi.Input[bool]]: """ Enable a configuration for non-DRM keys. """ return pulumi.get(self, "clear_key_configuration_enabled") @clear_key_configuration_enabled.setter def clear_key_configuration_enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "clear_key_configuration_enabled", value) @property @pulumi.getter(name="fairplayConfiguration") def fairplay_configuration(self) -> Optional[pulumi.Input['ContentKeyPolicyPolicyOptionFairplayConfigurationArgs']]: """ A `fairplay_configuration` block as defined above. Check license requirements here https://docs.microsoft.com/en-us/azure/media-services/latest/fairplay-license-overview. """ return pulumi.get(self, "fairplay_configuration") @fairplay_configuration.setter def fairplay_configuration(self, value: Optional[pulumi.Input['ContentKeyPolicyPolicyOptionFairplayConfigurationArgs']]): pulumi.set(self, "fairplay_configuration", value) @property @pulumi.getter(name="openRestrictionEnabled") def open_restriction_enabled(self) -> Optional[pulumi.Input[bool]]: """ Enable an open restriction. License or key will be delivered on every request. """ return pulumi.get(self, "open_restriction_enabled") @open_restriction_enabled.setter def open_restriction_enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "open_restriction_enabled", value) @property @pulumi.getter(name="playreadyConfigurationLicenses") def playready_configuration_licenses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyPolicyOptionPlayreadyConfigurationLicenseArgs']]]]: """ One or more `playready_configuration_license` blocks as defined above. """ return pulumi.get(self, "playready_configuration_licenses") @playready_configuration_licenses.setter def playready_configuration_licenses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyPolicyOptionPlayreadyConfigurationLicenseArgs']]]]): pulumi.set(self, "playready_configuration_licenses", value) @property @pulumi.getter(name="tokenRestriction") def token_restriction(self) -> Optional[pulumi.Input['ContentKeyPolicyPolicyOptionTokenRestrictionArgs']]: """ A `token_restriction` block as defined below. """ return pulumi.get(self, "token_restriction") @token_restriction.setter def token_restriction(self, value: Optional[pulumi.Input['ContentKeyPolicyPolicyOptionTokenRestrictionArgs']]): pulumi.set(self, "token_restriction", value) @property @pulumi.getter(name="widevineConfigurationTemplate") def widevine_configuration_template(self) -> Optional[pulumi.Input[str]]: """ The Widevine template. 
""" return pulumi.get(self, "widevine_configuration_template") @widevine_configuration_template.setter def widevine_configuration_template(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "widevine_configuration_template", value) @pulumi.input_type class ContentKeyPolicyPolicyOptionFairplayConfigurationArgs: def __init__(__self__, *, ask: Optional[pulumi.Input[str]] = None, offline_rental_configuration: Optional[pulumi.Input['ContentKeyPolicyPolicyOptionFairplayConfigurationOfflineRentalConfigurationArgs']] = None, pfx: Optional[pulumi.Input[str]] = None, pfx_password: Optional[pulumi.Input[str]] = None, rental_and_lease_key_type: Optional[pulumi.Input[str]] = None, rental_duration_seconds: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[str] ask: The key that must be used as FairPlay Application Secret key. :param pulumi.Input['ContentKeyPolicyPolicyOptionFairplayConfigurationOfflineRentalConfigurationArgs'] offline_rental_configuration: A `offline_rental_configuration` block as defined below. :param pulumi.Input[str] pfx: The Base64 representation of FairPlay certificate in PKCS 12 (pfx) format (including private key). :param pulumi.Input[str] pfx_password: The password encrypting FairPlay certificate in PKCS 12 (pfx) format. :param pulumi.Input[str] rental_and_lease_key_type: The rental and lease key type. Supported values are `DualExpiry`, `PersistentLimited`, `PersistentUnlimited` or `Undefined`. :param pulumi.Input[int] rental_duration_seconds: The rental duration. Must be greater than 0. """ if ask is not None: pulumi.set(__self__, "ask", ask) if offline_rental_configuration is not None: pulumi.set(__self__, "offline_rental_configuration", offline_rental_configuration) if pfx is not None: pulumi.set(__self__, "pfx", pfx) if pfx_password is not None: pulumi.set(__self__, "pfx_password", pfx_password) if rental_and_lease_key_type is not None: pulumi.set(__self__, "rental_and_lease_key_type", rental_and_lease_key_type) if rental_duration_seconds is not None: pulumi.set(__self__, "rental_duration_seconds", rental_duration_seconds) @property @pulumi.getter def ask(self) -> Optional[pulumi.Input[str]]: """ The key that must be used as FairPlay Application Secret key. """ return pulumi.get(self, "ask") @ask.setter def ask(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ask", value) @property @pulumi.getter(name="offlineRentalConfiguration") def offline_rental_configuration(self) -> Optional[pulumi.Input['ContentKeyPolicyPolicyOptionFairplayConfigurationOfflineRentalConfigurationArgs']]: """ A `offline_rental_configuration` block as defined below. """ return pulumi.get(self, "offline_rental_configuration") @offline_rental_configuration.setter def offline_rental_configuration(self, value: Optional[pulumi.Input['ContentKeyPolicyPolicyOptionFairplayConfigurationOfflineRentalConfigurationArgs']]): pulumi.set(self, "offline_rental_configuration", value) @property @pulumi.getter def pfx(self) -> Optional[pulumi.Input[str]]: """ The Base64 representation of FairPlay certificate in PKCS 12 (pfx) format (including private key). """ return pulumi.get(self, "pfx") @pfx.setter def pfx(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "pfx", value) @property @pulumi.getter(name="pfxPassword") def pfx_password(self) -> Optional[pulumi.Input[str]]: """ The password encrypting FairPlay certificate in PKCS 12 (pfx) format. 
""" return pulumi.get(self, "pfx_password") @pfx_password.setter def pfx_password(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "pfx_password", value) @property @pulumi.getter(name="rentalAndLeaseKeyType") def rental_and_lease_key_type(self) -> Optional[pulumi.Input[str]]: """ The rental and lease key type. Supported values are `DualExpiry`, `PersistentLimited`, `PersistentUnlimited` or `Undefined`. """ return pulumi.get(self, "rental_and_lease_key_type") @rental_and_lease_key_type.setter def rental_and_lease_key_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "rental_and_lease_key_type", value) @property @pulumi.getter(name="rentalDurationSeconds") def rental_duration_seconds(self) -> Optional[pulumi.Input[int]]: """ The rental duration. Must be greater than 0. """ return pulumi.get(self, "rental_duration_seconds") @rental_duration_seconds.setter def rental_duration_seconds(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "rental_duration_seconds", value) @pulumi.input_type class ContentKeyPolicyPolicyOptionFairplayConfigurationOfflineRentalConfigurationArgs: def __init__(__self__, *, playback_duration_seconds: Optional[pulumi.Input[int]] = None, storage_duration_seconds: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] playback_duration_seconds: Playback duration. :param pulumi.Input[int] storage_duration_seconds: Storage duration. """ if playback_duration_seconds is not None: pulumi.set(__self__, "playback_duration_seconds", playback_duration_seconds) if storage_duration_seconds is not None: pulumi.set(__self__, "storage_duration_seconds", storage_duration_seconds) @property @pulumi.getter(name="playbackDurationSeconds") def playback_duration_seconds(self) -> Optional[pulumi.Input[int]]: """ Playback duration. """ return pulumi.get(self, "playback_duration_seconds") @playback_duration_seconds.setter def playback_duration_seconds(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "playback_duration_seconds", value) @property @pulumi.getter(name="storageDurationSeconds") def storage_duration_seconds(self) -> Optional[pulumi.Input[int]]: """ Storage duration. """ return pulumi.get(self, "storage_duration_seconds") @storage_duration_seconds.setter def storage_duration_seconds(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "storage_duration_seconds", value) @pulumi.input_type class ContentKeyPolicyPolicyOptionPlayreadyConfigurationLicenseArgs: def __init__(__self__, *, allow_test_devices: Optional[pulumi.Input[bool]] = None, begin_date: Optional[pulumi.Input[str]] = None, content_key_location_from_header_enabled: Optional[pulumi.Input[bool]] = None, content_key_location_from_key_id: Optional[pulumi.Input[str]] = None, content_type: Optional[pulumi.Input[str]] = None, expiration_date: Optional[pulumi.Input[str]] = None, grace_period: Optional[pulumi.Input[str]] = None, license_type: Optional[pulumi.Input[str]] = None, play_right: Optional[pulumi.Input['ContentKeyPolicyPolicyOptionPlayreadyConfigurationLicensePlayRightArgs']] = None, relative_begin_date: Optional[pulumi.Input[str]] = None, relative_expiration_date: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[bool] allow_test_devices: A flag indicating whether test devices can use the license. :param pulumi.Input[str] begin_date: The begin date of license. :param pulumi.Input[bool] content_key_location_from_header_enabled: Specifies that the content key ID is in the PlayReady header. 
:param pulumi.Input[str] content_key_location_from_key_id: The content key ID. Specifies that the content key ID is specified in the PlayReady configuration. :param pulumi.Input[str] content_type: The PlayReady content type. Supported values are `UltraVioletDownload`, `UltraVioletStreaming` or `Unspecified`. :param pulumi.Input[str] expiration_date: The expiration date of license. :param pulumi.Input[str] grace_period: The grace period of license. :param pulumi.Input[str] license_type: The license type. Supported values are `NonPersistent` or `Persistent`. :param pulumi.Input['ContentKeyPolicyPolicyOptionPlayreadyConfigurationLicensePlayRightArgs'] play_right: A `play_right` block as defined above. :param pulumi.Input[str] relative_begin_date: The relative begin date of license. :param pulumi.Input[str] relative_expiration_date: The relative expiration date of license. """ if allow_test_devices is not None: pulumi.set(__self__, "allow_test_devices", allow_test_devices) if begin_date is not None: pulumi.set(__self__, "begin_date", begin_date) if content_key_location_from_header_enabled is not None: pulumi.set(__self__, "content_key_location_from_header_enabled", content_key_location_from_header_enabled) if content_key_location_from_key_id is not None: pulumi.set(__self__, "content_key_location_from_key_id", content_key_location_from_key_id) if content_type is not None: pulumi.set(__self__, "content_type", content_type) if expiration_date is not None: pulumi.set(__self__, "expiration_date", expiration_date) if grace_period is not None: pulumi.set(__self__, "grace_period", grace_period) if license_type is not None: pulumi.set(__self__, "license_type", license_type) if play_right is not None: pulumi.set(__self__, "play_right", play_right) if relative_begin_date is not None: pulumi.set(__self__, "relative_begin_date", relative_begin_date) if relative_expiration_date is not None: pulumi.set(__self__, "relative_expiration_date", relative_expiration_date) @property @pulumi.getter(name="allowTestDevices") def allow_test_devices(self) -> Optional[pulumi.Input[bool]]: """ A flag indicating whether test devices can use the license. """ return pulumi.get(self, "allow_test_devices") @allow_test_devices.setter def allow_test_devices(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "allow_test_devices", value) @property @pulumi.getter(name="beginDate") def begin_date(self) -> Optional[pulumi.Input[str]]: """ The begin date of license. """ return pulumi.get(self, "begin_date") @begin_date.setter def begin_date(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "begin_date", value) @property @pulumi.getter(name="contentKeyLocationFromHeaderEnabled") def content_key_location_from_header_enabled(self) -> Optional[pulumi.Input[bool]]: """ Specifies that the content key ID is in the PlayReady header. """ return pulumi.get(self, "content_key_location_from_header_enabled") @content_key_location_from_header_enabled.setter def content_key_location_from_header_enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "content_key_location_from_header_enabled", value) @property @pulumi.getter(name="contentKeyLocationFromKeyId") def content_key_location_from_key_id(self) -> Optional[pulumi.Input[str]]: """ The content key ID. Specifies that the content key ID is specified in the PlayReady configuration. 
""" return pulumi.get(self, "content_key_location_from_key_id") @content_key_location_from_key_id.setter def content_key_location_from_key_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "content_key_location_from_key_id", value) @property @pulumi.getter(name="contentType") def content_type(self) -> Optional[pulumi.Input[str]]: """ The PlayReady content type. Supported values are `UltraVioletDownload`, `UltraVioletStreaming` or `Unspecified`. """ return pulumi.get(self, "content_type") @content_type.setter def content_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "content_type", value) @property @pulumi.getter(name="expirationDate") def expiration_date(self) -> Optional[pulumi.Input[str]]: """ The expiration date of license. """ return pulumi.get(self, "expiration_date") @expiration_date.setter def expiration_date(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "expiration_date", value) @property @pulumi.getter(name="gracePeriod") def grace_period(self) -> Optional[pulumi.Input[str]]: """ The grace period of license. """ return pulumi.get(self, "grace_period") @grace_period.setter def grace_period(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "grace_period", value) @property @pulumi.getter(name="licenseType") def license_type(self) -> Optional[pulumi.Input[str]]: """ The license type. Supported values are `NonPersistent` or `Persistent`. """ return pulumi.get(self, "license_type") @license_type.setter def license_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "license_type", value) @property @pulumi.getter(name="playRight") def play_right(self) -> Optional[pulumi.Input['ContentKeyPolicyPolicyOptionPlayreadyConfigurationLicensePlayRightArgs']]: """ A `play_right` block as defined above. """ return pulumi.get(self, "play_right") @play_right.setter def play_right(self, value: Optional[pulumi.Input['ContentKeyPolicyPolicyOptionPlayreadyConfigurationLicensePlayRightArgs']]): pulumi.set(self, "play_right", value) @property @pulumi.getter(name="relativeBeginDate") def relative_begin_date(self) -> Optional[pulumi.Input[str]]: """ The relative begin date of license. """ return pulumi.get(self, "relative_begin_date") @relative_begin_date.setter def relative_begin_date(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "relative_begin_date", value) @property @pulumi.getter(name="relativeExpirationDate") def relative_expiration_date(self) -> Optional[pulumi.Input[str]]: """ The relative expiration date of license. 
""" return pulumi.get(self, "relative_expiration_date") @relative_expiration_date.setter def relative_expiration_date(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "relative_expiration_date", value) @pulumi.input_type class ContentKeyPolicyPolicyOptionPlayreadyConfigurationLicensePlayRightArgs: def __init__(__self__, *, agc_and_color_stripe_restriction: Optional[pulumi.Input[int]] = None, allow_passing_video_content_to_unknown_output: Optional[pulumi.Input[str]] = None, analog_video_opl: Optional[pulumi.Input[int]] = None, compressed_digital_audio_opl: Optional[pulumi.Input[int]] = None, digital_video_only_content_restriction: Optional[pulumi.Input[bool]] = None, first_play_expiration: Optional[pulumi.Input[str]] = None, image_constraint_for_analog_component_video_restriction: Optional[pulumi.Input[bool]] = None, image_constraint_for_analog_computer_monitor_restriction: Optional[pulumi.Input[bool]] = None, scms_restriction: Optional[pulumi.Input[int]] = None, uncompressed_digital_audio_opl: Optional[pulumi.Input[int]] = None, uncompressed_digital_video_opl: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] agc_and_color_stripe_restriction: Configures Automatic Gain Control (AGC) and Color Stripe in the license. Must be between 0 and 3 inclusive. :param pulumi.Input[str] allow_passing_video_content_to_unknown_output: Configures Unknown output handling settings of the license. Supported values are `Allowed`, `AllowedWithVideoConstriction` or `NotAllowed`. :param pulumi.Input[int] analog_video_opl: Specifies the output protection level for compressed digital audio. Supported values are 100, 150 or 200. :param pulumi.Input[int] compressed_digital_audio_opl: Specifies the output protection level for compressed digital audio.Supported values are 100, 150 or 200. :param pulumi.Input[bool] digital_video_only_content_restriction: Enables the Image Constraint For Analog Component Video Restriction in the license. :param pulumi.Input[str] first_play_expiration: The amount of time that the license is valid after the license is first used to play content. :param pulumi.Input[bool] image_constraint_for_analog_component_video_restriction: Enables the Image Constraint For Analog Component Video Restriction in the license. :param pulumi.Input[bool] image_constraint_for_analog_computer_monitor_restriction: Enables the Image Constraint For Analog Component Video Restriction in the license. :param pulumi.Input[int] scms_restriction: Configures the Serial Copy Management System (SCMS) in the license. Must be between 0 and 3 inclusive. :param pulumi.Input[int] uncompressed_digital_audio_opl: Specifies the output protection level for uncompressed digital audio. Supported values are 100, 150, 250 or 300. :param pulumi.Input[int] uncompressed_digital_video_opl: Specifies the output protection level for uncompressed digital video. Supported values are 100, 150, 250 or 300. 
""" if agc_and_color_stripe_restriction is not None: pulumi.set(__self__, "agc_and_color_stripe_restriction", agc_and_color_stripe_restriction) if allow_passing_video_content_to_unknown_output is not None: pulumi.set(__self__, "allow_passing_video_content_to_unknown_output", allow_passing_video_content_to_unknown_output) if analog_video_opl is not None: pulumi.set(__self__, "analog_video_opl", analog_video_opl) if compressed_digital_audio_opl is not None: pulumi.set(__self__, "compressed_digital_audio_opl", compressed_digital_audio_opl) if digital_video_only_content_restriction is not None: pulumi.set(__self__, "digital_video_only_content_restriction", digital_video_only_content_restriction) if first_play_expiration is not None: pulumi.set(__self__, "first_play_expiration", first_play_expiration) if image_constraint_for_analog_component_video_restriction is not None: pulumi.set(__self__, "image_constraint_for_analog_component_video_restriction", image_constraint_for_analog_component_video_restriction) if image_constraint_for_analog_computer_monitor_restriction is not None: pulumi.set(__self__, "image_constraint_for_analog_computer_monitor_restriction", image_constraint_for_analog_computer_monitor_restriction) if scms_restriction is not None: pulumi.set(__self__, "scms_restriction", scms_restriction) if uncompressed_digital_audio_opl is not None: pulumi.set(__self__, "uncompressed_digital_audio_opl", uncompressed_digital_audio_opl) if uncompressed_digital_video_opl is not None: pulumi.set(__self__, "uncompressed_digital_video_opl", uncompressed_digital_video_opl) @property @pulumi.getter(name="agcAndColorStripeRestriction") def agc_and_color_stripe_restriction(self) -> Optional[pulumi.Input[int]]: """ Configures Automatic Gain Control (AGC) and Color Stripe in the license. Must be between 0 and 3 inclusive. """ return pulumi.get(self, "agc_and_color_stripe_restriction") @agc_and_color_stripe_restriction.setter def agc_and_color_stripe_restriction(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "agc_and_color_stripe_restriction", value) @property @pulumi.getter(name="allowPassingVideoContentToUnknownOutput") def allow_passing_video_content_to_unknown_output(self) -> Optional[pulumi.Input[str]]: """ Configures Unknown output handling settings of the license. Supported values are `Allowed`, `AllowedWithVideoConstriction` or `NotAllowed`. """ return pulumi.get(self, "allow_passing_video_content_to_unknown_output") @allow_passing_video_content_to_unknown_output.setter def allow_passing_video_content_to_unknown_output(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "allow_passing_video_content_to_unknown_output", value) @property @pulumi.getter(name="analogVideoOpl") def analog_video_opl(self) -> Optional[pulumi.Input[int]]: """ Specifies the output protection level for compressed digital audio. Supported values are 100, 150 or 200. """ return pulumi.get(self, "analog_video_opl") @analog_video_opl.setter def analog_video_opl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "analog_video_opl", value) @property @pulumi.getter(name="compressedDigitalAudioOpl") def compressed_digital_audio_opl(self) -> Optional[pulumi.Input[int]]: """ Specifies the output protection level for compressed digital audio.Supported values are 100, 150 or 200. 
""" return pulumi.get(self, "compressed_digital_audio_opl") @compressed_digital_audio_opl.setter def compressed_digital_audio_opl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "compressed_digital_audio_opl", value) @property @pulumi.getter(name="digitalVideoOnlyContentRestriction") def digital_video_only_content_restriction(self) -> Optional[pulumi.Input[bool]]: """ Enables the Image Constraint For Analog Component Video Restriction in the license. """ return pulumi.get(self, "digital_video_only_content_restriction") @digital_video_only_content_restriction.setter def digital_video_only_content_restriction(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "digital_video_only_content_restriction", value) @property @pulumi.getter(name="firstPlayExpiration") def first_play_expiration(self) -> Optional[pulumi.Input[str]]: """ The amount of time that the license is valid after the license is first used to play content. """ return pulumi.get(self, "first_play_expiration") @first_play_expiration.setter def first_play_expiration(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "first_play_expiration", value) @property @pulumi.getter(name="imageConstraintForAnalogComponentVideoRestriction") def image_constraint_for_analog_component_video_restriction(self) -> Optional[pulumi.Input[bool]]: """ Enables the Image Constraint For Analog Component Video Restriction in the license. """ return pulumi.get(self, "image_constraint_for_analog_component_video_restriction") @image_constraint_for_analog_component_video_restriction.setter def image_constraint_for_analog_component_video_restriction(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "image_constraint_for_analog_component_video_restriction", value) @property @pulumi.getter(name="imageConstraintForAnalogComputerMonitorRestriction") def image_constraint_for_analog_computer_monitor_restriction(self) -> Optional[pulumi.Input[bool]]: """ Enables the Image Constraint For Analog Component Video Restriction in the license. """ return pulumi.get(self, "image_constraint_for_analog_computer_monitor_restriction") @image_constraint_for_analog_computer_monitor_restriction.setter def image_constraint_for_analog_computer_monitor_restriction(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "image_constraint_for_analog_computer_monitor_restriction", value) @property @pulumi.getter(name="scmsRestriction") def scms_restriction(self) -> Optional[pulumi.Input[int]]: """ Configures the Serial Copy Management System (SCMS) in the license. Must be between 0 and 3 inclusive. """ return pulumi.get(self, "scms_restriction") @scms_restriction.setter def scms_restriction(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "scms_restriction", value) @property @pulumi.getter(name="uncompressedDigitalAudioOpl") def uncompressed_digital_audio_opl(self) -> Optional[pulumi.Input[int]]: """ Specifies the output protection level for uncompressed digital audio. Supported values are 100, 150, 250 or 300. """ return pulumi.get(self, "uncompressed_digital_audio_opl") @uncompressed_digital_audio_opl.setter def uncompressed_digital_audio_opl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "uncompressed_digital_audio_opl", value) @property @pulumi.getter(name="uncompressedDigitalVideoOpl") def uncompressed_digital_video_opl(self) -> Optional[pulumi.Input[int]]: """ Specifies the output protection level for uncompressed digital video. Supported values are 100, 150, 250 or 300. 
""" return pulumi.get(self, "uncompressed_digital_video_opl") @uncompressed_digital_video_opl.setter def uncompressed_digital_video_opl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "uncompressed_digital_video_opl", value) @pulumi.input_type class ContentKeyPolicyPolicyOptionTokenRestrictionArgs: def __init__(__self__, *, audience: Optional[pulumi.Input[str]] = None, issuer: Optional[pulumi.Input[str]] = None, open_id_connect_discovery_document: Optional[pulumi.Input[str]] = None, primary_rsa_token_key_exponent: Optional[pulumi.Input[str]] = None, primary_rsa_token_key_modulus: Optional[pulumi.Input[str]] = None, primary_symmetric_token_key: Optional[pulumi.Input[str]] = None, primary_x509_token_key_raw: Optional[pulumi.Input[str]] = None, required_claims: Optional[pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyPolicyOptionTokenRestrictionRequiredClaimArgs']]]] = None, token_type: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] audience: The audience for the token. :param pulumi.Input[str] issuer: The token issuer. :param pulumi.Input[str] open_id_connect_discovery_document: The OpenID connect discovery document. :param pulumi.Input[str] primary_rsa_token_key_exponent: The RSA Parameter exponent. :param pulumi.Input[str] primary_rsa_token_key_modulus: The RSA Parameter modulus. :param pulumi.Input[str] primary_symmetric_token_key: The key value of the key. Specifies a symmetric key for token validation. :param pulumi.Input[str] primary_x509_token_key_raw: The raw data field of a certificate in PKCS 12 format (X509Certificate2 in .NET). Specifies a certificate for token validation. :param pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyPolicyOptionTokenRestrictionRequiredClaimArgs']]] required_claims: One or more `required_claim` blocks as defined above. :param pulumi.Input[str] token_type: The type of token. Supported values are `Jwt` or `Swt`. """ if audience is not None: pulumi.set(__self__, "audience", audience) if issuer is not None: pulumi.set(__self__, "issuer", issuer) if open_id_connect_discovery_document is not None: pulumi.set(__self__, "open_id_connect_discovery_document", open_id_connect_discovery_document) if primary_rsa_token_key_exponent is not None: pulumi.set(__self__, "primary_rsa_token_key_exponent", primary_rsa_token_key_exponent) if primary_rsa_token_key_modulus is not None: pulumi.set(__self__, "primary_rsa_token_key_modulus", primary_rsa_token_key_modulus) if primary_symmetric_token_key is not None: pulumi.set(__self__, "primary_symmetric_token_key", primary_symmetric_token_key) if primary_x509_token_key_raw is not None: pulumi.set(__self__, "primary_x509_token_key_raw", primary_x509_token_key_raw) if required_claims is not None: pulumi.set(__self__, "required_claims", required_claims) if token_type is not None: pulumi.set(__self__, "token_type", token_type) @property @pulumi.getter def audience(self) -> Optional[pulumi.Input[str]]: """ The audience for the token. """ return pulumi.get(self, "audience") @audience.setter def audience(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "audience", value) @property @pulumi.getter def issuer(self) -> Optional[pulumi.Input[str]]: """ The token issuer. """ return pulumi.get(self, "issuer") @issuer.setter def issuer(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "issuer", value) @property @pulumi.getter(name="openIdConnectDiscoveryDocument") def open_id_connect_discovery_document(self) -> Optional[pulumi.Input[str]]: """ The OpenID connect discovery document. 
""" return pulumi.get(self, "open_id_connect_discovery_document") @open_id_connect_discovery_document.setter def open_id_connect_discovery_document(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "open_id_connect_discovery_document", value) @property @pulumi.getter(name="primaryRsaTokenKeyExponent") def primary_rsa_token_key_exponent(self) -> Optional[pulumi.Input[str]]: """ The RSA Parameter exponent. """ return pulumi.get(self, "primary_rsa_token_key_exponent") @primary_rsa_token_key_exponent.setter def primary_rsa_token_key_exponent(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "primary_rsa_token_key_exponent", value) @property @pulumi.getter(name="primaryRsaTokenKeyModulus") def primary_rsa_token_key_modulus(self) -> Optional[pulumi.Input[str]]: """ The RSA Parameter modulus. """ return pulumi.get(self, "primary_rsa_token_key_modulus") @primary_rsa_token_key_modulus.setter def primary_rsa_token_key_modulus(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "primary_rsa_token_key_modulus", value) @property @pulumi.getter(name="primarySymmetricTokenKey") def primary_symmetric_token_key(self) -> Optional[pulumi.Input[str]]: """ The key value of the key. Specifies a symmetric key for token validation. """ return pulumi.get(self, "primary_symmetric_token_key") @primary_symmetric_token_key.setter def primary_symmetric_token_key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "primary_symmetric_token_key", value) @property @pulumi.getter(name="primaryX509TokenKeyRaw") def primary_x509_token_key_raw(self) -> Optional[pulumi.Input[str]]: """ The raw data field of a certificate in PKCS 12 format (X509Certificate2 in .NET). Specifies a certificate for token validation. """ return pulumi.get(self, "primary_x509_token_key_raw") @primary_x509_token_key_raw.setter def primary_x509_token_key_raw(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "primary_x509_token_key_raw", value) @property @pulumi.getter(name="requiredClaims") def required_claims(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyPolicyOptionTokenRestrictionRequiredClaimArgs']]]]: """ One or more `required_claim` blocks as defined above. """ return pulumi.get(self, "required_claims") @required_claims.setter def required_claims(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyPolicyOptionTokenRestrictionRequiredClaimArgs']]]]): pulumi.set(self, "required_claims", value) @property @pulumi.getter(name="tokenType") def token_type(self) -> Optional[pulumi.Input[str]]: """ The type of token. Supported values are `Jwt` or `Swt`. """ return pulumi.get(self, "token_type") @token_type.setter def token_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "token_type", value) @pulumi.input_type class ContentKeyPolicyPolicyOptionTokenRestrictionRequiredClaimArgs: def __init__(__self__, *, type: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] type: Token claim type. :param pulumi.Input[str] value: Token claim value. """ if type is not None: pulumi.set(__self__, "type", type) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: """ Token claim type. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ Token claim value. 
""" return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class JobInputAssetArgs: def __init__(__self__, *, name: pulumi.Input[str], label: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] name: The name of the input Asset. Changing this forces a new Media Job to be created. :param pulumi.Input[str] label: A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. """ pulumi.set(__self__, "name", name) if label is not None: pulumi.set(__self__, "label", label) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ The name of the input Asset. Changing this forces a new Media Job to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter def label(self) -> Optional[pulumi.Input[str]]: """ A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. """ return pulumi.get(self, "label") @label.setter def label(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "label", value) @pulumi.input_type class JobOutputAssetArgs: def __init__(__self__, *, name: pulumi.Input[str], label: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] name: The name of the output Asset. Changing this forces a new Media Job to be created. :param pulumi.Input[str] label: A label that is assigned to a JobOutput in order to help uniquely identify it. This is useful when your Transform has more than one TransformOutput, whereby your Job has more than one JobOutput. In such cases, when you submit the Job, you will add two or more JobOutputs, in the same order as TransformOutputs in the Transform. Subsequently, when you retrieve the Job, either through events or on a GET request, you can use the label to easily identify the JobOutput. If a label is not provided, a default value of '{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in the corresponding TransformOutput and the output index is the relative index of the this JobOutput within the Job. Note that this index is the same as the relative index of the corresponding TransformOutput within its Transform. """ pulumi.set(__self__, "name", name) if label is not None: pulumi.set(__self__, "label", label) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ The name of the output Asset. Changing this forces a new Media Job to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter def label(self) -> Optional[pulumi.Input[str]]: """ A label that is assigned to a JobOutput in order to help uniquely identify it. This is useful when your Transform has more than one TransformOutput, whereby your Job has more than one JobOutput. 
In such cases, when you submit the Job, you will add two or more JobOutputs, in the same order as TransformOutputs in the Transform. Subsequently, when you retrieve the Job, either through events or on a GET request, you can use the label to easily identify the JobOutput. If a label is not provided, a default value of '{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in the corresponding TransformOutput and the output index is the relative index of the this JobOutput within the Job. Note that this index is the same as the relative index of the corresponding TransformOutput within its Transform. """ return pulumi.get(self, "label") @label.setter def label(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "label", value) @pulumi.input_type class LiveEventCrossSiteAccessPolicyArgs: def __init__(__self__, *, client_access_policy: Optional[pulumi.Input[str]] = None, cross_domain_policy: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] client_access_policy: The content of clientaccesspolicy.xml used by Silverlight. :param pulumi.Input[str] cross_domain_policy: The content of the Cross Domain Policy (`crossdomain.xml`). """ if client_access_policy is not None: pulumi.set(__self__, "client_access_policy", client_access_policy) if cross_domain_policy is not None: pulumi.set(__self__, "cross_domain_policy", cross_domain_policy) @property @pulumi.getter(name="clientAccessPolicy") def client_access_policy(self) -> Optional[pulumi.Input[str]]: """ The content of clientaccesspolicy.xml used by Silverlight. """ return pulumi.get(self, "client_access_policy") @client_access_policy.setter def client_access_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "client_access_policy", value) @property @pulumi.getter(name="crossDomainPolicy") def cross_domain_policy(self) -> Optional[pulumi.Input[str]]: """ The content of the Cross Domain Policy (`crossdomain.xml`). """ return pulumi.get(self, "cross_domain_policy") @cross_domain_policy.setter def cross_domain_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cross_domain_policy", value) @pulumi.input_type class LiveEventEncodingArgs: def __init__(__self__, *, key_frame_interval: Optional[pulumi.Input[str]] = None, preset_name: Optional[pulumi.Input[str]] = None, stretch_mode: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] key_frame_interval: Use an `ISO 8601` time value between 0.5 to 20 seconds to specify the output fragment length for the video and audio tracks of an encoding live event. For example, use `PT2S` to indicate 2 seconds. For the video track it also defines the key frame interval, or the length of a GoP (group of pictures). If this value is not set for an encoding live event, the fragment duration defaults to 2 seconds. The value cannot be set for pass-through live events. :param pulumi.Input[str] preset_name: The optional encoding preset name, used when `type` is not `None`. If the `type` is set to `Standard`, then the default preset name is `Default720p`. Else if the `type` is set to `Premium1080p`, the default preset is `Default1080p`. Changing this forces a new resource to be created. :param pulumi.Input[str] stretch_mode: Specifies how the input video will be resized to fit the desired output resolution(s). Allowed values are `None`, `AutoFit` or `AutoSize`. Default is `None`. :param pulumi.Input[str] type: Live event type. Allowed values are `None`, `Premium1080p` or `Standard`. 
When set to `None`, the service simply passes through the incoming video and audio layer(s) to the output. When `type` is set to `Standard` or `Premium1080p`, a live encoder transcodes the incoming stream into multiple bitrates or layers. Defaults to `None`. Changing this forces a new resource to be created. """ if key_frame_interval is not None: pulumi.set(__self__, "key_frame_interval", key_frame_interval) if preset_name is not None: pulumi.set(__self__, "preset_name", preset_name) if stretch_mode is not None: pulumi.set(__self__, "stretch_mode", stretch_mode) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="keyFrameInterval") def key_frame_interval(self) -> Optional[pulumi.Input[str]]: """ Use an `ISO 8601` time value between 0.5 to 20 seconds to specify the output fragment length for the video and audio tracks of an encoding live event. For example, use `PT2S` to indicate 2 seconds. For the video track it also defines the key frame interval, or the length of a GoP (group of pictures). If this value is not set for an encoding live event, the fragment duration defaults to 2 seconds. The value cannot be set for pass-through live events. """ return pulumi.get(self, "key_frame_interval") @key_frame_interval.setter def key_frame_interval(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key_frame_interval", value) @property @pulumi.getter(name="presetName") def preset_name(self) -> Optional[pulumi.Input[str]]: """ The optional encoding preset name, used when `type` is not `None`. If the `type` is set to `Standard`, then the default preset name is `Default720p`. Else if the `type` is set to `Premium1080p`, the default preset is `Default1080p`. Changing this forces a new resource to be created. """ return pulumi.get(self, "preset_name") @preset_name.setter def preset_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "preset_name", value) @property @pulumi.getter(name="stretchMode") def stretch_mode(self) -> Optional[pulumi.Input[str]]: """ Specifies how the input video will be resized to fit the desired output resolution(s). Allowed values are `None`, `AutoFit` or `AutoSize`. Default is `None`. """ return pulumi.get(self, "stretch_mode") @stretch_mode.setter def stretch_mode(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "stretch_mode", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: """ Live event type. Allowed values are `None`, `Premium1080p` or `Standard`. When set to `None`, the service simply passes through the incoming video and audio layer(s) to the output. When `type` is set to `Standard` or `Premium1080p`, a live encoder transcodes the incoming stream into multiple bitrates or layers. Defaults to `None`. Changing this forces a new resource to be created. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @pulumi.input_type class LiveEventInputArgs: def __init__(__self__, *, access_token: Optional[pulumi.Input[str]] = None, endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventInputEndpointArgs']]]] = None, ip_access_control_allows: Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventInputIpAccessControlAllowArgs']]]] = None, key_frame_interval_duration: Optional[pulumi.Input[str]] = None, streaming_protocol: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] access_token: A UUID in string form to uniquely identify the stream. 
If omitted, the service will generate a unique value. Changing this forces a new value to be created. :param pulumi.Input[Sequence[pulumi.Input['LiveEventInputIpAccessControlAllowArgs']]] ip_access_control_allows: One or more `ip_access_control_allow` blocks as defined below. :param pulumi.Input[str] key_frame_interval_duration: ISO 8601 time duration of the key frame interval duration of the input. This value sets the `EXT-X-TARGETDURATION` property in the HLS output. For example, use PT2S to indicate 2 seconds. This field cannot be set when `type` is set to `Encoding`. :param pulumi.Input[str] streaming_protocol: The input protocol for the live event. Allowed values are `FragmentedMP4` and `RTMP`. Changing this forces a new resource to be created. """ if access_token is not None: pulumi.set(__self__, "access_token", access_token) if endpoints is not None: pulumi.set(__self__, "endpoints", endpoints) if ip_access_control_allows is not None: pulumi.set(__self__, "ip_access_control_allows", ip_access_control_allows) if key_frame_interval_duration is not None: pulumi.set(__self__, "key_frame_interval_duration", key_frame_interval_duration) if streaming_protocol is not None: pulumi.set(__self__, "streaming_protocol", streaming_protocol) @property @pulumi.getter(name="accessToken") def access_token(self) -> Optional[pulumi.Input[str]]: """ A UUID in string form to uniquely identify the stream. If omitted, the service will generate a unique value. Changing this forces a new value to be created. """ return pulumi.get(self, "access_token") @access_token.setter def access_token(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "access_token", value) @property @pulumi.getter def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventInputEndpointArgs']]]]: return pulumi.get(self, "endpoints") @endpoints.setter def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventInputEndpointArgs']]]]): pulumi.set(self, "endpoints", value) @property @pulumi.getter(name="ipAccessControlAllows") def ip_access_control_allows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventInputIpAccessControlAllowArgs']]]]: """ One or more `ip_access_control_allow` blocks as defined below. """ return pulumi.get(self, "ip_access_control_allows") @ip_access_control_allows.setter def ip_access_control_allows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventInputIpAccessControlAllowArgs']]]]): pulumi.set(self, "ip_access_control_allows", value) @property @pulumi.getter(name="keyFrameIntervalDuration") def key_frame_interval_duration(self) -> Optional[pulumi.Input[str]]: """ ISO 8601 time duration of the key frame interval duration of the input. This value sets the `EXT-X-TARGETDURATION` property in the HLS output. For example, use PT2S to indicate 2 seconds. This field cannot be set when `type` is set to `Encoding`. """ return pulumi.get(self, "key_frame_interval_duration") @key_frame_interval_duration.setter def key_frame_interval_duration(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key_frame_interval_duration", value) @property @pulumi.getter(name="streamingProtocol") def streaming_protocol(self) -> Optional[pulumi.Input[str]]: """ The input protocol for the live event. Allowed values are `FragmentedMP4` and `RTMP`. Changing this forces a new resource to be created. 
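Illustrative sketch only (addresses and names are placeholders): an RTMP ingest restricted to a single CIDR range, using the classes defined in this module:

    live_input = LiveEventInputArgs(
        streaming_protocol="RTMP",
        key_frame_interval_duration="PT2S",
        ip_access_control_allows=[
            LiveEventInputIpAccessControlAllowArgs(
                name="studio",               # assumed friendly name
                address="203.0.113.0",       # assumed documentation range
                subnet_prefix_length=24,
            ),
        ],
    )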
""" return pulumi.get(self, "streaming_protocol") @streaming_protocol.setter def streaming_protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "streaming_protocol", value) @pulumi.input_type class LiveEventInputEndpointArgs: def __init__(__self__, *, protocol: Optional[pulumi.Input[str]] = None, url: Optional[pulumi.Input[str]] = None): if protocol is not None: pulumi.set(__self__, "protocol", protocol) if url is not None: pulumi.set(__self__, "url", url) @property @pulumi.getter def protocol(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "protocol") @protocol.setter def protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "protocol", value) @property @pulumi.getter def url(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "url") @url.setter def url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url", value) @pulumi.input_type class LiveEventInputIpAccessControlAllowArgs: def __init__(__self__, *, address: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, subnet_prefix_length: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[str] address: The IP address or CIDR range. :param pulumi.Input[str] name: The friendly name for the IP address range. :param pulumi.Input[int] subnet_prefix_length: The subnet mask prefix length (see CIDR notation). """ if address is not None: pulumi.set(__self__, "address", address) if name is not None: pulumi.set(__self__, "name", name) if subnet_prefix_length is not None: pulumi.set(__self__, "subnet_prefix_length", subnet_prefix_length) @property @pulumi.getter def address(self) -> Optional[pulumi.Input[str]]: """ The IP address or CIDR range. """ return pulumi.get(self, "address") @address.setter def address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "address", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The friendly name for the IP address range. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="subnetPrefixLength") def subnet_prefix_length(self) -> Optional[pulumi.Input[int]]: """ The subnet mask prefix length (see CIDR notation). """ return pulumi.get(self, "subnet_prefix_length") @subnet_prefix_length.setter def subnet_prefix_length(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "subnet_prefix_length", value) @pulumi.input_type class LiveEventPreviewArgs: def __init__(__self__, *, alternative_media_id: Optional[pulumi.Input[str]] = None, endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventPreviewEndpointArgs']]]] = None, ip_access_control_allows: Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventPreviewIpAccessControlAllowArgs']]]] = None, preview_locator: Optional[pulumi.Input[str]] = None, streaming_policy_name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] alternative_media_id: An alternative media identifier associated with the streaming locator created for the preview. The identifier can be used in the `CustomLicenseAcquisitionUrlTemplate` or the `CustomKeyAcquisitionUrlTemplate` of the Streaming Policy specified in the `streaming_policy_name` field. Changing this forces a new resource to be created. :param pulumi.Input[Sequence[pulumi.Input['LiveEventPreviewIpAccessControlAllowArgs']]] ip_access_control_allows: One or more `ip_access_control_allow` blocks as defined above. 
:param pulumi.Input[str] preview_locator: The identifier of the preview locator in GUID format. Specifying this at creation time allows the caller to know the preview locator URL before the event is created. If omitted, the service will generate a random identifier. Changing this forces a new resource to be created. :param pulumi.Input[str] streaming_policy_name: The name of the streaming policy used for the live event preview. Changing this forces a new resource to be created. """ if alternative_media_id is not None: pulumi.set(__self__, "alternative_media_id", alternative_media_id) if endpoints is not None: pulumi.set(__self__, "endpoints", endpoints) if ip_access_control_allows is not None: pulumi.set(__self__, "ip_access_control_allows", ip_access_control_allows) if preview_locator is not None: pulumi.set(__self__, "preview_locator", preview_locator) if streaming_policy_name is not None: pulumi.set(__self__, "streaming_policy_name", streaming_policy_name) @property @pulumi.getter(name="alternativeMediaId") def alternative_media_id(self) -> Optional[pulumi.Input[str]]: """ An alternative media identifier associated with the streaming locator created for the preview. The identifier can be used in the `CustomLicenseAcquisitionUrlTemplate` or the `CustomKeyAcquisitionUrlTemplate` of the Streaming Policy specified in the `streaming_policy_name` field. Changing this forces a new resource to be created. """ return pulumi.get(self, "alternative_media_id") @alternative_media_id.setter def alternative_media_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "alternative_media_id", value) @property @pulumi.getter def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventPreviewEndpointArgs']]]]: return pulumi.get(self, "endpoints") @endpoints.setter def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventPreviewEndpointArgs']]]]): pulumi.set(self, "endpoints", value) @property @pulumi.getter(name="ipAccessControlAllows") def ip_access_control_allows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventPreviewIpAccessControlAllowArgs']]]]: """ One or more `ip_access_control_allow` blocks as defined above. """ return pulumi.get(self, "ip_access_control_allows") @ip_access_control_allows.setter def ip_access_control_allows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventPreviewIpAccessControlAllowArgs']]]]): pulumi.set(self, "ip_access_control_allows", value) @property @pulumi.getter(name="previewLocator") def preview_locator(self) -> Optional[pulumi.Input[str]]: """ The identifier of the preview locator in GUID format. Specifying this at creation time allows the caller to know the preview locator URL before the event is created. If omitted, the service will generate a random identifier. Changing this forces a new resource to be created. """ return pulumi.get(self, "preview_locator") @preview_locator.setter def preview_locator(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "preview_locator", value) @property @pulumi.getter(name="streamingPolicyName") def streaming_policy_name(self) -> Optional[pulumi.Input[str]]: """ The name of the streaming policy used for the live event preview. Changing this forces a new resource to be created. 
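Illustrative sketch only (identifiers are placeholders): pinning the preview locator up front so its URL is known before the event exists:

    preview = LiveEventPreviewArgs(
        preview_locator="11111111-2222-3333-4444-555555555555",  # assumed GUID
        ip_access_control_allows=[
            LiveEventPreviewIpAccessControlAllowArgs(address="203.0.113.10"),  # assumed address
        ],
    )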
""" return pulumi.get(self, "streaming_policy_name") @streaming_policy_name.setter def streaming_policy_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "streaming_policy_name", value) @pulumi.input_type class LiveEventPreviewEndpointArgs: def __init__(__self__, *, protocol: Optional[pulumi.Input[str]] = None, url: Optional[pulumi.Input[str]] = None): if protocol is not None: pulumi.set(__self__, "protocol", protocol) if url is not None: pulumi.set(__self__, "url", url) @property @pulumi.getter def protocol(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "protocol") @protocol.setter def protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "protocol", value) @property @pulumi.getter def url(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "url") @url.setter def url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url", value) @pulumi.input_type class LiveEventPreviewIpAccessControlAllowArgs: def __init__(__self__, *, address: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, subnet_prefix_length: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[str] address: The IP address or CIDR range. :param pulumi.Input[str] name: The friendly name for the IP address range. :param pulumi.Input[int] subnet_prefix_length: The subnet mask prefix length (see CIDR notation). """ if address is not None: pulumi.set(__self__, "address", address) if name is not None: pulumi.set(__self__, "name", name) if subnet_prefix_length is not None: pulumi.set(__self__, "subnet_prefix_length", subnet_prefix_length) @property @pulumi.getter def address(self) -> Optional[pulumi.Input[str]]: """ The IP address or CIDR range. """ return pulumi.get(self, "address") @address.setter def address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "address", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The friendly name for the IP address range. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="subnetPrefixLength") def subnet_prefix_length(self) -> Optional[pulumi.Input[int]]: """ The subnet mask prefix length (see CIDR notation). """ return pulumi.get(self, "subnet_prefix_length") @subnet_prefix_length.setter def subnet_prefix_length(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "subnet_prefix_length", value) @pulumi.input_type class ServiceAccountIdentityArgs: def __init__(__self__, *, type: pulumi.Input[str], principal_id: Optional[pulumi.Input[str]] = None, tenant_id: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] type: Specifies the type of Managed Service Identity that should be configured on this Media Services Account. Possible value is `SystemAssigned`. :param pulumi.Input[str] principal_id: The Principal ID associated with this Managed Service Identity. :param pulumi.Input[str] tenant_id: The Tenant ID associated with this Managed Service Identity. """ pulumi.set(__self__, "type", type) if principal_id is not None: pulumi.set(__self__, "principal_id", principal_id) if tenant_id is not None: pulumi.set(__self__, "tenant_id", tenant_id) @property @pulumi.getter def type(self) -> pulumi.Input[str]: """ Specifies the type of Managed Service Identity that should be configured on this Media Services Account. Possible value is `SystemAssigned`. 
""" return pulumi.get(self, "type") @type.setter def type(self, value: pulumi.Input[str]): pulumi.set(self, "type", value) @property @pulumi.getter(name="principalId") def principal_id(self) -> Optional[pulumi.Input[str]]: """ The Principal ID associated with this Managed Service Identity. """ return pulumi.get(self, "principal_id") @principal_id.setter def principal_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "principal_id", value) @property @pulumi.getter(name="tenantId") def tenant_id(self) -> Optional[pulumi.Input[str]]: """ The Tenant ID associated with this Managed Service Identity. """ return pulumi.get(self, "tenant_id") @tenant_id.setter def tenant_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "tenant_id", value) @pulumi.input_type class ServiceAccountKeyDeliveryAccessControlArgs: def __init__(__self__, *, default_action: Optional[pulumi.Input[str]] = None, ip_allow_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[str] default_action: The Default Action to use when no rules match from `ip_allow_list`. Possible values are `Allow` and `Deny`. :param pulumi.Input[Sequence[pulumi.Input[str]]] ip_allow_lists: One or more IP Addresses, or CIDR Blocks which should be able to access the Key Delivery. """ if default_action is not None: pulumi.set(__self__, "default_action", default_action) if ip_allow_lists is not None: pulumi.set(__self__, "ip_allow_lists", ip_allow_lists) @property @pulumi.getter(name="defaultAction") def default_action(self) -> Optional[pulumi.Input[str]]: """ The Default Action to use when no rules match from `ip_allow_list`. Possible values are `Allow` and `Deny`. """ return pulumi.get(self, "default_action") @default_action.setter def default_action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "default_action", value) @property @pulumi.getter(name="ipAllowLists") def ip_allow_lists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ One or more IP Addresses, or CIDR Blocks which should be able to access the Key Delivery. """ return pulumi.get(self, "ip_allow_lists") @ip_allow_lists.setter def ip_allow_lists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ip_allow_lists", value) @pulumi.input_type class ServiceAccountStorageAccountArgs: def __init__(__self__, *, id: pulumi.Input[str], is_primary: Optional[pulumi.Input[bool]] = None): """ :param pulumi.Input[str] id: Specifies the ID of the Storage Account that will be associated with the Media Services instance. :param pulumi.Input[bool] is_primary: Specifies whether the storage account should be the primary account or not. Defaults to `false`. """ pulumi.set(__self__, "id", id) if is_primary is not None: pulumi.set(__self__, "is_primary", is_primary) @property @pulumi.getter def id(self) -> pulumi.Input[str]: """ Specifies the ID of the Storage Account that will be associated with the Media Services instance. """ return pulumi.get(self, "id") @id.setter def id(self, value: pulumi.Input[str]): pulumi.set(self, "id", value) @property @pulumi.getter(name="isPrimary") def is_primary(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the storage account should be the primary account or not. Defaults to `false`. 
""" return pulumi.get(self, "is_primary") @is_primary.setter def is_primary(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "is_primary", value) @pulumi.input_type class StreamingEndpointAccessControlArgs: def __init__(__self__, *, akamai_signature_header_authentication_keys: Optional[pulumi.Input[Sequence[pulumi.Input['StreamingEndpointAccessControlAkamaiSignatureHeaderAuthenticationKeyArgs']]]] = None, ip_allows: Optional[pulumi.Input[Sequence[pulumi.Input['StreamingEndpointAccessControlIpAllowArgs']]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input['StreamingEndpointAccessControlAkamaiSignatureHeaderAuthenticationKeyArgs']]] akamai_signature_header_authentication_keys: One or more `akamai_signature_header_authentication_key` blocks as defined below. :param pulumi.Input[Sequence[pulumi.Input['StreamingEndpointAccessControlIpAllowArgs']]] ip_allows: A `ip` block as defined below. """ if akamai_signature_header_authentication_keys is not None: pulumi.set(__self__, "akamai_signature_header_authentication_keys", akamai_signature_header_authentication_keys) if ip_allows is not None: pulumi.set(__self__, "ip_allows", ip_allows) @property @pulumi.getter(name="akamaiSignatureHeaderAuthenticationKeys") def akamai_signature_header_authentication_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StreamingEndpointAccessControlAkamaiSignatureHeaderAuthenticationKeyArgs']]]]: """ One or more `akamai_signature_header_authentication_key` blocks as defined below. """ return pulumi.get(self, "akamai_signature_header_authentication_keys") @akamai_signature_header_authentication_keys.setter def akamai_signature_header_authentication_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StreamingEndpointAccessControlAkamaiSignatureHeaderAuthenticationKeyArgs']]]]): pulumi.set(self, "akamai_signature_header_authentication_keys", value) @property @pulumi.getter(name="ipAllows") def ip_allows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StreamingEndpointAccessControlIpAllowArgs']]]]: """ A `ip` block as defined below. """ return pulumi.get(self, "ip_allows") @ip_allows.setter def ip_allows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StreamingEndpointAccessControlIpAllowArgs']]]]): pulumi.set(self, "ip_allows", value) @pulumi.input_type class StreamingEndpointAccessControlAkamaiSignatureHeaderAuthenticationKeyArgs: def __init__(__self__, *, base64_key: Optional[pulumi.Input[str]] = None, expiration: Optional[pulumi.Input[str]] = None, identifier: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] base64_key: Authentication key. :param pulumi.Input[str] expiration: The expiration time of the authentication key. :param pulumi.Input[str] identifier: Identifier of the key. """ if base64_key is not None: pulumi.set(__self__, "base64_key", base64_key) if expiration is not None: pulumi.set(__self__, "expiration", expiration) if identifier is not None: pulumi.set(__self__, "identifier", identifier) @property @pulumi.getter(name="base64Key") def base64_key(self) -> Optional[pulumi.Input[str]]: """ Authentication key. """ return pulumi.get(self, "base64_key") @base64_key.setter def base64_key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "base64_key", value) @property @pulumi.getter def expiration(self) -> Optional[pulumi.Input[str]]: """ The expiration time of the authentication key. 
""" return pulumi.get(self, "expiration") @expiration.setter def expiration(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "expiration", value) @property @pulumi.getter def identifier(self) -> Optional[pulumi.Input[str]]: """ Identifier of the key. """ return pulumi.get(self, "identifier") @identifier.setter def identifier(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "identifier", value) @pulumi.input_type class StreamingEndpointAccessControlIpAllowArgs: def __init__(__self__, *, address: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, subnet_prefix_length: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[str] address: The IP address to allow. :param pulumi.Input[str] name: The friendly name for the IP address range. :param pulumi.Input[int] subnet_prefix_length: The subnet mask prefix length (see CIDR notation). """ if address is not None: pulumi.set(__self__, "address", address) if name is not None: pulumi.set(__self__, "name", name) if subnet_prefix_length is not None: pulumi.set(__self__, "subnet_prefix_length", subnet_prefix_length) @property @pulumi.getter def address(self) -> Optional[pulumi.Input[str]]: """ The IP address to allow. """ return pulumi.get(self, "address") @address.setter def address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "address", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The friendly name for the IP address range. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="subnetPrefixLength") def subnet_prefix_length(self) -> Optional[pulumi.Input[int]]: """ The subnet mask prefix length (see CIDR notation). """ return pulumi.get(self, "subnet_prefix_length") @subnet_prefix_length.setter def subnet_prefix_length(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "subnet_prefix_length", value) @pulumi.input_type class StreamingEndpointCrossSiteAccessPolicyArgs: def __init__(__self__, *, client_access_policy: Optional[pulumi.Input[str]] = None, cross_domain_policy: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] client_access_policy: The content of clientaccesspolicy.xml used by Silverlight. :param pulumi.Input[str] cross_domain_policy: The content of crossdomain.xml used by Silverlight. """ if client_access_policy is not None: pulumi.set(__self__, "client_access_policy", client_access_policy) if cross_domain_policy is not None: pulumi.set(__self__, "cross_domain_policy", cross_domain_policy) @property @pulumi.getter(name="clientAccessPolicy") def client_access_policy(self) -> Optional[pulumi.Input[str]]: """ The content of clientaccesspolicy.xml used by Silverlight. """ return pulumi.get(self, "client_access_policy") @client_access_policy.setter def client_access_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "client_access_policy", value) @property @pulumi.getter(name="crossDomainPolicy") def cross_domain_policy(self) -> Optional[pulumi.Input[str]]: """ The content of crossdomain.xml used by Silverlight. 
""" return pulumi.get(self, "cross_domain_policy") @cross_domain_policy.setter def cross_domain_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cross_domain_policy", value) @pulumi.input_type class StreamingLocatorContentKeyArgs: def __init__(__self__, *, content_key_id: Optional[pulumi.Input[str]] = None, label_reference_in_streaming_policy: Optional[pulumi.Input[str]] = None, policy_name: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] content_key_id: ID of Content Key. Changing this forces a new Streaming Locator to be created. :param pulumi.Input[str] label_reference_in_streaming_policy: Label of Content Key as specified in the Streaming Policy. Changing this forces a new Streaming Locator to be created. :param pulumi.Input[str] policy_name: Content Key Policy used by Content Key. Changing this forces a new Streaming Locator to be created. :param pulumi.Input[str] type: Encryption type of Content Key. Supported values are `CommonEncryptionCbcs`, `CommonEncryptionCenc` or `EnvelopeEncryption`. Changing this forces a new Streaming Locator to be created. :param pulumi.Input[str] value: Value of Content Key. Changing this forces a new Streaming Locator to be created. """ if content_key_id is not None: pulumi.set(__self__, "content_key_id", content_key_id) if label_reference_in_streaming_policy is not None: pulumi.set(__self__, "label_reference_in_streaming_policy", label_reference_in_streaming_policy) if policy_name is not None: pulumi.set(__self__, "policy_name", policy_name) if type is not None: pulumi.set(__self__, "type", type) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter(name="contentKeyId") def content_key_id(self) -> Optional[pulumi.Input[str]]: """ ID of Content Key. Changing this forces a new Streaming Locator to be created. """ return pulumi.get(self, "content_key_id") @content_key_id.setter def content_key_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "content_key_id", value) @property @pulumi.getter(name="labelReferenceInStreamingPolicy") def label_reference_in_streaming_policy(self) -> Optional[pulumi.Input[str]]: """ Label of Content Key as specified in the Streaming Policy. Changing this forces a new Streaming Locator to be created. """ return pulumi.get(self, "label_reference_in_streaming_policy") @label_reference_in_streaming_policy.setter def label_reference_in_streaming_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "label_reference_in_streaming_policy", value) @property @pulumi.getter(name="policyName") def policy_name(self) -> Optional[pulumi.Input[str]]: """ Content Key Policy used by Content Key. Changing this forces a new Streaming Locator to be created. """ return pulumi.get(self, "policy_name") @policy_name.setter def policy_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "policy_name", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: """ Encryption type of Content Key. Supported values are `CommonEncryptionCbcs`, `CommonEncryptionCenc` or `EnvelopeEncryption`. Changing this forces a new Streaming Locator to be created. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ Value of Content Key. Changing this forces a new Streaming Locator to be created. 
""" return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class StreamingPolicyCommonEncryptionCbcsArgs: def __init__(__self__, *, default_content_key: Optional[pulumi.Input['StreamingPolicyCommonEncryptionCbcsDefaultContentKeyArgs']] = None, drm_fairplay: Optional[pulumi.Input['StreamingPolicyCommonEncryptionCbcsDrmFairplayArgs']] = None, enabled_protocols: Optional[pulumi.Input['StreamingPolicyCommonEncryptionCbcsEnabledProtocolsArgs']] = None): """ :param pulumi.Input['StreamingPolicyCommonEncryptionCbcsDefaultContentKeyArgs'] default_content_key: A `default_content_key` block as defined below. Changing this forces a new Streaming Policy to be created. :param pulumi.Input['StreamingPolicyCommonEncryptionCbcsDrmFairplayArgs'] drm_fairplay: A `drm_fairplay` block as defined below. Changing this forces a new Streaming Policy to be created. :param pulumi.Input['StreamingPolicyCommonEncryptionCbcsEnabledProtocolsArgs'] enabled_protocols: A `enabled_protocols` block as defined below. Changing this forces a new Streaming Policy to be created. """ if default_content_key is not None: pulumi.set(__self__, "default_content_key", default_content_key) if drm_fairplay is not None: pulumi.set(__self__, "drm_fairplay", drm_fairplay) if enabled_protocols is not None: pulumi.set(__self__, "enabled_protocols", enabled_protocols) @property @pulumi.getter(name="defaultContentKey") def default_content_key(self) -> Optional[pulumi.Input['StreamingPolicyCommonEncryptionCbcsDefaultContentKeyArgs']]: """ A `default_content_key` block as defined below. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "default_content_key") @default_content_key.setter def default_content_key(self, value: Optional[pulumi.Input['StreamingPolicyCommonEncryptionCbcsDefaultContentKeyArgs']]): pulumi.set(self, "default_content_key", value) @property @pulumi.getter(name="drmFairplay") def drm_fairplay(self) -> Optional[pulumi.Input['StreamingPolicyCommonEncryptionCbcsDrmFairplayArgs']]: """ A `drm_fairplay` block as defined below. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "drm_fairplay") @drm_fairplay.setter def drm_fairplay(self, value: Optional[pulumi.Input['StreamingPolicyCommonEncryptionCbcsDrmFairplayArgs']]): pulumi.set(self, "drm_fairplay", value) @property @pulumi.getter(name="enabledProtocols") def enabled_protocols(self) -> Optional[pulumi.Input['StreamingPolicyCommonEncryptionCbcsEnabledProtocolsArgs']]: """ A `enabled_protocols` block as defined below. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "enabled_protocols") @enabled_protocols.setter def enabled_protocols(self, value: Optional[pulumi.Input['StreamingPolicyCommonEncryptionCbcsEnabledProtocolsArgs']]): pulumi.set(self, "enabled_protocols", value) @pulumi.input_type class StreamingPolicyCommonEncryptionCbcsDefaultContentKeyArgs: def __init__(__self__, *, label: Optional[pulumi.Input[str]] = None, policy_name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] label: Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[str] policy_name: Policy used by Default Key. Changing this forces a new Streaming Policy to be created. 
""" if label is not None: pulumi.set(__self__, "label", label) if policy_name is not None: pulumi.set(__self__, "policy_name", policy_name) @property @pulumi.getter def label(self) -> Optional[pulumi.Input[str]]: """ Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "label") @label.setter def label(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "label", value) @property @pulumi.getter(name="policyName") def policy_name(self) -> Optional[pulumi.Input[str]]: """ Policy used by Default Key. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "policy_name") @policy_name.setter def policy_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "policy_name", value) @pulumi.input_type class StreamingPolicyCommonEncryptionCbcsDrmFairplayArgs: def __init__(__self__, *, allow_persistent_license: Optional[pulumi.Input[bool]] = None, custom_license_acquisition_url_template: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[bool] allow_persistent_license: All license to be persistent or not. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[str] custom_license_acquisition_url_template: Template for the URL of the custom service delivering licenses to end user players. Not required when using Azure Media Services for issuing licenses. The template supports replaceable tokens that the service will update at runtime with the value specific to the request. The currently supported token values are `{AlternativeMediaId}`, which is replaced with the value of `StreamingLocatorId.AlternativeMediaId`, and `{ContentKeyId}`, which is replaced with the value of identifier of the key being requested. Changing this forces a new Streaming Policy to be created. """ if allow_persistent_license is not None: pulumi.set(__self__, "allow_persistent_license", allow_persistent_license) if custom_license_acquisition_url_template is not None: pulumi.set(__self__, "custom_license_acquisition_url_template", custom_license_acquisition_url_template) @property @pulumi.getter(name="allowPersistentLicense") def allow_persistent_license(self) -> Optional[pulumi.Input[bool]]: """ All license to be persistent or not. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "allow_persistent_license") @allow_persistent_license.setter def allow_persistent_license(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "allow_persistent_license", value) @property @pulumi.getter(name="customLicenseAcquisitionUrlTemplate") def custom_license_acquisition_url_template(self) -> Optional[pulumi.Input[str]]: """ Template for the URL of the custom service delivering licenses to end user players. Not required when using Azure Media Services for issuing licenses. The template supports replaceable tokens that the service will update at runtime with the value specific to the request. The currently supported token values are `{AlternativeMediaId}`, which is replaced with the value of `StreamingLocatorId.AlternativeMediaId`, and `{ContentKeyId}`, which is replaced with the value of identifier of the key being requested. Changing this forces a new Streaming Policy to be created. 
""" return pulumi.get(self, "custom_license_acquisition_url_template") @custom_license_acquisition_url_template.setter def custom_license_acquisition_url_template(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "custom_license_acquisition_url_template", value) @pulumi.input_type class StreamingPolicyCommonEncryptionCbcsEnabledProtocolsArgs: def __init__(__self__, *, dash: Optional[pulumi.Input[bool]] = None, download: Optional[pulumi.Input[bool]] = None, hls: Optional[pulumi.Input[bool]] = None, smooth_streaming: Optional[pulumi.Input[bool]] = None): """ :param pulumi.Input[bool] dash: Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[bool] download: Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[bool] hls: Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[bool] smooth_streaming: Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. """ if dash is not None: pulumi.set(__self__, "dash", dash) if download is not None: pulumi.set(__self__, "download", download) if hls is not None: pulumi.set(__self__, "hls", hls) if smooth_streaming is not None: pulumi.set(__self__, "smooth_streaming", smooth_streaming) @property @pulumi.getter def dash(self) -> Optional[pulumi.Input[bool]]: """ Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "dash") @dash.setter def dash(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "dash", value) @property @pulumi.getter def download(self) -> Optional[pulumi.Input[bool]]: """ Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "download") @download.setter def download(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "download", value) @property @pulumi.getter def hls(self) -> Optional[pulumi.Input[bool]]: """ Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "hls") @hls.setter def hls(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "hls", value) @property @pulumi.getter(name="smoothStreaming") def smooth_streaming(self) -> Optional[pulumi.Input[bool]]: """ Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "smooth_streaming") @smooth_streaming.setter def smooth_streaming(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "smooth_streaming", value) @pulumi.input_type class StreamingPolicyCommonEncryptionCencArgs: def __init__(__self__, *, default_content_key: Optional[pulumi.Input['StreamingPolicyCommonEncryptionCencDefaultContentKeyArgs']] = None, drm_playready: Optional[pulumi.Input['StreamingPolicyCommonEncryptionCencDrmPlayreadyArgs']] = None, drm_widevine_custom_license_acquisition_url_template: Optional[pulumi.Input[str]] = None, enabled_protocols: Optional[pulumi.Input['StreamingPolicyCommonEncryptionCencEnabledProtocolsArgs']] = None): """ :param pulumi.Input['StreamingPolicyCommonEncryptionCencDefaultContentKeyArgs'] default_content_key: A `default_content_key` block as defined below. Changing this forces a new Streaming Policy to be created. :param pulumi.Input['StreamingPolicyCommonEncryptionCencDrmPlayreadyArgs'] drm_playready: A `drm_playready` block as defined below. 
Changing this forces a new Streaming Policy to be created. :param pulumi.Input[str] drm_widevine_custom_license_acquisition_url_template: Template for the URL of the custom service delivering licenses to end user players. Not required when using Azure Media Services for issuing licenses. The template supports replaceable tokens that the service will update at runtime with the value specific to the request. The currently supported token values are `{AlternativeMediaId}`, which is replaced with the value of `StreamingLocatorId.AlternativeMediaId`, and `{ContentKeyId}`, which is replaced with the value of the identifier of the key being requested. Changing this forces a new Streaming Policy to be created. :param pulumi.Input['StreamingPolicyCommonEncryptionCencEnabledProtocolsArgs'] enabled_protocols: An `enabled_protocols` block as defined below. Changing this forces a new Streaming Policy to be created. """ if default_content_key is not None: pulumi.set(__self__, "default_content_key", default_content_key) if drm_playready is not None: pulumi.set(__self__, "drm_playready", drm_playready) if drm_widevine_custom_license_acquisition_url_template is not None: pulumi.set(__self__, "drm_widevine_custom_license_acquisition_url_template", drm_widevine_custom_license_acquisition_url_template) if enabled_protocols is not None: pulumi.set(__self__, "enabled_protocols", enabled_protocols) @property @pulumi.getter(name="defaultContentKey") def default_content_key(self) -> Optional[pulumi.Input['StreamingPolicyCommonEncryptionCencDefaultContentKeyArgs']]: """ A `default_content_key` block as defined below. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "default_content_key") @default_content_key.setter def default_content_key(self, value: Optional[pulumi.Input['StreamingPolicyCommonEncryptionCencDefaultContentKeyArgs']]): pulumi.set(self, "default_content_key", value) @property @pulumi.getter(name="drmPlayready") def drm_playready(self) -> Optional[pulumi.Input['StreamingPolicyCommonEncryptionCencDrmPlayreadyArgs']]: """ A `drm_playready` block as defined below. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "drm_playready") @drm_playready.setter def drm_playready(self, value: Optional[pulumi.Input['StreamingPolicyCommonEncryptionCencDrmPlayreadyArgs']]): pulumi.set(self, "drm_playready", value) @property @pulumi.getter(name="drmWidevineCustomLicenseAcquisitionUrlTemplate") def drm_widevine_custom_license_acquisition_url_template(self) -> Optional[pulumi.Input[str]]: """ Template for the URL of the custom service delivering licenses to end user players. Not required when using Azure Media Services for issuing licenses. The template supports replaceable tokens that the service will update at runtime with the value specific to the request. The currently supported token values are `{AlternativeMediaId}`, which is replaced with the value of `StreamingLocatorId.AlternativeMediaId`, and `{ContentKeyId}`, which is replaced with the value of the identifier of the key being requested. Changing this forces a new Streaming Policy to be created. 
""" return pulumi.get(self, "drm_widevine_custom_license_acquisition_url_template") @drm_widevine_custom_license_acquisition_url_template.setter def drm_widevine_custom_license_acquisition_url_template(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "drm_widevine_custom_license_acquisition_url_template", value) @property @pulumi.getter(name="enabledProtocols") def enabled_protocols(self) -> Optional[pulumi.Input['StreamingPolicyCommonEncryptionCencEnabledProtocolsArgs']]: """ An `enabled_protocols` block as defined below. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "enabled_protocols") @enabled_protocols.setter def enabled_protocols(self, value: Optional[pulumi.Input['StreamingPolicyCommonEncryptionCencEnabledProtocolsArgs']]): pulumi.set(self, "enabled_protocols", value) @pulumi.input_type class StreamingPolicyCommonEncryptionCencDefaultContentKeyArgs: def __init__(__self__, *, label: Optional[pulumi.Input[str]] = None, policy_name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] label: Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[str] policy_name: Policy used by Default Key. Changing this forces a new Streaming Policy to be created. """ if label is not None: pulumi.set(__self__, "label", label) if policy_name is not None: pulumi.set(__self__, "policy_name", policy_name) @property @pulumi.getter def label(self) -> Optional[pulumi.Input[str]]: """ Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "label") @label.setter def label(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "label", value) @property @pulumi.getter(name="policyName") def policy_name(self) -> Optional[pulumi.Input[str]]: """ Policy used by Default Key. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "policy_name") @policy_name.setter def policy_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "policy_name", value) @pulumi.input_type class StreamingPolicyCommonEncryptionCencDrmPlayreadyArgs: def __init__(__self__, *, custom_attributes: Optional[pulumi.Input[str]] = None, custom_license_acquisition_url_template: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] custom_attributes: Custom attributes for PlayReady. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[str] custom_license_acquisition_url_template: Template for the URL of the custom service delivering licenses to end user players. Not required when using Azure Media Services for issuing licenses. The template supports replaceable tokens that the service will update at runtime with the value specific to the request. The currently supported token values are `{AlternativeMediaId}`, which is replaced with the value of `StreamingLocatorId.AlternativeMediaId`, and `{ContentKeyId}`, which is replaced with the value of the identifier of the key being requested. Changing this forces a new Streaming Policy to be created. 
""" if custom_attributes is not None: pulumi.set(__self__, "custom_attributes", custom_attributes) if custom_license_acquisition_url_template is not None: pulumi.set(__self__, "custom_license_acquisition_url_template", custom_license_acquisition_url_template) @property @pulumi.getter(name="customAttributes") def custom_attributes(self) -> Optional[pulumi.Input[str]]: """ Custom attributes for PlayReady. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "custom_attributes") @custom_attributes.setter def custom_attributes(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "custom_attributes", value) @property @pulumi.getter(name="customLicenseAcquisitionUrlTemplate") def custom_license_acquisition_url_template(self) -> Optional[pulumi.Input[str]]: """ Template for the URL of the custom service delivering licenses to end user players. Not required when using Azure Media Services for issuing licenses. The template supports replaceable tokens that the service will update at runtime with the value specific to the request. The currently supported token values are `{AlternativeMediaId}`, which is replaced with the value of `StreamingLocatorId.AlternativeMediaId`, and `{ContentKeyId}`, which is replaced with the value of the identifier of the key being requested. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "custom_license_acquisition_url_template") @custom_license_acquisition_url_template.setter def custom_license_acquisition_url_template(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "custom_license_acquisition_url_template", value) @pulumi.input_type class StreamingPolicyCommonEncryptionCencEnabledProtocolsArgs: def __init__(__self__, *, dash: Optional[pulumi.Input[bool]] = None, download: Optional[pulumi.Input[bool]] = None, hls: Optional[pulumi.Input[bool]] = None, smooth_streaming: Optional[pulumi.Input[bool]] = None): """ :param pulumi.Input[bool] dash: Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[bool] download: Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[bool] hls: Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[bool] smooth_streaming: Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. """ if dash is not None: pulumi.set(__self__, "dash", dash) if download is not None: pulumi.set(__self__, "download", download) if hls is not None: pulumi.set(__self__, "hls", hls) if smooth_streaming is not None: pulumi.set(__self__, "smooth_streaming", smooth_streaming) @property @pulumi.getter def dash(self) -> Optional[pulumi.Input[bool]]: """ Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "dash") @dash.setter def dash(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "dash", value) @property @pulumi.getter def download(self) -> Optional[pulumi.Input[bool]]: """ Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "download") @download.setter def download(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "download", value) @property @pulumi.getter def hls(self) -> Optional[pulumi.Input[bool]]: """ Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. 
""" return pulumi.get(self, "hls") @hls.setter def hls(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "hls", value) @property @pulumi.getter(name="smoothStreaming") def smooth_streaming(self) -> Optional[pulumi.Input[bool]]: """ Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "smooth_streaming") @smooth_streaming.setter def smooth_streaming(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "smooth_streaming", value) @pulumi.input_type class StreamingPolicyNoEncryptionEnabledProtocolsArgs: def __init__(__self__, *, dash: Optional[pulumi.Input[bool]] = None, download: Optional[pulumi.Input[bool]] = None, hls: Optional[pulumi.Input[bool]] = None, smooth_streaming: Optional[pulumi.Input[bool]] = None): """ :param pulumi.Input[bool] dash: Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[bool] download: Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[bool] hls: Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. :param pulumi.Input[bool] smooth_streaming: Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. """ if dash is not None: pulumi.set(__self__, "dash", dash) if download is not None: pulumi.set(__self__, "download", download) if hls is not None: pulumi.set(__self__, "hls", hls) if smooth_streaming is not None: pulumi.set(__self__, "smooth_streaming", smooth_streaming) @property @pulumi.getter def dash(self) -> Optional[pulumi.Input[bool]]: """ Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "dash") @dash.setter def dash(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "dash", value) @property @pulumi.getter def download(self) -> Optional[pulumi.Input[bool]]: """ Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "download") @download.setter def download(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "download", value) @property @pulumi.getter def hls(self) -> Optional[pulumi.Input[bool]]: """ Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. """ return pulumi.get(self, "hls") @hls.setter def hls(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "hls", value) @property @pulumi.getter(name="smoothStreaming") def smooth_streaming(self) -> Optional[pulumi.Input[bool]]: """ Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. 
""" return pulumi.get(self, "smooth_streaming") @smooth_streaming.setter def smooth_streaming(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "smooth_streaming", value) @pulumi.input_type class TransformOutputArgs: def __init__(__self__, *, audio_analyzer_preset: Optional[pulumi.Input['TransformOutputAudioAnalyzerPresetArgs']] = None, builtin_preset: Optional[pulumi.Input['TransformOutputBuiltinPresetArgs']] = None, face_detector_preset: Optional[pulumi.Input['TransformOutputFaceDetectorPresetArgs']] = None, on_error_action: Optional[pulumi.Input[str]] = None, relative_priority: Optional[pulumi.Input[str]] = None, video_analyzer_preset: Optional[pulumi.Input['TransformOutputVideoAnalyzerPresetArgs']] = None): """ :param pulumi.Input['TransformOutputAudioAnalyzerPresetArgs'] audio_analyzer_preset: A `audio_analyzer_preset` block as defined below. :param pulumi.Input['TransformOutputBuiltinPresetArgs'] builtin_preset: A `builtin_preset` block as defined below. :param pulumi.Input['TransformOutputFaceDetectorPresetArgs'] face_detector_preset: A `face_detector_preset` block as defined below. :param pulumi.Input[str] on_error_action: A Transform can define more than one outputs. This property defines what the service should do when one output fails - either continue to produce other outputs, or, stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with `ContinueJob`. Possibles value are `StopProcessingJob` or `ContinueJob`. :param pulumi.Input[str] relative_priority: Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possibles value are `High`, `Normal` or `Low`. :param pulumi.Input['TransformOutputVideoAnalyzerPresetArgs'] video_analyzer_preset: A `video_analyzer_preset` block as defined below. """ if audio_analyzer_preset is not None: pulumi.set(__self__, "audio_analyzer_preset", audio_analyzer_preset) if builtin_preset is not None: pulumi.set(__self__, "builtin_preset", builtin_preset) if face_detector_preset is not None: pulumi.set(__self__, "face_detector_preset", face_detector_preset) if on_error_action is not None: pulumi.set(__self__, "on_error_action", on_error_action) if relative_priority is not None: pulumi.set(__self__, "relative_priority", relative_priority) if video_analyzer_preset is not None: pulumi.set(__self__, "video_analyzer_preset", video_analyzer_preset) @property @pulumi.getter(name="audioAnalyzerPreset") def audio_analyzer_preset(self) -> Optional[pulumi.Input['TransformOutputAudioAnalyzerPresetArgs']]: """ A `audio_analyzer_preset` block as defined below. """ return pulumi.get(self, "audio_analyzer_preset") @audio_analyzer_preset.setter def audio_analyzer_preset(self, value: Optional[pulumi.Input['TransformOutputAudioAnalyzerPresetArgs']]): pulumi.set(self, "audio_analyzer_preset", value) @property @pulumi.getter(name="builtinPreset") def builtin_preset(self) -> Optional[pulumi.Input['TransformOutputBuiltinPresetArgs']]: """ A `builtin_preset` block as defined below. """ return pulumi.get(self, "builtin_preset") @builtin_preset.setter def builtin_preset(self, value: Optional[pulumi.Input['TransformOutputBuiltinPresetArgs']]): pulumi.set(self, "builtin_preset", value) @property @pulumi.getter(name="faceDetectorPreset") def face_detector_preset(self) -> Optional[pulumi.Input['TransformOutputFaceDetectorPresetArgs']]: """ A `face_detector_preset` block as defined below. 
""" return pulumi.get(self, "face_detector_preset") @face_detector_preset.setter def face_detector_preset(self, value: Optional[pulumi.Input['TransformOutputFaceDetectorPresetArgs']]): pulumi.set(self, "face_detector_preset", value) @property @pulumi.getter(name="onErrorAction") def on_error_action(self) -> Optional[pulumi.Input[str]]: """ A Transform can define more than one outputs. This property defines what the service should do when one output fails - either continue to produce other outputs, or, stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with `ContinueJob`. Possibles value are `StopProcessingJob` or `ContinueJob`. """ return pulumi.get(self, "on_error_action") @on_error_action.setter def on_error_action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "on_error_action", value) @property @pulumi.getter(name="relativePriority") def relative_priority(self) -> Optional[pulumi.Input[str]]: """ Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possibles value are `High`, `Normal` or `Low`. """ return pulumi.get(self, "relative_priority") @relative_priority.setter def relative_priority(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "relative_priority", value) @property @pulumi.getter(name="videoAnalyzerPreset") def video_analyzer_preset(self) -> Optional[pulumi.Input['TransformOutputVideoAnalyzerPresetArgs']]: """ A `video_analyzer_preset` block as defined below. """ return pulumi.get(self, "video_analyzer_preset") @video_analyzer_preset.setter def video_analyzer_preset(self, value: Optional[pulumi.Input['TransformOutputVideoAnalyzerPresetArgs']]): pulumi.set(self, "video_analyzer_preset", value) @pulumi.input_type class TransformOutputAudioAnalyzerPresetArgs: def __init__(__self__, *, audio_analysis_mode: Optional[pulumi.Input[str]] = None, audio_language: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] audio_analysis_mode: Possibles value are `Basic` or `Standard`. Determines the set of audio analysis operations to be performed. :param pulumi.Input[str] audio_language: The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'." The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. """ if audio_analysis_mode is not None: pulumi.set(__self__, "audio_analysis_mode", audio_analysis_mode) if audio_language is not None: pulumi.set(__self__, "audio_language", audio_language) @property @pulumi.getter(name="audioAnalysisMode") def audio_analysis_mode(self) -> Optional[pulumi.Input[str]]: """ Possibles value are `Basic` or `Standard`. Determines the set of audio analysis operations to be performed. 
""" return pulumi.get(self, "audio_analysis_mode") @audio_analysis_mode.setter def audio_analysis_mode(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "audio_analysis_mode", value) @property @pulumi.getter(name="audioLanguage") def audio_language(self) -> Optional[pulumi.Input[str]]: """ The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'." The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. """ return pulumi.get(self, "audio_language") @audio_language.setter def audio_language(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "audio_language", value) @pulumi.input_type class TransformOutputBuiltinPresetArgs: def __init__(__self__, *, preset_name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] preset_name: The built-in preset to be used for encoding videos. The allowed values are `AACGoodQualityAudio`, `AdaptiveStreaming`,`ContentAwareEncoding`, `ContentAwareEncodingExperimental`,`CopyAllBitrateNonInterleaved`, `H264MultipleBitrate1080p`,`H264MultipleBitrate720p`, `H264MultipleBitrateSD`,`H264SingleBitrate1080p`, `H264SingleBitrate720p` and `H264SingleBitrateSD`. """ if preset_name is not None: pulumi.set(__self__, "preset_name", preset_name) @property @pulumi.getter(name="presetName") def preset_name(self) -> Optional[pulumi.Input[str]]: """ The built-in preset to be used for encoding videos. The allowed values are `AACGoodQualityAudio`, `AdaptiveStreaming`,`ContentAwareEncoding`, `ContentAwareEncodingExperimental`,`CopyAllBitrateNonInterleaved`, `H264MultipleBitrate1080p`,`H264MultipleBitrate720p`, `H264MultipleBitrateSD`,`H264SingleBitrate1080p`, `H264SingleBitrate720p` and `H264SingleBitrateSD`. """ return pulumi.get(self, "preset_name") @preset_name.setter def preset_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "preset_name", value) @pulumi.input_type class TransformOutputFaceDetectorPresetArgs: def __init__(__self__, *, analysis_resolution: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] analysis_resolution: Possibles value are `SourceResolution` or `StandardDefinition`. Specifies the maximum resolution at which your video is analyzed. The default behavior is `SourceResolution` which will keep the input video at its original resolution when analyzed. Using `StandardDefinition` will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to `StandardDefinition` will reduce the time it takes to process high resolution video. 
It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
        """
        if analysis_resolution is not None:
            pulumi.set(__self__, "analysis_resolution", analysis_resolution)

    @property
    @pulumi.getter(name="analysisResolution")
    def analysis_resolution(self) -> Optional[pulumi.Input[str]]:
        """
        Possible values are `SourceResolution` or `StandardDefinition`. Specifies the maximum resolution at which your video is analyzed. The default behavior is `SourceResolution`, which will keep the input video at its original resolution when analyzed. Using `StandardDefinition` will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to `StandardDefinition` will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
        """
        return pulumi.get(self, "analysis_resolution")

    @analysis_resolution.setter
    def analysis_resolution(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "analysis_resolution", value)


@pulumi.input_type
class TransformOutputVideoAnalyzerPresetArgs:
    def __init__(__self__, *,
                 audio_analysis_mode: Optional[pulumi.Input[str]] = None,
                 audio_language: Optional[pulumi.Input[str]] = None,
                 insights_type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] audio_analysis_mode: Possible values are `Basic` or `Standard`. Determines the set of audio analysis operations to be performed.
        :param pulumi.Input[str] audio_language: The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
        :param pulumi.Input[str] insights_type: Defines the type of insights that you want the service to generate. The allowed values are `AudioInsightsOnly`, `VideoInsightsOnly`, and `AllInsights`. If you set this to `AllInsights` and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use `AudioInsightsOnly` if you expect some of your inputs to be video only, or `VideoInsightsOnly` if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
""" if audio_analysis_mode is not None: pulumi.set(__self__, "audio_analysis_mode", audio_analysis_mode) if audio_language is not None: pulumi.set(__self__, "audio_language", audio_language) if insights_type is not None: pulumi.set(__self__, "insights_type", insights_type) @property @pulumi.getter(name="audioAnalysisMode") def audio_analysis_mode(self) -> Optional[pulumi.Input[str]]: """ Possibles value are `Basic` or `Standard`. Determines the set of audio analysis operations to be performed. """ return pulumi.get(self, "audio_analysis_mode") @audio_analysis_mode.setter def audio_analysis_mode(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "audio_analysis_mode", value) @property @pulumi.getter(name="audioLanguage") def audio_language(self) -> Optional[pulumi.Input[str]]: """ The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'." The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. """ return pulumi.get(self, "audio_language") @audio_language.setter def audio_language(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "audio_language", value) @property @pulumi.getter(name="insightsType") def insights_type(self) -> Optional[pulumi.Input[str]]: """ Defines the type of insights that you want the service to generate. The allowed values are `AudioInsightsOnly`, `VideoInsightsOnly`, and `AllInsights`. If you set this to `AllInsights` and the input is audio only, then only audio insights are generated. Similarly if the input is video only, then only video insights are generated. It is recommended that you not use `AudioInsightsOnly` if you expect some of your inputs to be video only; or use `VideoInsightsOnly` if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. """ return pulumi.get(self, "insights_type") @insights_type.setter def insights_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "insights_type", value)
52.801838
941
0.708309
acefa5581cfcdf7795c2f0d65df0b3af8d41dba2
1,321
py
Python
setup.py
Jan200101/unixreg
103d2bfcb764fa601f9032cb73cc59cbc3e145c6
[ "MIT" ]
null
null
null
setup.py
Jan200101/unixreg
103d2bfcb764fa601f9032cb73cc59cbc3e145c6
[ "MIT" ]
null
null
null
setup.py
Jan200101/unixreg
103d2bfcb764fa601f9032cb73cc59cbc3e145c6
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
import setuptools
import re

requirements = []
with open('requirements.txt') as f:
    requirements = f.read().splitlines()

# Read the version out of the package itself; checking the match object first
# avoids an AttributeError masking the intended RuntimeError below.
version = None
with open('unixreg/__init__.py') as f:
    match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE)
    if match:
        version = match.group(1)

if not version:
    raise RuntimeError('version is not set')

long_description = ""
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="unixreg",
    version=version,
    author="Jan Drögehoff",
    author_email="jandroegehoff@gmail.com",
    description="winreg implementation for non NT systems",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Jan200101/unixreg",
    packages=["unixreg"],
    license="MIT",
    install_requires=requirements,
    include_package_data=True,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX",
        "Operating System :: POSIX :: BSD",
        "Operating System :: POSIX :: Linux",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Typing :: Typed"
    ],
    python_requires=">=3.6"
)
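# --- Editor's note (standalone illustration, not part of setup.py): the
# single-source version pattern above reads __version__ out of the package
# with a regex. A quick demonstration of that regex on a sample module body:
import re

sample = "__version__ = '0.1.2'\n"
match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', sample, re.MULTILINE)
print(match.group(1) if match else None)  # -> 0.1.2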
25.901961
99
0.637396
acefa77a0a2cf2f2f21668ba398fa6083d45671f
105
py
Python
targetedtherapy/apps.py
fahimfarhan/cancer-web-app
6c5d8c5c90b0909cbd161d2ae87b4f12549bdfef
[ "MIT" ]
null
null
null
targetedtherapy/apps.py
fahimfarhan/cancer-web-app
6c5d8c5c90b0909cbd161d2ae87b4f12549bdfef
[ "MIT" ]
5
2021-03-18T20:13:38.000Z
2022-01-13T00:35:37.000Z
targetedtherapy/apps.py
fahimfarhan/cancer-web-app
6c5d8c5c90b0909cbd161d2ae87b4f12549bdfef
[ "MIT" ]
null
null
null
from django.apps import AppConfig class TargetedtherapyConfig(AppConfig): name = 'targetedtherapy'
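# --- Editor's note (hypothetical settings fragment, not from this repo): the
# AppConfig above takes effect once the app is listed in INSTALLED_APPS, e.g.:
#
# INSTALLED_APPS = [
#     # ... Django contrib apps ...
#     'targetedtherapy.apps.TargetedtherapyConfig',
# ]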
17.5
39
0.790476
acefa80fd388e2ffc17aeee76fd77e76031fc86f
12,454
py
Python
blades/seafile/scripts/build/build-server-src.py
krattai/AEBL
a7b12c97479e1236d5370166b15ca9f29d7d4265
[ "BSD-2-Clause" ]
4
2016-04-26T03:43:54.000Z
2016-11-17T08:09:04.000Z
blades/seafile/scripts/build/build-server-src.py
krattai/AEBL
a7b12c97479e1236d5370166b15ca9f29d7d4265
[ "BSD-2-Clause" ]
17
2015-01-05T21:06:22.000Z
2015-12-07T20:45:44.000Z
blades/seafile/scripts/build/build-server-src.py
krattai/AEBL
a7b12c97479e1236d5370166b15ca9f29d7d4265
[ "BSD-2-Clause" ]
3
2016-04-26T03:43:55.000Z
2020-11-06T11:02:08.000Z
#!/usr/bin/env python
# coding: UTF-8

'''This script builds the Seafile Debian source tarball. libsearpc and ccnet
are also included in this tarball.

'''
import sys

####################
### Requires Python 2.6+
####################
if sys.version_info[0] == 3:
    print 'Python 3 not supported yet. Quit now.'
    sys.exit(1)
if sys.version_info[1] < 6:
    print 'Python 2.6 or above is required. Quit now.'
    sys.exit(1)

import os
import tempfile
import glob
import shutil
import re
import subprocess
import optparse
import atexit

####################
### Global variables
####################

# command line configuration
conf = {}

# key names in the conf dictionary.
CONF_VERSION = 'version'
CONF_LIBSEARPC_VERSION = 'libsearpc_version'
CONF_CCNET_VERSION = 'ccnet_version'
CONF_SEAFILE_VERSION = 'seafile_version'
CONF_SRCDIR = 'srcdir'
CONF_KEEP = 'keep'
CONF_BUILDDIR = 'builddir'
CONF_OUTPUTDIR = 'outputdir'

####################
### Common helper functions
####################
def highlight(content, is_error=False):
    '''Add ANSI color to content to get it highlighted on terminal'''
    if is_error:
        return '\x1b[1;31m%s\x1b[m' % content
    else:
        return '\x1b[1;32m%s\x1b[m' % content

def info(msg):
    print highlight('[INFO] ') + msg

def exist_in_path(prog):
    '''Test whether prog exists in system path'''
    dirs = os.environ['PATH'].split(':')
    for d in dirs:
        if d == '':
            continue
        path = os.path.join(d, prog)
        if os.path.exists(path):
            return True

    return False

def error(msg=None, usage=None):
    if msg:
        print highlight('[ERROR] ') + msg
    if usage:
        print usage
    sys.exit(1)

def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
    '''Run a program, wait for it to finish, and return its exit code. The
    standard output of the program is suppressed only when requested.

    '''
    info('running %s, cwd=%s' % (' '.join(argv), cwd if cwd else os.getcwd()))
    with open(os.devnull, 'w') as devnull:
        if suppress_stdout:
            stdout = devnull
        else:
            stdout = sys.stdout

        if suppress_stderr:
            stderr = devnull
        else:
            stderr = sys.stderr

        proc = subprocess.Popen(argv,
                                cwd=cwd,
                                stdout=stdout,
                                stderr=stderr,
                                env=env)
        return proc.wait()

def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
    '''Like run_argv but specify a command line string instead of argv'''
    with open(os.devnull, 'w') as devnull:
        if suppress_stdout:
            stdout = devnull
        else:
            stdout = sys.stdout

        if suppress_stderr:
            stderr = devnull
        else:
            stderr = sys.stderr

        proc = subprocess.Popen(cmdline,
                                cwd=cwd,
                                stdout=stdout,
                                stderr=stderr,
                                env=env,
                                shell=True)
        return proc.wait()

def must_mkdir(path):
    '''Create a directory, exit on failure'''
    try:
        os.mkdir(path)
    except OSError, e:
        error('failed to create directory %s:%s' % (path, e))

def must_copy(src, dst):
    '''Copy src to dst, exit on failure'''
    try:
        shutil.copy(src, dst)
    except Exception, e:
        error('failed to copy %s to %s: %s' % (src, dst, e))

def check_targz_src(proj, version, srcdir):
    src_tarball = os.path.join(srcdir, '%s-%s.tar.gz' % (proj, version))
    if not os.path.exists(src_tarball):
        error('%s does not exist' % src_tarball)

def remove_unused_files():
    srcdir = os.path.join(conf[CONF_BUILDDIR], 'seafile-%s' % conf[CONF_VERSION])
    web_sh_files = glob.glob(os.path.join(srcdir, 'web', '*.sh'))
    files = [
        os.path.join(srcdir, 'web', 'pygettext.py'),
    ]
    files.extend(web_sh_files)

    for f in files:
        run('rm -f %s' % f)

def gen_tarball():
    output = os.path.join(conf[CONF_OUTPUTDIR], 'seafile-server-latest.tar.gz')
    dirname = 'seafile-%s' % conf[CONF_VERSION]

    ignored_patterns = [
        # windows msvc dlls
        os.path.join(dirname, 'msi', 'bin*'),
    ]
    excludes_list = [ '--exclude=%s' % pattern for pattern in ignored_patterns ]

    argv = [
        'tar',
        'czvf',
        output,
        dirname,
    ]
    # add all --exclude flags; extend (not append) handles multiple patterns
    argv.extend(excludes_list)

    if run_argv(argv) != 0:
        error('failed to gen %s' % output)

    print '---------------------------------------------'
    print 'The build was successful. Output is:\t%s' % output
    print '---------------------------------------------'

def uncompress_seafile():
    src = os.path.join(conf[CONF_BUILDDIR], 'seafile-%s' % conf[CONF_SEAFILE_VERSION])
    dst = os.path.join(conf[CONF_BUILDDIR], 'seafile-%s' % conf[CONF_VERSION])

    if os.path.exists(src):
        error('dir %s already exists' % src)
    if os.path.exists(dst):
        error('dir %s already exists' % dst)

    tarball = os.path.join(conf[CONF_SRCDIR], 'seafile-%s.tar.gz' % conf[CONF_SEAFILE_VERSION])
    argv = [ 'tar', 'xf',
             tarball,
             '-C', conf[CONF_BUILDDIR],
    ]
    if run_argv(argv) != 0:
        error('failed to uncompress seafile')

    if conf[CONF_VERSION] != conf[CONF_SEAFILE_VERSION]:
        shutil.move(src, dst)

def uncompress_libsearpc():
    tarball = os.path.join(conf[CONF_SRCDIR], 'libsearpc-%s.tar.gz' % conf[CONF_LIBSEARPC_VERSION])
    dst_dir = os.path.join(conf[CONF_BUILDDIR],
                           'seafile-%s' % conf[CONF_VERSION],
                           'libsearpc')
    must_mkdir(dst_dir)
    argv = [ 'tar', 'xf',
             tarball,
             '--strip-components=1',
             '-C', dst_dir,
    ]
    if run_argv(argv) != 0:
        error('failed to uncompress libsearpc')

def uncompress_ccnet():
    tarball = os.path.join(conf[CONF_SRCDIR], 'ccnet-%s.tar.gz' % conf[CONF_CCNET_VERSION])
    dst_dir = os.path.join(conf[CONF_BUILDDIR],
                           'seafile-%s' % conf[CONF_VERSION],
                           'ccnet')
    must_mkdir(dst_dir)
    argv = [ 'tar', 'xf',
             tarball,
             '--strip-components=1',
             '-C', dst_dir,
    ]
    if run_argv(argv) != 0:
        error('failed to uncompress ccnet')

def remove_debian_subdir():
    debian_subdir = os.path.join(conf[CONF_BUILDDIR],
                                 'seafile-%s' % conf[CONF_VERSION],
                                 'debian')
    argv = [ 'rm', '-rf', debian_subdir ]

    if run_argv(argv) != 0:
        error('failed to remove the debian subdir')

def parse_args():
    parser = optparse.OptionParser()
    def long_opt(opt):
        return '--' + opt

    parser.add_option(long_opt(CONF_VERSION),
                      dest=CONF_VERSION,
                      nargs=1,
                      help='the version of seafile source. Must be digits delimited by dots, like 1.3.0')

    parser.add_option(long_opt(CONF_SEAFILE_VERSION),
                      dest=CONF_SEAFILE_VERSION,
                      nargs=1,
                      help='the version of seafile. Must be digits delimited by dots, like 1.3.0')

    parser.add_option(long_opt(CONF_LIBSEARPC_VERSION),
                      dest=CONF_LIBSEARPC_VERSION,
                      nargs=1,
                      help='the version of libsearpc as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')

    parser.add_option(long_opt(CONF_CCNET_VERSION),
                      dest=CONF_CCNET_VERSION,
                      nargs=1,
                      help='the version of ccnet as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')

    parser.add_option(long_opt(CONF_BUILDDIR),
                      dest=CONF_BUILDDIR,
                      nargs=1,
                      help='the directory to build the source. Defaults to /tmp',
                      default=tempfile.gettempdir())

    parser.add_option(long_opt(CONF_OUTPUTDIR),
                      dest=CONF_OUTPUTDIR,
                      nargs=1,
                      help='the output directory to put the generated server tarball. Defaults to the current directory.',
                      default=os.getcwd())

    parser.add_option(long_opt(CONF_SRCDIR),
                      dest=CONF_SRCDIR,
                      nargs=1,
                      help='''Source tarballs must be placed in this directory.''')

    parser.add_option(long_opt(CONF_KEEP),
                      dest=CONF_KEEP,
                      action='store_true',
                      help='''keep the build directory after the script exits.
By default, the script would delete the build directory at exit.''')

    usage = parser.format_help()
    options, remain = parser.parse_args()
    if remain:
        error(usage=usage)

    validate_args(usage, options)

def validate_args(usage, options):
    required_args = [
        CONF_VERSION,
        CONF_SEAFILE_VERSION,
        CONF_LIBSEARPC_VERSION,
        CONF_CCNET_VERSION,
        CONF_SRCDIR,
    ]

    # first check the required args
    for optname in required_args:
        if getattr(options, optname, None) is None:
            error('%s must be specified' % optname, usage=usage)

    def get_option(optname):
        return getattr(options, optname)

    # [ version ]
    def check_project_version(version):
        '''A valid version must be dot-delimited digits, like 1.2.2, 1.3 or 2.10.1'''
        if not re.match(r'^[0-9]+(\.[0-9]+)+$', version):
            error('%s is not a valid version' % version, usage=usage)

    version = get_option(CONF_VERSION)
    libsearpc_version = get_option(CONF_LIBSEARPC_VERSION)
    ccnet_version = get_option(CONF_CCNET_VERSION)
    seafile_version = get_option(CONF_SEAFILE_VERSION)

    check_project_version(version)
    check_project_version(libsearpc_version)
    check_project_version(ccnet_version)
    check_project_version(seafile_version)

    # [ srcdir ]
    srcdir = get_option(CONF_SRCDIR)
    check_targz_src('libsearpc', libsearpc_version, srcdir)
    check_targz_src('ccnet', ccnet_version, srcdir)
    check_targz_src('seafile', seafile_version, srcdir)

    # [ builddir ]
    builddir = get_option(CONF_BUILDDIR)
    if not os.path.exists(builddir):
        error('%s does not exist' % builddir, usage=usage)

    builddir = os.path.join(builddir, 'seafile-deb-src')

    # [ outputdir ]
    outputdir = get_option(CONF_OUTPUTDIR)
    if not os.path.exists(outputdir):
        error('outputdir %s does not exist' % outputdir, usage=usage)

    # [ keep ]
    keep = get_option(CONF_KEEP)

    conf[CONF_VERSION] = version
    conf[CONF_LIBSEARPC_VERSION] = libsearpc_version
    conf[CONF_CCNET_VERSION] = ccnet_version
    conf[CONF_SEAFILE_VERSION] = seafile_version
    conf[CONF_BUILDDIR] = builddir
    conf[CONF_SRCDIR] = srcdir
    conf[CONF_OUTPUTDIR] = outputdir
    conf[CONF_KEEP] = keep

    prepare_builddir(builddir)
    show_build_info()

def prepare_builddir(builddir):
    must_mkdir(builddir)

    if not conf[CONF_KEEP]:
        def remove_builddir():
            '''Remove the builddir when the script exits'''
            info('remove builddir before exit')
            shutil.rmtree(builddir, ignore_errors=True)
        atexit.register(remove_builddir)

    os.chdir(builddir)

def show_build_info():
    '''Print all conf information. Confirm before continuing.'''
    info('------------------------------------------')
    info('Seafile debian source tarball %s:' % conf[CONF_VERSION])
    info('------------------------------------------')
    info('seafile: %s' % conf[CONF_SEAFILE_VERSION])
    info('ccnet: %s' % conf[CONF_CCNET_VERSION])
    info('libsearpc: %s' % conf[CONF_LIBSEARPC_VERSION])
    info('builddir: %s' % conf[CONF_BUILDDIR])
    info('outputdir: %s' % conf[CONF_OUTPUTDIR])
    info('source dir: %s' % conf[CONF_SRCDIR])
    info('clean on exit: %s' % (not conf[CONF_KEEP]))
    info('------------------------------------------')
    info('press Enter to continue ')
    info('------------------------------------------')
    dummy = raw_input()

def main():
    parse_args()
    uncompress_seafile()
    uncompress_libsearpc()
    uncompress_ccnet()
    remove_debian_subdir()
    remove_unused_files()
    gen_tarball()

if __name__ == '__main__':
    main()
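# --- Editor's note: example invocation (version numbers are hypothetical).
# srcdir must already contain libsearpc-3.0.4.tar.gz, ccnet-3.1.0.tar.gz and
# seafile-3.1.0.tar.gz, matching what check_targz_src() verifies above:
#
#   python build-server-src.py \
#       --version=3.1.0 --seafile_version=3.1.0 \
#       --libsearpc_version=3.0.4 --ccnet_version=3.1.0 \
#       --srcdir=/tmp/seafile-sources --outputdir=/tmp/out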
31.370277
147
0.580938
acefa870eda3d6190f768600d541a204779cd681
1,498
py
Python
dashboard/views.py
vincentclaes/babybuddy
183a30b0a3ca5e43c74dd40d11b58b3f4f02241c
[ "BSD-2-Clause" ]
null
null
null
dashboard/views.py
vincentclaes/babybuddy
183a30b0a3ca5e43c74dd40d11b58b3f4f02241c
[ "BSD-2-Clause" ]
5
2020-06-05T16:49:14.000Z
2022-01-13T00:34:19.000Z
dashboard/views.py
niti15/baby
74c6dab8243a9b69c41e76d1025b5592969bfc20
[ "BSD-2-Clause" ]
1
2021-07-08T17:59:24.000Z
2021-07-08T17:59:24.000Z
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.contrib.auth.mixins import (LoginRequiredMixin,
                                        PermissionRequiredMixin)
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView

from core.models import Child


class Dashboard(LoginRequiredMixin, TemplateView):
    # TODO: Use .card-deck in this template once BS4 is finalized.
    template_name = 'dashboard/dashboard.html'

    # Show the overall dashboard, or redirect to the child dashboard when
    # there is exactly one Child instance.
    def get(self, request, *args, **kwargs):
        children = Child.objects.count()
        if children == 0:
            return HttpResponseRedirect(reverse('babybuddy:welcome'))
        elif children == 1:
            return HttpResponseRedirect(
                reverse(
                    'dashboard:dashboard-child',
                    # args must be a sequence, not a set
                    args=(Child.objects.first().slug,)
                )
            )
        return super(Dashboard, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(Dashboard, self).get_context_data(**kwargs)
        context['objects'] = Child.objects.all().order_by('last_name')
        return context


class ChildDashboard(PermissionRequiredMixin, DetailView):
    model = Child
    permission_required = ('core.view_child',)
    template_name = 'dashboard/child.html'
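# --- Editor's sketch of the URLconf the reverse() calls above assume. The
# route names come from the view code; the exact path patterns are an
# assumption for illustration, not taken from the repository:
from django.urls import path

from . import views

app_name = 'dashboard'
urlpatterns = [
    path('', views.Dashboard.as_view(), name='dashboard'),
    path('children/<slug:slug>/', views.ChildDashboard.as_view(),
         name='dashboard-child'),
]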
35.666667
76
0.664219
acefa8b238bf2ffb4373d391b2f518435a07bc16
2,187
py
Python
Python36-32/Scripts/player.py
tur103/social-network
41573b10500bd6f453c49fd51c1db787c3997f40
[ "MIT" ]
2
2017-05-05T13:45:35.000Z
2017-05-05T19:20:49.000Z
Python36-32/Scripts/player.py
tur103/social-network
41573b10500bd6f453c49fd51c1db787c3997f40
[ "MIT" ]
null
null
null
Python36-32/Scripts/player.py
tur103/social-network
41573b10500bd6f453c49fd51c1db787c3997f40
[ "MIT" ]
null
null
null
#!C:\work\Python36-32\python.exe # # The Python Imaging Library # $Id$ # from __future__ import print_function try: from tkinter import * except ImportError: from Tkinter import * from PIL import Image, ImageTk import sys # -------------------------------------------------------------------- # an image animation player class UI(Label): def __init__(self, master, im): if isinstance(im, list): # list of images self.im = im[1:] im = self.im[0] else: # sequence self.im = im if im.mode == "1": self.image = ImageTk.BitmapImage(im, foreground="white") else: self.image = ImageTk.PhotoImage(im) Label.__init__(self, master, image=self.image, bg="black", bd=0) self.update() try: duration = im.info["duration"] except KeyError: duration = 100 self.after(duration, self.next) def next(self): if isinstance(self.im, list): try: im = self.im[0] del self.im[0] self.image.paste(im) except IndexError: return # end of list else: try: im = self.im im.seek(im.tell() + 1) self.image.paste(im) except EOFError: return # end of file try: duration = im.info["duration"] except KeyError: duration = 100 self.after(duration, self.next) self.update_idletasks() # -------------------------------------------------------------------- # script interface if __name__ == "__main__": if not sys.argv[1:]: print("Syntax: python player.py imagefile(s)") sys.exit(1) filename = sys.argv[1] root = Tk() root.title(filename) if len(sys.argv) > 2: # list of images print("loading...") im = [] for filename in sys.argv[1:]: im.append(Image.open(filename)) else: # sequence im = Image.open(filename) UI(root, im).pack() root.mainloop()
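# --- Editor's note: programmatic usage sketch (the file path below is
# hypothetical); equivalent to running `python player.py animation.gif`:
#
#   root = Tk()
#   UI(root, Image.open("animation.gif")).pack()
#   root.mainloop()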
21.23301
72
0.480567
acefa9b27716e52fba085fbec7ddda6f179ff320
6,234
py
Python
Python/Chittle_Ben_FSE 2019-2020/0.2.4/randomGen3.py
benchittle/misc
a5332f3696d36609154713bdc5e6367b73fdb8be
[ "MIT" ]
null
null
null
Python/Chittle_Ben_FSE 2019-2020/0.2.4/randomGen3.py
benchittle/misc
a5332f3696d36609154713bdc5e6367b73fdb8be
[ "MIT" ]
null
null
null
Python/Chittle_Ben_FSE 2019-2020/0.2.4/randomGen3.py
benchittle/misc
a5332f3696d36609154713bdc5e6367b73fdb8be
[ "MIT" ]
null
null
null
import random import pygame as pg import config, environment class Sub: def __init__(self, game, pos, max_width, height): self.game = game self.pos = pg.Vector2(pos) self.max_w = max_width self.h = height self.grid = [[None] * self.max_w for i in range(self.h)] for row in range(self.h): for col in range(self.max_w): self.grid[row][col] = random.choices([Tile(self, row, col), None], weights=[3, 1])[0] self.build() def __str__(self): string = "" for row in self.iter_rows(): for tile in row: string += "|{:^14}|".format(str(tile)) string += "\n" + "-" * len(row) * 16 + "\n" return string def adjacent_tiles(self, row, col): tiles = {} coords = { "n" : (row - 1, col), "e" : (row, col + 1), "s" : (row + 1, col), "w" : (row, col - 1) } for direction, (r, c) in coords.items(): if 0 <= r < self.h and 0 <= c < self.max_w: ### ACCOUNT FOR THIS tiles[direction] = self.grid[r][c] else: tiles[direction] = None return tiles def find_by_tags(self, *tags): tiles = [] for tile in self.iter_tiles(): if tile.tags.issuperset(tags): tiles.append(tile) return tiles def iter_columns(self): for col in zip(*self.grid): yield col def iter_rows(self): for row in self.grid: yield row def iter_tiles(self): """Iterate over the tiles in the grid by row.""" for row in self.iter_rows(): for tile in row: yield tile def build(self): for tile in self.iter_tiles(): if tile is not None: tile.build() class Tile: def __init__(self, container, row, col, *tags): self.game = container.game self.container = container self.row = row self.col = col self.pos = self.container.pos + pg.Vector2(self.col, self.row) * config.TILE_SIZE self.rect = pg.Rect(self.pos, (config.TILE_SIZE, config.TILE_SIZE)) self.tags = set(tags) self.entrances = set() self.room = random.choice([Corridor, Room])(self) def build(self): for loc, tile in self.adjacent_tiles.items(): if tile is not None: self.entrances.add(loc) self.room.build() @property def adjacent_tiles(self): return self.container.adjacent_tiles(self.row, self.col) class Room(pg.sprite.Group): def __init__(self, tile): super().__init__() self.game = tile.game self.tile = tile # Use this as keys for sides @property def placements(self): return { "n" : self.tile.pos, "e" : self.tile.pos + (config.TILE_SIZE, 0) - (config.WALL_WIDTH, 0), "s" : self.tile.pos + (0, config.TILE_SIZE) - (0, config.WALL_WIDTH), "w" : self.tile.pos } @property def sides(self): return { "n" : environment.HWallDoor if "n" in self.tile.entrances else environment.HWall, "e" : environment.VWallDoor if "e" in self.tile.entrances else environment.VWall, "s" : environment.HWallDoor if "s" in self.tile.entrances else environment.HWall, "w" : environment.VWallDoor if "w" in self.tile.entrances else environment.VWall } def build(self): for loc, kind in self.sides.items(): kind(self.game, self.placements[loc], groups=(self, self.game.walls)) class OpenRoom(pg.sprite.Group): def __init__(self, tile): super().__init__() self.game = tile.game self.tile = tile # Use this as keys for sides @property def placements(self): return { "n" : self.tile.pos, "e" : self.tile.pos + (config.TILE_SIZE, 0) - (config.WALL_WIDTH, 0), "s" : self.tile.pos + (0, config.TILE_SIZE) - (0, config.WALL_WIDTH), "w" : self.tile.pos } @property def sides(self): return { "n" : environment.HWallDoor if "n" in self.tile.entrances else environment.HWall, "e" : environment.VWallDoor if "e" in self.tile.entrances else environment.VWall, "s" : environment.HWallDoor if "s" in self.tile.entrances else environment.HWall, "w" : environment.VWallDoor if "w" in 
self.tile.entrances else environment.VWall
        }

    def build(self):
        for loc, kind in self.sides.items():
            neighbor = self.tile.adjacent_tiles[loc]
            # Guard against missing neighbours: adjacent_tiles returns None
            # both for off-grid coordinates and for empty grid cells.
            if neighbor is None or type(neighbor.room) != OpenRoom:
                kind(self.game, self.placements[loc], groups=(self, self.game.walls))


class Corridor(pg.sprite.Group):
    def __init__(self, tile):
        super().__init__()
        self.game = tile.game
        self.tile = tile

    # Use this as keys for sides
    @property
    def placements(self):
        return {
            "n" : self.tile.pos + (config.TILE_SIZE // 2, (config.TILE_SIZE - config.DOOR_SIZE) // 2),
            "e" : self.tile.pos + ((config.TILE_SIZE + config.DOOR_SIZE) // 2, config.TILE_SIZE // 2),
            "s" : self.tile.pos + (config.TILE_SIZE // 2, (config.TILE_SIZE + config.DOOR_SIZE) // 2),
            "w" : self.tile.pos + ((config.TILE_SIZE - config.DOOR_SIZE) // 2, config.TILE_SIZE // 2)
        }

    @property
    def sides(self):
        return {
            "n" : environment.VCorr if "n" in self.tile.entrances else environment.VCorrDoor,
            "e" : environment.HCorr if "e" in self.tile.entrances else environment.HCorrDoor,
            "s" : environment.VCorr if "s" in self.tile.entrances else environment.VCorrDoor,
            "w" : environment.HCorr if "w" in self.tile.entrances else environment.HCorrDoor
        }

    def build(self):
        for loc, kind in self.sides.items():
            anchor = "mid" + self.game.OPPOSITES[self.game.DIRECTIONS[loc]]
            kind(self.game, self.placements[loc], anchor=anchor, groups=(self, self.game.walls))
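# --- Editor's illustration (standalone and simplified; no pygame, config or
# environment imports) of the grid-and-adjacency pattern Sub uses above: a 2D
# list where empty cells are None, plus a bounds-checked neighbour lookup
# keyed by compass direction.
import random

HEIGHT, WIDTH = 3, 4
demo_grid = [[('T' if random.random() < 0.75 else None) for _ in range(WIDTH)]
             for _ in range(HEIGHT)]

def demo_adjacent(row, col):
    coords = {'n': (row - 1, col), 'e': (row, col + 1),
              's': (row + 1, col), 'w': (row, col - 1)}
    return {d: (demo_grid[r][c] if 0 <= r < HEIGHT and 0 <= c < WIDTH else None)
            for d, (r, c) in coords.items()}

print(demo_adjacent(0, 0))  # e.g. {'n': None, 'e': 'T', 's': 'T', 'w': None}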
30.409756
102
0.555502
acefaa6ccad79827d9f70e9097d6525eacdc4d7d
44,058
py
Python
tensorflow_constrained_optimization/python/rates/multiclass_rates_test.py
RMKruse/tensorflow_constrained_optimization
e7c3c3deec305fb91193e7b063062e11b7398e5f
[ "Apache-2.0" ]
276
2019-01-24T19:49:49.000Z
2022-03-29T15:50:58.000Z
tensorflow_constrained_optimization/python/rates/multiclass_rates_test.py
RMKruse/tensorflow_constrained_optimization
e7c3c3deec305fb91193e7b063062e11b7398e5f
[ "Apache-2.0" ]
13
2019-04-29T00:25:36.000Z
2022-03-18T23:13:16.000Z
tensorflow_constrained_optimization/python/rates/multiclass_rates_test.py
RMKruse/tensorflow_constrained_optimization
e7c3c3deec305fb91193e7b063062e11b7398e5f
[ "Apache-2.0" ]
51
2019-01-26T00:45:01.000Z
2022-02-16T11:38:33.000Z
# Copyright 2018 The TensorFlow Constrained Optimization Authors. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for multiclass_rates.py."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

from tensorflow_constrained_optimization.python import graph_and_eager_test_case
from tensorflow_constrained_optimization.python.rates import defaults
from tensorflow_constrained_optimization.python.rates import deferred_tensor
from tensorflow_constrained_optimization.python.rates import multiclass_rates
from tensorflow_constrained_optimization.python.rates import subsettable_context
# Placeholder for internal import.


def zero_one_loss(weights, predictions):
  shape = np.shape(weights)
  if np.shape(predictions) != shape:
    raise ValueError("weights and predictions must have the same shape")
  if len(shape) != 2:
    raise ValueError("weights and predictions must be two-dimensional")
  num_examples, num_classes = shape

  result = []
  for ii in xrange(num_examples):
    # Find the highest prediction score and how many classes tie for it.
    best_score = -float("Inf")
    best_count = 0.0
    for jj in xrange(num_classes):
      if predictions[ii, jj] > best_score:
        best_score = predictions[ii, jj]
        best_count = 1.0
      elif predictions[ii, jj] == best_score:
        best_count += 1.0

    # Average the weights over the argmax set (ties share the loss equally).
    count = 0.0
    for jj in xrange(num_classes):
      if predictions[ii, jj] >= best_score:
        count += weights[ii, jj]

    result.append(count / best_count)

  return np.array(result)


def hinge_loss(weights, predictions):
  shape = np.shape(weights)
  if np.shape(predictions) != shape:
    raise ValueError("weights and predictions must have the same shape")
  if len(shape) != 2:
    raise ValueError("weights and predictions must be two-dimensional")
  num_examples, num_classes = shape

  weights_permutation = np.argsort(weights, axis=1)

  result = []
  for ii in xrange(num_examples):
    total_weighted_hinge = 0.0
    excluded_indices = set()
    included_indices = set(range(num_classes))
    for jj in xrange(num_classes - 1):
      included_indices.remove(weights_permutation[ii, jj])
      excluded_indices.add(weights_permutation[ii, jj])

      included_max = -float("Inf")
      for kk in included_indices:
        included_max = max(included_max, predictions[ii, kk])
      excluded_mean = 0.0
      for kk in excluded_indices:
        excluded_mean += predictions[ii, kk]
      excluded_mean /= len(excluded_indices)

      delta_weight = (
          weights[ii, weights_permutation[ii, jj + 1]] -
          weights[ii, weights_permutation[ii, jj]])
      total_weighted_hinge += delta_weight * max(
          0, 1.0 + included_max - excluded_mean)

    value = weights[ii, weights_permutation[ii, 0]] + total_weighted_hinge
    result.append(value)

  return np.array(result)


# @run_all_tests_in_graph_and_eager_modes
class
MulticlassRatesTest(graph_and_eager_test_case.GraphAndEagerTestCase): """Tests for binary classification rate-constructing functions.""" def __init__(self, *args, **kwargs): super(MulticlassRatesTest, self).__init__(*args, **kwargs) self._penalty_size = 12 self._constraint_size = 8 self._num_classes = 4 # We use a fixed fake dataset to make sure that the tests are reproducible. # The code for generating this random dataset is: # # self._penalty_predictions = np.random.randn( # self._penalty_size, self._num_classes) # self._penalty_labels = np.random.randint( # 0, self._num_classes, size=self._penalty_size) # self._penalty_weights = np.random.rand(self._penalty_size) # self._penalty_predicate = np.random.choice( # [False, True], size=self._penalty_size) # # self._constraint_predictions = np.random.randn( # self._constraint_size, self._num_classes) # self._constraint_labels = np.random.randint( # 0, self._num_classes, size=self._constraint_size) # self._constraint_weights = np.random.rand(self._constraint_size) # self._constraint_predicate = np.random.choice( # [False, True], size=self._constraint_size) # # The dataset itself is: self._penalty_predictions = np.array( [[-1.5970997, 1.877267, -0.66030723, -0.01463978], [-1.87999382, -0.00305018, 2.42472298, 0.05893705], [-0.11031741, 0.82471499, 0.9340874, 0.09632045], [0.49282407, 0.57338305, 0.40928707, -0.61865314], [-0.7886149, 0.94948278, 1.96216129, 0.20474539], [0.72704683, -1.6208753, -0.31098981, -2.16005564], [-0.67164428, 0.37699518, 1.24978421, -0.87508569], [0.67631863, -0.15639794, -0.43874642, 0.43672745], [-0.34359654, -1.41637908, -0.36718105, -0.36349423], [-0.55319159, -0.08677386, 0.86685222, 1.19394724], [0.64423552, 0.13959498, -1.25362601, 0.40450444], [-0.84070832, 1.34938865, -0.63288385, 0.07019597]]) self._penalty_labels = np.array([0, 2, 3, 0, 3, 0, 3, 1, 0, 3, 1, 3]) self._penalty_weights = np.array([ 0.76566858, 0.61112134, 0.09629605, 0.48397956, 0.43083251, 0.54200695, 0.91410649, 0.22486834, 0.29674182, 0.62188739, 0.43582355, 0.73587001 ]) self._penalty_predicate = np.array([ False, True, False, False, True, False, True, True, False, False, True, True ]) self._constraint_predictions = np.array( [[-1.02284955, 2.19879824, 1.01087809, 1.2813714], [-0.2746204, -1.1608573, 0.08607241, -1.78127669], [-0.96669923, 0.66164043, -0.88072148, 2.3059222], [1.84892764, 0.23774778, 1.59575183, -0.55435492], [0.79456944, -1.31367073, -0.82844754, 1.05074885], [0.36645997, -1.31130601, 1.29792815, 0.52346038], [-1.55433413, -1.92272332, -1.26217317, 0.41987784], [0.02946888, 0.69755685, -0.22851259, -1.20193645]]) self._constraint_labels = np.array([0, 3, 1, 3, 2, 2, 0, 3]) self._constraint_weights = np.array([ 0.90018779, 0.08155366, 0.87932082, 0.45599071, 0.03253726, 0.35871828, 0.74693516, 0.03862526 ]) self._constraint_predicate = np.array( [False, True, True, False, True, True, True, True]) @property def _split_context(self): """Creates a new split and subsetted context.""" # We can't create the context in __init__, since it would then wind up in # the wrong TensorFlow graph. 
penalty_predictions = tf.constant( self._penalty_predictions, dtype=tf.float32) constraint_predictions = tf.constant( self._constraint_predictions, dtype=tf.float32) penalty_labels = tf.one_hot(self._penalty_labels, depth=self._num_classes) constraint_labels = tf.one_hot( self._constraint_labels, depth=self._num_classes) penalty_weights = tf.constant(self._penalty_weights, dtype=tf.float32) constraint_weights = tf.constant(self._constraint_weights, dtype=tf.float32) context = subsettable_context.multiclass_split_rate_context( num_classes=self._num_classes, penalty_predictions=lambda: penalty_predictions, constraint_predictions=lambda: constraint_predictions, penalty_labels=lambda: penalty_labels, constraint_labels=lambda: constraint_labels, penalty_weights=lambda: penalty_weights, constraint_weights=lambda: constraint_weights) return context.subset(self._penalty_predicate, self._constraint_predicate) # FUTURE WORK: this is identical to the corresponding function in # binary_rates_test.py. Maybe put this in some common place? def _check_rates(self, expected_penalty_value, expected_constraint_value, actual_expression): structure_memoizer = { defaults.DENOMINATOR_LOWER_BOUND_KEY: 0.0, defaults.GLOBAL_STEP_KEY: tf.Variable(0, dtype=tf.int32), defaults.VARIABLE_FN_KEY: tf.Variable } actual_penalty_value = actual_expression.penalty_expression.evaluate( structure_memoizer) actual_constraint_value = actual_expression.constraint_expression.evaluate( structure_memoizer) # We need to explicitly create the variables before creating the wrapped # session. variables = deferred_tensor.DeferredVariableList( actual_penalty_value.variables + actual_constraint_value.variables).list for variable in variables: variable.create(structure_memoizer) def update_ops_fn(): update_ops = [] for variable in variables: update_ops += variable.update_ops(structure_memoizer) return update_ops with self.wrapped_session() as session: # We only need to run the update ops once, since the entire dataset is # contained within the Tensors, so the denominators will be correct. session.run_ops(update_ops_fn) self.assertAllClose( expected_penalty_value, session.run(actual_penalty_value(structure_memoizer)), rtol=0, atol=1e-6) self.assertAllClose( expected_constraint_value, session.run(actual_constraint_value(structure_memoizer)), rtol=0, atol=1e-6) def test_positive_prediction_rate(self): """Checks `positive_prediction_rate`.""" positive_class = 0 penalty_weights = np.zeros((self._penalty_size, self._num_classes)) penalty_weights[:, positive_class] = 1.0 # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum(self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_weights = np.zeros((self._constraint_size, self._num_classes)) constraint_weights[:, positive_class] = 1.0 # For the constraint, the default loss is zero-one. 
constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum(constraint_losses * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) actual_expression = multiclass_rates.positive_prediction_rate( self._split_context, positive_class=positive_class) self._check_rates(expected_penalty_value, expected_constraint_value, actual_expression) def test_negative_prediction_rate(self): """Checks `negative_prediction_rate`.""" positive_class = 2 penalty_weights = np.ones((self._penalty_size, self._num_classes)) penalty_weights[:, positive_class] = 0.0 # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum(self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_weights = np.ones((self._constraint_size, self._num_classes)) constraint_weights[:, positive_class] = 0.0 # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum(constraint_losses * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) actual_expression = multiclass_rates.negative_prediction_rate( self._split_context, positive_class=positive_class) self._check_rates(expected_penalty_value, expected_constraint_value, actual_expression) def test_error_rate(self): """Checks `error_rate`.""" penalty_weights = np.ones((self._penalty_size, self._num_classes)) for ii in xrange(self._penalty_size): penalty_weights[ii, self._penalty_labels[ii]] = 0.0 # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum(self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_weights = np.ones((self._constraint_size, self._num_classes)) for ii in xrange(self._constraint_size): constraint_weights[ii, self._constraint_labels[ii]] = 0.0 # For the constraint, the default loss is zero-one. 
constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum(constraint_losses * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) actual_expression = multiclass_rates.error_rate(self._split_context) self._check_rates(expected_penalty_value, expected_constraint_value, actual_expression) def test_accuracy_rate(self): """Checks `accuracy_rate`.""" penalty_weights = np.zeros((self._penalty_size, self._num_classes)) for ii in xrange(self._penalty_size): penalty_weights[ii, self._penalty_labels[ii]] = 1.0 # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum(self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_weights = np.zeros((self._constraint_size, self._num_classes)) for ii in xrange(self._constraint_size): constraint_weights[ii, self._constraint_labels[ii]] = 1.0 # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum(constraint_losses * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) actual_expression = multiclass_rates.accuracy_rate(self._split_context) self._check_rates(expected_penalty_value, expected_constraint_value, actual_expression) def test_true_positive_rate(self): """Checks `true_positive_rate`.""" positive_class = [True, False, False, True] class_weights = np.array(positive_class, dtype=np.float32) penalty_label_weights = np.zeros(self._penalty_size) for ii in xrange(self._penalty_size): if positive_class[self._penalty_labels[ii]]: penalty_label_weights[ii] = 1.0 penalty_weights = np.tensordot(penalty_label_weights, class_weights, axes=0) # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum( penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_label_weights = np.zeros(self._constraint_size) for ii in xrange(self._constraint_size): if positive_class[self._constraint_labels[ii]]: constraint_label_weights[ii] = 1.0 constraint_weights = np.tensordot( constraint_label_weights, class_weights, axes=0) # For the constraint, the default loss is zero-one. 
constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum( constraint_losses * constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) actual_expression = multiclass_rates.true_positive_rate( self._split_context, positive_class) self._check_rates(expected_penalty_value, expected_constraint_value, actual_expression) def test_false_negative_rate(self): """Checks `false_negative_rate`.""" positive_class = [True, False, True, True] class_weights = 1.0 - np.array(positive_class, dtype=np.float32) penalty_label_weights = np.zeros(self._penalty_size) for ii in xrange(self._penalty_size): if positive_class[self._penalty_labels[ii]]: penalty_label_weights[ii] = 1.0 penalty_weights = np.tensordot(penalty_label_weights, class_weights, axes=0) # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum( penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_label_weights = np.zeros(self._constraint_size) for ii in xrange(self._constraint_size): if positive_class[self._constraint_labels[ii]]: constraint_label_weights[ii] = 1.0 constraint_weights = np.tensordot( constraint_label_weights, class_weights, axes=0) # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum( constraint_losses * constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) actual_expression = multiclass_rates.false_negative_rate( self._split_context, positive_class) self._check_rates(expected_penalty_value, expected_constraint_value, actual_expression) def test_false_positive_rate(self): """Checks `false_positive_rate`.""" positive_class = [False, False, False, True] class_weights = np.array(positive_class, dtype=np.float32) penalty_label_weights = np.ones(self._penalty_size) for ii in xrange(self._penalty_size): if positive_class[self._penalty_labels[ii]]: penalty_label_weights[ii] = 0.0 penalty_weights = np.tensordot(penalty_label_weights, class_weights, axes=0) # For the penalty, the default loss is hinge. 
penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum( penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_label_weights = np.ones(self._constraint_size) for ii in xrange(self._constraint_size): if positive_class[self._constraint_labels[ii]]: constraint_label_weights[ii] = 0.0 constraint_weights = np.tensordot( constraint_label_weights, class_weights, axes=0) # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum( constraint_losses * constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) actual_expression = multiclass_rates.false_positive_rate( self._split_context, positive_class) self._check_rates(expected_penalty_value, expected_constraint_value, actual_expression) def test_true_negative_rate(self): """Checks `true_negative_rate`.""" positive_class = [False, False, True, True] class_weights = 1.0 - np.array(positive_class, dtype=np.float32) penalty_label_weights = np.ones(self._penalty_size) for ii in xrange(self._penalty_size): if positive_class[self._penalty_labels[ii]]: penalty_label_weights[ii] = 0.0 penalty_weights = np.tensordot(penalty_label_weights, class_weights, axes=0) # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum( penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_label_weights = np.ones(self._constraint_size) for ii in xrange(self._constraint_size): if positive_class[self._constraint_labels[ii]]: constraint_label_weights[ii] = 0.0 constraint_weights = np.tensordot( constraint_label_weights, class_weights, axes=0) # For the constraint, the default loss is zero-one. 
constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum( constraint_losses * constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) actual_expression = multiclass_rates.true_negative_rate( self._split_context, positive_class) self._check_rates(expected_penalty_value, expected_constraint_value, actual_expression) def test_true_positive_proportion(self): """Checks `true_positive_proportion`.""" positive_class = [True, False, False, True] class_weights = np.array(positive_class, dtype=np.float32) penalty_label_weights = np.zeros(self._penalty_size) for ii in xrange(self._penalty_size): if positive_class[self._penalty_labels[ii]]: penalty_label_weights[ii] = 1.0 penalty_weights = np.tensordot(penalty_label_weights, class_weights, axes=0) # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum(self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_label_weights = np.zeros(self._constraint_size) for ii in xrange(self._constraint_size): if positive_class[self._constraint_labels[ii]]: constraint_label_weights[ii] = 1.0 constraint_weights = np.tensordot( constraint_label_weights, class_weights, axes=0) # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum( constraint_losses * constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) actual_expression = multiclass_rates.true_positive_proportion( self._split_context, positive_class) self._check_rates(expected_penalty_value, expected_constraint_value, actual_expression) def test_false_negative_proportion(self): """Checks `false_negative_proportion`.""" positive_class = [True, False, True, True] class_weights = 1.0 - np.array(positive_class, dtype=np.float32) penalty_label_weights = np.zeros(self._penalty_size) for ii in xrange(self._penalty_size): if positive_class[self._penalty_labels[ii]]: penalty_label_weights[ii] = 1.0 penalty_weights = np.tensordot(penalty_label_weights, class_weights, axes=0) # For the penalty, the default loss is hinge. 
penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum(self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_label_weights = np.zeros(self._constraint_size) for ii in xrange(self._constraint_size): if positive_class[self._constraint_labels[ii]]: constraint_label_weights[ii] = 1.0 constraint_weights = np.tensordot( constraint_label_weights, class_weights, axes=0) # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum( constraint_losses * constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) actual_expression = multiclass_rates.false_negative_proportion( self._split_context, positive_class) self._check_rates(expected_penalty_value, expected_constraint_value, actual_expression) def test_false_positive_proportion(self): """Checks `false_positive_proportion`.""" positive_class = [False, False, False, True] class_weights = np.array(positive_class, dtype=np.float32) penalty_label_weights = np.ones(self._penalty_size) for ii in xrange(self._penalty_size): if positive_class[self._penalty_labels[ii]]: penalty_label_weights[ii] = 0.0 penalty_weights = np.tensordot(penalty_label_weights, class_weights, axes=0) # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum(self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_label_weights = np.ones(self._constraint_size) for ii in xrange(self._constraint_size): if positive_class[self._constraint_labels[ii]]: constraint_label_weights[ii] = 0.0 constraint_weights = np.tensordot( constraint_label_weights, class_weights, axes=0) # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum( constraint_losses * constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) actual_expression = multiclass_rates.false_positive_proportion( self._split_context, positive_class) self._check_rates(expected_penalty_value, expected_constraint_value, actual_expression) def test_true_negative_proportion(self): """Checks `true_negative_proportion`.""" positive_class = [False, False, True, True] class_weights = 1.0 - np.array(positive_class, dtype=np.float32) penalty_label_weights = np.ones(self._penalty_size) for ii in xrange(self._penalty_size): if positive_class[self._penalty_labels[ii]]: penalty_label_weights[ii] = 0.0 penalty_weights = np.tensordot(penalty_label_weights, class_weights, axes=0) # For the penalty, the default loss is hinge. 
penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum(self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_label_weights = np.ones(self._constraint_size) for ii in xrange(self._constraint_size): if positive_class[self._constraint_labels[ii]]: constraint_label_weights[ii] = 0.0 constraint_weights = np.tensordot( constraint_label_weights, class_weights, axes=0) # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum( constraint_losses * constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) actual_expression = multiclass_rates.true_negative_proportion( self._split_context, positive_class) self._check_rates(expected_penalty_value, expected_constraint_value, actual_expression) def test_precision_ratio(self): """Checks `precision_ratio`.""" positive_class = [False, True, True, False] class_weights = np.array(positive_class, dtype=np.float32) actual_numerator_expression, actual_denominator_expression = ( multiclass_rates.precision_ratio(self._split_context, positive_class)) # First check the numerator of the precision (which is a rate that itself # has a numerator and denominator). penalty_label_weights = np.zeros(self._penalty_size) for ii in xrange(self._penalty_size): if positive_class[self._penalty_labels[ii]]: penalty_label_weights[ii] = 1.0 penalty_weights = np.tensordot(penalty_label_weights, class_weights, axes=0) # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum(self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_label_weights = np.zeros(self._constraint_size) for ii in xrange(self._constraint_size): if positive_class[self._constraint_labels[ii]]: constraint_label_weights[ii] = 1.0 constraint_weights = np.tensordot( constraint_label_weights, class_weights, axes=0) # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum( constraint_losses * constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) self._check_rates(expected_penalty_value, expected_constraint_value, actual_numerator_expression) # Next check the denominator of the precision (which is a rate that itself # has a numerator and denominator, although this "inner" denominator is the # same as above). penalty_weights = np.tile(class_weights, (self._penalty_size, 1)) # For the penalty, the default loss is hinge. 
penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum(penalty_losses * self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_weights = np.tile(class_weights, (self._constraint_size, 1)) # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum(constraint_losses * self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) self._check_rates(expected_penalty_value, expected_constraint_value, actual_denominator_expression) def test_f_score_ratio(self): """Checks `f_score_ratio`.""" positive_class = [True, False, True, False] class_weights = np.array(positive_class, dtype=np.float32) # We check the most common choices for the beta parameter to the F-score. for beta in [0.0, 0.5, 1.0, 2.0]: actual_numerator_expression, actual_denominator_expression = ( multiclass_rates.f_score_ratio(self._split_context, positive_class, beta)) # First check the numerator of the F-score (which is a rate that itself # has a numerator and denominator). penalty_label_weights = np.zeros(self._penalty_size) for ii in xrange(self._penalty_size): if positive_class[self._penalty_labels[ii]]: penalty_label_weights[ii] = 1.0 penalty_weights = np.tensordot( penalty_label_weights, class_weights, axes=0) # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = (1.0 + beta * beta) * np.sum( penalty_losses * penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_denominator = np.sum(self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_label_weights = np.zeros(self._constraint_size) for ii in xrange(self._constraint_size): if positive_class[self._constraint_labels[ii]]: constraint_label_weights[ii] = 1.0 constraint_weights = np.tensordot( constraint_label_weights, class_weights, axes=0) # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = (1.0 + beta * beta) * np.sum( constraint_losses * constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_denominator = np.sum(self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) self._check_rates(expected_penalty_value, expected_constraint_value, actual_numerator_expression) # Next check the denominator of the F-score (which is a rate that itself # has a numerator and denominator, although this "inner" denominator is # the same as above). # For the penalty, the default loss is hinge. Notice that, on the # positively-labeled examples, we have positive predictions weighted as # (1 + beta^2), and negative predictions weighted as beta^2. Internally, # the rate-handling code simplifies this to a beta^2 weight on *all* # positively-labeled examples (independently of the model, so there is no # hinge loss), plus a weight of 1 on true positives. 
The idea here is that # since what we actually want to constrain are rates, we only bound the # quantities that need to be bounded--constants remain as constants. # Hence, we do this: penalty_weights = np.tensordot( penalty_label_weights, class_weights, axes=0) # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator = np.sum( penalty_losses * penalty_label_weights * self._penalty_weights * self._penalty_predicate) expected_penalty_numerator += (beta * beta) * np.sum( penalty_label_weights * self._penalty_weights * self._penalty_predicate) # There is no such issue for the negatively-labeled examples. penalty_weights = np.tensordot( 1.0 - penalty_label_weights, class_weights, axes=0) # For the penalty, the default loss is hinge. penalty_losses = hinge_loss(penalty_weights, self._penalty_predictions) expected_penalty_numerator += np.sum( penalty_losses * (1.0 - penalty_label_weights) * self._penalty_weights * self._penalty_predicate) expected_penalty_value = ( expected_penalty_numerator / expected_penalty_denominator) constraint_weights = np.tensordot( constraint_label_weights, class_weights, axes=0) # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator = np.sum( constraint_losses * constraint_label_weights * self._constraint_weights * self._constraint_predicate) expected_constraint_numerator += (beta * beta) * np.sum( constraint_label_weights * self._constraint_weights * self._constraint_predicate) constraint_weights = np.tensordot( 1.0 - constraint_label_weights, class_weights, axes=0) # For the constraint, the default loss is zero-one. constraint_losses = zero_one_loss(constraint_weights, self._constraint_predictions) expected_constraint_numerator += np.sum( constraint_losses * (1.0 - constraint_label_weights) * self._constraint_weights * self._constraint_predicate) expected_constraint_value = ( expected_constraint_numerator / expected_constraint_denominator) self._check_rates(expected_penalty_value, expected_constraint_value, actual_denominator_expression) if __name__ == "__main__": tf.test.main()
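Every test above reduces to the same arithmetic: a loss-weighted, predicate-masked numerator divided by a weight-masked denominator. Below is a minimal, self-contained NumPy sketch of that pattern; the zero_one_loss helper here is an assumption standing in for the (unshown) helper the tests import.

import numpy as np

def zero_one_loss(weights, predictions):
    # Assumed stand-in: pick each row's argmax class and read off that
    # class's weight, so the zero-one "loss" of predicting class j on
    # example i is weights[i, j].
    argmax = np.argmax(predictions, axis=1)
    return weights[np.arange(len(argmax)), argmax]

def expected_rate(losses, example_weights, predicate):
    # Rate = predicate-masked, weighted loss sum over the matching weight sum.
    return (np.sum(losses * example_weights * predicate)
            / np.sum(example_weights * predicate))

predictions = np.array([[2.0, 0.1, -1.0],
                        [0.0, 1.0, 0.5],
                        [-0.5, 0.2, 0.9],
                        [1.0, -1.0, 0.0]])
class_weights = np.zeros((4, 3))
class_weights[:, 0] = 1.0  # weight class 0, as in test_positive_prediction_rate
losses = zero_one_loss(class_weights, predictions)
print(expected_rate(losses, np.ones(4), np.ones(4)))  # 0.5: half predict class 0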
48.736726
80
0.68723
aceface64d2697ca304b8efc757c4d0e3e7034d8
1,580
py
Python
src/models.py
junkainiu/dinner-helper
94a2fda92d0bcbc8909e985b28950c5059bf708d
[ "MIT" ]
3
2016-05-31T12:14:42.000Z
2016-07-25T02:45:13.000Z
src/models.py
junkainiu/dinner-helper
94a2fda92d0bcbc8909e985b28950c5059bf708d
[ "MIT" ]
3
2016-05-31T12:37:06.000Z
2016-06-20T13:50:20.000Z
src/models.py
junkainiu/dinner-helper
94a2fda92d0bcbc8909e985b28950c5059bf708d
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
import urllib2

from processor import LihuaProcessor


class HtmlCollector(object):
    pass


class LihuaCollector(object):
    URL = "http://www.lihua.com"

    def connect(self):
        header = self.get_header()
        req = urllib2.Request(self.URL, headers=header)
        con = urllib2.urlopen(req)
        doc = con.read()
        result = LihuaProcessor(doc)
        con.close()
        # The processed result was previously computed and then dropped;
        # return it so callers can use it.
        return result

    def get_header(self):
        header = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.8",
            "Connection": "keep-alive",
            "Content-Length": 0,
            "Cookie": "PHPSESSID=e386gmipdk3p6k7ltq7hqkg2u6; lihua_home__user_inv=%E4%B8%8A%E6%B5%B7%E5%A4%A9%E6%97%A6%E7%BD%91%E7%BB%9C%E7%A7%91%E6%8A%80%E5%8F%91%E5%B1%95%E6%9C%89%E9%99%90%E5%85%AC%E5%8F%B8; lihua_home_lihuashop_cookie_tag=1; lihua_home_lihuashop_cookie_sn=2016060413465216; lihua_home_lihuashop_cookie_amount=23; lihua_home___forward__=%2Findex.php%3Fs%3D%2FHelp%2Findex%2Ftype%2F7.html; cckf_track_112082_AutoInviteNumber=0; cckf_track_112082_ManualInviteNumber=0; CCKF_visitor_id_112082=749323679",
            "Host": "www.lihua.com",
            "Origin": "http://www.lihua.com",
            "Referer": "http://www.lihua.com/",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.63 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest",
        }
        return header
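urllib2 is Python 2 only, so the collector above will not import on Python 3. A hedged sketch of the same fetch using urllib.request; LihuaProcessor and get_header() are assumed from the file above and left in comments.

from urllib.request import Request, urlopen

def fetch(url="http://www.lihua.com", headers=None):
    # Same request flow as LihuaCollector.connect(), with the context
    # manager taking over con.close().
    req = Request(url, headers=headers or {})
    with urlopen(req) as con:
        return con.read()

# Header values must be strings under urllib.request (the original dict has
# an int Content-Length), hence the str() conversion in this assumed usage:
# doc = fetch(headers={k: str(v) for k, v in LihuaCollector().get_header().items()})
# result = LihuaProcessor(doc)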
41.578947
520
0.644304
acefadd8090feec3bb4188b989713cef09b05f35
2,589
py
Python
UE4Parse/Assets/Objects/EPackageFlags.py
MinshuG/pyUE4Parse
96cda8132ff423bd36be20828025c2c1c0a7e406
[ "MIT" ]
13
2021-06-09T09:21:00.000Z
2022-03-30T22:13:24.000Z
UE4Parse/Assets/Objects/EPackageFlags.py
MinshuG/pyUE4Parse
96cda8132ff423bd36be20828025c2c1c0a7e406
[ "MIT" ]
3
2021-09-04T22:23:02.000Z
2022-03-04T09:45:45.000Z
UE4Parse/Assets/Objects/EPackageFlags.py
MinshuG/pyUE4Parse
96cda8132ff423bd36be20828025c2c1c0a7e406
[ "MIT" ]
6
2021-09-02T10:28:21.000Z
2022-03-30T22:13:37.000Z
from enum import IntEnum


class EPackageFlags(IntEnum):
    PKG_None = 0x00000000  # No flags
    PKG_NewlyCreated = 0x00000001  # Newly created package, not saved yet. In editor only.
    PKG_ClientOptional = 0x00000002  # Purely optional for clients.
    PKG_ServerSideOnly = 0x00000004  # Only needed on the server side.
    PKG_CompiledIn = 0x00000010  # This package is from "compiled in" classes.
    PKG_ForDiffing = 0x00000020  # This package was loaded just for the purposes of diffing
    PKG_EditorOnly = 0x00000040  # This is editor-only package (for example: editor module script package)
    PKG_Developer = 0x00000080  # Developer module
    PKG_UncookedOnly = 0x00000100  # Loaded only in uncooked builds (i.e. runtime in editor)
    PKG_Cooked = 0x00000200  # Package is cooked
    PKG_ContainsNoAsset = 0x00000400  # Package doesn't contain any asset object (although asset tags can be present)
    # PKG_Unused = 0x00000800
    # PKG_Unused = 0x00001000
    PKG_UnversionedProperties = 0x00002000  # Uses unversioned property serialization instead of versioned tagged property serialization
    PKG_ContainsMapData = 0x00004000  # Contains map data (UObjects only referenced by a single ULevel) but is stored in a different package
    # PKG_Unused = 0x00008000
    PKG_Compiling = 0x00010000  # package is currently being compiled
    PKG_ContainsMap = 0x00020000  # Set if the package contains a ULevel/ UWorld object
    PKG_RequiresLocalizationGather = 0x00040000  # Set if the package contains any data to be gathered by localization
    # PKG_Unused = 0x00080000
    PKG_PlayInEditor = 0x00100000  # Set if the package was created for the purpose of PIE
    PKG_ContainsScript = 0x00200000  # Package is allowed to contain UClass objects
    PKG_DisallowExport = 0x00400000  # Editor should not export asset in this package
    # PKG_Unused = 0x00800000
    # PKG_Unused = 0x01000000
    # PKG_Unused = 0x02000000
    # PKG_Unused = 0x04000000
    # PKG_Unused = 0x08000000
    PKG_DynamicImports = 0x10000000  # This package should resolve dynamic imports from its export at runtime.
    PKG_RuntimeGenerated = 0x20000000  # This package contains elements that are runtime generated, and may not follow standard loading order rules
    PKG_ReloadingForCooker = 0x40000000  # This package is reloading in the cooker, try to avoid getting data we will never need. We won't save this package.
    PKG_FilterEditorOnly = 0x80000000  # Package has editor-only data filtered out
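Since EPackageFlags is an IntEnum over a bit field, membership checks are bitwise rather than equality. A small usage sketch, assuming the class above is importable:

flags = EPackageFlags.PKG_Cooked | EPackageFlags.PKG_ContainsMap

def has_flag(value, flag):
    # A package "has" a flag when all of that flag's bits are set.
    return (value & flag) == flag

print(has_flag(flags, EPackageFlags.PKG_Cooked))             # True
print(has_flag(flags, EPackageFlags.PKG_FilterEditorOnly))   # False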
69.972973
159
0.737737
acefae16154664ba90053fc37ef65ef806ed30e6
6,036
py
Python
mfem/_par/plinearform.py
tomstitt/PyMFEM
b00199ec0d7a5fba891f656575e91a64d3e35eb5
[ "BSD-3-Clause" ]
null
null
null
mfem/_par/plinearform.py
tomstitt/PyMFEM
b00199ec0d7a5fba891f656575e91a64d3e35eb5
[ "BSD-3-Clause" ]
null
null
null
mfem/_par/plinearform.py
tomstitt/PyMFEM
b00199ec0d7a5fba891f656575e91a64d3e35eb5
[ "BSD-3-Clause" ]
null
null
null
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.

from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
    raise RuntimeError("Python 2.7 or later required")

# Import the low-level C/C++ module
if __package__ or "." in __name__:
    from . import _plinearform
else:
    import _plinearform

try:
    import builtins as __builtin__
except ImportError:
    import __builtin__

_swig_new_instance_method = _plinearform.SWIG_PyInstanceMethod_New
_swig_new_static_method = _plinearform.SWIG_PyStaticMethod_New


def _swig_repr(self):
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)


def _swig_setattr_nondynamic_instance_variable(set):
    def set_instance_attr(self, name, value):
        if name == "thisown":
            self.this.own(value)
        elif name == "this":
            set(self, name, value)
        elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add instance attributes to %s" % self)
    return set_instance_attr


def _swig_setattr_nondynamic_class_variable(set):
    def set_class_attr(cls, name, value):
        if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
            set(cls, name, value)
        else:
            raise AttributeError("You cannot add class attributes to %s" % cls)
    return set_class_attr


def _swig_add_metaclass(metaclass):
    """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
    def wrapper(cls):
        return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
    return wrapper


class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)


import weakref

MFEM_VERSION = _plinearform.MFEM_VERSION
MFEM_VERSION_STRING = _plinearform.MFEM_VERSION_STRING
MFEM_VERSION_TYPE = _plinearform.MFEM_VERSION_TYPE
MFEM_VERSION_TYPE_RELEASE = _plinearform.MFEM_VERSION_TYPE_RELEASE
MFEM_VERSION_TYPE_DEVELOPMENT = _plinearform.MFEM_VERSION_TYPE_DEVELOPMENT
MFEM_VERSION_MAJOR = _plinearform.MFEM_VERSION_MAJOR
MFEM_VERSION_MINOR = _plinearform.MFEM_VERSION_MINOR
MFEM_VERSION_PATCH = _plinearform.MFEM_VERSION_PATCH
MFEM_HYPRE_VERSION = _plinearform.MFEM_HYPRE_VERSION
import mfem._par.linearform
import mfem._par.coefficient
import mfem._par.globals
import mfem._par.array
import mfem._par.mem_manager
import mfem._par.matrix
import mfem._par.vector
import mfem._par.operators
import mfem._par.intrules
import mfem._par.sparsemat
import mfem._par.densemat
import mfem._par.eltrans
import mfem._par.fe
import mfem._par.geom
import mfem._par.mesh
import mfem._par.sort_pairs
import mfem._par.ncmesh
import mfem._par.vtk
import mfem._par.element
import mfem._par.table
import mfem._par.hash
import mfem._par.vertex
import mfem._par.gridfunc
import mfem._par.fespace
import mfem._par.fe_coll
import mfem._par.lininteg
import mfem._par.handle
import mfem._par.hypre
import mfem._par.restriction
import mfem._par.bilininteg
import mfem._par.pfespace
import mfem._par.pmesh
import mfem._par.pncmesh
import mfem._par.communication
import mfem._par.sets
import mfem._par.pgridfunc


class ParLinearForm(mfem._par.linearform.LinearForm):
    r"""Proxy of C++ mfem::ParLinearForm class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        r"""
        __init__(ParLinearForm self) -> ParLinearForm
        __init__(ParLinearForm self, ParFiniteElementSpace pf) -> ParLinearForm
        __init__(ParLinearForm self, ParFiniteElementSpace pf, double * data) -> ParLinearForm
        __init__(ParLinearForm self, ParFiniteElementSpace pf, ParLinearForm plf) -> ParLinearForm
        """
        _plinearform.ParLinearForm_swiginit(self, _plinearform.new_ParLinearForm(*args))

    def ParFESpace(self):
        r"""ParFESpace(ParLinearForm self) -> ParFiniteElementSpace"""
        return _plinearform.ParLinearForm_ParFESpace(self)
    ParFESpace = _swig_new_instance_method(_plinearform.ParLinearForm_ParFESpace)

    def Update(self, *args):
        r"""
        Update(ParLinearForm self, ParFiniteElementSpace pf=None)
        Update(ParLinearForm self, ParFiniteElementSpace pf, Vector v, int v_offset)
        """
        return _plinearform.ParLinearForm_Update(self, *args)
    Update = _swig_new_instance_method(_plinearform.ParLinearForm_Update)

    def MakeRef(self, *args):
        r"""
        MakeRef(ParLinearForm self, FiniteElementSpace f, Vector v, int v_offset)
        MakeRef(ParLinearForm self, ParFiniteElementSpace pf, Vector v, int v_offset)
        """
        return _plinearform.ParLinearForm_MakeRef(self, *args)
    MakeRef = _swig_new_instance_method(_plinearform.ParLinearForm_MakeRef)

    def ParallelAssemble(self, *args):
        r"""
        ParallelAssemble(ParLinearForm self, Vector tv)
        ParallelAssemble(ParLinearForm self) -> HypreParVector
        """
        return _plinearform.ParLinearForm_ParallelAssemble(self, *args)
    ParallelAssemble = _swig_new_instance_method(_plinearform.ParLinearForm_ParallelAssemble)

    def __call__(self, gf):
        r"""__call__(ParLinearForm self, ParGridFunction gf) -> double"""
        return _plinearform.ParLinearForm___call__(self, gf)
    __call__ = _swig_new_instance_method(_plinearform.ParLinearForm___call__)
    __swig_destroy__ = _plinearform.delete_ParLinearForm

# Register ParLinearForm in _plinearform:
_plinearform.ParLinearForm_swigregister(ParLinearForm)
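A hedged usage sketch for the wrapper above. It assumes an MPI-enabled PyMFEM build and a pre-built ParFiniteElementSpace named pfes (not constructed here), so the calls are left as comments; AddDomainIntegrator and DomainLFIntegrator come from the base LinearForm/lininteg API rather than this file.

import mfem.par as mfem

# b = mfem.ParLinearForm(pfes)                 # pfes: assumed ParFiniteElementSpace
# b.AddDomainIntegrator(
#     mfem.DomainLFIntegrator(mfem.ConstantCoefficient(1.0)))
# b.Assemble()
# B = b.ParallelAssemble()                     # -> HypreParVector, per the binding above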
35.715976
118
0.752154
acefae4450671a4f65b6929aa0033efa51e99b1a
8,163
py
Python
onpolicy/envs/gridworld/gym_minigrid/envs/multiroom.py
zoeyuchao/onpolicy-release
c2cb64e59c5b1f21cce022db76c378b396fd480e
[ "MIT" ]
1
2021-07-04T08:08:30.000Z
2021-07-04T08:08:30.000Z
onpolicy/envs/gridworld/gym_minigrid/envs/multiroom.py
zoeyuchao/onpolicy-release
c2cb64e59c5b1f21cce022db76c378b396fd480e
[ "MIT" ]
1
2021-06-11T15:28:11.000Z
2021-06-11T15:28:11.000Z
onpolicy/envs/gridworld/gym_minigrid/envs/multiroom.py
zoeyuchao/onpolicy-release
c2cb64e59c5b1f21cce022db76c378b396fd480e
[ "MIT" ]
1
2021-05-17T02:00:18.000Z
2021-05-17T02:00:18.000Z
from onpolicy.envs.gridworld.gym_minigrid.minigrid import *
from onpolicy.envs.gridworld.gym_minigrid.register import register


class Room:
    def __init__(self, top, size, entryDoorPos, exitDoorPos):
        self.top = top
        self.size = size
        self.entryDoorPos = entryDoorPos
        self.exitDoorPos = exitDoorPos


class MultiRoomEnv(MiniGridEnv):
    """
    Environment with multiple rooms (subgoals)
    """

    def __init__(self, minNumRooms, maxNumRooms, maxRoomSize=10):
        assert minNumRooms > 0
        assert maxNumRooms >= minNumRooms
        assert maxRoomSize >= 4

        self.minNumRooms = minNumRooms
        self.maxNumRooms = maxNumRooms
        self.maxRoomSize = maxRoomSize

        self.rooms = []

        super(MultiRoomEnv, self).__init__(
            grid_size=25,
            max_steps=self.maxNumRooms * 20
        )

    def _gen_grid(self, width, height):
        roomList = []

        # Choose a random number of rooms to generate
        numRooms = self._rand_int(self.minNumRooms, self.maxNumRooms+1)

        while len(roomList) < numRooms:
            curRoomList = []

            entryDoorPos = (
                self._rand_int(0, width - 2),
                self._rand_int(0, width - 2)
            )

            # Recursively place the rooms
            self._placeRoom(
                numRooms,
                roomList=curRoomList,
                minSz=4,
                maxSz=self.maxRoomSize,
                entryDoorWall=2,
                entryDoorPos=entryDoorPos
            )

            if len(curRoomList) > len(roomList):
                roomList = curRoomList

        # Store the list of rooms in this environment
        assert len(roomList) > 0
        self.rooms = roomList

        # Create the grid
        self.grid = Grid(width, height)
        wall = Wall()

        prevDoorColor = None

        # For each room
        for idx, room in enumerate(roomList):

            topX, topY = room.top
            sizeX, sizeY = room.size

            # Draw the top and bottom walls
            for i in range(0, sizeX):
                self.grid.set(topX + i, topY, wall)
                self.grid.set(topX + i, topY + sizeY - 1, wall)

            # Draw the left and right walls
            for j in range(0, sizeY):
                self.grid.set(topX, topY + j, wall)
                self.grid.set(topX + sizeX - 1, topY + j, wall)

            # If this isn't the first room, place the entry door
            if idx > 0:
                # Pick a door color different from the previous one
                doorColors = set(COLOR_NAMES)
                if prevDoorColor:
                    doorColors.remove(prevDoorColor)
                # Note: the use of sorting here guarantees determinism,
                # This is needed because Python's set is not deterministic
                doorColor = self._rand_elem(sorted(doorColors))

                entryDoor = Door(doorColor)
                self.grid.set(*room.entryDoorPos, entryDoor)
                prevDoorColor = doorColor

                prevRoom = roomList[idx-1]
                prevRoom.exitDoorPos = room.entryDoorPos

        # Randomize the starting agent position and direction
        self.place_agent(roomList[0].top, roomList[0].size)

        # Place the final goal in the last room
        self.goal_pos = self.place_obj(Goal(), roomList[-1].top, roomList[-1].size)

        self.mission = 'traverse the rooms to get to the goal'

    def _placeRoom(self, numLeft, roomList, minSz, maxSz, entryDoorWall, entryDoorPos):
        # Choose the room size randomly
        sizeX = self._rand_int(minSz, maxSz+1)
        sizeY = self._rand_int(minSz, maxSz+1)

        # The first room will be at the door position
        if len(roomList) == 0:
            topX, topY = entryDoorPos
        # Entry on the right
        elif entryDoorWall == 0:
            topX = entryDoorPos[0] - sizeX + 1
            y = entryDoorPos[1]
            topY = self._rand_int(y - sizeY + 2, y)
        # Entry wall on the south
        elif entryDoorWall == 1:
            x = entryDoorPos[0]
            topX = self._rand_int(x - sizeX + 2, x)
            topY = entryDoorPos[1] - sizeY + 1
        # Entry wall on the left
        elif entryDoorWall == 2:
            topX = entryDoorPos[0]
            y = entryDoorPos[1]
            topY = self._rand_int(y - sizeY + 2, y)
        # Entry wall on the top
        elif entryDoorWall == 3:
            x = entryDoorPos[0]
            topX = self._rand_int(x - sizeX + 2, x)
            topY = entryDoorPos[1]
        else:
            assert False, entryDoorWall

        # If the room is out of the grid, can't place a room here
        if topX < 0 or topY < 0:
            return False
        if topX + sizeX > self.width or topY + sizeY >= self.height:
            return False

        # If the room intersects with previous rooms, can't place it here
        for room in roomList[:-1]:
            nonOverlap = \
                topX + sizeX < room.top[0] or \
                room.top[0] + room.size[0] <= topX or \
                topY + sizeY < room.top[1] or \
                room.top[1] + room.size[1] <= topY

            if not nonOverlap:
                return False

        # Add this room to the list
        roomList.append(Room(
            (topX, topY),
            (sizeX, sizeY),
            entryDoorPos,
            None
        ))

        # If this was the last room, stop
        if numLeft == 1:
            return True

        # Try placing the next room
        for i in range(0, 8):

            # Pick which wall to place the out door on
            wallSet = set((0, 1, 2, 3))
            wallSet.remove(entryDoorWall)
            exitDoorWall = self._rand_elem(sorted(wallSet))
            nextEntryWall = (exitDoorWall + 2) % 4

            # Pick the exit door position
            # Exit on right wall
            if exitDoorWall == 0:
                exitDoorPos = (
                    topX + sizeX - 1,
                    topY + self._rand_int(1, sizeY - 1)
                )
            # Exit on south wall
            elif exitDoorWall == 1:
                exitDoorPos = (
                    topX + self._rand_int(1, sizeX - 1),
                    topY + sizeY - 1
                )
            # Exit on left wall
            elif exitDoorWall == 2:
                exitDoorPos = (
                    topX,
                    topY + self._rand_int(1, sizeY - 1)
                )
            # Exit on north wall
            elif exitDoorWall == 3:
                exitDoorPos = (
                    topX + self._rand_int(1, sizeX - 1),
                    topY
                )
            else:
                assert False

            # Recursively create the other rooms
            success = self._placeRoom(
                numLeft - 1,
                roomList=roomList,
                minSz=minSz,
                maxSz=maxSz,
                entryDoorWall=nextEntryWall,
                entryDoorPos=exitDoorPos
            )

            if success:
                break

        return True


class MultiRoomEnvN2S4(MultiRoomEnv):
    def __init__(self):
        super().__init__(
            minNumRooms=2,
            maxNumRooms=2,
            maxRoomSize=4
        )


class MultiRoomEnvN4S5(MultiRoomEnv):
    def __init__(self):
        super().__init__(
            minNumRooms=4,
            maxNumRooms=4,
            maxRoomSize=5
        )


class MultiRoomEnvN6(MultiRoomEnv):
    def __init__(self):
        super().__init__(
            minNumRooms=6,
            maxNumRooms=6
        )


register(
    id='MiniGrid-MultiRoom-N2-S4-v0',
    entry_point='onpolicy.envs.gridworld.gym_minigrid.envs:MultiRoomEnvN2S4'
)

register(
    id='MiniGrid-MultiRoom-N4-S5-v0',
    entry_point='onpolicy.envs.gridworld.gym_minigrid.envs:MultiRoomEnvN4S5'
)

register(
    id='MiniGrid-MultiRoom-N6-v0',
    entry_point='onpolicy.envs.gridworld.gym_minigrid.envs:MultiRoomEnvN6'
)
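A hedged smoke-test sketch for one of the ids registered above, assuming the repo's register() helper wires them into gym's registry and that the classic four-tuple gym step API applies:

import gym
import onpolicy.envs.gridworld.gym_minigrid.envs  # triggers the register() calls

env = gym.make('MiniGrid-MultiRoom-N2-S4-v0')
obs = env.reset()
for _ in range(10):
    # Random-action rollout; done flips when the goal is reached or
    # max_steps (maxNumRooms * 20) runs out.
    obs, reward, done, info = env.step(env.action_space.sample())
    if done:
        obs = env.reset()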
29.576087
83
0.521867
acefaed88e42d459421b87fbe1620a8b9fa07cdd
2,827
py
Python
pyonepassword/py_op_exceptions.py
acabey-klaviyo/pyonepassword
609e2fab912884dd8ba40dc7391e8d07b35eb82c
[ "MIT" ]
null
null
null
pyonepassword/py_op_exceptions.py
acabey-klaviyo/pyonepassword
609e2fab912884dd8ba40dc7391e8d07b35eb82c
[ "MIT" ]
null
null
null
pyonepassword/py_op_exceptions.py
acabey-klaviyo/pyonepassword
609e2fab912884dd8ba40dc7391e8d07b35eb82c
[ "MIT" ]
null
null
null
""" Various exception classes raised by ponepassword API TODO: Move other exception classes here """ from abc import ABCMeta, abstractmethod from ._py_op_deprecation import deprecated class _OPAbstractException(Exception, metaclass=ABCMeta): @abstractmethod def __init__(self, msg): super().__init__(msg) class OPCmdFailedException(_OPAbstractException): """ Generic Exception class for when an `op` command fails. Description: Raised from subprocess call-site when `op` executable returns non-zero Caller should handle this exception and raise a more descriptive exception reflecting the action that failed: Example: try: self._run(argv, capture_stdout=True, input_string=password) except OPCmdFailedException as ocfe: raise OPSigninException.from_opexception(ocfe) from ocfe """ MSG = "'op' command failed" def __init__(self, stderr_out, returncode): super().__init__(self.MSG) self.err_output = stderr_out self.returncode = returncode @classmethod def from_opexception(cls, ope): return cls(ope.err_output, ope.returncode) class OPSigninException(OPCmdFailedException): MSG = "1Password sign-in failed." class OPSignoutException(OPCmdFailedException): MSG = "1Password signout failed." class OPForgetException(OPCmdFailedException): MSG = "1Password forget failed." # Keep this exception class around for a bit # so any code handling this exception instead of OPGetItemException # can still work @deprecated("handle OPGetItemException instead") class OPLookupException(OPCmdFailedException): MSG = "1Password lookup failed." # For now have this class extend OPLookupException # so code can handle that exception or this one # TODO: remove OPLookupException, have this class extend # _OPAbstractException class OPGetItemException(OPCmdFailedException): MSG = "1Password 'get item' failed." class OPGetDocumentException(OPCmdFailedException): MSG = "1Password 'get document' failed." class OPGetUserException(OPCmdFailedException): MSG = "1Password 'get user' failed." class OPGetVaultException(OPCmdFailedException): MSG = "1Password 'get vault' failed." class OPGetGroupException(OPCmdFailedException): MSG = "1Password 'get group' failed." class OPListEventsException(OPCmdFailedException): MSG = "1Passworm 'list events' failed." class OPInvalidDocumentException(_OPAbstractException): def __init__(self, msg): super().__init__(msg) class OPNotFoundException(Exception): MSG = "1Password cli command not found at path: %s" def __init__(self, op_path, errno): msg = self.MSG % op_path self.errno = errno super().__init__(msg) class OPConfigNotFoundException(Exception): pass
25.468468
89
0.738592
acefaf27f02bbc0689446167c1185407a4b37255
31,394
py
Python
LH_RCNN.py
dongyyyyy/Object-Detection-API-Tensorflow
9541bcb7750c0c20c9ade25544c38398cc8aed45
[ "MIT" ]
null
null
null
LH_RCNN.py
dongyyyyy/Object-Detection-API-Tensorflow
9541bcb7750c0c20c9ade25544c38398cc8aed45
[ "MIT" ]
null
null
null
LH_RCNN.py
dongyyyyy/Object-Detection-API-Tensorflow
9541bcb7750c0c20c9ade25544c38398cc8aed45
[ "MIT" ]
1
2020-09-14T12:50:38.000Z
2020-09-14T12:50:38.000Z
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import sys import os import numpy as np class LHRCNN: def __init__(self, config, data_provider): assert config['mode'] in ['train', 'test'] assert config['data_format'] in ['channels_first', 'channels_last'] self.config = config self.data_provider = data_provider self.data_shape = config['data_shape'] self.num_classes = config['num_classes'] + 1 self.weight_decay = config['weight_decay'] self.prob = 1. - config['keep_prob'] self.data_format = config['data_format'] self.mode = config['mode'] self.batch_size = config['batch_size'] if config['mode'] == 'train' else 1 self.nms_score_threshold = config['nms_score_threshold'] self.nms_max_boxes = config['nms_max_boxes'] self.nms_iou_threshold = config['nms_iou_threshold'] self.rpn_first_step = config['rpn_first_step'] self.rcnn_first_step = config['rcnn_first_step'] self.rpn_second_step = config['rpn_second_step'] self.post_nms_proposal = config['post_nms_proposal'] self.anchor_scales = [32, 64, 128, 256, 512] self.anchor_ratios = [0.5, 1.0, 2.0] self.num_anchors = len(self.anchor_scales) * len(self.anchor_ratios) if self.data_format == 'channels_first': self.h, self.w = float(self.data_shape[1]-1), float(self.data_shape[2]-1) else: self.h, self.w = float(self.data_shape[0]-1), float(self.data_shape[1]-1) if self.mode == 'train': self.num_train = data_provider['num_train'] self.num_val = data_provider['num_val'] self.train_generator = data_provider['train_generator'] self.train_initializer, self.train_iterator = self.train_generator if data_provider['val_generator'] is not None: self.val_generator = data_provider['val_generator'] self.val_initializer, self.val_iterator = self.val_generator self.global_step = tf.get_variable(name='global_step', initializer=tf.constant(0), trainable=False) self.is_training = True self._define_inputs() self._build_graph() self._create_saver() if self.mode == 'train': self._create_summary() self._init_session() def _define_inputs(self): shape = [self.batch_size] shape.extend(self.data_shape) if self.mode == 'train': self.images, self.ground_truth = self.train_iterator.get_next() self.images.set_shape(shape) self.images = self.images / 127.5 -1. else: self.images = tf.placeholder(tf.float32, shape, name='images') self.images = self.images / 127.5 -1. self.ground_truth = tf.placeholder(tf.float32, [self.batch_size, None, 5], name='labels') self.lr = tf.placeholder(dtype=tf.float32, shape=[], name='lr') def _build_graph(self): with tf.variable_scope('feature_extractor'): c4_feat, stride = self._feature_extractor(self.images) with tf.variable_scope('rpn'): rpn_conv = self._conv_layer(c4_feat, 256, 3, 1, 'rpn_conv', activation=tf.nn.relu) rpn_conf = self._conv_layer(rpn_conv, self.num_anchors*2, 3, 1, 'rpn_conf') rpn_pbbox = self._conv_layer(rpn_conv, self.num_anchors*4, 3, 1, 'rpn_pbbox') if self.data_format == 'channels_first': rpn_conf = tf.transpose(rpn_conf, [0, 2, 3, 1]) rpn_pbbox = tf.transpose(rpn_pbbox, [0, 2, 3, 1]) pshape = tf.shape(rpn_conf) rpn_pbbox_yx, rpn_pbbox_hw, rpn_pconf = self._get_rpn_pbbox(rpn_conf, rpn_pbbox) abbox_y1x1, abbox_y2x2, abbox_yx, abbox_hw = self._get_abbox(pshape, stride) min_mask = tf.cast(abbox_y1x1[:, 0] >= 0., tf.float32) * tf.cast(abbox_y1x1[:, 1] >= 0., tf.float32) max_mask = tf.cast(abbox_y2x2[:, 0] <= self.h-1, tf.float32) * tf.cast(abbox_y2x2[:, 1] <= self.w-1, tf.float32) mask = (min_mask * max_mask) > 0. 
abbox_y1x1 = tf.boolean_mask(abbox_y1x1, mask) abbox_y2x2 = tf.boolean_mask(abbox_y2x2, mask) abbox_yx = tf.boolean_mask(abbox_yx, mask) abbox_hw = tf.boolean_mask(abbox_hw, mask) rpn_pbbox_yx = tf.boolean_mask(rpn_pbbox_yx, mask, axis=1) rpn_pbbox_hw = tf.boolean_mask(rpn_pbbox_hw, mask, axis=1) rpn_pconf = tf.boolean_mask(rpn_pconf, mask, axis=1) with tf.variable_scope('rcnn'): state5_conv1_1 = self._separable_conv_layer(c4_feat, 256, [1, 15], 1, 'state5_conv1_1', activation=tf.nn.relu) state5_conv1_2 = self._separable_conv_layer(state5_conv1_1, 490, [15, 1], 1, 'state5_conv1_2', activation=tf.nn.relu) state5_conv2_1 = self._separable_conv_layer(c4_feat, 256, [1, 15], 1, 'state5_conv2_1', activation=tf.nn.relu) state5_conv2_2 = self._separable_conv_layer(state5_conv2_1, 490, [15, 1], 1, 'state5_conv2_2', activation=tf.nn.relu) rcnn_feat = state5_conv1_2 + state5_conv2_2 if self.mode == 'train': rpn_loss = [] pos_proposal = [] pos_rcnn_label = [] rcnn_truth_pbbox = [] neg_proposal = [] pos_box_ind = [] neg_box_ind = [] for i in range(self.batch_size): rpn_loss_, pos_proposal_, pos_rcnn_label_, rcnn_truth_pbbox_, neg_proposal_ = self._compute_one_image_loss( rpn_pbbox_yx[i, ...], rpn_pbbox_hw[i, ...], abbox_y1x1, abbox_y2x2, abbox_yx, abbox_hw, rpn_pconf[i, ...], self.ground_truth[i, ...] ) pos_box_ind_ = tf.zeros_like(pos_rcnn_label_, dtype=tf.int32) + i neg_box_ind_ = tf.zeros_like(neg_proposal_[:, 0], dtype=tf.int32) + i rpn_loss.append(rpn_loss_) pos_proposal.append(pos_proposal_) pos_rcnn_label.append(pos_rcnn_label_) rcnn_truth_pbbox.append(rcnn_truth_pbbox_) neg_proposal.append(neg_proposal_) pos_box_ind.append(pos_box_ind_) neg_box_ind.append(neg_box_ind_) rpn_loss = tf.reduce_mean(rpn_loss) pos_proposal = tf.concat(pos_proposal, axis=0) pos_rcnn_label = tf.concat(pos_rcnn_label, axis=0) rcnn_truth_pbbox = tf.concat(rcnn_truth_pbbox, axis=0) neg_proposal = tf.concat(neg_proposal, axis=0) pos_box_ind = tf.concat(pos_box_ind, axis=0) neg_box_ind = tf.concat(neg_box_ind, axis=0) else: proposal_yx = rpn_pbbox_yx[0, ...] * abbox_hw + abbox_yx proposal_hw = tf.exp(rpn_pbbox_hw[0, ...]) * abbox_hw proposal = tf.concat([proposal_yx-proposal_hw/2., proposal_yx+proposal_hw/2.], axis=-1) proposal_conf = tf.nn.softmax(rpn_pconf[0, ...]) if self.mode == 'train': pos_proposal = tf.maximum(pos_proposal, [0., 0., 0., 0.]) pos_proposal = tf.minimum(pos_proposal, [self.h, self.w, self.h, self.w]) neg_proposal = tf.maximum(neg_proposal, [0., 0., 0., 0.]) neg_proposal = tf.minimum(neg_proposal, [self.h, self.w, self.h, self.w]) norm_factor = [self.h, self.w, self.h, self.w] pos_roi_feat = tf.image.crop_and_resize(rcnn_feat, pos_proposal/norm_factor, pos_box_ind, [7, 7]) pos_roi_feat = tf.layers.flatten(pos_roi_feat) neg_roi_feat = tf.image.crop_and_resize(rcnn_feat, neg_proposal/norm_factor, neg_box_ind, [7, 7]) neg_rcnn_label = tf.constant([self.num_classes-1]) neg_rcnn_label = tf.tile(neg_rcnn_label, [tf.shape(neg_roi_feat)[0]]) neg_roi_feat = tf.layers.flatten(neg_roi_feat) roi_feat = tf.concat([pos_roi_feat, neg_roi_feat], axis=0) rcnn_label = tf.concat([pos_rcnn_label, neg_rcnn_label], axis=0) num_pos = tf.shape(pos_rcnn_label)[0] else: proposal = tf.maximum(proposal, [0., 0., 0., 0.]) proposal = tf.minimum(proposal, [self.h, self.w, self.h, self.w]) selected_indices = tf.image.non_max_suppression( proposal, proposal_conf[:, 0], self.post_nms_proposal, iou_threshold=0.7 ) proposal = tf.gather(proposal, selected_indices) proposal_yx = proposal[..., 0:2] / 2. + proposal[..., 2:4] / 2. 
proposal_hw = proposal[..., 2:4] - proposal[..., 0:2] box_ind = tf.zeros_like(selected_indices, dtype=tf.int32) norm_factor = [self.h, self.w, self.h, self.w] roi_feat = tf.image.crop_and_resize(rcnn_feat, proposal/norm_factor, box_ind, [7, 7]) roi_feat = tf.layers.flatten(roi_feat) roi_feat = tf.layers.dense(roi_feat, 2048, name='roi_feat_dense', activation=tf.nn.relu) rcnn_pconf = tf.layers.dense(roi_feat, self.num_classes, name='rcnn_pconf') rcnn_pbbox = tf.layers.dense(roi_feat, 4, name='rcnn_pbbox') if self.mode == 'train': rcnn_conf_loss = tf.losses.sparse_softmax_cross_entropy(rcnn_label, rcnn_pconf, reduction=tf.losses.Reduction.MEAN) pos_rcnn_pbbox_loss = self._smooth_l1_loss(tf.gather(rcnn_pbbox, tf.range(num_pos, dtype=tf.int32)) - rcnn_truth_pbbox) pos_rcnn_pbbox_loss = tf.reduce_mean(tf.reduce_sum(pos_rcnn_pbbox_loss, axis=-1)) rcnn_loss = rcnn_conf_loss + pos_rcnn_pbbox_loss optimizer = tf.train.MomentumOptimizer(learning_rate=self.lr, momentum=.9) rpn_loss = rpn_loss + self.weight_decay * tf.add_n( [tf.nn.l2_loss(var) for var in tf.trainable_variables('feature_extractor')] ) + self.weight_decay * tf.add_n( [tf.nn.l2_loss(var) for var in tf.trainable_variables('rpn')] ) rcnn_loss = rcnn_loss + + self.weight_decay * tf.add_n( [tf.nn.l2_loss(var) for var in tf.trainable_variables('rcnn')] ) rpn_vars = tf.trainable_variables('feature_extractor') + tf.trainable_variables('rpn') rpn_grads_and_vars = optimizer.compute_gradients(rpn_loss, rpn_vars) train_rpn_op = optimizer.apply_gradients(rpn_grads_and_vars) rcnn_vars = tf.trainable_variables('rcnn') rcnn_grads_and_vars = optimizer.compute_gradients(rcnn_loss, rcnn_vars) train_rcnn_op = optimizer.apply_gradients(rcnn_grads_and_vars, global_step=self.global_step) train_op = tf.case([(tf.less(self.global_step, self.rpn_first_step), lambda: train_rpn_op), (tf.less(self.global_step, self.rcnn_first_step), lambda: train_rcnn_op), (tf.less(self.global_step, self.rpn_second_step), lambda: train_rpn_op)], default=lambda: train_rcnn_op, exclusive=False) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) self.train_op = tf.group([update_ops, train_op]) self.loss = tf.case([(tf.less(self.global_step, self.rpn_first_step), lambda: rpn_loss), (tf.less(self.global_step, self.rcnn_first_step), lambda: rcnn_loss), (tf.less(self.global_step, self.rpn_second_step), lambda: rpn_loss)], default=lambda: rcnn_loss, exclusive=False) else: rcnn_pbbox_yxt = rcnn_pbbox[..., 0:2] rcnn_pbbox_hwt = rcnn_pbbox[..., 2:4] proposal_yxt = proposal_yx proposal_hwt = proposal_hw confidence = tf.nn.softmax(rcnn_pconf) class_id = tf.argmax(confidence, axis=-1) conf_mask = tf.less(class_id, self.num_classes-1) rcnn_pbbox_yxt = tf.boolean_mask(rcnn_pbbox_yxt, conf_mask) rcnn_pbbox_hwt = tf.boolean_mask(rcnn_pbbox_hwt, conf_mask) confidence = tf.boolean_mask(confidence, conf_mask) proposal_yxt = tf.boolean_mask(proposal_yxt, conf_mask) proposal_hwt = tf.boolean_mask(proposal_hwt, conf_mask) dpbbox_yxt = rcnn_pbbox_yxt * proposal_hwt + proposal_yxt dpbbox_hwt = proposal_hwt * tf.exp(rcnn_pbbox_hwt) dpbbox_y1x1 = dpbbox_yxt - dpbbox_hwt / 2. dpbbox_y2x2 = dpbbox_yxt + dpbbox_hwt / 2. 
dpbbox_y1x1y2x2 = tf.concat([dpbbox_y1x1, dpbbox_y2x2], axis=-1) filter_mask = tf.greater_equal(confidence, self.nms_score_threshold) scores = [] class_id = [] bbox = [] for i in range(self.num_classes-1): scoresi = tf.boolean_mask(confidence[:, i], filter_mask[:, i]) bboxi = tf.boolean_mask(dpbbox_y1x1y2x2, filter_mask[:, i]) selected_indices = tf.image.non_max_suppression( bboxi, scoresi, self.nms_max_boxes, self.nms_iou_threshold, ) scores.append(tf.gather(scoresi, selected_indices)) bbox.append(tf.gather(bboxi, selected_indices)) class_id.append(tf.ones_like(tf.gather(scoresi, selected_indices), tf.int32) * i) bbox = tf.concat(bbox, axis=0) scores = tf.concat(scores, axis=0) class_id = tf.concat(class_id, axis=0) self.detection_pred = [scores, bbox, class_id] def _feature_extractor(self, images): with tf.variable_scope('stage1'): conv1 = self._conv_layer(images, 24, 3, 2, 'conv1', activation=tf.nn.relu) pool1 = self._max_pooling(conv1, 3, 2, 'pool1') with tf.variable_scope('stage2'): stage2_sconv1 = self._conv_layer(pool1, 144, 3, 2, 'stage2_sconv1', activation=tf.nn.relu) stage2_sconv2 = self._separable_conv_layer(stage2_sconv1, 144, 3, 1, 'stage2_sconv2', activation=tf.nn.relu) stage2_sconv3 = self._separable_conv_layer(stage2_sconv2, 144, 3, 1, 'stage2_sconv3', activation=tf.nn.relu) stage2_sconv4 = self._separable_conv_layer(stage2_sconv3, 144, 3, 1, 'stage2_sconv4', activation=tf.nn.relu) with tf.variable_scope('stage3'): stage3_sconv1 = self._conv_layer(stage2_sconv4, 288, 3, 2, 'stage3_sconv1', activation=tf.nn.relu) stage3_sconv2 = self._separable_conv_layer(stage3_sconv1, 288, 3, 1, 'stage3_sconv2', activation=tf.nn.relu) stage3_sconv3 = self._separable_conv_layer(stage3_sconv2, 288, 3, 1, 'stage3_sconv3', activation=tf.nn.relu) stage3_sconv4 = self._separable_conv_layer(stage3_sconv3, 288, 3, 1, 'stage3_sconv4', activation=tf.nn.relu) stage3_sconv5 = self._separable_conv_layer(stage3_sconv4, 288, 3, 1, 'stage3_sconv5', activation=tf.nn.relu) stage3_sconv6 = self._separable_conv_layer(stage3_sconv5, 288, 3, 1, 'stage3_sconv6', activation=tf.nn.relu) stage3_sconv7 = self._separable_conv_layer(stage3_sconv6, 288, 3, 1, 'stage3_sconv7', activation=tf.nn.relu) stage3_sconv8 = self._separable_conv_layer(stage3_sconv7, 288, 3, 1, 'stage3_sconv8', activation=tf.nn.relu) with tf.variable_scope('stage4'): stage4_sconv1 = self._conv_layer(stage3_sconv8, 576, 3, 2, 'stage4_sconv1', activation=tf.nn.relu) stage4_sconv2 = self._separable_conv_layer(stage4_sconv1, 576, 3, 1, 'stage4_sconv2', activation=tf.nn.relu) stage4_sconv3 = self._separable_conv_layer(stage4_sconv2, 576, 3, 1, 'stage4_sconv3', activation=tf.nn.relu) stage4_sconv4 = self._separable_conv_layer(stage4_sconv3, 576, 3, 1, 'stage4_sconv4', activation=tf.nn.relu) downsampling_rate = 32. 
return stage4_sconv4, downsampling_rate def _get_rpn_pbbox(self, rpn_conf, rpn_bbox): rpn_conf = tf.reshape(rpn_conf, [self.batch_size, -1, 2]) rpn_bbox = tf.reshape(rpn_bbox, [self.batch_size, -1, 4]) rpn_pbbox_yx = rpn_bbox[..., :2] rpn_pbbox_hw = rpn_bbox[..., 2:] return rpn_pbbox_yx, rpn_pbbox_hw, rpn_conf def _get_abbox(self, pshape, stride): topleft_y = tf.range(0., tf.cast(pshape[1], tf.float32), dtype=tf.float32) topleft_x = tf.range(0., tf.cast(pshape[2], tf.float32), dtype=tf.float32) topleft_y = tf.reshape(topleft_y, [-1, 1, 1, 1]) + 0.5 topleft_x = tf.reshape(topleft_x, [1, -1, 1, 1]) + 0.5 topleft_y = tf.tile(topleft_y, [1, pshape[2], 1, 1]) topleft_x = tf.tile(topleft_x, [pshape[1], 1, 1, 1]) topleft_yx = tf.concat([topleft_y, topleft_x], -1) topleft_yx = tf.tile(topleft_yx, [1, 1, self.num_anchors, 1]) * stride priors = [] for size in self.anchor_scales: for ratio in self.anchor_ratios: priors.append([size*(ratio**0.5), size/(ratio**0.5)]) priors = tf.convert_to_tensor(priors, tf.float32) priors = tf.reshape(priors, [1, 1, -1, 2]) abbox_y1x1 = tf.reshape(topleft_yx - priors / 2., [-1, 2]) abbox_y2x2 = tf.reshape(topleft_yx + priors / 2., [-1, 2]) abbox_yx = abbox_y1x1 / 2. + abbox_y2x2 / 2. abbox_hw = abbox_y2x2 - abbox_y1x1 return abbox_y1x1, abbox_y2x2, abbox_yx, abbox_hw def _compute_one_image_loss(self, pbbox_yx, pbbox_hw, abbox_y1x1, abbox_y2x2, abbox_yx, abbox_hw, pconf, ground_truth): slice_index = tf.argmin(ground_truth, axis=0)[0] ground_truth = tf.gather(ground_truth, tf.range(0, slice_index, dtype=tf.int64)) gbbox_yx = ground_truth[..., 0:2] gbbox_hw = ground_truth[..., 2:4] gbbox_y1x1 = gbbox_yx - gbbox_hw / 2. gbbox_y2x2 = gbbox_yx + gbbox_hw / 2. rcnn_label = tf.cast(ground_truth[..., 4], dtype=tf.int32) abbox_hwti = tf.reshape(abbox_hw, [1, -1, 2]) abbox_y1x1ti = tf.reshape(abbox_y1x1, [1, -1, 2]) abbox_y2x2ti = tf.reshape(abbox_y2x2, [1, -1, 2]) gbbox_hwti = tf.reshape(gbbox_hw, [-1, 1, 2]) gbbox_y1x1ti = tf.reshape(gbbox_y1x1, [-1, 1, 2]) gbbox_y2x2ti = tf.reshape(gbbox_y2x2, [-1, 1, 2]) num_a = tf.shape(abbox_hwti)[1] num_g = tf.shape(gbbox_hwti)[0] abbox_hwti = tf.tile(abbox_hwti, [num_g, 1, 1]) abbox_y1x1ti = tf.tile(abbox_y1x1ti, [num_g, 1, 1]) abbox_y2x2ti = tf.tile(abbox_y2x2ti, [num_g, 1, 1]) gbbox_hwti = tf.tile(gbbox_hwti, [1, num_a, 1]) gbbox_y1x1ti = tf.tile(gbbox_y1x1ti, [1, num_a, 1]) gbbox_y2x2ti = tf.tile(gbbox_y2x2ti, [1, num_a, 1]) gaiou_y1x1ti = tf.maximum(abbox_y1x1ti, gbbox_y1x1ti) gaiou_y2x2ti = tf.minimum(abbox_y2x2ti, gbbox_y2x2ti) gaiou_area = tf.reduce_prod(tf.maximum(gaiou_y2x2ti - gaiou_y1x1ti, 0), axis=-1) aarea = tf.reduce_prod(abbox_hwti, axis=-1) garea = tf.reduce_prod(gbbox_hwti, axis=-1) gaiou_rate = gaiou_area / (aarea + garea - gaiou_area + 1e-8) best_raindex = tf.argmax(gaiou_rate, axis=1) best_pbbox_yx = tf.gather(pbbox_yx, best_raindex) best_pbbox_hw = tf.gather(pbbox_hw, best_raindex) best_pconf = tf.gather(pconf, best_raindex) best_abbox_yx = tf.gather(abbox_yx, best_raindex) best_abbox_hw = tf.gather(abbox_hw, best_raindex) best_rcnn_label = tf.gather(rcnn_label, best_raindex) bestmask, _ = tf.unique(best_raindex) bestmask = tf.contrib.framework.sort(bestmask) bestmask = tf.reshape(bestmask, [-1, 1]) bestmask = tf.sparse.SparseTensor(tf.concat([bestmask, tf.zeros_like(bestmask)], axis=-1), tf.squeeze(tf.ones_like(bestmask)), dense_shape=[num_a, 1]) bestmask = tf.reshape(tf.cast(tf.sparse.to_dense(bestmask), tf.float32), [-1]) othermask = 1. - bestmask othermask = othermask > 0. 
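        # Anchors already claimed as the best match of some ground-truth box are
        # excluded here; the remaining ("other") anchors are split below into
        # positives (max IoU > 0.5) and negatives (max IoU < 0.3).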
other_pbbox_yx = tf.boolean_mask(pbbox_yx, othermask) other_pbbox_hw = tf.boolean_mask(pbbox_hw, othermask) other_pconf = tf.boolean_mask(pconf, othermask) other_abbox_yx = tf.boolean_mask(abbox_yx, othermask) other_abbox_hw = tf.boolean_mask(abbox_hw, othermask) agiou_rate = tf.transpose(gaiou_rate) other_agiou_rate = tf.boolean_mask(agiou_rate, othermask) max_agiou_rate = tf.reduce_max(other_agiou_rate, axis=1) pos_agiou_mask = max_agiou_rate > 0.5 neg_agiou_mask = max_agiou_rate < 0.3 rgindex = tf.argmax(other_agiou_rate, axis=1) pos_rgindex = tf.boolean_mask(rgindex, pos_agiou_mask) pos_pbbox_yx = tf.boolean_mask(other_pbbox_yx, pos_agiou_mask) pos_pbbox_hw = tf.boolean_mask(other_pbbox_hw, pos_agiou_mask) pos_pconf = tf.boolean_mask(other_pconf, pos_agiou_mask) pos_abbox_yx = tf.boolean_mask(other_abbox_yx, pos_agiou_mask) pos_abbox_hw = tf.boolean_mask(other_abbox_hw, pos_agiou_mask) pos_gbbox_yx = tf.gather(gbbox_yx, pos_rgindex) pos_gbbox_hw = tf.gather(gbbox_hw, pos_rgindex) pos_rcnn_label = tf.gather(rcnn_label, pos_rgindex) pos_pbbox_yx = tf.concat([best_pbbox_yx, pos_pbbox_yx], axis=0) pos_pbbox_hw = tf.concat([best_pbbox_hw, pos_pbbox_hw], axis=0) pos_pconf = tf.concat([best_pconf, pos_pconf], axis=0) pos_gbbox_yx = tf.concat([gbbox_yx, pos_gbbox_yx], axis=0) pos_gbbox_hw = tf.concat([gbbox_hw, pos_gbbox_hw], axis=0) pos_rcnn_label = tf.concat([best_rcnn_label, pos_rcnn_label], axis=0) pos_abbox_yx = tf.concat([best_abbox_yx, pos_abbox_yx], axis=0) pos_abbox_hw = tf.concat([best_abbox_hw, pos_abbox_hw], axis=0) pos_abbox_y1x1y2x2 = tf.concat([pos_abbox_yx-pos_abbox_hw/2., pos_abbox_yx+pos_abbox_hw/2.], axis=-1) neg_pconf = tf.boolean_mask(other_pconf, neg_agiou_mask) neg_abbox_yx = tf.boolean_mask(other_abbox_yx, neg_agiou_mask) neg_abbox_hw = tf.boolean_mask(other_abbox_hw, neg_agiou_mask) neg_pbbox_yx = tf.boolean_mask(other_pbbox_yx, neg_agiou_mask) neg_pbbox_hw = tf.boolean_mask(other_pbbox_hw, neg_agiou_mask) neg_abbox_y1x1y2x2 = tf.concat([neg_abbox_yx-neg_abbox_hw/2., neg_abbox_yx+neg_abbox_hw/2.], axis=-1) num_pos = tf.shape(pos_pconf)[0] num_neg = tf.shape(neg_pconf)[0] pos_label = tf.constant([0]) pos_label = tf.tile(pos_label, [num_pos]) neg_label = tf.constant([1]) neg_label = tf.tile(neg_label, [num_neg]) chosen_num_pos = tf.cond(num_pos > 128, lambda: 128, lambda: num_pos) chosen_num_neg = tf.cond(num_neg > 256-chosen_num_pos, lambda: 256-chosen_num_pos, lambda: num_neg) pos_conf_loss = tf.losses.sparse_softmax_cross_entropy(pos_label, pos_pconf, reduction=tf.losses.Reduction.NONE) selected_posindices = tf.image.non_max_suppression( pos_abbox_y1x1y2x2, tf.nn.softmax(pos_pconf)[:, 0], chosen_num_pos, iou_threshold=0.7 ) pos_conf_loss = tf.reduce_mean(tf.gather(pos_conf_loss, selected_posindices)) neg_loss = tf.losses.sparse_softmax_cross_entropy(neg_label, neg_pconf, reduction=tf.losses.Reduction.NONE) selected_negindices = tf.image.non_max_suppression( neg_abbox_y1x1y2x2, neg_loss, chosen_num_neg, iou_threshold=0.7 ) neg_loss = tf.reduce_mean(tf.gather(neg_loss, selected_negindices)) pos_abbox_yx = tf.gather(pos_abbox_yx, selected_posindices) pos_abbox_hw = tf.gather(pos_abbox_hw, selected_posindices) pos_pbbox_yx = tf.gather(pos_pbbox_yx, selected_posindices) pos_pbbox_hw = tf.gather(pos_pbbox_hw, selected_posindices) pos_gbbox_yx = tf.gather(pos_gbbox_yx, selected_posindices) pos_gbbox_hw = tf.gather(pos_gbbox_hw, selected_posindices) pos_rcnn_label = tf.gather(pos_rcnn_label, selected_posindices) neg_abbox_yx = tf.gather(neg_abbox_yx, selected_negindices) 
        neg_abbox_hw = tf.gather(neg_abbox_hw, selected_negindices)
        neg_pbbox_yx = tf.gather(neg_pbbox_yx, selected_negindices)
        neg_pbbox_hw = tf.gather(neg_pbbox_hw, selected_negindices)
        pos_truth_pbbox_yx = (pos_gbbox_yx - pos_abbox_yx) / pos_abbox_hw
        pos_truth_pbbox_hw = tf.log(pos_gbbox_hw / pos_abbox_hw)
        pos_yx_loss = tf.reduce_sum(self._smooth_l1_loss(pos_pbbox_yx - pos_truth_pbbox_yx), axis=-1)
        pos_hw_loss = tf.reduce_sum(self._smooth_l1_loss(pos_pbbox_hw - pos_truth_pbbox_hw), axis=-1)
        pos_coord_loss = tf.reduce_mean(pos_yx_loss + pos_hw_loss)
        total_loss = neg_loss + pos_conf_loss + 10.*pos_coord_loss
        pos_proposal_yx = pos_abbox_hw * pos_pbbox_yx + pos_abbox_yx
        pos_proposal_hw = tf.exp(pos_pbbox_hw) * pos_abbox_hw
        # RCNN regression targets are normalized by the proposal size, matching
        # the anchor encoding used for the RPN targets above.
        rcnn_truth_pbbox_yx = (pos_gbbox_yx - pos_proposal_yx) / pos_proposal_hw
        rcnn_truth_pbbox_hw = tf.log(pos_gbbox_hw / pos_proposal_hw)
        rcnn_truth_pbbox = tf.concat([rcnn_truth_pbbox_yx, rcnn_truth_pbbox_hw], axis=-1)
        neg_proposal_yx = neg_abbox_hw * neg_pbbox_yx + neg_abbox_yx
        neg_proposal_hw = tf.exp(neg_pbbox_hw) * neg_abbox_hw
        pos_proposal_y1x1 = pos_proposal_yx - pos_proposal_hw / 2.
        pos_proposal_y2x2 = pos_proposal_yx + pos_proposal_hw / 2.
        pos_proposal = tf.concat([pos_proposal_y1x1, pos_proposal_y2x2], axis=-1)
        neg_proposal_y1x1 = neg_proposal_yx - neg_proposal_hw / 2.
        neg_proposal_y2x2 = neg_proposal_yx + neg_proposal_hw / 2.
        neg_proposal = tf.concat([neg_proposal_y1x1, neg_proposal_y2x2], axis=-1)
        return total_loss, pos_proposal, pos_rcnn_label, rcnn_truth_pbbox, neg_proposal

    def _smooth_l1_loss(self, x):
        return tf.where(tf.abs(x) < 1., 0.5*x*x, tf.abs(x)-0.5)

    def _init_session(self):
        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())
        if self.mode == 'train':
            self.sess.run(self.train_initializer)

    def _create_saver(self):
        weights = tf.trainable_variables(scope='feature_extractor')
        self.pretraining_weight_saver = tf.train.Saver(weights)
        # load_rpn_weight() expects self.rpn_saver; create it here so RPN-stage
        # checkpoints can be restored without an AttributeError.
        self.rpn_saver = tf.train.Saver(
            tf.trainable_variables('feature_extractor') + tf.trainable_variables('rpn')
        )
        self.saver = tf.train.Saver()
        self.best_saver = tf.train.Saver()

    def _create_summary(self):
        with tf.variable_scope('summaries'):
            tf.summary.scalar('loss', self.loss)
            self.summary_op = tf.summary.merge_all()

    def train_one_epoch(self, lr):
        self.is_training = True
        self.sess.run(self.train_initializer)
        mean_loss = []
        num_iters = self.num_train // self.batch_size
        for i in range(num_iters):
            _, loss, global_step = self.sess.run([self.train_op, self.loss, self.global_step], feed_dict={self.lr: lr})
            # sys.stdout.write('\r>> ' + 'iters '+str(i+1)+str('/')+str(num_iters)+' loss '+str(loss))
            if global_step < self.rpn_first_step:
                loss_name = 'rpn_loss'
            elif global_step < self.rcnn_first_step:
                loss_name = 'rcnn_loss'
            elif global_step < self.rpn_second_step:
                loss_name = 'rpn_loss'
            else:
                loss_name = 'rcnn_loss'
            print('iters', str(i+1) + '/' + str(num_iters), loss_name, loss, 'global_step', global_step)
            # sys.stdout.flush()
            mean_loss.append(loss)
        # sys.stdout.write('\n')
        mean_loss = np.mean(mean_loss)
        return mean_loss

    def test_one_image(self, images):
        self.is_training = False
        pred = self.sess.run(self.detection_pred, feed_dict={self.images: images})
        return pred

    def save_weight(self, mode, path):
        assert mode in ['latest', 'best']
        if mode == 'latest':
            saver = self.saver
        else:
            saver = self.best_saver
        if not tf.gfile.Exists(os.path.dirname(path)):
            tf.gfile.MakeDirs(os.path.dirname(path))
            print(os.path.dirname(path), 'does not exist, create it done')
        saver.save(self.sess, path, global_step=self.global_step)
        print('save', mode, 'model in', path, 'successfully')

    def load_weight(self, path):
        self.saver.restore(self.sess, path)
print('load weight', path, 'successfully') def load_rpn_weight(self, path): self.rpn_saver.restore(self.sess, path) print('load rpn weight', path, 'successfully') def load_pretraining_weight(self, path): self.pretraining_weight_saver.restore(self.sess, path) print('>> load pretraining weight', path, 'successfully') def _bn(self, bottom): bn = tf.layers.batch_normalization( inputs=bottom, axis=3 if self.data_format == 'channels_last' else 1, training=self.is_training ) return bn def _conv_layer(self, bottom, filters, kernel_size, strides, name=None, dilation_rate=1, activation=None, padding='same'): conv = tf.layers.conv2d( inputs=bottom, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, name=name, data_format=self.data_format, dilation_rate=dilation_rate, ) bn = self._bn(conv) if activation is not None: bn = activation(bn) return bn def _separable_conv_layer(self, bottom, filters, kernel_size, strides, name=None, dilation_rate=1, activation=None): conv = tf.layers.separable_conv2d( inputs=bottom, filters=filters, kernel_size=kernel_size, strides=strides, padding='same', name=name, data_format=self.data_format, use_bias=False, dilation_rate=dilation_rate, ) bn = self._bn(conv) if activation is not None: bn = activation(bn) return bn def _max_pooling(self, bottom, pool_size, strides, name): return tf.layers.max_pooling2d( inputs=bottom, pool_size=pool_size, strides=strides, padding='same', data_format=self.data_format, name=name ) def _avg_pooling(self, bottom, pool_size, strides, name): return tf.layers.average_pooling2d( inputs=bottom, pool_size=pool_size, strides=strides, padding='same', data_format=self.data_format, name=name ) def _dropout(self, bottom, name): return tf.layers.dropout( inputs=bottom, rate=self.prob, training=self.is_training, name=name )
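
# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original model): a minimal NumPy
# demonstration of the (yx, hw) box parameterization used by the RPN/RCNN
# heads above. Targets are encoded as (gt_yx - anchor_yx) / anchor_hw and
# log(gt_hw / anchor_hw); decoding inverts this with t_yx * anchor_hw +
# anchor_yx and anchor_hw * exp(t_hw). All names here are illustrative only.
# ----------------------------------------------------------------------------
import numpy as np


def encode_box(gt_yx, gt_hw, anchor_yx, anchor_hw):
    """Encode a ground-truth box relative to an anchor (center/size form)."""
    return (gt_yx - anchor_yx) / anchor_hw, np.log(gt_hw / anchor_hw)


def decode_box(t_yx, t_hw, anchor_yx, anchor_hw):
    """Invert encode_box to recover the absolute box center and size."""
    return t_yx * anchor_hw + anchor_yx, anchor_hw * np.exp(t_hw)


if __name__ == '__main__':
    # Round-trip check: decoding the encoded targets recovers the original box.
    gt_yx, gt_hw = np.array([50., 60.]), np.array([20., 40.])
    a_yx, a_hw = np.array([48., 64.]), np.array([32., 32.])
    yx, hw = decode_box(*encode_box(gt_yx, gt_hw, a_yx, a_hw), a_yx, a_hw)
    assert np.allclose(yx, gt_yx) and np.allclose(hw, gt_hw)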
53.756849
135
0.628177
acefaf7b7f2d9c33248f3f648d9189ac482a5752
65,102
py
Python
english/C18FamilyEng.py
wholesomegarden/Challenge18
5aeac0f130fd69f9e29b3cf83d730e8e1fddd00d
[ "MIT" ]
1
2021-05-04T10:19:51.000Z
2021-05-04T10:19:51.000Z
english/C18FamilyEng.py
wholesomegarden/Challenge18
5aeac0f130fd69f9e29b3cf83d730e8e1fddd00d
[ "MIT" ]
null
null
null
english/C18FamilyEng.py
wholesomegarden/Challenge18
5aeac0f130fd69f9e29b3cf83d730e8e1fddd00d
[ "MIT" ]
null
null
null
# ‘’’ TASKS ‘’’ # For Sharon: # make sure all msgs you want are included (15 min intro for example) # For Tami: # make sure messages early in the day are sent in the morning (send one by one) # send msgs with url as attachments with preview Family = { # ''' DAY -5 ''' 5 dayד before the challenge -5:{ "12:00:00": "image/C18Heb/pre5.png" , "12:00:05": ''' שלום לכולם, אני מופתע ושמח מכמות המשתתפים ומקווה שהתהליך הזה יתרום לכם לצמיחה ושלווה נפשית בתקופה מאתגרת כזו ❤️🌻🌸🌍🍒⚽💡💸📱👂🥥🐜⌛🤹‍♂️🥇🎥🌸🌻❤️ ''' , }, # ''' DAY -4 ''' 4 dayד before the challenge -4:{ "12:00:00": "image/C18Heb/pre4.png" , "12:00:05": ''' מרגש אותי שעוד סוכני שינוי ממשיכים להצטרף אלינו ❤️ נתחיל בעוד 4 ימים - בשעה 20:00 GMT3. אתגר 18 הימים של קיימות ומנהיגות הוא תהליך קבוצתי חברתי בהשראת ה- SDG - יעדי פיתוח בר-קיימא, שמטרתן לעזור לנו להתפתח ולשנות את המציאות בה אנו חיים. אלו משימות יומיות קצרות של 10-20 דקות, הפותחת אותנו לתהליך קסום של צמיחה ומודעות חדשה, שתנתק אתכם מאזור הנוחות המיידי ותפתח בכם מודעות עולמית.You are giving yourself an amazing gift and you will be happy about it. זכרו שאתם עושים את זה לא רק בשביל עצמכם אלא עבור הקהילה שאתם חלק ממנה. אני מעריך ביותר את השתתפותכם, הרצון טוב והסבלנות, שיהיה לכולנו יום קסום מלא בקיימות ומנהיגות 🌻🌼🌸🌺🌹🌷💐🌾 אני כאן לכל שאלה / עזרה שתצטרכו באופן פרטי הצלחה וברכה ברחשי משחק ידידותיים שרון גל-אור 🐋🌸🙏 ''' , }, # ''' DAY -3 ''' 3 dayד before the challenge -3:{ "12:00:00": "image/C18Heb/pre3.png" , "12:00:05": ''' שלום לכולם, אני מופתע ושמח ממספר המשתתפים ומקווה שתהליך זה יתרום לצמיחתכם ולשקט הנפשי שלכם בתקופה כה מאתגרת 🐋🌸🙏🙏🙏❤️🌻☀️🌎💛💚💖 ''' , }, # ''' DAY -2 ''' 2 dayד before the challenge -2:{ "12:00:00": "image/C18Heb/pre2.png" , "12:00:05": ''' כדי להבטיח שאנשים ישארו בקבוצה ועוברים את התהליך, וכך אתם לא צריכים לטרוח ולחפש את המשימות זמן רב, אני מבקש מכם להשיב רק לגבי השלמת המשימות ... אלו הזקוקים להסברים - אשמח מאוד לענות בפרטי.You are giving yourself an amazing gift and you will be happy about it, שיהיה לכולנו יום קסום מלא בקיימות ומנהיגות 🌻🌼🌸🌺🌹🌷💐🌾 לאלו מכם המעוניינים, אתם מוזמנים להצטרף לקבוצה אחרת בה נשתף את החוויות האישיות בקבוצה הבאה: קבוצת שיתופים אתגר18 - לשיתופי רעיונות ושאלת שאלות 🐋🌸🙏 https://chat.whatsapp.com/JZikTOFwjdXJnGjG8V4GXh ''' , }, # ''' DAY -1 ''' one day before the challenge -1:{ "12:00:00": "image/C18Heb/pre1.png" , "12:00:05": ''' אני יודע שלחלקכם אין זו הפעם הראשונה 😉😉😉, אך עדיין חשוב מאד לעשות את התרגילים. אני בטוח שלאור המצב המשתנה בעולם, המודעות בביצוע התרגילים תהיה שונה. זוהי תקופה שבה לרוב צפים הפחדים הפנימיים, ולכן אני משער שבימים מסוימים חלק יחוו רגעים פחות נעימים. אין לי ספק ששמירה על מודעות גבוהה באמצעות ביצוע התרגילים תאפשר לנו התמודדות קלה יותר🌸 החל מהערב, ובכל יום, אשלח את המשימות לביצוע ביום הבא: 1. תרגיל אישי לכתיבה במחברת (הצטיידו במחברת צבעונית שתחייך אליכם) . 2. רקע על יעדי הקיימות הגלובליים וקצת מלח ופלפל התהליך דורש נאמנות לעצמכם ולקבוצה. *בסיום התירגול יש לסמן את מספר היום/יעד ואימוג'י לפי המשימה/ות שביצעתם, אחת או יותר, 🌻🌼🌸🌺🌹🌷💐🌾 ול-18 הימים הקרובים אבקש שזה כל מה שתגיבו בקבוצה*. 
כמו תמיד, מי שזקוק לתמיכה או הסברים מוזמן בשמחה לשלוח הודעות בפרטי בהצלחה באתגר ושיהיה לכולנו תהליך קיימות ומנהיגות מדהים ומצמיח - טינג גלובל ''' , "12:00:11": ''' אני מופתע ושמח מכמות המשתתפים ומקווה שהתהליך הזה יתרום לכם לצמיחה ושלווה נפשית בתקופה מאתגרת כזו ❤❤️🌻🌸🌍🍒⚽💡💸📱👂🥥🐜⌛🤹‍♂️🥇🎥🌸🌻❤ ️️ ''' , }, # ''' DAY 0 ''' the challenge starting day 0:{ "12:00:00": "image/C18Heb/pre0.png" , "12:00:05": ''' חברים יקרים ומקיימים 🥳🥳🥳 מי שלא מכיר את המסע המדהים הזה של אתגר 18 - אתן לכם קצת מידע: *מדובר בתהליך בן 18 ימים, שבו נשלחות אליכם מדי יום 1-3 משימות יומיות קצרות ומצמיחות בהשראת יעדי הקיימות ולצידם, רקע בסיסי על היעדים. כל ערב אשלח את המשימה של היום למחרת, ויעמדו לרשותכם 24 שעות לבצע ולאשר בקבוצה שביצעתם. האישור הוא מספר היום/יעד ולידו אימוג'י אחד או יותר, לפי המשימה/ות שבחרתם לבצע.* למשל: לב בכל צבע שתבחרו או אימוג'י סביבתי 'ירוק' לבחירתכם. הניקוד הוא בהתאם למספר המשימה, כך שהמספר המקסימלי מצטבר מיום ליום. לדוגמא: משימה 1 = נקודה אחת והסימון הוא ❤️ משימה 2 = שתי נקודות הסימון הוא ❤️❤️ או אימוג'י שנבחר לייצג את המשימה משימה 3 = שלוש נקודות הסימון הוא ❤️❤️❤️ או אימוג'י שנבחר לייצג את המשימה לדוגמא, ביום הראשון הציון המקסימלי הוא 6 לבבות/פרחים ❤️❤️❤️❤️❤️❤️ - זה במידה שבחרתם לבצע את כל שלוש המשימות. *הניקוד הוא מצטבר לאורך האתגר, הן במסע האישי שלכם והן בקבוצתי, כך שההתחייבות של כל אחד ואחת מכם היא לעצמכם ולקבוצה כולה. בהצלחה רבה.* מכיוון שאנחנו קבוצה גדולה, יתכן שזה יהיה קצת מאתגר אבל אני בטוח שתצליחו להכיל זאת, כי אתם מדהימים ומקיימים וכי אכפת לכם באמת מכדור הארץ ומדורות העתיד 🌻🌼🌸🌺🌹🌷💐🌾 בתהליך הזה מאוד מומלץ שתרגישו מחויבים לתהליך ולזמנים. מאחר שאני לא מאמין בהנחתות מלמעלה, אשמח שמי שלא מתחבר יפרוש בעצמו מהקבוצה כדי שהאנרגיה הכללית תישאר של עשייה וצמיחה ושל מחוייבות. תודה לכולכם על ההזדמנות לתת לכם מהלב ❤️ מי שצריך עזרה במשהו, בשמחה רבה. פנו אלי בפרטי. ''' , "12:00:10": ''' הבהרה נוספת חברים, כל יום בשעה 20:00 , תישלח אליכם המשימה היומית, לפי 1-3 דרגות מחויבות וקושי. בחרו לפחות אחת מהן, שאיתה אתם מרגישים בנוח ותרגישו מחויבים ומחוברים לתהליך - יש לכם 24 שעות לעשות אותה ולסמן בקבוצה שעשיתם...*אין צורך להיות זמינים במיוחד כל יום בשעה 20:00* 🌏🥥⚽💚🐋📱💸 ''' , "12:00:15": ''' שלום לכולם, מרגש אותי שאנשים ממשיכים להצטרף אלינו ❤️ אני מזכיר לך שהיום בשעה 20:00 נתחיל בתהליך המדהים של קיימות ומנהיגות לכל אחד ואחת מכם ... אני ממליץ לכם להיות מחויבים לתהליך. מי שלא מחויב בליבו לתהליך מוזמן לעזוב את הקבוצה בשמחה רבה 🌻🌼🌸🌺🌹🌷💐🌾 יום מהמם לכולם ''' , "12:00:20": ''' אני יודע שלחלקכם אין זו הפעם הראשונה 😉😉😉, אך עדיין חשוב מאד לעשות את התרגילים. אני בטוח שלאור המצב המשתנה בעולם, המודעות בביצוע התרגילים תהיה שונה. זוהי תקופה שבה לרוב צפים הפחדים הפנימיים, ולכן אני משער שבימים מסוימים חלק יחוו רגעים פחות נעימים. אין לי ספק ששמירה על מודעות גבוהה באמצעות ביצוע התרגילים תאפשר לנו התמודדות קלה יותר🌸 החל מהערב, ובכל יום, אשלח את המשימות לביצוע ביום הבא: 1. תרגיל אישי לכתיבה במחברת (הצטיידו במחברת צבעונית שתחייך אליכם) . 2. רקע על יעדי הקיימות הגלובליים וקצת מלח ופלפל התהליך דורש נאמנות לעצמכם ולקבוצה. *בסיום התירגול יש לסמן את מספר היום/יעד ואימוג'י לפי המשימה/ות שביצעתם, אחת או יותר, 🌻🌼🌸🌺🌹🌷💐🌾 ול-18 הימים הקרובים אבקש שזה כל מה שתגיבו בקבוצה*. 
כמו תמיד, מי שזקוק לתמיכה או הסברים מוזמן בשמחה לשלוח הודעות בפרטי בהצלחה באתגר ושיהיה לכולנו תהליך קיימות ומנהיגות מדהים ומצמיח - טינג גלובל ''' , "12:00:25": ''' בוקר טוב חברות וחברים, מסע השינוי שלנו, של האנושות כולה, תלוי ביכולת שלנו לתקשר במספרים גדולים בצורה מהירה ומדויקת לטובת 'תיקון עולם' ושמירה על הרמוניה כולי תקווה כי אתגר18 יעזור לכם להרחיב את הידע והדמיון, להטמיע את ערכי הקיימות, ויעודד אתכם להפוך ל- 'סוכני שינוי' כך שיהיה בכוחנו להשפיע באופן עמוק ומהותי על תחומי החברה, הסביבה והקיימות בבית הספר, בקהילה, בעיר, במדינה ואפילו בעולם כולו אז קבלו את ברכתי, היום אתם מתחילים את אתגר "18 הימים לקיימות ומנהיגות" של טינג גלובל, כי ניתן לשנות את המציאות בה אנו חיים ולהציל את האנושות, אם רק נבחר לשנות את ההרגלים שלנו. איך? נתחיל בתרגול יומיומי... לעבר מציאות חדשה שמחכה לכם, למשפחה שלכם, לקהילה שלכם ולמען עתיד הדורות הבאים. 🐋🌸🙏 ''' , "12:00:35": "image/english/0.ogg" , }, # ''' DAY 1 ''' 1:{ "19:59:50": "image/C18Heb/1.png" , "19:59:57": ''' שלום לכולם, מתחילים..... מאחל לכולכם בהצלחה ❤ ושתקבלו את המקסימום מהתהליך הקסום הזה *שיעור 1 – "הוקרת הטוב את הטוב" - על עושר רוחני ועושר אנושי* המשימה של היום לפי 1-3 דרגות מחויבות – בחרו לפחות אחת מהן, לא משנה מהי העיקר שתרגישו בנוח ותרגישו מחויבים ומחוברים לתהליך. המשימה של יום 1: במחברת שבחרתם לתהליך, יש ליצור רשימה של אנשים שיש להם ערך מוסף לחיים שלכם. 1. *צפו בסרטון* – עוני בישראל ❤ https://www.youtube.com/watch?v=wuQkMk9ZK7M 2. *יש לי – יש לי* - רשימה של 18 דברים שיש לכם כבר בחיים. ❤❤ 3. *48 אנשים* שהעניקו לחייכם ערך מוסף. רשמו כל מי שחיובי ואשר לתפיסתכם גרם לכם לגדול בצורה כלשהי. ❤❤❤ הרשימה חייבת להיות מלאה לפי דרגת המחויבות שבחרתם. חשוב שתזכרו שגם אנשים שלכאורה עשו לכם ״רע״ גרמו לכם לצמוח. (תוך כדי העשייה תבינו עם עצמכם מה הסיבה לבחירתם ברשימה). התקדמו ברוגע, היזכרו בדברים הטובים בכל אדם, ומה הוא/ היא הביא לתוך החיים שלכם. תיהנו מהעשייה הזו! ''', "20:00:07": ''' *ברוכים הבאים ליום הראשון של אתגר 18ימים של קיימות ומנהיגות – מיגור העוני* מי עני? מי נחשב לעני בחברה הישראלית? מה זה בכלל להיות עני? הידעתם שבישראל כ 2.5 מיליון עניים, (מתוכם קרוב לחצי מיליון תלמידים עניים), ובעולם כולו כמעט מחצית מאוכלוסיית העולם חיה בעוני, - ישנם למעלה ממיליארד אנשים – אשר עדיין מתקיימים מסכום של 1.25$ ליום או פחות מכך, והמספר עולה מאז פרוץ הקורונה. האם עלינו להאשים את אותם ילדים שנולדו לתוך המציאות הזו? איך הילדים האלו יגדלו בידיעה שהם לעולם לא יוכלו להתחמק מהעוני? *מה זה אומר בכלל? מה זה אומר על העתיד שלנו? ומה אפשר לעשות?* על פי יעדי הקיימות שהציבו לעצמן המדינות החברות באו"ם, יעד מספר 1 הוא להפחית בחצי עד שנת 2030, את האחוז היחסי של העניים בעולם. ובישראל? לאיזה יעד אתם התחייבתם? לאיזה יעד אנחנו התחייבנו? אלו הזדמנויות תעסוקה עומדים לרשות אלו בינינו החיים בעוני? אלו שירותים ומוצרים קיימים בעבור אלו מביננו החיים בעוני? מאז פרוץ מגפת הקורונה מצבם הכלכלי של ישראלים רבים קשה עד כדי כך שהם נאלצים לעתים לוותר על סיפוק הצורך האנושי הבסיסי ביותר – מזון. חיסול העוני אינה משימה של צדקה, אלא מעשה של צדק. *מה אתם יכולים לעשות כדי להילחם בעוני?* *ועתה לעושר רוחני.* אספנו רשימה של אנשים שיש להם ערך מוסף לחיים שלנו והתמקדנו במה שיש לנו בחיים. *מהו עושר רוחני ומה הקשר לעושר ממשי?* נוכל להבין זאת כשנחשוב על המשפחה שלנו, משפחה שתומכת האחד בשני בשעת הצורך. כשאח או אחות שלכם משיגים הצלחה, כל המשפחה חוגגת. ברובד הרוחני העמוק ביותר, אנחנו מחוברים לא רק להורינו ולאחינו אלא לעולם כולו, כשאנחנו גדלים במודעות הזו, אנו חיים באחדות, חוגגים את הצלחתם של כולם. משפחת האדם. בלו זמן מסוים בלתרגל אחדות, היו נוכחים והתעניינו תדיר בחיי האחרים כשאתם מדברים איתם. בשיחות שלכם, העניקו את תשומת ליבכם המוחלטת ואם תראו מישהו עם צורך, הציעו עזרה. 
לחדד את ההתבוננות בסביבתכם ולעשות את הדברים הללו, יעזור לכם לפתח הבנה עמוקה יותר לגבי קשרים אמיתיים. המחשבה המרכזית להיום: *"איזהו העשיר השמח בחלקו".* ''' , "20:00:17": "image/C18Heb/1.opus" , # ''' DAY 2 ''' "09:31:00": ''' בוקררר טוב גבירותי ורבותי! אני מזכיר לכם שסימון המשימה / ים הוא עד הערב, 20:00 הציון תלוי במספר המשימות שבחרתם וזה מצטבר גם במסע האישי וגם בקבוצה 🙌🏻🎶🎵 אני מעודד את כולכם לסמן מספר יום ולידו אימוג'י מתאים לבחירתכם: יום 1 ❤️ ולהעלות את המורל של הקבוצה ושל עצמכם מכיוון שהחיים הם מסע משמעותי ועוצמתי, לעולם אל תוותרו 🐋🌸🙌 ''' , "09:31:10": ''' 🌻🌼🌸🌺🌹🌷💐🌾 yoכולכם מוזמנים להצטרף לקבוצת הקהילה בה אנו חולקים את החוויות האישיות ומדברים מכל הלב על הפיכת העולם למקום טוב יותר:u are all welcome to join the community group where we share the personal experiences and talk whatever in out heart about making the world a better place : קבוצת שיתופים אתגר18 - לשיתופי רעיונות ושאלת שאלות 🐋🌸🙏 https://chat.whatsapp.com/JZikTOFwjdXJnGjG8V4GXh ''' , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, # ''' DAY 2 ''' 2:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/2.png" , "20:00:05": ''' *משימה ליום השני – אפס רעב* 💖 🌏 💖 בחרו לפחות משימה אחת משלוש, לא משנה איזו, העיקר שתרגישו בנוח, מחויבים ומחוברים לתהליך. אתגרו את עצמכם לאכול אחרת. 1. *צפו בסרטון* – "מדוע בישראל אנחנו זורקים מזון בשווי 20 מיליארד שקל בשנה?" - ❤️ לאחר מכן, הכנסו לרשימת הטלפונים של חברי הקבוצה ותשלחו הודעה לחבר אחר על המחשבות שלכם מהסרטון ובכלל בהקשר של עוני ורעב בעיר/מדינה שלכם ומה אפשר לעשות בנידון. https://www.youtube.com/watch?v=nSGCTn6LM-U 2. *נמנעים ממתוקים* - בחרו שלא לאכול מתוקים (עוגות, ממתקים, משקאות ממותקים וכולי) - מהיום ועד סוף האתגר, (בסיום התרגול יש לסמן את מספר היום 2 ו- 🍒 בכל יום מעתה ועד סוף האתגר) 3. *ווגניישן* אתגרו את עצמכם לאכול טבעוני או צמחוני מהיום ועד סוף האתגר (בסיום התרגול יש לסמן את מספר היום 2 ו- 🌏 בכל יום מעתה ועד סוף האתגר) העיקר שתשמרו על תזונה טובה יותר מזו שאתם רגילים לה. מה אתם אומרים, מסוגלים? לאחר ביצוע מוצלח של המשימה עד מחר בשעה 20:00, הקלידו 🌏 2 , 🍒 2 בקבוצה ובכל יום מעתה ועד סוף האתגר בהתאם. זכרו: ככל שתאפשרו לעצמכם יותר בתהליכים האלה, כך תקבלו יותר לצמיחה האישית שלכם המשך ערב מלא בקיימות ומנהיגות 💫 🌏❤️🍒 💫 ''', "20:00:08": ''' *ברוכים הבאים ליום השני של האתגר – אפס רעב* אנחנו רוצים יותר, צורכים יותר וזורקים יותר, והכוונה לא רק למזון. מה זה אומר עלינו בכלל? אם נקדם חקלאות בת-קיימא עם טכנולוגיות מודרניות ומערכות הפצה הוגנות, נוכל לקיים את כל אוכלוסיית העולם ולוודא שאף אחד לא יסבול אי פעם מרעב. על פי יעדי הקיימות, יעד מספר 2 הוא - עד לשנת 2030, לשים קץ לרעב ולהבטיח גישה של כל בני האדם – במיוחד העניים, הילדים ואנשים המצויים במצבים פגיעים,– למזון בטוח, מזין ומספק במהלך השנה כולה. מה עושים כדי להפסיק את הבזבוז האדיר של מזון ולוודא שיגיע לאלו שזקוקים לו? הפלנטה שלנו סיפקה לנו משאבים אדירים, אך גישה לא שוויונית וטיפול לא יעיל משאירים מיליוני אנשים שסובלים מתת-תזונה. *מה אתם יכולים לשנות בהרגלי צריכת המזון שלכם?* *שינוי קטן בהרגלי צריכת המזון שלכם – שינוי ענק לאנושות.* חשוב לדעת שאפשר אחרת, שאנחנו יכולים לדמיין ולפעול למען עתיד טוב יותר – למען שפע רוחני שיחליף את זה החומרי והבזבוז העודף. המחשבה המרכזית להיום: *"איזהו העשיר השמח בחָלְקוֹ"* - משמע – עשיר הוא זה *החולק* עם אחרים. 
''' , "20:00:13": "image/C18Heb/2.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 3 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 3 ''' 3:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/3.png" , "20:00:05": ''' משימה ליום השלישי 💖 ⚽ 💖 – *בריאות טובה* - בחרו לפחות משימה אחת משלוש, העיקר שתרגישו איתה בנוח, מחויבים ומחוברים לתהליך. 1. *צפו בסרטון* – על הארכת תוחלת החיים וחשבו האם הייתם רוצים לחיות לנצח ולמה? https://www.youtube.com/watch?v=_b0rNa-ts2g לאחר ביצוע מוצלח של המשימה עד מחר בשעה 20:00, הקלידו ❤ 3 בקבוצה. ''', "20:00:08": ''' 2. *חיים מאושרים* - הכינו רשימה, (הפעם אין חשיבות למספר) של האנשים שנחשבים בעיניכם כמשגשגים ובריאים, במשפחה שלכם או בסביבה החברתית שלכם. אנשים שהשיגו את מטרות החיים שלהם (כולם או חלקם) והם מאושרים. ❤❤️3 3. *פלאנק איט* - מהיום ועד סוף האתגר, תרגלו פלאנק לפחות דקה מדי יום (בסיום התרגול יש לסמן את מספר היום ו- ⚽ 3 בכל יום מעתה ועד סוף האתגר) שימו לב, לא רק בהקשר לשגשוג חומרי אלא גם אושר, צמיחה ושלווה אמיתית *זכרו*: ככל שתאפשרו לעצמכם יותר בתוך התהליכים האלה, כך תקבלו יותר לצמיחה האישית שלכם💖 ⚽ 💖 המשך ערב מלא בקיימות ומנהיגות 💫 ⚽ 💫 הבהרה חשובה לגבי סימנים. מספר מייצג את היום/יעד באתגר אימוגי מייצג סוג משימה ❤️ מייצג משימה יומית חד פעמית = 1 נקודה לאכול טבעוני 🌏 = 3 נקודות להימנע ממתוקים 🍒= 2 נקודות פלאנק-איט ⚽ = 3 נקודות דוגמא: מי שבחר באתגר להימנע ממתוקים מהיום השני ועד סוף האתגר מסמן בקבוצה 🍒 במשך כל יום. אם בחרתם גם ביום השלישי את אתגר הפלאנק-איט אז בנוסף , תסמנו משך כל יום ⚽ ''' , "20:00:10": ''' *ברוכים הבאים ליום השלישי של האתגר – בריאות טובה* "בריאות היא זכות אדם בסיסית" (ארגון הבריאות הבינלאומי), ואינדיקטור מפתח לפיתוח בר קיימא. נשים בכל העולם עדיין חסרות גישה לטיפול רפואי הנוגע למין ולפריון; מיליארדי אנשים נותרו ללא גישה לתרופות חיוניות, מיליוני מבוגרים וילדים יסבלו מתת תזונה השנה, ויש המעריכים שכמות הפסולת תשולש השנה, ולכך יהיו השפעות חמורות על הבריאות שלנו. בנוסף, אלפי מקרים חדשים של מגפת הקורונה ממשיכים לקרות מדי יום, ומביאים לידי חוסר יציבות פוליטית וכלכלית, המאיימים על הפרנסה של רבים מתושבי העולם. נכון, החיסון לקורונה עדיין לא נמצא, הפיתוח בדרך, אבל חשבו - מי לדעתכם יהיו הראשונים ליהנות ממנו? 
*מה אתם יכולים לעשות בשביל להיות בריאים יותר, נפש, גוף ורוח?* *המחשבה המרכזית להיום: נפש בריאה בגוף בריא בעולם בריא* ''' , "20:00:13": "image/C18Heb/3.opus" , "20:00:20": "פלאנק-איט ⚽\nhttps://youtu.be/p4cDh0FrrdQ" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 4 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 4 ''' 4:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/4.png" , "20:00:05": ''' ערב טוב לכולם 🌙 *ברוכים הבאים ליום הרביעי של האתגר – חינוך איכותי* בחרו לפחות משימה אחת משלוש, העיקר שתרגישו בנוח, מחויבים ומחוברים לתהליך. 1. *צפו בסרטון* – לינדה קליאט-ווימן: איך לתקן בית ספר מקולקל? כיצד להנהיג ללא פחד, לאהוב בעוצמה | הרהרו במה שראיתם. ❤️ https://youtu.be/Xe2nlti47kA 2. *ספרים נודדים* - כתבו שני רעיונות שקראתם בספר בנושא קיימות ושלחו אותם לחבר שנמצא בקבוצת האתגר. 💚❤️ ההמלצה שלי אליכם היא לקרוא את ספר הפנטסיה: 'המסע לעבר ממלכת לודולנד'. 3. *מכתב תודה* - כתבו מכתב למורה (מורה לחיים) שהשפיע עליכם, שבו תבטאו את כל הרגשות שלכם, התחושות שיש לכם/ן. כתבו את כל מה שאתם רוצים. אתם כמובן מוזמנים לשתף בקבוצה ממה שכתבתם. מוזמנים לשלוח את המכתב לאותו מורה ולצפות בקסם שמתרחש. הרגישו נוח לשתף כאן. ❤💜💛 שיהיה לכולכם יום מלא בקיימות ומנהיגות 🌈🌈🌈 ❤️💜💛💙💚💛💜💚❤️ ''', "20:00:08": ''' *ברוכים הבאים ליום הרביעי של האתגר – חינוך איכותי* מה אם היינו מלמדים את ילדינו איך לחשוב ולא מה לחשוב? מה אם היינו מלמדים את ילדינו להטיל ספק, לשאול? מה אם היינו מלמדים את ילדינו להגשים את החלומות שלהם? חינוך, הוא זכות אדם בסיסית, והוא חיוני לשם השגת פיתוח בר קיימא. אנחנו יכולים להעצים נשים, להילחם בשינויי מזג האוויר, להילחם באי שוויון, למגר עוני קיצוני ועוד - רק אם אנחנו, האזרחים, כולל מערכת החינוך עצמה, מתחייבים לקדם מטרות הקשורות בחינוך. יעד 4 ביעדים הגלובליים קובע כי עד שנת 2030, ישלימו כל הבנים והבנות השכלה יסודית ועליונה הניתנת בחינם, שתהיה שוויונית ואיכותית. *מה אתם יכולים לעשות עכשיו כדי להבטיח חינוך איכותי ונגיש בקהילה שלכם?* כולם רוצים להישמע ואף אחד לא מקשיב. משבר הקורונה עזר לנו להבין שבלי מעורבות דומיננטית של החברה, שתשפיע על הפוליטיקאים מקבלי ההחלטות, אין סיכוי לאנושות לשרוד על כדור הארץ. ממשברים אפשר ללמוד, ובעיקר מלהקשיב לאחר. 
לפיכך, המחשבה המרכזית להיום היא: *אני מקשיב לאחר, אני מקשיב כי אכפת לי.* ''' , "20:00:13": "image/C18Heb/4.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, 4.5:{}, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 5 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 5 ''' 5:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/5.png" , "20:00:05": ''' המשימה הזו הגיעה עלינו לקבל את השבת כך שיש לכם זמן ורוגע להתרכז כמו שצריך ....כבר מרגיש שהקבוצה הזו ברת מזל💜 שבת קסומה ותמשיכו לפרגן לעצמכם קיימות ומנהיגות ❤❤❤❤❤❤❤❤ ''', "20:00:08": ''' *צהריים טובים וברוכים הבאים ליום ה- 5 של תרגול קיימות ומנהיגות* 🌸🌸🌸🌸 אנשים פורשים וזה טבעי, פרגנו לעצמכם על זה שאתם עדיין כאן 👏👏👏👏👏👏 אני אישית גאה בכל אחד/ת מכם/ן ❤❤❤❤❤ אנו ממשיכים בתרגול הודיה ה 🤗🌱🌸🌷🌹 יום 5 – *שוויון מגדרי* - בחרו לפחות אחת משלוש, לא משנה מה העיקר שתרגישו בנוח ותרגישו מחויבים ומחוברים לתהליך. לגבי המשימה של היום - *הודיה ולא אפליה* 1. *צפו בסרטון TED* – מדוע שוויון מגדרי טוב לכולם - גברים כלולים | מייקל קימל | שיחות TED | והרהרו בנאמר. ❤ https://youtu.be/7n9IOH0NvyY 2. *התבוננות* - חשבו על מקרה שבו אתם עצמכם הרגשתם מופלים מגדרית, או על מקרה של אפליה מגדרית שבו נתקלתם בחייכם. מה עשיתם בשעתו ומה הייתם עושים אחרת כיום? אתם מוזמנים לשתף בקבוצה ממה שכתבתם. ❤💜 3. *מכתב תודה* - כתבו מכתב תודה, מכתב הערכה, לאדם מבן המין השני הקרוב לליבכם שאתם חושבים שפגע בכם בשלב כלשהו בחייכם. ❤💜💛 לפני כתיבת מכתב תודה, נקו את כל הרגשות השליליים על אותו אדם. אם אתם עדיין מרגישים טינה, אתם יכולים גם לרשום את כל הרגשות השליליים על פיסת נייר ולאחר מכן לשרוף אותו או לקרוע אותו. הדבר החשוב הוא שכאשר אתם כותבים מכתב תודה / הערכה, אתם כבר לא מרגישים טינה נגד אותו אדם. עצרו לחשוב מי האדם הזה. יותר מאדם אחד יכול לעלות על הדעת, אבל אנחנו צריכים לעשות את העבודה רק עם אחד. ✳ המכתב חייב להיות כתוב בכתב יד. בהצלחה 🙏 ''' , "20:00:10": ''' *ברוכים הבאים ליום החמישי של האתגר – שוויון מגדרי* שנים רבות הייתה חלוקה ברורה בין תפקידי הגבר ותפקידי האישה. הגבר פרנס והאישה הייתה אחראית למשק הבית. לכל מגדר היו כללי לבוש והתנהגות ברורים למדי. המחצית הגברית של האוכלוסייה נחשבה יותר מזו הנשית. בעשרות השנים האחרונות המצב הולך ומתאזן. החוק מקפיד על זכויותיהן של נשים ואוסר על אפליה מטעמי מגדר, ונשים רבות מגיעות לתפקידים בכירים במשק. עם זאת המצב עוד רחוק מלהיות מיטבי: נשים עדיין מרוויחות פחות, נשפטות יותר על סמך המראה שלהן וסובלות מהטרדות. שוויון בין המינים הינו זכות אדם בסיסית שאסור לפגוע בה, והעצמה של נשים ונערות היא חיונית לשם חיזוק הצמיחה הכלכלית, קידום פיתוח חברתי והעצמת המדינה ככלל. *מה אתם עושים כדי להילחם באי-השוויון?* היום נתמקד בהזדמנויות במקום במכשולים. היום, נתמקד בנדיבות במקום באדישות, באור במקום בחושך, באהבה במקום בשנאה, בקיימות במקום בכאוס, בשפע במקום בחוסר, היום נתמקד בבחירה שלנו לכבד את עצמנו ואת הזולת. המחשבה המרכזית להיום: *"איננו מפלים בין אדם לאדם. איננו שואלים מהי דתו של אדם, ובן איזה גזע הוא. עליו להיות אדם ובכך אנו אומרים די"*. 
- בנימין זאב הרצל { מתוך הספר "אלטנוילנד" } ''' , "20:00:13": "image/C18Heb/5.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 6 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 6 ''' 6:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/6.png" , "20:00:05": ''' שבת שלום ומבורכת, היום אשלח מוקדם יותר את המשימה כדי להתחשב בשומרי השבת בקבוצה הקסומה שלנו ❤️ *ברוכים הבאים ליום השישי של האתגר – מים נקיים* אני מצרף את התרגיל ליום 6 – *מים נקיים* - בחרו לפחות משימה אחת משלוש, הרגישו בנוח, הרגישו מחויבים ומחוברים לתהליך. אני מאחל לכולכם שתפקחו את העיניים ליום חדש מלא בשפע כנחל זורם ☀☀☀☀☀ 💟💟💟💟💟💟💟💟💟💟💟💟💟 1. *צפו בשיחת TED* - מסע של מדענית צעירה למים נקיים | Deepika Kurup | והרהרו במה שראיתם, ואז מצאו טכנולוגיות מים שפותחו בארצכם ושתפו איתנו בקבוצת השיתופים. 💟 https://youtu.be/AkUcaludrcI 2. *כתבו 5 פגמים שלכם*, שלהערכתם עוצרים או מאטים את זרימת החיים שלכם, מגבילים או חוסמים אתכם, שאינם מאפשרים לכם לקבל את מה שאתם באמת רוצים, בין אם זה חומרי או לא. חסרונות שתרצו לשפר/לתקן – ואשר בהיעדרם - כל דבר יזרום טוב יותר. נסו להתרכז וללכת עמוק ככל האפשר. חפשו את התכונות והתבניות שמעכבות את הגשמת הפוטנציאל האותנטי והמקורי שלכם. ❤️❤️ 3. תרגלו את *"חוק הנתינה וזרימת החיים"* על ידי מתן מתנה קטנה לכל מי שתפגשו, זו לא צריכה להיות מתנה יקרה או בעלת ערך כספי, המתנה יכולה להיות חיוך, פרח, מחמאה מהלב, יד עוזרת או ברכה בלב ו... הישארו פתוחים לקבל בהודיה כל מתנה המוצעת לכם. ❤️❤️❤️ לילה טוב מלא בקיימות ומנהיגות 💌💌💌 שרון גל-אור טינג גלובל ''', "20:00:08": ''' *ברוכים הבאים ליום השישי של האתגר – מים נקיים* בעיית המים היא בעיה עולמית שהולכת ומתרחבת. היא שלובה בהתחממות הגלובלית, בתהליכי המדבור, בגידול באוכלוסייה, והתוצאה שלה היא שורה של מלחמות סביב מים, הגירות המוניות, ושינויים פוליטיים. קצת קשה להאמין, אבל כיום ליותר ממיליארד בני אדם אין גישה למים נקיים ובטוחים. באו"ם מזהירים כי עד שנת 2025 כמעט מחצית מאוכלוסיית כדור הארץ תתגורר באזורים מוכי בצורת, ועד שנת 2050 150 מיליון בני אדם יהיו "פליטי אקלים" כתוצאה מהתפתחויות אקולוגיות (מחסור במים, בצורת, מדבור, ירידת פריון הקרקע, ועליית פני הים). גישה למים ולתברואה הן זכויות אדם בסיסיות, אתגר חיוני לפיתוח בר קיימא. יעד גלובלי מספר 6 מדבר על כך שעד לשנת 2030, תהיה לכולם גישה עולמית שוויונית למי שתיה בטוחים וברי השגה. חוק הנתינה וזרימת החיים הוא פשוט: *אם אתם רוצים אושר, העניקו אושר לאחרים*. אם אהבה היא מה שאתם מחפשים, הציעו אהבה. אם אתם רוצים שפע רוחני וקיימות, עזרו לאחרים להפוך משגשגים. הדרך הקלה ביותר להשיג את מבוקשכם היא *ליצור מחזוריות בזרימת השפע* ולעזור לאחרים להשיג את מה שהם רוצים וצריכים. ככל שאתם מעניקים יותר, כך תקבלו יותר ותצמחו. 
המחשבה להיום: *"היום ובכל יום - אני נותנ/ת את מה שאני רוצה למשוך לחיי."* ''' , "20:00:13": "image/C18Heb/6.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 7 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 7 ''' 7:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/7.png" , "20:00:05": ''' שבוע טוב וערב מהנה לכולם ♥️ בחרו לפחות אחת משלוש, לא משנה מה העיקר שתרגישו בנוח ותרגישו מחויבים ומחוברים לתהליך. ראשית, אני גאה בכם שהתמדתם והגעתם עד ליום ה - 7... היו גאים בעצמכם, עטפו את עצמכם בשפע של אהבה על ההתמדה והרצון לחיים של קיימות ומנהיגות *אנרגיה ללא נתינה וקבלה, אינה זורמת.* 💟💟💟💟💟💟💟💟 1. *צפו בשיחת TED* - מדוע אנרגיות מתחדשות לא יכולות להציל את כדור הארץ | מייקל שלנברגר | TEDxDanubia | והרהרו במה שראיתם. ❤️ https://youtu.be/N-yALPEpV4w 2. *נפטרים מהחסמים* - מתוך חמש התכונות המעכבות שרשמתם במשימה הקודמת, בחרו שתיים שלתחושתכם שואבות מכם הכי הרבה אנרגיה, ואשר תרצו לעבוד עליהם. הגדירו את תוכנית הפעולה שלכם, על ידי כתיבת לפחות שתי פעולות (פעולות קונקרטיות יומיומיות, או פעולות רחבות טווח), שיסייעו לכם להיפטר מהחסמים. יישמו אותן! ❤️❤️ 3. *חוסכים באנרגיה* - רשמו במחברת, דרכים שאתם יכולים לחסוך אנרגיה בחייכם, פיזי ורוחני, ולהתחייב להם עד סוף האתגר. יש לסמן 💡 בכל יום מעתה ועד סוף האתגר) ''' , "20:00:10": ''' *ברוכים הבאים ליום השביעי של האתגר – אנרגיה נקייה* אנרגיה היא חיים. בשנים האחרונות חל גידול עצום בצריכת האנרגיה שלנו. כמעט כל האנרגיה שאנחנו משתמשים בה באה מן האדמה – נפט פחם וגז. מה עושים עם הזיהום שהשימוש בהם גורם, ואיך מתמודדים עם העובדה שיום אחד כל אלה ייגמרו? החברה המודרנית תלויה בשירותי אנרגיה אמינים וברי השגה על מנת לתפקד באופן תקין וכדי לפתח שוויון. מערכת אנרגיה שהוקמה כהלכה תומכת בכל המגזרים החל מרפואה, דרך חינוך ועד חקלאות, תשתיות, תקשורת וטכנולוגיה מתקדמת. כיום עומדים לרשותנו טכנולוגיות חדשות ונקיות והן יכולות להסיט את הפיתוח לעבר מסלול ייצור אנרגיה ירוקה ובת קיימא. ככלל, יישום פתרונות אנרגיה חדשים, מהר ככל האפשר, הינו חיוני על מנת להתמודד עם שינויי האקלים, אחד האיומים הגדולים ביותר להישרדות שלנו. *ועתה לאנרגיה במימד הרוחני* – MC2 שווה L אהבה כי אנרגיה בממד הכי גבוה היא אהבה, וקיימות - פירושה למלא את חייכם בשפע ואהבה - שלא על חשבון הדורות הבאים. כשאנו מדברים על החיבור בין קיימות ואנרגיה, אנחנו דואגים בצורה אחראית למשהו שאנחנו מעריכים, כחלק מהדרך להגשמת החלומות. טיפול הולם בילד, בחירות בריאותיות נבונות, דאגה לגוף, שימוש אחראי במשאבים של כדור הארץ, כל אלו הן דוגמאות של התארגנות טובה – אשר מקטינה את טביעת הרגל המוגזמת שלנו. זכרו: לפני כל בחירה שאתם עושים תשאלו את עצמכם: ״האם הבחירה הזאת תגרום לתחושת מלאות ושמחה לי ולאלה שמושפעים מהבחירות שלי?״ עשו זאת בכל פעם שאתם צריכים לקבל החלטה שמשפיעה עליכם ועל אלה שסביבכם. הקשיבו לליבכם, שמעו את התשובה ובטחו בבחירה הערכית הנכונה – כי אנרגיה בממד הכי גבוה שלה היא אהבה טהורה. המשפט המרכזי של המחשבה להיום : "היום אני עושה בחירות נהדרות כי הן נעשות במודעות מלאה" במהלך יומכם זכרו את החשיבות של בחירה מתוך מודעות. 
הזכירו לעצמכם את המחשבה המרכזית של היום : *"היום אני עושה בחירות נהדרות כי הן נעשות במודעות מלאה"* ''' , "20:00:13": "image/C18Heb/7.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 8 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 8 ''' 8:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/8.png" , "20:00:05": ''' *צמיחה כלכלית – תודעת שפע וחשיבה מוטת עתיד* ערב טוב לכולם ❤ היום ה- 8 הוא יום מכונן ומהותי בכל הנוגע לשפע כלכלי 😇💸💸💸 נצלו את האתגר לברוא לחייכם כל מה שמדויק ונכון בתחום השפע שאתם מבקשים למגנט אליכם❤ בחרו לפחות אחת משלוש המשימות, העיקר שתרגישו בנוח, מחויבים ומחוברים לתהליך. שימו לב כי משימה 1 היא מהיום ועד סוף האתגר. 1. *צפו בסרטון* - "אף אחד לא רוצה קפה של עבדים" - *סחר הוגן* - האם כבר ראיתם את הלוגו של "סחר הוגן" בחנויות? מה גיליתם שהפתיע אתכם בהקשר זה? ❤️ https://www.youtube.com/watch?v=fBeaxlim8ek 2. *טינג איקיגאי* רוצה למצוא את האיקיגאי שלך? - חבר.י בין מרכיבים שונים משלושת חלקי הטבלה ליצירה של פעילות ערכית חדשה שאת.ה יכול.ה לעבוד. – אתגר את עצמך לעזור לעוד 3 חברים למצוא את הטינג איקיגאי שלהם. 3. *משחק תפקידים*: "בחרו דמות שמתקשרת לסוג השפע שבו אתם חושקים". משם, כמו שחקנים מסורים עליכם להתחיל להיכנס לנעלי הדמות ולהתנהל בחייכם כאילו אתם אותה דמות. עלינו להתמקד לא רק במה שנקבל. חשוב לא פחות לדמיין ולראות את עצמינו כאותו אדם בעל משאבים ואמצעים. מי האדם הזה? איך הוא חושב? נוהג, מתלבש ואוכל? איך נראה היום שלו? – כיצד היה מגיב לשיחות שנקרות בדרככם? מהי רמת האנרגיה שהיה משקיע? מהן הבחירות שהיה עושה? שימו לב לאופן שבו אנחנו תופסים את עצמנו כשאנחנו מתנהלים מתוך התפיסה החדשה הזו ולאופן שבו הסובבים אותנו תופסים אותנו. כיצד זה מרגיש? האם דברים נראים אחרת? מטרת המשחק היא ללמד אותנו את מידת ההשפעה שיש לתפיסה שלנו את המציאות על המציאות שאנו חווים. מהיום ועד סוף האתגר עליכם לתרגל את אותה הדמות. *בסיום התרגול יש לסמן את מספר היום ו- 💸 בכל יום מעתה ועד סוף האתגר.* ''', "20:00:08": ''' *ברוכים הבאים ליום השמיני של האתגר* *יעד מספר 8: הוגנות תעסוקתית וצמיחה כלכלית* בערך מחצית מאוכלוסיית העולם עדיין מתקיימת על המקבילה של 2 דולר אמריקאי ליום. וביותר מדי מקומות, העובדה שלאדם ישנה עבודה אינה מבטיחה את יכולתו להימלט מעוני. ובישראל? עם פרוץ משבר הקורונה, שיעור האבטלה טיפס במהירות למעל מיליון מובטלים - רובם הם עובדים שהוצאו לחל"ת. הצורך הוא ברור - מודלים עסקיים חדשים, חקיקה חדשה, לטובת מה שבאמת נחוץ וטוב לנו ולעולם. איך מחזירים את המובטלים ודורשי העבודה לשוק התעסוקה? אילו פתרונות מציעים משרדי הממשלה? ומהם השינויים בדפוסי העבודה שאנו עתידים לראות? בימים שכאלו חלה החובה על כל אחד ואחת מאיתנו להראות יצירתיות, אחדות ומנהיגות, ולהוות דוגמה אישית ומודל לחיקוי לילדינו וחברינו, לבניית אמון מחדש וליצירה של תחושת בטחון וקיימות. ועכשיו ל*חשיבה מוטת עתיד* - אחד ההבדלים הבולטים ביותר בין אנשים מצליחים לאלו שפחות מצליחים, אינו כמות הכסף ברשותם, אלא חשיבה מוטת עתיד, והיכולת שלהם לפעול בזמן. כשאנו כאחד מדמיינים שפע ובטחון, נוכל אזי ליצור לעצמנו ולקדם שפע ובטחון כלכלי. מה אם הפתרון למשבר הכלכלי הוא הכנסה בסיסית אוניברסלית? דמיינו לרגע שהמדינה מעניקה לכם הכנסה בסיסית שתאפשר לכם ביטחון כלכלי. 
הכנסה שתפיג את החשש מדאגות קיומיות ותאפשר יזמות וצמיחה. נשמע דמיוני? פעם זכויות אזרח, זכות הצבעה לנשים, ביטול העבדות והקמת מדינת היהודים – נשמעו אוטופיים. ממש כמוהם, רעיון ההכנסה הבסיסית נשמע אוטופי בקריאה ראשונה וריאליסטי והוגן בקריאה שנייה. כל אזרח יקבל הכנסה בסיסית (Universal Basic Income) המאפשרת חיים בכבוד, מבלי שיצטרך לחשוש מפני מחסור במזון או בקורת גג. נשמע כמו משימה בלתי אפשרית? זמנים קשים מצריכים פעולות נחרצות. עלינו לשלב כוחות ולהמציא פתרונות. *הפתרון למשבר הכלכלי - הכנסה בסיסית אוניברסלית* הכנסה בסיסית לכל אזרח מובילה לפחות עוני, פחות קצבאות, פחות פשיעה, פחות ימי מחלה; וגם ליותר צריכה, יותר ביטחון ויותר חשיבה יזמית. מחקרים אפילו מראים עלייה באיי-קיו וברמת האושר. כשהאזרח הקטן מרוויח יותר, הכלכלה כולה מרוויחה. ולסיום, וחשוב מאוד, במיוחד בתקופה שכזו: תנו את מה שאתם רוצים לקבל. אתם חייבים לשדר לתת המודע שכבר יש לכם שפע כדי לקבל שפע. לכן לתרום זה חשוב מאין כמוהו. זה מדהים וזה עובד בצורה פלאית. אולי בגלל שבפועל אנחנו אחד. המחשבה המרכזית להיום: *"אם תרצו אין זו אגדה".* ''' , "20:00:13": "image/C18Heb/8.opus" , "20:00:20": "image/C18Heb/8.2.1.png", "20:00:25": "image/C18Heb/8.2.2.png", "20:00:30": "image/C18Heb/8.2.3.png", "20:00:35": "image/C18Heb/8.2.4.png", "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 9 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 9 ''' 9:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/9.png" , "20:00:05": ''' 1. *צפו בסרטון* – זמנים מודרניים ורשמו במחברת מה פס הייצור שלכם? ❤️ https://www.youtube.com/watch?v=6n9ESFJTnHs 2. *חייזרים בישראל* - תארו לעצמכם שחייזרים מכוכב ששמו טינג נחתו אצלכם בבית והם מציעים לכם שלוש משאלות לחדש דברים בחייכם ועוד שלוש משאלות לחדש דברים במדינה שלכם – מה הייתם מחדשים?? ❤️❤️ 3. *מוצץ דיגיטלי* משימת דיאטה סלולרית - עליכם להימנע משימוש בטלפון משך זמן רצוף של 1 שעה קבועה, בשעות היום ומדי יום – מהיום ועד סוף האתגר. אם בחרתם במשימה זו סמנו בקבוצה 📱9 מדי יום. להימנע הכוונה לסגור אותו לגמרי למשך שעה. 4. *סעיף 4* באתגר של 3 משימות מדי יום - זו חדשנות  מהיום ועד סוף האתגר רשמו במחברת הרעיונות שלכם 3 רעיונות חדשים מדי יום, סמנו 💡 ערב קסום מלא בקיימות, מנהיגות וחדשנות 💕 *כי כיף זהו שם המשחק אי~יה* ''' , "20:00:10": ''' *ברוכים הבאים ליום התשיעי של האתגר - יעד מספר 9: תעשייה חדשנות ותשתיות* כל תעשייה חייבת חדשנות, ותשתיות חסינות המתפקדות כהלכה הן אבני הבניין עבור כל קהילה משגשגת. *חדשנות ותשתיות הן גם ברמה הרוחנית* - ברמה האישית, זה להשתחרר מכבלי העבר, מאותן דעות קדומות ומנהגים אשר מגבילים את הצמיחה שלכם. חדשנות זה להמציא את עצמכם מחדש, לזהות הזדמנויות בתקופה של משבר, להתחבר לעצמכם, לרוח ולנפש. *הגיע הזמן לקבל החלטות משנות חיים!* - איך? מיקוד והתכוונות לגבי מה שאנחנו רוצים. *זכרו* - מה שאנחנו שמים לב אליו מתרחב בחיינו, וההתכוונות שלנו תעזור ליקום לתמוך בתוצאה הרצויה. כן, בעזרת מיקוד והתכוונות, תוכלו לשנות את חייכם ולשקף את רצונותיכם ביתר דיוק. 
המחשבה המרכזית להיום: *"חזק, חזק ונתחדש"* ''' , "20:00:13": "image/C18Heb/9.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 10 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 10 ''' 10:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/10.png" , "20:00:05": ''' *הקשבה* ערב מהמם לכולם 💜💜💜 אנחנו ביום ה-10 מה שאומר שאתם כבר 10 ימים בתהליך הזה ואני גאה בכל אחד/ת מכם/ן ההשקעה שלכם לא מובן מאליה, פרגנו לעצמכם! 🙏🙏🙏🙏🙏🙏🙏🙏🙏🙏🙏 מאז ומתמיד חינכו אותנו לחשוב דואלי, אנחנו והם, ימין ושמאל ואף גרוע מכך כמו למשל, לאומי-מקומי וגלובלי, עשירים ועניים, וכן הלאה. לכן הייתי רוצה שביום הקרוב נתרגל הקשבה 👂🏼👁️💜. בכלל, אחת המשימות הקשות כיום היא איך לנתב את דרכנו בכל הקולות הדורשים את תשומת הלב שלנו - מדיה (אינטרנט, טלוויזיה, רדיו, עיתונים, פרסומות וכו '), קולות של המדינה, ביטחון, כלכלה, משפחה - ולמצוא את החשוב לנו ולמצוא את עצמנו בקרב אלה. כדי להיות אדם טוב יותר, עליך תחילה ללמוד להקשיב - המילה הסינית העתיקה להקשיב, TING, לוכדת את רוח ההקשבה: אוזניים - להיות קשוב בזמן שהאחר מדבר ולא לחשוב על מה לומר. עיניים - להיות מסוגלים לשים את עצמנו במקום של האחר ולראות דברים דרך נקודת מבט שלו. נפש - היכולת להבין את דברי האחר. לב - רק כאשר באמת אכפת לנו אז אנחנו באמת קשובים. מילה אחרונה, אם אתם צריכים לזכור דבר אחד מהיום הזה, אני רוצה שתזכרו: אם אתם נתקלים בקשיים, מה שזה לא יהיה, הפתרון הוא בדיאלוג, בשיח אמיתי, בטלפון או פנים אל פנים. אז להפסיק מיד לשלוח הודעות טקסט, דואר אלקטרוני ו googling, ובמקום זאת , תתחילו להפנים את עקרונות ההקשבה - TING 聽. הימים הבאים יהיו מיוחדים ומשופעים מתנות מהיקום. למדו להקשיב ולהסתכל סביב ולהבחין בהן 🌾🌿🌺👂🏼👁️💜🌈🌲🌳💐🌼 *משימה ליום העשירי לאתגר* 👂🏼👁️💜 השיעור/משימה של היום היא ליהנות מהיום על ידי הקשבה אמיתית. בסוף היום, כתבו במחברת לפחות שלוש דרכים שבהן שמתם לב לדברים שלא הבחנתם בהם קודם לכן (אפילו בדברים קטנים). ❤️❤️❤️ המחשבה של היום: *הקשבה אמיתית היא מהאוזניים, העיניים והלב.* ערב מהמם ומלא בקיימות, מנהיגות והקשבה! 🦋🦄 ''', "20:00:08": ''' *יעד 10 – צמצום אי השיוויון* ברוכים הבאים ליום העשירי של אתגר 18 הימים לקיימות ומנהיגות של טינג גלובל. חוסר שוויון בהכנסות ובעושר הינן חמורות והם נעשים רחבים בכל העולם. 1% מהאנשים העשירים ביותר באוכלוסיית העולם שולטים כיום בכ-40% מסך הנכסים הגלובלי, בעוד שהמחצית הענייה ביותר מחזיקים רק ב 1%. במילים פשוטות - מרבית מהעושר העולמי מוחזק על ידי קבוצה קטנה מאוד של אנשים. מה המשמעות האמיתית של ההבדלים הללו? על מנת שמדינות ישגשגו, שוויון וצמיחה חייבות להיות נחלתן של כלל האוכלוסייה. ללא קשר למין, גזע, דת, אמונה או מצב כלכלי. כאשר נגיע למציאות בה כל אדם עצמאי ובלתי תלוי, אז יוכל כל העולם לשגשג - נקודה. על מנת שנוכל להילחם באי שוויון בין ובתוך מדינות, עלינו לקדם מודלים עסקיים כוללניים אשר מעצימים קבוצות, הנמצאות בשוליים, במקומות העבודה, בשווקים ובקהילות. שאלו את עצמכם "איך אני יכול/ה לעזור? איך אני יכול/ה לשרת?" לשרת אחרים עם ה'מתנות' שלכם, זה הביטוי הגבוה ביותר של הייעוד שלכם, כאשר הביטוי היצירתי שלכם תואם את צרכיהם של אלה שמסביבכם, שפע זורם בקלות לחייכם, בסדר אלוהי מופתי. תוכלו להתאמן היום, פשוט הכירו כל הזמן בכישרונות הייחודיים לכם. 
הקשיבו לקול הפנימי שלכם ושאלו את עצמכם איך תוכלו להשתמש ב'מתנות' האלה לשרת את העולם בדרך הטובה ביותר. המחשבה המרכזית להיום: *"אני מקשיב לקול הפנימי שלי. התשובות נמצאות בתוכי!"* 👂🏼👁️💜 ''' , "20:00:13": "image/C18Heb/10.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, 10.5:{}, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 11 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 11 ''' 11:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/11.png" , "20:00:05": ''' *יעד 11 – ערים וקהילות מקיימות* בחרו לפחות משימה אחת משלוש, לא משנה איזו, העיקר שתרגישו בנוח, מחויבים ומחוברים לתהליך. 1. *צפו בסרטון* –קיימות עירונית ❤️ https://youtu.be/azSgvH4h8VE 2. *ממציאים סמלים סביבתיים חדשים* - האם אתם יכולים לייצר סמלים/לוגואים סביבתיים עתידיים? ❤️❤️ ומה לגבי סמל אישי שהוא כולו אתם?? הראו את הסמל שיצרתם לחברים ונסו לראות אם הם מצליחים להבין למה התכוונתם. הם הצליחו להבין? נפלא. לא הצליחו? שנו את הסמלים כך שיובנו יותר, והראו להם שוב. 3. *מכתב לראש העיר* - כתבו מכתב לראש העיר שלכם, שבו תציינו את כל מה שתרצו לשנות בעיר. אתם כמובן מוזמנים לשתף בקבוצה ממה שכתבתם אפילו לשלוח לראש העיר שלכם - למה לא בעצם? ❤💜💛 שיהיה לכולכם יום מלא בקיימות ומנהיגות ❤💜💛💙💚🤎 ''', "20:00:08": ''' *ברוכים הבאים ליום האחד-עשר של האתגר – ערים וקהילות מקיימות* לפי יעד 11 עד לשנת 2050, 70% מאוכלוסיית העולם תתגורר בערים, ובכך הופכות הערים להיות חיוניות בהשגת עתיד בר קיימא עבור העולם. יש לי חלום לחיות בעיר מקיימת המאפשרת חיים באושר וקיום בכבוד לכל, בתוכה ומחוצה לה. יש לי חלום לחיות בעיר המשתמשת בתבונה ובהוגנות בתשתיות ובמשאבים החומריים, הטבעיים, האנושיים והחברתיים שברשותה. יש לי חלום לחיות בעיר הלוקחת אחריות על חלקה בניהול מערכות אקולוגיות גלובליות, ואחריות למורשתה הפיזית והתרבותית ולדורות הבאים. ואתם, באיזה עיר אתם באמת רוצים לחיות? *מה מצבנו בישראל?* אוכלוסיית ישראל צפויה להמשיך ולהכפיל עצמה עד 2050. לעליה התלולה בגודל האוכלוסייה ובצפיפותה ישנן השלכות מרחיקות לכת על כל מרקם החיים בישראל בהווה ובעתיד, והיא אינה פוסחת על אף אחד מאיתנו. יש לי חלום, שנושא גידול האוכלוסייה יזכה להתייחסות ראויה בשיח הציבורי ובשקיפות ובפתיחות כלפי הציבור. יש לי חלום, שנראה שאכפת לנו ונאמץ הרגלים חדשים. 
המחשבה המרכזית להיום: *"אני מאתגר את עצמי כי אכפת לי מהעיר שלי, מהמדינה שלי ומהעולם כולו."* ''' , "20:00:13": "image/C18Heb/11.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 12 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 12 ''' 12:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/12.png" , "20:00:05": ''' שבת שלום ומבורכת, היום אשלח מוקדם יותר את המשימה כדי להתחשב בשומרי השבת בקבוצה הקסומה שלנו ❤️ *משימה ליום השנים-עשר 💖💖- צריכה וייצור אחראיים* אתגרו את עצמכם להרגלים חדשים ללא חד-פעמי וללא שקיות. בחרו לפחות משימה אחת משלוש, העיקר שתרגישו בנוח ותרגישו מחויבים ומחוברים לתהליך. 1. *צפו בסרטון* – סרטון על איך הזבל של לורן, צעירה ניו-יורקית ובוגרת לימודי סביבה מסתכם בצנצנת אחת בלבד. ❤️12 https://www.youtube.com/watch?v=pF72px2R3Hg 2. *נקיון סביבתי* - ניקיון של פסולת בסביבה הקרובה אליי. פשוט לקחת שקית ולנקות, לא לבקש מאף אחד לנקות, ולא לצפות, האחרים יראו ותראו איך הם יחקו אתכם (זה עובד, מנסיון). אתם מוזמנים לצלם את המקום לפני ואחרי שניקיתם ולשתף איתנו כאן בקבוצה או במדיה החברתית. ❤❤ 12 3. *אני אומר לא לחד-פעמי* - מהיום ועד סוף האתגר, לא להשתמש בחד פעמי, לא בקבוקי שתיה, לא משקאות מוגזים חד פעמיים. *בסיום התרגול יש לסמן את מספר היום ו- 🥥 בכל יום מעתה ועד סוף האתגר.* ''', "20:00:08": ''' *ברוכים הבאים ליום השנים-עשר של האתגר – צריכה וייצור אחראיים* אמא אדמה סיפקה לנו שפע של משאבי טבע. אולם לא השתמשנו בהם באופן אחראי. כיום אנחנו צורכים משאבים בקצב הרבה מעבר למה שכדור הארץ מסוגל לספק לנו. חובה עלינו ללמוד איך להשתמש ולייצר חלופות ברות קיימא ולתקן את הנזק שהסבנו לסביבה ולא זה הסוף שלנו. ישראל היא מעצמת זבל. שמעתם נכון. אנחנו מייצרים את כמות הפסולת הגדולה ביותר לנפש בקרב מדיניות הOECD מידי שנה, כשכל תושב בישראל מייצר 1.7 ק”ג פסולת ביום! *למה?* לא מגעיל אתכם ללכת לים ולשבת על תערובת חול וזבל פלסטי? הגיע הזמן לשנות הרגלים באמת ולתבוע מהחברות היצרניות לקחת אחריות על כל מה שהם מייצרים, הגיע הזמן לצאת לרחובות ולהרים צעקה ולהחתים על עצומה כי די כבר נמאס. העולם שלנו מתכלה. חייבים להתחיל למחזר, לצמצם, ולייצר כמה שפחות זבל בעולם. כי הזבל לא נעלם סתם. הוא רק מטואטא מתחת לשטיח העולמי או נשפך לים. על כל אחד מאיתנו החובה לקחת החלטה ולהתחיל להפחית את *טביעת הרגל האקולוגיות האישית* על מנת לאפשר התחדשות של משאבים טבעיים שעליהם הן חיינו, עתיד ילדינו והמגוון הביולוגי נסמכים. 
המחשבה המרכזית להיום: *אני עוצר לחשוב שלוש פעמים לפני שאני צורך לעצמי* ''' , "20:00:13": "image/C18Heb/12.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 13 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 13 ''' 13:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/13.png" , "20:00:05": ''' 1. *צפו בסרטון* –המצב חמור- של מכון השל - אני מציע לכל אחד להשקיע כ 10 דקות קצרות בחייו הארוכים כדי שהמודעות תמשיך לעלות. ערב קסום מלא בקיימות, מנהיגות והעברת הטוב הלאה 💕 https://endoftheworld.heschel.org.il/disaster/?fbclid=IwAR24O0OWn36EOEp_91sGpPtvGZHSXTjZ85mBg5Hi-uvt4A6Vx8ogby84Cj4 ''', "20:00:08": ''' 2. *אז מהי טביעת הרגל האקולוגית שלכם?* - מוזמנים לשחק ולבחון עצמכם. מהאתר של מכון השל לקיימות. ❤💜 https://www.heschel.org.il/heschel-media-story-136349 3. *אני משתף כי אכפת לי* - המשימה של היום היא לשתף על האתגר עם החברים, מהיום ועד סוף האתגר, אפילו 3 אנשים מדי יום מספיקים, למרות שככל שתשתפו עם יותר אנשים האנרגיה מעצימה יותר בעיניי. זכרו לשתף, חוויות, רשמים, מה למדתם חדש וכולי. הסימון לשיתוף הוא אימוג'י 🐜 🐜 🐜 🐜 🐜 🐜 ''' , "20:00:10": ''' *ברוכים הבאים ליום השלושה-עשר של האתגר – שינויי אקלים* עוד שהייתי סטודנט צעיר דיברו על ההתחממות הגלובלית. אני זוכר איך יצאתי מהשיעור מפוחד שהנה סוף העולם קרב ובא. כן, למעלה מ-30 שנה שהקהילה המדעית מתריעה על עליה בשיעורי פחמן דו-חמצני באטמוספירה. קצב הגדילה של הפליטות שלנו עולה, והוא עולה אפילו יותר מהר ממה שחשבנו שהוא התרחיש הגרוע ביותר רק לפני מספר שנים. גרטה ת'ונברג, בגיל 15, הבינה את השוני במה שאמרו כמה מומחי אקלים ובפעולות שנעשו בחברה. ההבדל היה כה דרסטי לדעתה שהיא החליטה לקחת את העניינים לידיים. *המטרה שלי ושל כולנו חייבת:* 1. לעורר שיח בנושא הקידמה – טוב? רע? איך נדע? 2. לתקשר את משבר האקלים לדור הצעיר 3. לערב את הדור של היום בתהליך קבלת ההחלטות של המחר *סכנה!* - שינויי האקלים הם סכנה ממשית ובלתי נמנעת לכלל האנושות. ההשפעות של שינויי האקלים כבר ניכרות ועלולות להיות טרגיות, אם לא נפעל כעת! באמצעות חינוך, חדשנות ומתוך דבקות לאחריות שלנו, האזרחים, הן המבוגרים והן הצעירים, נוכל אולי לבצע את השינויים ההכרחיים כדי להגן על המשך קיומו של המין האנושי. 
המחשבה המרכזית להיום: *"משבר האקלים הוא גם האתגר שלי"* ''' , "20:00:13": "image/C18Heb/13.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 14 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 14 ''' 14:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/14.png" , "20:00:05": ''' היום אשלח מוקדם יותר את המשימה כדי להתחשב בשומרי השבת בקבוצה הקסומה שלנו ❤️ *משימה ליום הארבעה-עשר* 🐋14 – *החיים מתחת לפני המים* בחרו לפחות אחת משלוש, העיקר שתרגישו בנוח ותרגישו מחויבים ומחוברים לתהליך. ''', "20:00:08": ''' 1. *צפו בסרטון* - הצלת צבי ים שהסתבך בשק ניילון ❤️ https://www.youtube.com/watch?v=FpUzdXcJ6zc 2. *הלויתן האחר* - מצאו באינטרנט תמונה של לויתנים שאהבתם במיוחד. שתפו עם 5 חברים בווטסאפ או בפוסט במדיה החברתית והסבירו למה אתם חושבים שזה יפה וחשוב. דרך אגב, האם מצאתם את הלויתן האחר? 🐋14 (רמז: נסו לחפש באנגלית) או *הים שבתוכנו* - מצאו בבית פריט כלשהו שקשור לים (צדפה שאספתם, צעצוע או משחק עם תמונות של דגים, בובה וכולי), הצטלמו איתו ושתפו עם 5 חברים בווטסאפ או בפוסט במדיה החברתית והסבירו למה אתם חושבים שזה יפה וחשוב. ❤️❤️ 3. *מלאכי שינוי* - המשימה של היום ועד סוף האתגר היא לפתוח *קבוצת אתגר18 משלכם* לקיימות ומנהיגות.💝בקבוצה זו לא חייבים חברים רבים, אפילו 3 אנשים מספיקים, למרות שככל שיש יותר אנשים האנרגיה מעצימה יותר בעיניי. הסימון לביצוע המשימה הוא 🤹🏻‍♂️🥇⌛ כל יום, תהיה לכם את המשימה להעביר את ההנחיות מיום עד סוף 18 הימים, בדיוק כפי שקיבלתם אותם ותמשיכו לקבל אותם ממני. אפשר להשתמש בהודעות הקודמות ששלחתי. זכרו כי תצטרכו לשלוח את המסר ותרגילי המחברת כל יום, ולעקוב אחר האנשים בקבוצתכם שעשו את המשימות (או להוציא מהקבוצה) - תרגלו נתינה. ''' , "20:00:10": ''' *ברוכים הבאים ליום הארבעה-עשר של האתגר – החיים מתחת לפני המים* יעד מספר 14 עוסק בשימור האוקיינוסים, הימים והחיים התת-ימיים. נכון להיום למעלה ממחצית משוניות האלמוגים בעולם מצויות בסכנת הכחדה. אוקיינוסים עומדים בפני איום של זיהום ימי ותזונתי, דלדול במשאבים ושינויי אקלים, כל אלה הם תוצאה בעיקר של מעשי האדם וההשלכות מגיעות גם אלינו. אלו יוצרים בעיות סוציו-אקונומיות עולמיות, ובכלל זה סיכונים בריאותיים, בטיחותיים וכלכליים. האם זה באחריותנו לוודא כי מנהיגי העולם יעשו תפקידם נאמנה להגן על בעלי החיים הימיים? האם זה באחריותנו לתמוך באנשים התלויים באוקיינוסים, בין אם התלות היא לתעסוקה, משאבים או הנאה? רציתי לנצל את הבמה ולהציג בגאווה שתי יזמיות ישראליות שמקיימות ושומרות על הסביבה הימית: אינה ברוורמן מייסדת שותפה בחברת אקו וייב פאוור שעוסקת בייצור חשמל מגלי הים ודר שמרית פרקול-פינקל ז"ל יהי זכרה ברוך, מייסדת שותפה בחברת אקונקריט שפיתחה בטון אקולוגי המכיל תערובת חומרים המעודדים התיישבות של בעלי חיים ימיים. המחשבה המרכזית להיום במילים של נתן יונתן: *"כמו תהום הים - כל זוהמת עולם אליו שוטפת והוא טהור - כך נפש האדם, כמו הים"* – האמנם? 
''' , "20:00:13": "image/C18Heb/14.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 15 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 15 ''' 15:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/15.png" , "20:00:05": ''' *משימה ליום החמישה-עשר 💖💖החיים על פני היבשה* אפשרו לעצמכם זמן שקט ורגוע, נשמו עמוק ...אם אפשר בצעו את המשימה בטבע, זה יהיה מעולה 🌸🐛🦄🌼🌻🌹🌷💐🌺🍄☘🍀🌱🌴🌲 בחרו לפחות אחת משלוש, לא משנה מה העיקר שתרגישו בנוח ותרגישו מחויבים ומחוברים לתהליך. 1. *קראו את הכתבה* באתר "עושים קיימות", מה דעתכם על רעיון היער הוורטיקלי כחלק מהנוף העירוני באזור המחיה שלכם? ❤️ https://amalnet.org/earth/?p=127 2. *לנטוע עץ* או לתרום לגוף המקדם נטיעת עצים באזורכם. צלמו או הסריטו את עצמכם והנציחו את הקסם. ❤️❤️ 3. *קפסולת זק"מ זמן-קיימות-מנהיגות* : כתבו מכתב בקשה ליקום, עטרו אותו, כתבו את שמכם ואת התאריך, והטמינו אותו באדמה בקפסולה (קופסה קטנה, בקבוק זכוכית ואפילו בתוך עציץ). זכרו: אתם שותלים תקוות, ציפיות חלומות והיקום יצמיח אותם. הגדירו במדויק מה אתם רוצים ומתי. ❤️❤️❤️ *הערה*: אפשר גם להעביר המכתב לאדם קרוב, כמו מורה או חבר, שישמור על מכתב ויחזיר לכם אותו כעבור שנה. ערב קסום מלא בקיימות ומנהיגות 🌈❤️💙 ''', "20:00:08": ''' *ברוכים הבאים ליום החמישה-עשר של האתגר – החיים על פני היבשה* יעד מספר 15 – הגנה על מערכות אקולוגיות ביבשה, שחזורן וקידום שימוש בר קיימא בהן, ניהול בר קיימא של יערות, מאבק נגד מדבור, עצירה והפיכה של הרס קרקעות, עצירת האובדן של מגוון ביולוגי. אנו חיים בעיצומו של עידן הכחדת מינים, רובו מעשה ידי האדם. אם תימשכנה המגמות הנוכחיות צפויים כמחצית המינים החיים כיום להיכחד עד סוף המאה. רוב המינים הנכחדים אינם מוכרים ולכן איננו יודעים איך תשפיע הכחדתם. האקולוג הנודע פול ארליך ממשיל את המצב הזה לנוסעים במטוס שמפרקים ברגים בחלקים שונים של המטוס תוך כדי טיסה. ייתכן שהבורג מחבר את המשענת למושב, אך ייתכן גם שפירוק הבורג יפגע בפעולת המנוע. כשם שקיימים הרבה ברגים במטוס, כך קיימים מינים רבים של יצורים חיים על פני כדור הארץ ואין לנו אפשרות לצפות מראש מה תהיינה ההשלכות של הכחדתם. מה שאנחנו כן יודעים הוא שאנו מכחידים חלקים מהמערכות תומכות החיים בכדור הארץ, שכן החיים עצמם הם שיוצרים את התנאים תומכי החיים של כוכב הלכת שלנו. האם עלינו לשמר יער גשם רק בשל הערך הכלכלי של התרופות, ומיני המזון הפוטנציאלי המתחבאים בין ענפיו? האם עלינו לשמר את היער בגלל שהוא חלק מהמערכות תומכות החיים של כדור הארץ? ואולי התשובה נמצאת בערכי התרבות שלנו? באיזו קלות אנו מרשים לעצמנו להתעלם מכל היופי וההוד שבטבע? האם אכן אנחנו רוצים לחיות בעולם שכולו מלאכותי? מהן ההשלכות של מצב כזה על הרווחה הנפשית של בני האדם? מדינת ישראל, בעלת מגוון ביולוגי עשיר במיוחד עקב מיקומה הייחודי בחיבור היבשות, נופה המגוון, ותנאי האקלים שלה, טרם השכילה להפוך את השיקול של בריאות המערכות האקולוגיות לחלק בלתי נפרד ממערכת קבלת ההחלטות של בינוי ופיתוח ועל כן מצער לציין שהמגוון הביולוגי נמצא בה עדיין בסכנה חמורה. המחשבה המרכזית להיום: *"כי האדם הוא עץ השדה - כמו העץ הוא שואף למעלה"* - ואתם? 
''' , "20:00:13": "image/C18Heb/15.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 16 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 16 ''' 16:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/16.png" , "20:00:05": ''' *ברוכים הבאים ליום השישה-עשר של האתגר – שלום, צדק וחוזק המוסדות* יעד מספר 16 מזכיר לנו כי חמלה ומצפן מוסרי חזק הם חיוניים לכל חברה דמוקרטית. רדיפה של החלשים, שחיתויות, עוול והתעללות עדיין משתוללים חופשי וקורעים את מרקם הציוויליזציה. עד כמה שקשה להודות, אף מדינה על פני כדור הארץ אינה מתקיימת בדמוקרטיה אמיתית. לאנשים יש אשליה של בחירה והם למעשה מנוהלים ולא מיוצגים. כל עוד מדובר במערכת המנוהלת על ידי בני אדם תמיד יהיו שחיתות וחוסר צדק, הגיע הזמן לקדם חקיקה לשילוב טכנולוגיות מתקדמות כחלק מהמערכת השלטונית. עלינו להבטיח שיהיו לנו מוסדות חזקים, סטנדרטים עולמיים של צדק ומחויבות לשלום בכל מקום. *משימות ליום השישה-עשר 💖💖 שלום, צדק וחוזק המוסדות* בחרו לפחות אחת משלוש, לא משנה מה העיקר שתרגישו בנוח ותרגישו מחויבים ומחוברים לתהליך. 1. *צפו בסרטון* – האזינו לשיר וכתבו במחברת או שתפו כאן בקבוצה מהו צדק עבורכם ❤️. אני מזמין אתכם ליצור קשר עם חברים לקבוצה שמספר הטלפון שלהם מסתיים באותה ספרה כמו שלכם ולדון איתם בנושא צדק חברתי – יש? https://www.youtube.com/watch?v=BCNctor2Dek 2. *התבוננות לאחור* - הסתכלו אחורה במחברת המשימות שלכם וקראו את כל המשימות שביצעתם. בדקו אם יש לכם פערים באתגרים או משהו להוסיף. ❤️❤️ ''', "20:00:08": ''' 3. *השיר של סופי* - קראו את הסיפור שתמצאו למטה לפחות פעמיים או שלוש ואז תכתבו את ההשתקפויות האישיות שלכם והמחשבות במחברת. ואפילו יותר טוב – שתפו בקבוצת השיתופים אם תרצו🌹🌹🌹 כל מה שעולה ברוחכם לכתוב שמתאים לרוח הדברים. *השיר של סופי* / גלאוריאן סופי התעוררה 😨 מחלומה ב😭 אז היא ארזה את ה👜 שלה ויצאה למסע לראות את ה🌍 היא פגשה לראשונה 🐕 והוא ביקש 💧 אז היא חלקה את ה💧שלה. היא פגשה 🧍‍♂️שהיה רעב, אז היא חלקה את ה🍌שלה. היא פגשה 🐤 קטנה שנפלה מה 🌳 אז היא הורידה את ה👟שלה ועשתה 🏠 ל🐤. היא פגשה 👴🏼 שהיה לו 🥶 אז היא נתנה לו את ה🧥 שלה. היא פגשה 🐑 אבודה שבכתה אז היא שרה לה שיר 😁🎶🎵🎶. כשהתחיל לרדת 🌧 היא ישבה מתחת ל 🌳 היא הייתה צמאה ורעבה היה לה 🥶 והייתה לה רק 👟 אחת אבל היא כבר לא 😨. סופי 😴. היא התעוררה עם 😀 סופי הייתה 😁. ''' , "20:00:10": ''' המסע הרגשי של סופי מורכב משלושה שלבים: פחד -> אמפתיה -> אושר. שחרור סמלי של רוח האדם מחושך לאור. משימה: בחרו אדם שאתם חושבים שהוא עצוב / אומלל / לא טוב לו/לה, שהוא/היא מתלונן/נת כבר זמן מה על החיים ושתפו את הסיפור של סופי עם אדם זה. לאחר מכן, כתבו במחברת: איך היתה חוויית השיתוף ומה היא עוררה בכם? האם הוא / היא ענה לכם? האם דיברתם ואיך? האם האדם הודה לכם? איך השיתוף גרם לכם להרגיש? 
המחשבה המרכזית להיום: *צדק-צדק תרדוף* ערב קסום מלא בקיימות, מנהיגות, שלום, צדק, מוסדות חזקים והעברת הטוב הלאה 💕 *לודולנד לעולם לא מתה – היא נולדת מחדש* שרון גל-אור שגריר תרבותי גלובלי טינג גלובל ''' , "20:00:13": "image/C18Heb/16.opus" , "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, 16.5:{}, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 17 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 17 ''' 17:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/17.png" , "20:00:05": ''' אני גאה בכם שהתמדתם והגעתם עד ליום ה – 17 של האתגר, עוד יום... היו גאים בעצמכם, עטפו את עצמכם בשפע של אהבה על ההתמדה והרצון לחיים של קיימות ומנהיגות. דרך אגב, זו ההזדמנות שלכם להשלים המשימות שלא הספקתם, המשך יום מקסים, שרון ''', "20:00:08": ''' *משימה ליום השבעה-עשר 💖💖 – שותפות להשגת היעדים* משימת היום: *שיטת טינג לחשיבה בינתחומית בשילוב קידוד סמלים* בחרו לפחות אחת משלוש המשימות, לא משנה מה העיקר שתרגישו בנוח ותרגישו מחויבים ומחוברים לתהליך. 1. *צפו בסרטון* – עפים על החיים, ואז תכתבו את המחשבות והתובנות שלכם במחברת. ❤️ https://youtu.be/rlwfd1ZaDJ4 ''' , "20:00:10": ''' 2. קראו על סיפור *"נוח והמבול"* מתוך האתר של *SDG ישראל* - על כיצד בזמן ששהו בתיבה היה לנוח, משפחתו והחיות זמן לשוחח על כיצד יראה העולם החדש כשיצאו מהתיבה. המשימה שלכם היא לנסות לענות על השאלות שבסוף המאמר. ❤️❤️ https://www.sdgi.org.il/articles_/%d7%aa%d7%99%d7%91%d7%aa-%d7%a0%d7%97-%d7%94%d7%a1%d7%99%d7%a4%d7%95%d7%a8-%d7%a9%d7%9c%d7%90-%d7%a1%d7%95%d7%a4%d7%a8-%d7%95%d7%94%d7%99%d7%a2%d7%93%d7%99%d7%9d-%d7%9c%d7%aa%d7%99%d7%a7%d7%95%d7%9f/ ''' , "20:00:13": ''' צרו קשר עם חבר/ים לקבוצה - כן, אל תתביישו, גם חברים כאלה שאתם לא מכירים שהרי אנחנו מטפסים את אותו ההר, בדיוק כאן ובדיוק עכשיו, במסע הגיבור האישי של כל אחד ואחת, ועשו את התרגיל יחד. אתם מוזמנים לשתף איתנו כאן רעיונות שנוצרו תוך כדי התהליך. 🐋🌸🙏 3. *בחרו אחת מהאפשרויות*: ❤️❤️❤️ *טינג – חשיבה יצירתית לרעיונות למיזמים חדשים* I. בחרו שלושה תחומי עניין/עיסוק II. פרקו כל תחום ל 3 אסוציאציות שעולות לכם בראש III. חברו בין תחומי הפעילות לכדי השראה ליצירת רעיון למיזם חדש. *טינג – חשיבה יצירתית מציאת פתרונות לבעיות* השלבים ביצירת הרעיון: I. הגדירו בעיה שלכם או בעיה כלשהי. II. לאילו מהיעדים בטבלת יעדי קיום היקום, הבעיה קשורה? III. מצאו שלושה תחומי פעילות הקשורים לבעיה שהגדרתם. IV. פרקו כל תחום ל 3 אסוציאציות שעולות לכם בראש V. חברו בין תחומי הפעילות, למציאת פתרון לבעיה שהגדרתם. VI. ציירו ייצוג סמלי לפתרון. אתם כמובן מוזמנים גם לשתף בקבוצת השיתופים והחוויות ממה שכתבתם. ערב קסום מלא בקיימות, מנהיגות והעברת הטוב הלאה 💕 שרון גל-אור שגריר תרבותי גלובלי טינג גלובל ''' , "20:00:15": ''' *ברוכים הבאים ליום השבעה-עשר של האתגר – שותפות להשגת היעדים* *אז מה זה אומר שיתוף פעולה?* המשמעות האמיתית של שיתוף פעולה היא ביצוע משותף של פעולות שמחברות אותנו לשפע משמעותי חדש במקומות בהם הוא לא היה גלוי קודם ואותו לא היה ניתן לחשוף כאשר כל צד פעל לבדו. *זוהי שעת חירום וזהו משבר אמיתי וצריך להתייחס לכך בהתאם* ולפעול לצמצום השימוש במקורות אנרגיה מתכלים ולעשות יד אחת לפתרון המשבר האקלימי. 
אם ברצוננו לשרוד על כדור הארץ יש לנו כעשור, עד שנת 2030, לשנות לחלוטין את מה ואיך שאנחנו מלמדים בבתי הספר, צורכים אנרגיה, מקיימים חקלאות, מנהלים מים, מדברים עם שכנינו ועוד. לא מדובר בטרנד חולף, אלא בשינוי עולמי באופן בו אנחנו מלמדים, בצורה שנעשים עסקים, באחריות חברתית ותאגידית, וחשיבה לטווח ארוך. “*יש לנו עשר שנים לתקן את הנזק שגרמנו ולמנוע משבר אקלימי*”, מזהיר האו”ם. המחשבה המרכזית להיום: *"אם אתם רוצים ללכת מהר – לכו לבד* *אם אתם רוצים להגיע רחוק – לכו ביחד"* ''' , "20:00:17": "image/C18Heb/17.opus" , "20:00:25": "image/C18Heb/17.1.png", "18:00:00": ''' *נותרו שעתיים להשלמת המשימות של היום* _ העלאת האנושות על דרך חדשה - הכל מתחיל בכם_ 🐋🌸🙏 ''' , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 18 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 18 ''' 18:{ "19:45:00": "#totalPoints", "20:00:00": "image/C18Heb/18.png" , "20:00:05": ''' *ברוכים הבאים ליום השמונה-עשר של האתגר – חי - למען עתיד האנושות* שבוע מקסים לכולכם ותודה שהתמדתם וצלחתם את המסע המדהים הזה. כולי תקווה שתמשיכו לאתגר את עצמכם ושתנהיגו גם אתם רבים אחרים לקיימות ומנהיגות. נכון, כפי שהבנתם, זו היתה רק טעימה מהירה ומתוקה, טריילר לסרט שנקרא החיים, בו אנחנו לא רק הצופים אלא גם השחקנים, שמחפשים משמעות, צמיחה להישגים ועולם טוב יותר לכולנו. זו השנה שלכם. תבקשו, תצהירו, תקשיבו, תתחילו לקיים, ותראו איך אתם מצליחים לצמוח בכל יום עם הבחירות וההחלטות שיצרתם, באופן שבו אתם מבלים את זמנכם, האנשים איתם אתם מסתובבים ואיך שאתם מרגישים בכל רגע נתון. בשבילי, קיימות היא בראש ובראשונה להקשיב לעצמי הפנימי, ללב ולראש, בשבילי קיימות זה לחיות בהוויות שבחרתי לעצמי, שמחה, נתינה והקשבה, בזה מסתכם הכל למעשה. אני רוצה שוב להודות לכם על שהשתתפתם באתגר, כמעט סיימנו ... עוד צעד והגענו לעומק המסע הקטן והגדול הזה. למעשה בואו ונצעד 18 צעדים חגיגיים, תזכורת למסע המשותף שלנו יחד: דמיינו איזה עולם תרצו לדמיין שתומך בכולנו ? אשמח לשמוע כאן או בפרטי אתם יכולים להגיד שאני חולם אבל אני לא היחיד אני מקווה שיום אחד תצטרפו אליי והעולם יהיה כאחד ''', "20:00:08": ''' איזה כיף. אתם מדהימים! אני שמח ואני מברך אתכם. *המשימה להיום* - שלחו לקבוצה זו סרטון או הודעה כתובה, רצוי סרטון. 🎬🎥הציגו את עצמכם, רובכם לא מכירים אחד את השני ויהיה מרגש לדעת מי היה חלק מהאנרגיה שתמכה בכולנו ולהכיר חברים ושותפים חדשים. תארו את החוויה שלכם מ 18 ימי הקיימות ומנהיגות האלה: רגשות, תצפיות, סימנים, הפתעות, מה השתנה בכם ומחוץ לכם וכן הלאה. אם תרצו להדהד משהו ליקום זה הזמן! אם יש לכם רעיונות ופרויקטים זה חשוב מכיוון שבקבוצה זו עשויים להיות אנשים שמעוניינים במה שאתם עושים, בשירותים שאתם מציעים או ברעיונות שלכם. היו יצירתיים ואמיתיים כשאתם מתארים את עצמכם, אל תתביישו, עלו על הבמה. אני מקווה שנהנתם, אני בהחלט מרגיש זכות לעבור את הדרך הזו איתכם וממש נהניתי וצמחתי, ואשמח לראות כמה שיותר סרטונים שלכם או לשמוע מכם בכתב לגבי החוויה שלכם, ואפילו יותר מזה, לשמוע שפתחתם קבוצת אתגר18 משלכם. 
המשך שבוע קסום מלא בקיימות, מנהיגות והעברת הטוב הלאה 💕🌸🐋🙏🌎🌻 שרון גל-אור שגריר תרבותי גלובלי טינג גלובל לפרטים נוספים ויצירת קשר: 055-9721123 | ting.go.global@gmail.com ''' , "20:00:13": "image/C18Heb/18.opus" , }, ################################################################# ################################################################# ################################################################# # # ENTER CORRECT DAY MESSAGES FOR DAY 19 # ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# # ''' DAY 19 ''' 19:{ "19:45:00": "#totalPoints", "10:00:00": "image/C18Heb/19.png" , "10:00:05": ''' תודה שסיימתם בהצלחה את אתגר 18 ימים של קיימות ומנהיגות 💖 💖 💖 ''' }, } #https://docs.google.com/document/d/1DH2D7TbobiQDnjnoJM_TQ7tFBn4GIiftSz1Awz_UHVg/edit #XXX
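The record above closes a challenge-bot message table: a dict keyed by day number whose values map "HH:MM:SS" strings either to message text or to media paths such as "image/C18Heb/18.opus" (with "#totalPoints" apparently a command token). As a minimal sketch of how a sender loop might consume such a table — the `messages_due` helper, the `schedule` name, and the media/text split are assumptions, not part of the source:

import datetime

def messages_due(schedule, day, now):
    """Yield (kind, payload) pairs for `day` whose "HH:MM:SS" stamp has passed (hypothetical helper)."""
    # Zero-padded "HH:MM:SS" keys sort chronologically as plain strings.
    for stamp, payload in sorted(schedule.get(day, {}).items()):
        h, m, s = (int(part) for part in stamp.split(":"))
        if datetime.time(h, m, s) <= now:
            # Payloads starting with "image/" point at media files; everything else is text.
            kind = "media" if payload.startswith("image/") else "text"
            yield kind, payload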
40.511512
671
0.621287
acefafa7a325607efdff8b0d966aa7e53b5d4718
8,627
py
Python
character_art.py
CapnS/AcademyNEXTPlatformer
8ffe4af1830c03e03946b46ef719e40132277e29
[ "MIT" ]
null
null
null
character_art.py
CapnS/AcademyNEXTPlatformer
8ffe4af1830c03e03946b46ef719e40132277e29
[ "MIT" ]
null
null
null
character_art.py
CapnS/AcademyNEXTPlatformer
8ffe4af1830c03e03946b46ef719e40132277e29
[ "MIT" ]
2
2020-06-29T18:01:00.000Z
2020-07-10T23:09:03.000Z
import pygame import os import sys from bullet import Bullet class Character(pygame.sprite.Sprite): ''' Character class that handles movement and user input ''' def __init__(self, platforms, bullets): pygame.sprite.Sprite.__init__(self) image = pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run5.png')) self.image = pygame.transform.scale(image, (50,50)) self.width, self.height = 50, 50 self.rect = self.image.get_rect() self.rect.x += 50 self.x = 0 self.y = 0 self.X = 600 self.Y = 600 self.X_pos = 0 self.Run_right = [] self.Run_right.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run0.png'))) self.Run_right.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run1.png'))) self.Run_right.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run3.png'))) self.Run_right.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run4.png'))) self.Run_right.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run5.png'))) self.Run_right.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run6.png'))) self.Run_right.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run7.png'))) self.Run_right.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run8.png'))) self.Run_left = [] self.Run_left.append(pygame.transform.flip(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run0.png')), True, False)) self.Run_left.append(pygame.transform.flip(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run1.png')), True, False)) self.Run_left.append(pygame.transform.flip(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run3.png')), True, False)) self.Run_left.append(pygame.transform.flip(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run4.png')), True, False)) self.Run_left.append(pygame.transform.flip(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run5.png')), True, False)) self.Run_left.append(pygame.transform.flip(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run6.png')), True, False)) self.Run_left.append(pygame.transform.flip(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run7.png')), True, False)) self.Run_left.append(pygame.transform.flip(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','run8.png')), True, False)) self.MidAir = [] self.MidAir.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','mid_air1.gif'))) self.MidAir.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','mid_air2.gif'))) self.idle = [] self.idle.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','idle1.gif'))) self.idle.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','idle2.gif'))) self.idle.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','idle3.gif'))) self.idle.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset 
Pack','Character','sprites','idle4.gif')))
        self.idle.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','idle5.gif')))
        self.idle.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','idle6.gif')))
        self.idle.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','idle7.gif')))
        self.idle.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','idle8.gif')))
        self.idle.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','idle9.gif')))
        self.idle.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','idle10.gif')))
        self.idle.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','idle11.gif')))
        self.idle.append(pygame.image.load(os.path.join('sprite_art','Jungle Asset Pack','Character','sprites','idle12.gif')))
        self.left_bool = False
        self.right_bool = False
        self.chillCount = 0
        self.runCount = 0
        self.airTicks = 0
        self.gravity = 4.9
        self.platforms = platforms
        self.bullets = bullets
        self.clock = pygame.time.Clock()
        self.jumping = False
        self.jump = 10
        self.hit = False
        self.can_jump = False

    def move(self, x, y):
        self.x += x
        self.y += y

    def update(self, tick):
        self.rect.x += self.x
        self.x = 0
        fitness = 0
        if self.jumping:
            if self.jump >= 0:
                self.rect.y -= (self.jump * abs(self.jump)) * 0.5
                self.jump -= 1
            else:
                self.jump = 10
                self.jumping = False
        if not any(pygame.sprite.collide_rect(self, platform) for platform in self.platforms):
            self.rect.y += self.y + self.gravity
            self.can_jump = False
        else:
            for platform in self.platforms:
                if pygame.sprite.collide_rect(self, platform):
                    if self.rect.bottom > platform.rect.bottom:
                        self.rect.y += self.y + self.gravity
                        self.can_jump = False
                    else:
                        self.rect.y += self.y
                        self.jumping = False
                        self.can_jump = True
                        fitness += 2
        #self.move_left(tick)
        if any(pygame.sprite.collide_rect(self, bullet) for bullet in self.bullets):
            self.hit = True
        return fitness

    def redrawGameWindow(self):
        # Each animation frame is held for three ticks; the counters wrap before
        # count // 3 can run past the end of the frame list.
        if self.runCount + 1 >= 24:
            self.runCount = 0
        if self.airTicks + 1 >= 6:
            self.airTicks = 0
        if self.chillCount + 1 >= 36:
            self.chillCount = 0
        if self.jumping:
            self.image_png = self.MidAir[self.airTicks//3]
            self.airTicks += 1
        elif self.left_bool:
            self.image_png = self.Run_left[self.runCount//3]
            self.runCount += 1
        elif self.right_bool:
            self.image_png = self.Run_right[self.runCount//3]
            self.runCount += 1
        else:
            self.image_png = self.idle[self.chillCount//3]
            self.chillCount += 1
        self.image = pygame.transform.scale(self.image_png, (50,50))
        return self.image

    def move_left(self, tick):
        speed = .0589
        #clock = pygame.time.Clock()
        left = tick*speed
        self.rect.x = self.rect.x - left

    def shoot(self):
        return Bullet(self.rect.x, self.rect.y, 1)

if __name__ == "__main__":
    pygame.init()  # the video system must be initialized before set_mode
    display = pygame.display.set_mode([400, 400])
    clock = pygame.time.Clock()
    # Character requires the platform and bullet sprite groups; empty groups
    # are enough for this standalone demo.
    character = Character(pygame.sprite.Group(), pygame.sprite.Group())
    characters = pygame.sprite.Group()
    characters.add(character)
    while True:
        for e in pygame.event.get():
            if e.type == pygame.KEYDOWN:
                if e.key == ord('a'):
                    character.move(-10, 0)
                elif e.key == ord('d'):
                    character.move(10, 0)
                elif e.key == pygame.K_SPACE:  # K_SPACE is a key code, so compare against e.key
                    print("space")
            elif e.type == pygame.KEYUP:
                if e.key == ord('a'):
                    character.move(10, 0)
                elif e.key == ord('d'):
                    character.move(-10, 0)
                elif e.key == pygame.K_SPACE:
                    print("space")
            elif e.type == pygame.QUIT:
                pygame.quit()
                sys.exit(0)
        character.update(clock.get_time())  # update() expects the last frame time in ms
        display.fill((25,25,200))
        characters.draw(display)
        pygame.display.flip()
        clock.tick(30)
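In redrawGameWindow above, each animation list is indexed with count // 3 while the counter wraps at three times the list length (24 for the 8 run frames, 6 for the 2 mid-air frames, 36 for the 12 idle frames), so every frame is shown for three consecutive ticks. The same scheme in isolation, as a hypothetical helper:

def next_frame(frames, count):
    # Each frame is displayed for three consecutive ticks; the modulo keeps
    # count // 3 inside the list (equivalent to the wrap checks above).
    frame = frames[count // 3]
    count = (count + 1) % (3 * len(frames))
    return frame, count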
48.466292
164
0.606352
acefb025baebae4f5a0aec986cba4cf6a9c2c62f
24,252
py
Python
src/hypertrace/agent/config/config_pb2.py
hypertrace/pythonagent
283e18c61807f4ae653d147be9ff1424b0b0a6eb
[ "Apache-2.0" ]
4
2021-05-19T16:16:26.000Z
2022-01-16T04:48:43.000Z
src/hypertrace/agent/config/config_pb2.py
hypertrace/pythonagent
283e18c61807f4ae653d147be9ff1424b0b0a6eb
[ "Apache-2.0" ]
48
2021-04-27T07:25:48.000Z
2021-08-30T21:27:27.000Z
src/hypertrace/agent/config/config_pb2.py
hypertrace/pythonagent
283e18c61807f4ae653d147be9ff1424b0b0a6eb
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: config.proto """Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='config.proto', package='org.hypertrace.agent.config', syntax='proto3', serialized_options=b'\n\033org.hypertrace.agent.configZ$github.com/hypertrace/goagent/config', create_key=_descriptor._internal_create_key, serialized_pb=b'\n\x0c\x63onfig.proto\x12\x1borg.hypertrace.agent.config\x1a\x1egoogle/protobuf/wrappers.proto\"\x8b\x04\n\x0b\x41gentConfig\x12\x32\n\x0cservice_name\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x39\n\treporting\x18\x02 \x01(\x0b\x32&.org.hypertrace.agent.config.Reporting\x12>\n\x0c\x64\x61ta_capture\x18\x03 \x01(\x0b\x32(.org.hypertrace.agent.config.DataCapture\x12K\n\x13propagation_formats\x18\x04 \x03(\x0e\x32..org.hypertrace.agent.config.PropagationFormat\x12+\n\x07\x65nabled\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\tjavaagent\x18\x06 \x01(\x0b\x32&.org.hypertrace.agent.config.JavaAgent\x12]\n\x13resource_attributes\x18\x07 \x03(\x0b\x32@.org.hypertrace.agent.config.AgentConfig.ResourceAttributesEntry\x1a\x39\n\x17ResourceAttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x90\x02\n\tReporting\x12.\n\x08\x65ndpoint\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x06secure\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12+\n\x05token\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x03opa\x18\x04 \x01(\x0b\x32 .org.hypertrace.agent.config.Opa\x12K\n\x13trace_reporter_type\x18\x05 \x01(\x0e\x32..org.hypertrace.agent.config.TraceReporterType\"\x9c\x01\n\x03Opa\x12.\n\x08\x65ndpoint\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x38\n\x13poll_period_seconds\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12+\n\x07\x65nabled\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"d\n\x07Message\x12+\n\x07request\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12,\n\x08response\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"\xb0\x02\n\x0b\x44\x61taCapture\x12:\n\x0chttp_headers\x18\x01 \x01(\x0b\x32$.org.hypertrace.agent.config.Message\x12\x37\n\thttp_body\x18\x02 \x01(\x0b\x32$.org.hypertrace.agent.config.Message\x12:\n\x0crpc_metadata\x18\x03 \x01(\x0b\x32$.org.hypertrace.agent.config.Message\x12\x36\n\x08rpc_body\x18\x04 \x01(\x0b\x32$.org.hypertrace.agent.config.Message\x12\x38\n\x13\x62ody_max_size_bytes\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\"C\n\tJavaAgent\x12\x36\n\x10\x66ilter_jar_paths\x18\x01 \x03(\x0b\x32\x1c.google.protobuf.StringValue*-\n\x11PropagationFormat\x12\x06\n\x02\x42\x33\x10\x00\x12\x10\n\x0cTRACECONTEXT\x10\x01*:\n\x11TraceReporterType\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\n\n\x06ZIPKIN\x10\x01\x12\x08\n\x04OTLP\x10\x02\x42\x43\n\x1borg.hypertrace.agent.configZ$github.com/hypertrace/goagent/configb\x06proto3' , dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,]) _PROPAGATIONFORMAT = _descriptor.EnumDescriptor( name='PropagationFormat', 
full_name='org.hypertrace.agent.config.PropagationFormat', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='B3', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='TRACECONTEXT', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1515, serialized_end=1560, ) _sym_db.RegisterEnumDescriptor(_PROPAGATIONFORMAT) PropagationFormat = enum_type_wrapper.EnumTypeWrapper(_PROPAGATIONFORMAT) _TRACEREPORTERTYPE = _descriptor.EnumDescriptor( name='TraceReporterType', full_name='org.hypertrace.agent.config.TraceReporterType', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ZIPKIN', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='OTLP', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1562, serialized_end=1620, ) _sym_db.RegisterEnumDescriptor(_TRACEREPORTERTYPE) TraceReporterType = enum_type_wrapper.EnumTypeWrapper(_TRACEREPORTERTYPE) B3 = 0 TRACECONTEXT = 1 UNSPECIFIED = 0 ZIPKIN = 1 OTLP = 2 _AGENTCONFIG_RESOURCEATTRIBUTESENTRY = _descriptor.Descriptor( name='ResourceAttributesEntry', full_name='org.hypertrace.agent.config.AgentConfig.ResourceAttributesEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='key', full_name='org.hypertrace.agent.config.AgentConfig.ResourceAttributesEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='value', full_name='org.hypertrace.agent.config.AgentConfig.ResourceAttributesEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=b'8\001', is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=544, serialized_end=601, ) _AGENTCONFIG = _descriptor.Descriptor( name='AgentConfig', full_name='org.hypertrace.agent.config.AgentConfig', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='service_name', full_name='org.hypertrace.agent.config.AgentConfig.service_name', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='reporting', full_name='org.hypertrace.agent.config.AgentConfig.reporting', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='data_capture', full_name='org.hypertrace.agent.config.AgentConfig.data_capture', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='propagation_formats', full_name='org.hypertrace.agent.config.AgentConfig.propagation_formats', index=3, number=4, type=14, cpp_type=8, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='enabled', full_name='org.hypertrace.agent.config.AgentConfig.enabled', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='javaagent', full_name='org.hypertrace.agent.config.AgentConfig.javaagent', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='resource_attributes', full_name='org.hypertrace.agent.config.AgentConfig.resource_attributes', index=6, number=7, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_AGENTCONFIG_RESOURCEATTRIBUTESENTRY, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=78, serialized_end=601, ) _REPORTING = _descriptor.Descriptor( name='Reporting', full_name='org.hypertrace.agent.config.Reporting', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='endpoint', full_name='org.hypertrace.agent.config.Reporting.endpoint', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='secure', full_name='org.hypertrace.agent.config.Reporting.secure', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='token', full_name='org.hypertrace.agent.config.Reporting.token', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='opa', full_name='org.hypertrace.agent.config.Reporting.opa', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='trace_reporter_type', full_name='org.hypertrace.agent.config.Reporting.trace_reporter_type', index=4, number=5, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=604, serialized_end=876, ) _OPA = _descriptor.Descriptor( name='Opa', full_name='org.hypertrace.agent.config.Opa', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='endpoint', full_name='org.hypertrace.agent.config.Opa.endpoint', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='poll_period_seconds', full_name='org.hypertrace.agent.config.Opa.poll_period_seconds', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='enabled', full_name='org.hypertrace.agent.config.Opa.enabled', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=879, serialized_end=1035, ) _MESSAGE = _descriptor.Descriptor( name='Message', full_name='org.hypertrace.agent.config.Message', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='request', full_name='org.hypertrace.agent.config.Message.request', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='response', full_name='org.hypertrace.agent.config.Message.response', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1037, serialized_end=1137, ) _DATACAPTURE = _descriptor.Descriptor( name='DataCapture', full_name='org.hypertrace.agent.config.DataCapture', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='http_headers', full_name='org.hypertrace.agent.config.DataCapture.http_headers', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='http_body', full_name='org.hypertrace.agent.config.DataCapture.http_body', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='rpc_metadata', full_name='org.hypertrace.agent.config.DataCapture.rpc_metadata', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='rpc_body', full_name='org.hypertrace.agent.config.DataCapture.rpc_body', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='body_max_size_bytes', full_name='org.hypertrace.agent.config.DataCapture.body_max_size_bytes', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1140, serialized_end=1444, ) _JAVAAGENT = _descriptor.Descriptor( name='JavaAgent', full_name='org.hypertrace.agent.config.JavaAgent', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='filter_jar_paths', full_name='org.hypertrace.agent.config.JavaAgent.filter_jar_paths', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1446, serialized_end=1513, ) _AGENTCONFIG_RESOURCEATTRIBUTESENTRY.containing_type = _AGENTCONFIG _AGENTCONFIG.fields_by_name['service_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE _AGENTCONFIG.fields_by_name['reporting'].message_type = _REPORTING _AGENTCONFIG.fields_by_name['data_capture'].message_type = _DATACAPTURE _AGENTCONFIG.fields_by_name['propagation_formats'].enum_type = _PROPAGATIONFORMAT _AGENTCONFIG.fields_by_name['enabled'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE _AGENTCONFIG.fields_by_name['javaagent'].message_type = _JAVAAGENT _AGENTCONFIG.fields_by_name['resource_attributes'].message_type = _AGENTCONFIG_RESOURCEATTRIBUTESENTRY _REPORTING.fields_by_name['endpoint'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE _REPORTING.fields_by_name['secure'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE _REPORTING.fields_by_name['token'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE _REPORTING.fields_by_name['opa'].message_type = _OPA _REPORTING.fields_by_name['trace_reporter_type'].enum_type = _TRACEREPORTERTYPE _OPA.fields_by_name['endpoint'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE _OPA.fields_by_name['poll_period_seconds'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE _OPA.fields_by_name['enabled'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE _MESSAGE.fields_by_name['request'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE _MESSAGE.fields_by_name['response'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE _DATACAPTURE.fields_by_name['http_headers'].message_type = _MESSAGE _DATACAPTURE.fields_by_name['http_body'].message_type = _MESSAGE _DATACAPTURE.fields_by_name['rpc_metadata'].message_type = _MESSAGE _DATACAPTURE.fields_by_name['rpc_body'].message_type = _MESSAGE _DATACAPTURE.fields_by_name['body_max_size_bytes'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE _JAVAAGENT.fields_by_name['filter_jar_paths'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE DESCRIPTOR.message_types_by_name['AgentConfig'] = _AGENTCONFIG DESCRIPTOR.message_types_by_name['Reporting'] = _REPORTING DESCRIPTOR.message_types_by_name['Opa'] = _OPA DESCRIPTOR.message_types_by_name['Message'] = _MESSAGE DESCRIPTOR.message_types_by_name['DataCapture'] = _DATACAPTURE DESCRIPTOR.message_types_by_name['JavaAgent'] = _JAVAAGENT DESCRIPTOR.enum_types_by_name['PropagationFormat'] = _PROPAGATIONFORMAT DESCRIPTOR.enum_types_by_name['TraceReporterType'] = _TRACEREPORTERTYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) AgentConfig = _reflection.GeneratedProtocolMessageType('AgentConfig', (_message.Message,), { 'ResourceAttributesEntry' : _reflection.GeneratedProtocolMessageType('ResourceAttributesEntry', (_message.Message,), { 'DESCRIPTOR' : _AGENTCONFIG_RESOURCEATTRIBUTESENTRY, '__module__' : 'config_pb2' # @@protoc_insertion_point(class_scope:org.hypertrace.agent.config.AgentConfig.ResourceAttributesEntry) }) , 'DESCRIPTOR' : _AGENTCONFIG, '__module__' : 'config_pb2' # @@protoc_insertion_point(class_scope:org.hypertrace.agent.config.AgentConfig) }) _sym_db.RegisterMessage(AgentConfig) 
_sym_db.RegisterMessage(AgentConfig.ResourceAttributesEntry) Reporting = _reflection.GeneratedProtocolMessageType('Reporting', (_message.Message,), { 'DESCRIPTOR' : _REPORTING, '__module__' : 'config_pb2' # @@protoc_insertion_point(class_scope:org.hypertrace.agent.config.Reporting) }) _sym_db.RegisterMessage(Reporting) Opa = _reflection.GeneratedProtocolMessageType('Opa', (_message.Message,), { 'DESCRIPTOR' : _OPA, '__module__' : 'config_pb2' # @@protoc_insertion_point(class_scope:org.hypertrace.agent.config.Opa) }) _sym_db.RegisterMessage(Opa) Message = _reflection.GeneratedProtocolMessageType('Message', (_message.Message,), { 'DESCRIPTOR' : _MESSAGE, '__module__' : 'config_pb2' # @@protoc_insertion_point(class_scope:org.hypertrace.agent.config.Message) }) _sym_db.RegisterMessage(Message) DataCapture = _reflection.GeneratedProtocolMessageType('DataCapture', (_message.Message,), { 'DESCRIPTOR' : _DATACAPTURE, '__module__' : 'config_pb2' # @@protoc_insertion_point(class_scope:org.hypertrace.agent.config.DataCapture) }) _sym_db.RegisterMessage(DataCapture) JavaAgent = _reflection.GeneratedProtocolMessageType('JavaAgent', (_message.Message,), { 'DESCRIPTOR' : _JAVAAGENT, '__module__' : 'config_pb2' # @@protoc_insertion_point(class_scope:org.hypertrace.agent.config.JavaAgent) }) _sym_db.RegisterMessage(JavaAgent) DESCRIPTOR._options = None _AGENTCONFIG_RESOURCEATTRIBUTESENTRY._options = None # @@protoc_insertion_point(module_scope)
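A minimal usage sketch for the generated module above, assuming it is importable as config_pb2 (the endpoint value is a placeholder). Note that wrapped scalars (StringValue, BoolValue) are set through their .value field, while plain enum fields take the module-level constants:

import config_pb2

cfg = config_pb2.AgentConfig()
cfg.service_name.value = "my-service"                 # StringValue wrapper
cfg.reporting.endpoint.value = "http://localhost:9411/api/v2/spans"
cfg.reporting.secure.value = False                    # BoolValue wrapper
cfg.reporting.trace_reporter_type = config_pb2.ZIPKIN
cfg.propagation_formats.append(config_pb2.TRACECONTEXT)
cfg.data_capture.http_headers.request.value = True

data = cfg.SerializeToString()                        # wire-format round trip
clone = config_pb2.AgentConfig()
clone.ParseFromString(data)
assert clone.service_name.value == "my-service"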
45.844991
2,540
0.772926
acefb2345f76b689d9ef3e6bf901b36e3cc20bb3
6,857
py
Python
playwright/_impl/_transport.py
wangkev/playwright-python
b62c1dbd52364c3aa4ba001bad8f94ea43ad1fc5
[ "Apache-2.0" ]
null
null
null
playwright/_impl/_transport.py
wangkev/playwright-python
b62c1dbd52364c3aa4ba001bad8f94ea43ad1fc5
[ "Apache-2.0" ]
null
null
null
playwright/_impl/_transport.py
wangkev/playwright-python
b62c1dbd52364c3aa4ba001bad8f94ea43ad1fc5
[ "Apache-2.0" ]
null
null
null
# Copyright (c) Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import io import json import os import sys from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, Optional import websockets from pyee import AsyncIOEventEmitter from playwright._impl._api_types import Error # Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77 def _get_stderr_fileno() -> Optional[int]: try: return sys.stderr.fileno() except (AttributeError, io.UnsupportedOperation): # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors # This is potentially dangerous, but the best we can do. if not hasattr(sys, "__stderr__") or not sys.__stderr__: return None return sys.__stderr__.fileno() class Transport(ABC): def __init__(self, loop: asyncio.AbstractEventLoop) -> None: self._loop = loop self.on_message = lambda _: None self.on_error_future: asyncio.Future = loop.create_future() @abstractmethod def request_stop(self) -> None: pass def dispose(self) -> None: pass @abstractmethod async def wait_until_stopped(self) -> None: pass @abstractmethod async def run(self) -> None: pass @abstractmethod def send(self, message: Dict) -> None: pass def serialize_message(self, message: Dict) -> bytes: msg = json.dumps(message) if "DEBUGP" in os.environ: # pragma: no cover print("\x1b[32mSEND>\x1b[0m", json.dumps(message, indent=2)) return msg.encode() def deserialize_message(self, data: bytes) -> Any: obj = json.loads(data) if "DEBUGP" in os.environ: # pragma: no cover print("\x1b[33mRECV>\x1b[0m", json.dumps(obj, indent=2)) return obj class PipeTransport(Transport): def __init__( self, loop: asyncio.AbstractEventLoop, driver_executable: Path ) -> None: super().__init__(loop) self._stopped = False self._driver_executable = driver_executable def request_stop(self) -> None: self._stopped = True self._output.close() async def wait_until_stopped(self) -> None: await self._stopped_future await self._proc.wait() async def run(self) -> None: self._stopped_future: asyncio.Future = asyncio.Future() try: self._proc = proc = await asyncio.create_subprocess_exec( str(self._driver_executable), "run-driver", stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE, stderr=_get_stderr_fileno(), limit=32768, ) except Exception as exc: self.on_error_future.set_exception(exc) return assert proc.stdout assert proc.stdin self._output = proc.stdin while not self._stopped: try: buffer = await proc.stdout.readexactly(4) length = int.from_bytes(buffer, byteorder="little", signed=False) buffer = bytes(0) while length: to_read = min(length, 32768) data = await proc.stdout.readexactly(to_read) length -= to_read if len(buffer): buffer = buffer + data else: buffer = data obj = self.deserialize_message(buffer) self.on_message(obj) except asyncio.IncompleteReadError: break await asyncio.sleep(0) 
self._stopped_future.set_result(None) def send(self, message: Dict) -> None: data = self.serialize_message(message) self._output.write( len(data).to_bytes(4, byteorder="little", signed=False) + data ) class WebSocketTransport(AsyncIOEventEmitter, Transport): def __init__( self, loop: asyncio.AbstractEventLoop, ws_endpoint: str, headers: Dict[str, str] = None, slow_mo: float = None, ) -> None: super().__init__(loop) Transport.__init__(self, loop) self._stopped = False self.ws_endpoint = ws_endpoint self.headers = headers self.slow_mo = slow_mo def request_stop(self) -> None: self._stopped = True self._loop.create_task(self._connection.close()) def dispose(self) -> None: self.on_error_future.cancel() async def wait_until_stopped(self) -> None: await self._connection.wait_closed() async def run(self) -> None: try: self._connection = await websockets.connect( self.ws_endpoint, extra_headers=self.headers ) except Exception as exc: self.on_error_future.set_exception(Error(f"websocket.connect: {str(exc)}")) return while not self._stopped: try: message = await self._connection.recv() if self.slow_mo is not None: await asyncio.sleep(self.slow_mo / 1000) if self._stopped: self.on_error_future.set_exception( Error("Playwright connection closed") ) break obj = self.deserialize_message(message) self.on_message(obj) except websockets.exceptions.ConnectionClosed: if not self._stopped: self.emit("close") self.on_error_future.set_exception( Error("Playwright connection closed") ) break except Exception as exc: self.on_error_future.set_exception(exc) break def send(self, message: Dict) -> None: if self._stopped or self._connection.closed: raise Error("Playwright connection closed") data = self.serialize_message(message) self._loop.create_task(self._connection.send(data))
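PipeTransport above frames every message on the driver pipe as a 4-byte little-endian length header followed by a JSON payload (see send() and the readexactly loop in run()). The framing in isolation, with illustrative helper names not taken from the source:

import json

def frame(message: dict) -> bytes:
    # 4-byte little-endian length prefix, then the JSON-encoded payload.
    data = json.dumps(message).encode()
    return len(data).to_bytes(4, byteorder="little", signed=False) + data

def unframe(buffer: bytes) -> dict:
    # Inverse of frame(): read the prefix, then decode exactly that many bytes.
    length = int.from_bytes(buffer[:4], byteorder="little", signed=False)
    return json.loads(buffer[4:4 + length])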
32.808612
134
0.606679
acefb2bb2452a46e20719248ee0d28b5cbd5184c
12,184
py
Python
submissions/Thompson/mygames.py
dysomni/aima-python
c67104e50007ec5ac2a9aa37f0cb972cb6315528
[ "MIT" ]
null
null
null
submissions/Thompson/mygames.py
dysomni/aima-python
c67104e50007ec5ac2a9aa37f0cb972cb6315528
[ "MIT" ]
null
null
null
submissions/Thompson/mygames.py
dysomni/aima-python
c67104e50007ec5ac2a9aa37f0cb972cb6315528
[ "MIT" ]
1
2018-08-23T19:27:23.000Z
2018-08-23T19:27:23.000Z
from collections import namedtuple from games import (Game) from queue import PriorityQueue from copy import deepcopy import random class GameState: def __init__(self, to_move, board, label=None, depth=8): self.to_move = to_move self.board = board self.label = label self.maxDepth = depth def __str__(self): if self.label == None: return super(GameState, self).__str__() return self.label class FlagrantCopy(Game): """A flagrant copy of TicTacToe, from game.py It's simplified, so that moves and utility are calculated as needed Play TicTacToe on an h x v board, with Max (first player) playing 'X'. A state has the player to move and a board, in the form of a dict of {(x, y): Player} entries, where Player is 'X' or 'O'.""" def __init__(self, h=3, v=3, k=3): self.h = h self.v = v self.k = k self.initial = GameState(to_move='X', board={}) def actions(self, state): try: return state.moves except: pass "Legal moves are any square not yet taken." moves = [] for x in range(1, self.h + 1): for y in range(1, self.v + 1): if (x,y) not in state.board.keys(): moves.append((x,y)) state.moves = moves return moves # defines the order of play def opponent(self, player): if player == 'X': return 'O' if player == 'O': return 'X' return None def result(self, state, move): if move not in self.actions(state): return state # Illegal move has no effect board = state.board.copy() player = state.to_move board[move] = player next_mover = self.opponent(player) return GameState(to_move=next_mover, board=board) def utility(self, state, player): "Return the value to player; 1 for win, -1 for loss, 0 otherwise." try: return state.utility if player == 'X' else -state.utility except: pass board = state.board util = self.check_win(board, 'X') if util == 0: util = -self.check_win(board, 'O') state.utility = util return util if player == 'X' else -util # Did I win? def check_win(self, board, player): # check rows for y in range(1, self.v + 1): if self.k_in_row(board, (1,y), player, (1,0)): return 1 # check columns for x in range(1, self.h + 1): if self.k_in_row(board, (x,1), player, (0,1)): return 1 # check \ diagonal if self.k_in_row(board, (1,1), player, (1,1)): return 1 # check / diagonal if self.k_in_row(board, (3,1), player, (-1,1)): return 1 return 0 # does player have K in a row? return 1 if so, 0 if not def k_in_row(self, board, start, player, direction): "Return true if there is a line through start on board for player." (delta_x, delta_y) = direction x, y = start n = 0 # n is number of moves in row while board.get((x, y)) == player: n += 1 x, y = x + delta_x, y + delta_y x, y = start while board.get((x, y)) == player: n += 1 x, y = x - delta_x, y - delta_y n -= 1 # Because we counted start itself twice return n >= self.k def terminal_test(self, state): "A state is terminal if it is won or there are no empty squares." 
return self.utility(state, 'X') != 0 or len(self.actions(state)) == 0 def display(self, state): board = state.board for x in range(1, self.h + 1): for y in range(1, self.v + 1): print(board.get((x, y), '.'), end=' ') print() myGame = FlagrantCopy() won = GameState( to_move = 'O', board = {(1,1): 'X', (1,2): 'X', (1,3): 'X', (2,1): 'O', (2,2): 'O', }, label = 'won' ) winin1 = GameState( to_move = 'X', board = {(1,1): 'X', (1,2): 'X', (2,1): 'O', (2,2): 'O', }, label = 'winin1' ) losein1 = GameState( to_move = 'O', board = {(1,1): 'X', (1,2): 'X', (2,1): 'O', (2,2): 'O', (3,1): 'X', }, label = 'losein1' ) winin3 = GameState( to_move = 'X', board = {(1,1): 'X', (1,2): 'O', (2,1): 'X', (3,1): 'O', }, label = 'winin3' ) losein3 = GameState( to_move = 'O', board = {(1,1): 'X', (2,1): 'X', (3,1): 'O', (1,2): 'X', (1,2): 'O', }, label = 'losein3' ) winin5 = GameState( to_move = 'X', board = {(1,1): 'X', (1,2): 'O', (2,1): 'X', }, label = 'winin5' ) lost = GameState( to_move = 'X', board = {(1,1): 'X', (1,2): 'X', (2,1): 'O', (2,2): 'O', (2,3): 'O', (3,1): 'X' }, label = 'lost' ) class DiceState: # one way to define the state of a minimal game. def __init__(self, player): # add parameters as needed. self.to_move = player self.label = str(id(self)) # change this to something easier to read # add code and self.variables as needed. def __str__(self): # use this exact signature return self.label # class TemplateAction: # ''' # It is not necessary to define an action. # Start with actions as simple as a label (e.g., 'Down') # or a pair of coordinates (e.g., (1,2)). # # Don't un-comment this until you already have a working game, # and want to play smarter. # ''' # def __lt__(self, other): # use this exact signature # # return True when self is a better move than other. # return False # Rules: Pitcher will roll the dice. The algorithm easily explains the rules. # Instead of 4 bases there are 3, First, Second, and Home. Players will pick roll # at start. The Pitcher wins if he gets 5 outs and the Batter wins if he scores 5 points. # (may have to tweak these numbers so that it's fair.) class DiceBall(Game): def dice(): return (random.randint(1,6)) def batter(): return (random.randint(1,6)) print('Let\'s play a Game of Diceball!') print('The rules are as follows:\n') print('There is a pitcher and a hitter. The pitcher will roll and it will be decided whether\n' 'it is a strike, ball, or if the batter gets to swing. If the batter gets to swing then\n' 'the batter will roll and the roll will determine whether they strike or if they\n' 'hit the ball. 
The first to 5 points wins!\n') player1 = 'Pitcher' player2 = 'Batter' player1Score = 0 player2Score = 0 strike = 0 ball = 0 foul = 0 while player1Score != 5 and player2Score != 5: roll = dice() print('Pitcher throws ' + str(roll)) if roll == 1 or roll == 2: print('Pitcher throws a strike!') strike += 1 print("Strikes: " + str(strike)) print("Balls: " + str(ball)) print("Pitcher Score: " + str(player1Score)) print("Batter Score: " + str(player2Score)) if strike == 3: print('\nPitcher struck a batter out!') player1Score += 1 print("Strikes: " + str(strike)) print("Balls: " + str(ball)) print("Pitcher Score: " + str(player1Score)) print("Batter Score: " + str(player2Score)) elif roll == 4 or roll == 3: print('Pitcher throws a ball!') ball += 1 print("Strikes: " + str(strike)) print("Balls: " + str(ball)) print("Pitcher Score: " + str(player1Score)) print("Batter Score: " + str(player2Score)) if ball == 4: print('\nPitcher walks the Batter') player2Score += 1 print("Strikes: " + str(strike)) print("Balls: " + str(ball)) print("Pitcher Score: " + str(player1Score)) print("Batter Score: " + str(player2Score)) elif roll == 5 or roll == 6: print('The Batter gets to swing!') swing = batter() if swing == 1 or swing == 2: print('The batter hits the ball and scores!') player2Score += 1 print("Pitcher Score: " + str(player1Score)) print("Batter Score: " + str(player2Score)) strike = 0 ball = 0 elif swing == 3 or swing == 4: print('The batter hit a foul ball!') if strike != 3: strike += 1 print("Strikes: " + str(strike)) print("Balls: " + str(ball)) print("Pitcher Score: " + str(player1Score)) print("Batter Score: " + str(player2Score)) elif strike == 3: strike -= 1 print("Strikes: " + str(strike)) print("Balls: " + str(ball)) print("Pitcher Score: " + str(player1Score)) print("Batter Score: " + str(player2Score)) elif swing == 5 or swing == 6: print('The batter struck out!') player1Score +=1 print("Strikes: " + str(strike)) print("Balls: " + str(ball)) print("Pitcher Score: " + str(player1Score)) print("Batter Score: " + str(player2Score)) print('\n') if player1Score == 5: print('The Pitcher Wins!!!!') elif player2Score == 5: print('The Batter wins!!!!') #Commit ''' This is a minimal Game definition, the shortest implementation I could run without errors. ''' def __init__(self, initial): # add parameters if needed. self.initial = initial # add code and self.variables if needed. def actions(self, state): # use this exact signature. acts = [] # append all moves, which are legal in this state, # to the list of acts. return acts def result(self, state, move): # use this exact signature. newState = deepcopy(state) # use the move to modify the newState return newState def terminal_test(self, state): # use this exact signature. # return True only when the state of the game is over. return True def utility(self, state, player): # use this exact signature. ''' return: >0 if the player is winning, <0 if the player is losing, 0 if the state is a tie. ''' return 0 def display(self, state): # use this exact signature. # pretty-print the game state, using ASCII art, # to help a human player understand his options. print(state) tg = DiceBall(DiceState('A')) # this is the game we play interactively. myGames = { DiceBall:[ ] # myGame: [ # won, # winin1, losein1, winin3, losein3, winin5, # lost, # ], # # tg: [ # # these are the states we tabulate when we test AB(1), AB(2), etc. 
# TemplateState('B'), # TemplateState('C'), # ] } # ~~~~~~~~~~~~~~~~~~~~~~~~~~Algorithm~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Rules: Pitcher will roll the dice. The algorithm easily explains the rules. # Instead of 4 bases there are 3, First, Second, and Home. Players will pick roll # at start. The Pitcher wins if he gets 5 outs and the Batter wins if he scores 5 points. # (may have to tweak these numbers so that it's fair.) # # pitcher rolls # if(die=1 || die=2) # strike(); # elif(die=4 || die=3) # ball(); # else # batterswing(); # batter rolls # if(die=1||die=2) # strike(); # elif(die=4) # firstBase(); # elif(die=5) # secondBase(); # else # homeRun();
31.646753
100
0.519616
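The k_in_row method in the record above counts marks in both directions from the starting square and subtracts one because the start is counted twice. A self-contained sketch of that counting trick on a bare board dict (the example board is hypothetical):

def k_in_row(board, start, player, direction, k=3):
    # Walk forward and backward along `direction`, counting `player`'s marks.
    (dx, dy) = direction
    n = 0
    x, y = start
    while board.get((x, y)) == player:
        n += 1
        x, y = x + dx, y + dy
    x, y = start
    while board.get((x, y)) == player:
        n += 1
        x, y = x - dx, y - dy
    n -= 1  # the start square was counted in both loops
    return n >= k

board = {(1, 1): 'X', (2, 2): 'X', (3, 3): 'X'}
assert k_in_row(board, (2, 2), 'X', (1, 1))       # \ diagonal
assert not k_in_row(board, (1, 1), 'X', (1, 0))   # row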
acefb2ed09365d5ac13fdafb04e8d2f6592316d2
2,947
py
Python
addons/sketchfab-plugin-1-4-2/blender/com/gltf2_blender_extras.py
trisadmeslek/V-Sekai-Blender-tools
0d8747387c58584b50c69c61ba50a881319114f8
[ "MIT" ]
null
null
null
addons/sketchfab-plugin-1-4-2/blender/com/gltf2_blender_extras.py
trisadmeslek/V-Sekai-Blender-tools
0d8747387c58584b50c69c61ba50a881319114f8
[ "MIT" ]
null
null
null
addons/sketchfab-plugin-1-4-2/blender/com/gltf2_blender_extras.py
trisadmeslek/V-Sekai-Blender-tools
0d8747387c58584b50c69c61ba50a881319114f8
[ "MIT" ]
null
null
null
# Copyright 2018-2021 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bpy
from .gltf2_blender_json import is_json_convertible


# Custom properties, which are in most cases present and should not be imported/exported.
BLACK_LIST = ['cycles', 'cycles_visibility', 'cycles_curves', '_RNA_UI']


def generate_extras(blender_element):
    """Filter and create a custom property, which is stored in the glTF extra field."""
    if not blender_element:
        return None

    extras = {}

    for custom_property in blender_element.keys():
        if custom_property in BLACK_LIST:
            continue

        value = __to_json_compatible(blender_element[custom_property])

        if value is not None:
            extras[custom_property] = value

    if not extras:
        return None

    return extras


def __to_json_compatible(value):
    """Make a value (usually a custom property) compatible with json"""

    if isinstance(value, bpy.types.ID):
        return value

    elif isinstance(value, str):
        return value

    elif isinstance(value, (int, float)):
        return value

    # for list classes
    elif isinstance(value, list):
        value = list(value)
        # make sure contents are json-compatible too
        for index in range(len(value)):
            value[index] = __to_json_compatible(value[index])
        return value

    # for IDPropertyArray classes
    elif hasattr(value, "to_list"):
        value = value.to_list()
        return value

    elif hasattr(value, "to_dict"):
        value = value.to_dict()
        if is_json_convertible(value):
            return value

    return None


def set_extras(blender_element, extras, exclude=[]):
    """Copy extras onto a Blender object."""
    if not extras or not isinstance(extras, dict):
        return

    for custom_property, value in extras.items():
        if custom_property in BLACK_LIST:
            continue
        if custom_property in exclude:
            continue

        try:
            blender_element[custom_property] = value
        except Exception:
            # Try to convert to string
            try:
                blender_element[custom_property] = str(value)
            except Exception:
                print('Error setting property %s to value of type %s' % (custom_property, type(value)))
30.071429
104
0.643366
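generate_extras in the record above is a blacklist filter plus a recursive JSON-compatibility pass over Blender custom properties. The same shape can be sketched without bpy, with plain dicts standing in for ID properties (function names below mirror the file, but the bpy-specific branches are dropped):

BLACK_LIST = ['cycles', 'cycles_visibility', 'cycles_curves', '_RNA_UI']

def to_json_compatible(value):
    # Keep JSON scalars, recurse into lists, drop everything else.
    if isinstance(value, (str, int, float)):
        return value
    if isinstance(value, list):
        return [to_json_compatible(v) for v in value]
    return None

def generate_extras(props: dict):
    extras = {k: to_json_compatible(v) for k, v in props.items()
              if k not in BLACK_LIST}
    extras = {k: v for k, v in extras.items() if v is not None}
    return extras or None

assert generate_extras({'cycles': 1, 'my_tag': 'hero', 'lod': [0, 1]}) == \
    {'my_tag': 'hero', 'lod': [0, 1]}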
acefb3bbf46994e467bb0e3fb5b45a1e2ee57951
7,853
py
Python
sdk/python/pulumi_google_native/compute/v1/get_target_vpn_gateway.py
AaronFriel/pulumi-google-native
75d1cda425e33d4610348972cd70bddf35f1770d
[ "Apache-2.0" ]
44
2021-04-18T23:00:48.000Z
2022-02-14T17:43:15.000Z
sdk/python/pulumi_google_native/compute/v1/get_target_vpn_gateway.py
AaronFriel/pulumi-google-native
75d1cda425e33d4610348972cd70bddf35f1770d
[ "Apache-2.0" ]
354
2021-04-16T16:48:39.000Z
2022-03-31T17:16:39.000Z
sdk/python/pulumi_google_native/compute/v1/get_target_vpn_gateway.py
AaronFriel/pulumi-google-native
75d1cda425e33d4610348972cd70bddf35f1770d
[ "Apache-2.0" ]
8
2021-04-24T17:46:51.000Z
2022-01-05T10:40:21.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities __all__ = [ 'GetTargetVpnGatewayResult', 'AwaitableGetTargetVpnGatewayResult', 'get_target_vpn_gateway', 'get_target_vpn_gateway_output', ] @pulumi.output_type class GetTargetVpnGatewayResult: def __init__(__self__, creation_timestamp=None, description=None, forwarding_rules=None, kind=None, name=None, network=None, region=None, self_link=None, status=None, tunnels=None): if creation_timestamp and not isinstance(creation_timestamp, str): raise TypeError("Expected argument 'creation_timestamp' to be a str") pulumi.set(__self__, "creation_timestamp", creation_timestamp) if description and not isinstance(description, str): raise TypeError("Expected argument 'description' to be a str") pulumi.set(__self__, "description", description) if forwarding_rules and not isinstance(forwarding_rules, list): raise TypeError("Expected argument 'forwarding_rules' to be a list") pulumi.set(__self__, "forwarding_rules", forwarding_rules) if kind and not isinstance(kind, str): raise TypeError("Expected argument 'kind' to be a str") pulumi.set(__self__, "kind", kind) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if network and not isinstance(network, str): raise TypeError("Expected argument 'network' to be a str") pulumi.set(__self__, "network", network) if region and not isinstance(region, str): raise TypeError("Expected argument 'region' to be a str") pulumi.set(__self__, "region", region) if self_link and not isinstance(self_link, str): raise TypeError("Expected argument 'self_link' to be a str") pulumi.set(__self__, "self_link", self_link) if status and not isinstance(status, str): raise TypeError("Expected argument 'status' to be a str") pulumi.set(__self__, "status", status) if tunnels and not isinstance(tunnels, list): raise TypeError("Expected argument 'tunnels' to be a list") pulumi.set(__self__, "tunnels", tunnels) @property @pulumi.getter(name="creationTimestamp") def creation_timestamp(self) -> str: """ Creation timestamp in RFC3339 text format. """ return pulumi.get(self, "creation_timestamp") @property @pulumi.getter def description(self) -> str: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @property @pulumi.getter(name="forwardingRules") def forwarding_rules(self) -> Sequence[str]: """ A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated with a VPN gateway. """ return pulumi.get(self, "forwarding_rules") @property @pulumi.getter def kind(self) -> str: """ Type of resource. Always compute#targetVpnGateway for target VPN gateways. """ return pulumi.get(self, "kind") @property @pulumi.getter def name(self) -> str: """ Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @property @pulumi.getter def network(self) -> str: """ URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created. """ return pulumi.get(self, "network") @property @pulumi.getter def region(self) -> str: """ URL of the region where the target VPN gateway resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. """ return pulumi.get(self, "region") @property @pulumi.getter(name="selfLink") def self_link(self) -> str: """ Server-defined URL for the resource. """ return pulumi.get(self, "self_link") @property @pulumi.getter def status(self) -> str: """ The status of the VPN gateway, which can be one of the following: CREATING, READY, FAILED, or DELETING. """ return pulumi.get(self, "status") @property @pulumi.getter def tunnels(self) -> Sequence[str]: """ A list of URLs to VpnTunnel resources. VpnTunnels are created using the compute.vpntunnels.insert method and associated with a VPN gateway. """ return pulumi.get(self, "tunnels") class AwaitableGetTargetVpnGatewayResult(GetTargetVpnGatewayResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetTargetVpnGatewayResult( creation_timestamp=self.creation_timestamp, description=self.description, forwarding_rules=self.forwarding_rules, kind=self.kind, name=self.name, network=self.network, region=self.region, self_link=self.self_link, status=self.status, tunnels=self.tunnels) def get_target_vpn_gateway(project: Optional[str] = None, region: Optional[str] = None, target_vpn_gateway: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTargetVpnGatewayResult: """ Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request. """ __args__ = dict() __args__['project'] = project __args__['region'] = region __args__['targetVpnGateway'] = target_vpn_gateway if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('google-native:compute/v1:getTargetVpnGateway', __args__, opts=opts, typ=GetTargetVpnGatewayResult).value return AwaitableGetTargetVpnGatewayResult( creation_timestamp=__ret__.creation_timestamp, description=__ret__.description, forwarding_rules=__ret__.forwarding_rules, kind=__ret__.kind, name=__ret__.name, network=__ret__.network, region=__ret__.region, self_link=__ret__.self_link, status=__ret__.status, tunnels=__ret__.tunnels) @_utilities.lift_output_func(get_target_vpn_gateway) def get_target_vpn_gateway_output(project: Optional[pulumi.Input[Optional[str]]] = None, region: Optional[pulumi.Input[str]] = None, target_vpn_gateway: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTargetVpnGatewayResult]: """ Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request. """ ...
41.331579
444
0.655928
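AwaitableGetTargetVpnGatewayResult in the record above makes an already-computed value awaitable through an __await__ that never suspends: the `if False: yield self` body turns the method into a generator without ever yielding. A minimal stand-alone version of that trick, with an invented class name:

import asyncio

class AwaitableValue:
    def __init__(self, value):
        self.value = value

    def __await__(self):
        # The unreachable yield makes this a generator function, so
        # `await` drives it and resolves immediately with the value.
        if False:
            yield self
        return self.value

async def main():
    result = await AwaitableValue(42)
    assert result == 42

asyncio.run(main())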
acefb3c8ff633bc33cde33af57eddc40a956c220
1,901
py
Python
synapse/tests/test_tools_pullfile.py
cmd-not-found/synapse
a768c441623d369c0c0ef59bc0d1278cd63bb8de
[ "Apache-2.0" ]
null
null
null
synapse/tests/test_tools_pullfile.py
cmd-not-found/synapse
a768c441623d369c0c0ef59bc0d1278cd63bb8de
[ "Apache-2.0" ]
null
null
null
synapse/tests/test_tools_pullfile.py
cmd-not-found/synapse
a768c441623d369c0c0ef59bc0d1278cd63bb8de
[ "Apache-2.0" ]
null
null
null
import os
import asyncio
import hashlib
import pathlib

import synapse.tests.utils as s_t_utils

import synapse.tools.pullfile as s_pullfile


class TestPullFile(s_t_utils.SynTest):

    async def test_pullfile(self):

        async with self.getTestAxon() as axon:

            axonurl = axon.getLocalUrl()

            testhash = hashlib.sha256(b'test').hexdigest()
            visihash = hashlib.sha256(b'visi').hexdigest()
            nonehash = hashlib.sha256(b'none').hexdigest()

            testbash = hashlib.sha256(b'test').digest()
            visibash = hashlib.sha256(b'visi').digest()

            self.eq(((4, visibash), (4, testbash)),
                    await axon.puts([b'visi', b'test']))

            def pullfile():

                with self.getTestDir() as wdir:

                    outp = self.getTestOutp()
                    self.eq(0, s_pullfile.main(['-a', axonurl,
                                                '-o', wdir,
                                                '-l', testhash,
                                                '-l', nonehash], outp))

                    oldcwd = os.getcwd()
                    os.chdir(wdir)
                    self.eq(0, s_pullfile.main(['-a', axonurl,
                                                '-l', visihash], outp))

                    os.chdir(oldcwd)

                    with open(pathlib.Path(wdir, testhash), 'rb') as fd:
                        self.eq(b'test', fd.read())

                    with open(pathlib.Path(wdir, visihash), 'rb') as fd:
                        self.eq(b'visi', fd.read())

                    self.true(outp.expect(f'{nonehash} not in axon store'))
                    self.true(outp.expect(f'Fetching {testhash} to file'))
                    self.true(outp.expect(f'Fetching {visihash} to file'))

            loop = asyncio.get_running_loop()
            await loop.run_in_executor(None, pullfile)
35.203704
88
0.48869
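The test in the record above leans on the difference between hashlib's digest() (raw bytes, which axon.puts reports back) and hexdigest() (the hex string passed to the -l option). The relationship, checked in isolation:

import hashlib

h = hashlib.sha256(b'test')
raw = h.digest()        # 32 raw bytes, the form axon.puts() returns
hexed = h.hexdigest()   # 64-char lowercase hex string, the form -l expects

assert len(raw) == 32 and len(hexed) == 64
assert bytes.fromhex(hexed) == raw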
acefb4236810a0cb498f63e782e895e0da6926d6
3,419
py
Python
tests/unit/responses/test_activation_response.py
jaswilli/globus-sdk-python
35579f5520150a28ee4e375802d7c863ed3c21ad
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
tests/unit/responses/test_activation_response.py
jaswilli/globus-sdk-python
35579f5520150a28ee4e375802d7c863ed3c21ad
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
tests/unit/responses/test_activation_response.py
jaswilli/globus-sdk-python
35579f5520150a28ee4e375802d7c863ed3c21ad
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
import json import time import pytest import requests import six from globus_sdk.transfer.response import ActivationRequirementsResponse def make_response( activated=True, expires_in=0, auto_activation_supported=True, oauth_server=None, DATA=None, ): """ Helper for making ActivationRequirementsResponses with known fields """ DATA = DATA or [] data = { "activated": activated, "expires_in": expires_in, "oauth_server": oauth_server, "DATA": DATA, "auto_activation_supported": auto_activation_supported, } response = requests.Response() response.headers["Content-Type"] = "application/json" response._content = six.b(json.dumps(data)) return ActivationRequirementsResponse(response) def test_expires_at(): """ Confirms expires_at is set properly by __init__ """ for seconds in [0, 10, 100, 1000, -10]: response = make_response(expires_in=seconds) expected = int(time.time()) + seconds # make sure within a 1 second range of expected value assert response.expires_at in (expected - 1, expected, expected + 1) # -1 marks no expiration response = make_response(expires_in=-1) assert response.expires_at is None @pytest.mark.parametrize("value", (True, False)) def test_supports_auto_activation(value): """ Gets supports_auto_activation from made responses, validates results """ response = make_response(auto_activation_supported=value) assert response.supports_auto_activation == value def test_supports_web_activation(): """ Gets supports_web_activation from made responses, validates results """ # true if auto_activatable, response = make_response(auto_activation_supported=True) assert response.supports_web_activation # has an oauth server, response = make_response(auto_activation_supported=False, oauth_server="server") assert response.supports_web_activation # or one of the other documents is myproxy or delegate_myproxy, response = make_response( auto_activation_supported=False, DATA=[{"type": "myproxy"}] ) assert response.supports_web_activation response = make_response( auto_activation_supported=False, DATA=[{"type": "delegate_myproxy"}] ) assert response.supports_web_activation # otherwise false response = make_response(auto_activation_supported=False) assert not response.supports_web_activation def test_active_until(): """ Calls active_until on made responses, validates results """ # not active at all response = make_response(activated=False) assert not response.active_until(0) # always active response = make_response(expires_in=-1) assert response.active_until(0) response = make_response(expires_in=10) # relative time assert response.active_until(5) assert not response.active_until(15) # absolute time now = int(time.time()) assert response.active_until(now + 5, relative_time=False) assert not response.active_until(now + 15, relative_time=False) def test_always_activated(): """ Gets always_activated property from made responses, validates results """ response = make_response(expires_in=-1) assert response.always_activated response = make_response(expires_in=0) assert not response.always_activated
29.730435
84
0.716876
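make_response in the record above fakes a Transfer API reply by writing JSON straight into requests.Response._content, a private attribute the tests rely on. The stub pattern in isolation (assuming, as the file does, that no real HTTP round trip is wanted):

import json
import requests

def fake_json_response(payload: dict) -> requests.Response:
    response = requests.Response()
    response.status_code = 200
    response.headers["Content-Type"] = "application/json"
    # _content is what response.json() and response.text read from.
    response._content = json.dumps(payload).encode("utf-8")
    return response

resp = fake_json_response({"activated": True, "expires_in": -1})
assert resp.json()["expires_in"] == -1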
acefb4ab14a2e5022543f770a0f9e9dc25121d19
11,017
py
Python
src/k8s-configuration/azext_k8s_configuration/validators.py
calvin197/azure-cli-extensions
65fac44b8dd0e1523c3866be5ec92eeb9b3a54f5
[ "MIT" ]
1
2022-02-18T00:16:47.000Z
2022-02-18T00:16:47.000Z
src/k8s-configuration/azext_k8s_configuration/validators.py
calvin197/azure-cli-extensions
65fac44b8dd0e1523c3866be5ec92eeb9b3a54f5
[ "MIT" ]
null
null
null
src/k8s-configuration/azext_k8s_configuration/validators.py
calvin197/azure-cli-extensions
65fac44b8dd0e1523c3866be5ec92eeb9b3a54f5
[ "MIT" ]
null
null
null
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import re import io from urllib.parse import urlparse from knack.log import get_logger from azure.cli.core.azclierror import ( InvalidArgumentValueError, RequiredArgumentMissingError, MutuallyExclusiveArgumentError, ) from paramiko.hostkeys import HostKeyEntry from paramiko.ed25519key import Ed25519Key from paramiko.ssh_exception import SSHException from Crypto.PublicKey import RSA, ECC, DSA from .utils import from_base64 from ._client_factory import resource_providers_client from . import consts logger = get_logger(__name__) def validate_namespace(namespace): if namespace.namespace: __validate_k8s_name(namespace.namespace, "--namespace", 253) def validate_configuration_name(namespace): __validate_k8s_name(namespace.name, "--name", 63) def validate_fluxconfig_name(namespace): __validate_k8s_cr_name(namespace.name, "--name", 63) def validate_operator_instance_name(namespace): if namespace.operator_instance_name: __validate_k8s_name( namespace.operator_instance_name, "--operator-instance-name", 23 ) def validate_operator_namespace(namespace): if namespace.operator_namespace: __validate_k8s_name(namespace.operator_namespace, "--operator-namespace", 23) def validate_kustomization(values): required_keys = consts.REQUIRED_KUSTOMIZATION_KEYS for item in values: key, value = item.split("=", 1) if key == "name": __validate_k8s_cr_name(value, key, 63) elif key in consts.SYNC_INTERVAL_KEYS: validate_duration("sync-interval", value) elif key in consts.TIMEOUT_KEYS: validate_duration("timeout", value) elif key in consts.RETRY_INTERVAL_KEYS: validate_duration("retry-interval", value) if key in required_keys: required_keys.remove(key) if required_keys: raise RequiredArgumentMissingError( consts.KUSTOMIZATION_REQUIRED_VALUES_MISSING_ERROR.format(required_keys), consts.KUSTOMIZATION_REQUIRED_VALUES_MISSING_HELP, ) def validate_repository_ref(repository_ref): num_set_args = 0 if repository_ref: for elem in [ repository_ref.branch, repository_ref.tag, repository_ref.semver, repository_ref.commit, ]: if elem: num_set_args += 1 if num_set_args == 0: raise RequiredArgumentMissingError( consts.REPOSITORY_REF_REQUIRED_VALUES_MISSING_ERROR, consts.REPOSITORY_REF_REQUIRED_VALUES_MISSING_HELP, ) if num_set_args == 1: return raise MutuallyExclusiveArgumentError( consts.REPOSITORY_REF_TOO_MANY_VALUES_ERROR, consts.REPOSITORY_REF_TOO_MANY_VALUES_HELP, ) def validate_duration(arg_name: str, duration: str): if not duration: return regex = re.compile(r"((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?") parts = regex.match(duration) if duration and not parts: raise InvalidArgumentValueError( consts.INVALID_DURATION_ERROR.format(arg_name), consts.INVALID_DURATION_HELP ) parts = parts.groupdict() if not any(parts.values()): raise InvalidArgumentValueError( consts.INVALID_DURATION_ERROR.format(arg_name), consts.INVALID_DURATION_HELP ) def validate_git_url(url: str): if not re.match(consts.VALID_GIT_URL_REGEX, url): raise InvalidArgumentValueError( consts.INVALID_URL_ERROR, consts.INVALID_URL_HELP ) def validate_bucket_url(url: str): if not re.match(consts.VALID_BUCKET_URL_REGEX, url): raise InvalidArgumentValueError( consts.INVALID_URL_ERROR, consts.INVALID_URL_HELP ) # 
Helper def __validate_k8s_name(param_value, param_name, max_len): if len(param_value) > max_len: raise InvalidArgumentValueError( consts.INVALID_KUBERNETES_NAME_LENGTH_ERROR.format(param_name), consts.INVALID_KUBERNETES_NAME_LENGTH_HELP.format(param_name, max_len), ) if not re.match(consts.VALID_KUBERNETES_DNS_NAME_REGEX, param_value): if param_value[0] == "-" or param_value[-1] == "-": raise InvalidArgumentValueError( consts.INVALID_KUBERNETES_NAME_HYPHEN_ERROR.format(param_name), consts.INVALID_KUBERNETES_NAME_HYPHEN_HELP.format(param_name), ) raise InvalidArgumentValueError( consts.INVALID_KUBERNETES_DNS_NAME_ERROR.format(param_name), consts.INVALID_KUBERNETES_DNS_NAME_HELP.format(param_name), ) def __validate_k8s_cr_name(param_value, param_name, max_len): if len(param_value) > max_len: raise InvalidArgumentValueError( consts.INVALID_KUBERNETES_NAME_LENGTH_ERROR.format(param_name), consts.INVALID_KUBERNETES_NAME_LENGTH_HELP.format(param_name, max_len), ) if not re.match(consts.VALID_KUBERNETES_DNS_SUBDOMAIN_NAME_REGEX, param_value): if param_value[0] == "-" or param_value[-1] == "-": raise InvalidArgumentValueError( consts.INVALID_KUBERNETES_NAME_HYPHEN_ERROR.format(param_name), consts.INVALID_KUBERNETES_NAME_HYPHEN_HELP.format(param_name), ) if param_value[0] == "." or param_value[-1] == ".": raise InvalidArgumentValueError( consts.INVALID_KUBERNETES_NAME_PERIOD_ERROR.format(param_name), consts.INVALID_KUBERNETES_NAME_PERIOD_HELP.format(param_name), ) raise InvalidArgumentValueError( consts.INVALID_KUBERNETES_DNS_SUBDOMAIN_NAME_ERROR.format(param_name), consts.INVALID_KUBERNETES_DNS_SUBDOMAIN_NAME_ERROR.format(param_name), ) def validate_url_with_params( url: str, ssh_private_key, ssh_private_key_file, known_hosts, known_hosts_file, https_user, https_key, ): scheme = urlparse(url).scheme if scheme.lower() in ("http", "https"): if ssh_private_key or ssh_private_key_file: raise MutuallyExclusiveArgumentError( consts.SSH_PRIVATE_KEY_WITH_HTTP_URL_ERROR, consts.SSH_PRIVATE_KEY_WITH_HTTP_URL_HELP, ) if known_hosts or known_hosts_file: raise MutuallyExclusiveArgumentError( consts.KNOWN_HOSTS_WITH_HTTP_URL_ERROR, consts.KNOWN_HOSTS_WITH_HTTP_URL_HELP, ) if not (https_user and https_key) and scheme == "https": logger.warning(consts.HTTP_URL_NO_AUTH_WARNING) else: if https_user or https_key: raise MutuallyExclusiveArgumentError( consts.HTTPS_AUTH_WITH_SSH_URL_ERROR, consts.HTTPS_AUTH_WITH_SSH_URL_HELP, ) if https_user and https_key: return # If we just provide one or the other raise an error if https_user or https_key: raise RequiredArgumentMissingError( consts.HTTPS_USER_KEY_MATCH_ERROR, consts.HTTPS_USER_KEY_MATCH_HELP ) def validate_known_hosts(knownhost_data): try: knownhost_str = from_base64(knownhost_data).decode("utf-8") except Exception as ex: raise InvalidArgumentValueError( consts.KNOWN_HOSTS_BASE64_ENCODING_ERROR, consts.KNOWN_HOSTS_BASE64_ENCODING_HELP, ) from ex lines = knownhost_str.split("\n") for line in lines: line = line.strip(" ") line_len = len(line) if (line_len == 0) or (line[0] == "#"): continue try: host_key = HostKeyEntry.from_line(line) if not host_key: raise Exception("not enough fields found in known_hosts line") except Exception as ex: raise InvalidArgumentValueError( consts.KNOWN_HOSTS_FORMAT_ERROR, consts.KNOWN_HOSTS_FORMAT_HELP ) from ex def validate_private_key(ssh_private_key_data): try: RSA.import_key(from_base64(ssh_private_key_data)) return except ValueError: try: ECC.import_key(from_base64(ssh_private_key_data)) return except ValueError: try: 
DSA.import_key(from_base64(ssh_private_key_data)) return except ValueError: try: key_obj = io.StringIO( from_base64(ssh_private_key_data).decode("utf-8") ) Ed25519Key(file_obj=key_obj) return except SSHException as ex: raise InvalidArgumentValueError( consts.SSH_PRIVATE_KEY_ERROR, consts.SSH_PRIVATE_KEY_HELP ) from ex # pylint: disable=broad-except def validate_cc_registration(cmd): try: rp_client = resource_providers_client(cmd.cli_ctx) registration_state = rp_client.get( consts.CC_PROVIDER_NAMESPACE ).registration_state if registration_state.lower() != consts.REGISTERED.lower(): logger.warning( consts.CC_REGISTRATION_WARNING, consts.CC_PROVIDER_NAMESPACE, consts.CC_REGISTRATION_LINK, ) except Exception: logger.warning(consts.CC_REGISTRATION_ERROR, consts.CC_PROVIDER_NAMESPACE) def validate_scope_and_namespace(scope, release_namespace, target_namespace): if scope == "cluster": if target_namespace is not None: message = "When --scope is 'cluster', --target-namespace must not be given." raise MutuallyExclusiveArgumentError(message) else: if release_namespace is not None: message = ( "When --scope is 'namespace', --release-namespace must not be given." ) raise MutuallyExclusiveArgumentError(message) def validate_scope_after_customization(scope_obj): if ( scope_obj is not None and scope_obj.namespace is not None and scope_obj.namespace.target_namespace is None ): message = "When --scope is 'namespace', --target-namespace must be given." raise RequiredArgumentMissingError(message) def validate_version_and_auto_upgrade(version, auto_upgrade_minor_version): if version is not None: if auto_upgrade_minor_version: message = "To pin to specific version, auto-upgrade-minor-version must be set to 'false'." raise MutuallyExclusiveArgumentError(message) auto_upgrade_minor_version = False
35.885993
102
0.659163
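validate_duration in the record above accepts Go-style strings such as 10m or 1h30m through optional named regex groups and rejects input where no group matched. A sketch that reuses the same pattern to compute total seconds (the parse_duration helper is an illustration, not part of the extension):

import re

DURATION_RE = re.compile(
    r"((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?"
)

def parse_duration(duration: str) -> int:
    """Return total seconds, or raise ValueError like the validator would."""
    parts = DURATION_RE.match(duration).groupdict()
    if not any(parts.values()):
        raise ValueError(f"invalid duration: {duration!r}")
    return (int(parts["hours"] or 0) * 3600
            + int(parts["minutes"] or 0) * 60
            + int(parts["seconds"] or 0))

assert parse_duration("1h30m") == 5400
assert parse_duration("45s") == 45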
acefb4c593f26bb7e2cb5a12869edc7ce5d13ab7
565
py
Python
efficientdet/hierarchy.py
nodiz/automl
617f9a4096cd7da21936d24cea12932bbfcd0c81
[ "Apache-2.0" ]
null
null
null
efficientdet/hierarchy.py
nodiz/automl
617f9a4096cd7da21936d24cea12932bbfcd0c81
[ "Apache-2.0" ]
null
null
null
efficientdet/hierarchy.py
nodiz/automl
617f9a4096cd7da21936d24cea12932bbfcd0c81
[ "Apache-2.0" ]
null
null
null
import pickle
from os.path import join

import numpy as np
import tensorflow as tf

leaves_hierachy = None


def getHierarchy(basePath='config/'):
    global leaves_hierachy
    if leaves_hierachy is None:
        # Use a context manager so the pickle file handle is closed.
        with open(join(basePath, "hierarchy_vects.pkl"), "rb") as f:
            leaves_hierachy = pickle.load(f)
    return tf.constant(leaves_hierachy)


hierarchyMatrix = getHierarchy()

if __name__ == "__main__":
    leaves = getHierarchy()
    print(type(leaves))
    print(leaves.shape)

    a = tf.Variable(np.random.rand(8, 80, 80, 9, 101))
    b = a @ leaves
    print(b.shape)
21.730769
88
0.700885
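The `a @ leaves` line in the record above relies on batched matrix multiplication: multiplying a rank-5 tensor by a (101, N) matrix contracts only the trailing axis. The shape rule, checked with NumPy standing in for TensorFlow and smaller dimensions to keep the sketch cheap:

import numpy as np

a = np.random.rand(2, 4, 4, 9, 101)   # e.g. per-anchor class scores
leaves = np.random.rand(101, 33)      # hierarchy projection matrix

b = a @ leaves                        # contracts the trailing 101 axis
assert b.shape == (2, 4, 4, 9, 33)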
acefb5219abdeabb2acdb0380742ad58d1ffda06
7,223
py
Python
custom_components/wyzeapi/light.py
np-hacs/ha-wyzeapi
8abc6af59d36514008f696310b290a046d7c7a72
[ "Apache-2.0" ]
null
null
null
custom_components/wyzeapi/light.py
np-hacs/ha-wyzeapi
8abc6af59d36514008f696310b290a046d7c7a72
[ "Apache-2.0" ]
null
null
null
custom_components/wyzeapi/light.py
np-hacs/ha-wyzeapi
8abc6af59d36514008f696310b290a046d7c7a72
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python3 """Platform for light integration.""" import logging # Import the device class from the component that you want to support from datetime import timedelta from typing import Any, List import homeassistant.util.color as color_util from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_COLOR, LightEntity ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ATTR_ATTRIBUTION from homeassistant.core import HomeAssistant from wyzeapy.base_client import AccessTokenError, Device, DeviceTypes from wyzeapy.client import Client from wyzeapy.types import PropertyIDs from .const import DOMAIN _LOGGER = logging.getLogger(__name__) ATTRIBUTION = "Data provided by Wyze" SCAN_INTERVAL = timedelta(seconds=30) async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities): _LOGGER.debug("""Creating new WyzeApi light component""") client: Client = hass.data[DOMAIN][config_entry.entry_id] def get_bulbs() -> List[Device]: try: return client.get_bulbs() except AccessTokenError as e: _LOGGER.warning(e) client.reauthenticate() return client.get_bulbs() lights = [WyzeLight(client, light) for light in await hass.async_add_executor_job(get_bulbs)] async_add_entities(lights, True) class WyzeLight(LightEntity): """Representation of a Wyze Bulb.""" _brightness: int _color_temp: int _color: str _on: bool _available: bool _just_updated = False def __init__(self, client: Client, device: Device): """Initialize a Wyze Bulb.""" self._device = device self._device_type = DeviceTypes(self._device.product_type) if self._device_type not in [ DeviceTypes.LIGHT, DeviceTypes.MESH_LIGHT ]: raise AttributeError("Device type not supported") self._client = client @property def device_info(self): return { "identifiers": { (DOMAIN, self._device.mac) }, "name": self.name, "manufacturer": "WyzeLabs", "model": self._device.product_model } @property def should_poll(self) -> bool: return True @staticmethod def translate(value, input_min, input_max, output_min, output_max): if value is None: return None # Figure out how 'wide' each range is left_span = input_max - input_min right_span = output_max - output_min # Convert the left range into a 0-1 range (float) value_scaled = float(value - input_min) / float(left_span) # Convert the 0-1 range into a value in the right range. 
return output_min + (value_scaled * right_span) def turn_on(self, **kwargs: Any) -> None: pids = [] if kwargs.get(ATTR_BRIGHTNESS) is not None: _LOGGER.debug("Setting brightness") self._brightness = self.translate(kwargs.get(ATTR_BRIGHTNESS), 1, 255, 1, 100) pids.append(self._client.create_pid_pair(PropertyIDs.BRIGHTNESS, str(int(self._brightness)))) if kwargs.get(ATTR_COLOR_TEMP) is not None: _LOGGER.debug("Setting color temp") self._color_temp = self.translate(kwargs.get(ATTR_COLOR_TEMP), 500, 140, 2700, 6500) pids.append(self._client.create_pid_pair(PropertyIDs.COLOR_TEMP, str(int(self._color_temp)))) if self._device_type is DeviceTypes.MESH_LIGHT and kwargs.get(ATTR_HS_COLOR) is not None: _LOGGER.debug("Setting color") self._color = color_util.color_rgb_to_hex(*color_util.color_hs_to_RGB(*kwargs.get(ATTR_HS_COLOR))) pids.append(self._client.create_pid_pair(PropertyIDs.COLOR, self._color)) _LOGGER.debug("Turning on light") try: self._client.turn_on(self._device, pids) except AccessTokenError: self._client.reauthenticate() self._client.turn_on(self._device, pids) self._on = True self._just_updated = True def turn_off(self, **kwargs: Any) -> None: try: self._client.turn_off(self._device) except AccessTokenError: self._client.reauthenticate() self._client.turn_off(self._device) self._on = False self._just_updated = True @property def name(self): """Return the display name of this light.""" return self._device.nickname @property def unique_id(self): return self._device.mac @property def available(self): """Return the connection status of this light""" return self._available @property def hs_color(self): return color_util.color_RGB_to_hs(*color_util.rgb_hex_to_rgb_list(self._color)) @property def device_state_attributes(self): """Return device attributes of the entity.""" return { ATTR_ATTRIBUTION: ATTRIBUTION, "state": self.is_on, "available": self.available, "device model": self._device.product_model, "mac": self.unique_id } @property def brightness(self): """Return the brightness of the light. This method is optional. Removing it indicates to Home Assistant that brightness is not supported for this light. """ return self.translate(self._brightness, 1, 100, 1, 255) @property def color_temp(self): """Return the CT color value in mired.""" return self.translate(self._color_temp, 2700, 6500, 500, 140) @property def is_on(self): """Return true if light is on.""" return self._on @property def supported_features(self): if self._device_type is DeviceTypes.MESH_LIGHT: return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP def update(self): if not self._just_updated: try: device_info = self._client.get_info(self._device) except AccessTokenError: self._client.reauthenticate() device_info = self._client.get_info(self._device) for property_id, value in device_info: if property_id == PropertyIDs.BRIGHTNESS: self._brightness = int(value) elif property_id == PropertyIDs.COLOR_TEMP: try: self._color_temp = int(value) except ValueError: self._color_temp = 2700 elif property_id == PropertyIDs.ON: self._on = True if value == "1" else False elif property_id == PropertyIDs.AVAILABLE: self._available = True if value == "1" else False elif self._device_type is DeviceTypes.MESH_LIGHT and property_id == PropertyIDs.COLOR: self._color = value else: self._just_updated = False
32.981735
110
0.636854
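The translate static method in the record above is plain linear interpolation between two ranges, and it also covers the color-temperature case where the input range runs backwards (500 down to 140 mireds). Both directions checked in isolation:

def translate(value, input_min, input_max, output_min, output_max):
    left_span = input_max - input_min
    right_span = output_max - output_min
    value_scaled = float(value - input_min) / float(left_span)
    return output_min + (value_scaled * right_span)

# Home Assistant brightness (1-255) -> Wyze brightness (1-100)
assert round(translate(255, 1, 255, 1, 100)) == 100

# Mireds (500-140) -> Kelvin (2700-6500): the reversed input range still
# works because both spans change sign consistently.
assert round(translate(500, 500, 140, 2700, 6500)) == 2700
assert round(translate(140, 500, 140, 2700, 6500)) == 6500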
acefb554ab1997faddef57dfc333311f3bd44e67
4,182
py
Python
sdk/python/pulumi_azure_nextgen/web/list_static_site_secrets.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
31
2020-09-21T09:41:01.000Z
2021-02-26T13:21:59.000Z
sdk/python/pulumi_azure_nextgen/web/list_static_site_secrets.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
231
2020-09-21T09:38:45.000Z
2021-03-01T11:16:03.000Z
sdk/python/pulumi_azure_nextgen/web/list_static_site_secrets.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
4
2020-09-29T14:14:59.000Z
2021-02-10T20:38:16.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables from . import outputs __all__ = [ 'ListStaticSiteSecretsResult', 'AwaitableListStaticSiteSecretsResult', 'list_static_site_secrets', ] @pulumi.output_type class ListStaticSiteSecretsResult: """ String dictionary resource. """ def __init__(__self__, id=None, kind=None, name=None, properties=None, system_data=None, type=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if kind and not isinstance(kind, str): raise TypeError("Expected argument 'kind' to be a str") pulumi.set(__self__, "kind", kind) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if properties and not isinstance(properties, dict): raise TypeError("Expected argument 'properties' to be a dict") pulumi.set(__self__, "properties", properties) if system_data and not isinstance(system_data, dict): raise TypeError("Expected argument 'system_data' to be a dict") pulumi.set(__self__, "system_data", system_data) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def id(self) -> str: """ Resource Id. """ return pulumi.get(self, "id") @property @pulumi.getter def kind(self) -> Optional[str]: """ Kind of resource. """ return pulumi.get(self, "kind") @property @pulumi.getter def name(self) -> str: """ Resource Name. """ return pulumi.get(self, "name") @property @pulumi.getter def properties(self) -> Mapping[str, str]: """ Settings. """ return pulumi.get(self, "properties") @property @pulumi.getter(name="systemData") def system_data(self) -> 'outputs.SystemDataResponse': """ The system metadata relating to this resource. """ return pulumi.get(self, "system_data") @property @pulumi.getter def type(self) -> str: """ Resource type. """ return pulumi.get(self, "type") class AwaitableListStaticSiteSecretsResult(ListStaticSiteSecretsResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return ListStaticSiteSecretsResult( id=self.id, kind=self.kind, name=self.name, properties=self.properties, system_data=self.system_data, type=self.type) def list_static_site_secrets(name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListStaticSiteSecretsResult: """ String dictionary resource. API Version: 2020-10-01. :param str name: Name of the static site. :param str resource_group_name: Name of the resource group to which the resource belongs. """ __args__ = dict() __args__['name'] = name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:web:listStaticSiteSecrets', __args__, opts=opts, typ=ListStaticSiteSecretsResult).value return AwaitableListStaticSiteSecretsResult( id=__ret__.id, kind=__ret__.kind, name=__ret__.name, properties=__ret__.properties, system_data=__ret__.system_data, type=__ret__.type)
31.443609
138
0.627451
acefb80fd0785095dbcae9f83d993f74b22fe6ce
1,509
py
Python
sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/pipeline_run_invoked_by.py
tzhanl/azure-sdk-for-python
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
[ "MIT" ]
1
2021-09-07T18:36:04.000Z
2021-09-07T18:36:04.000Z
sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/pipeline_run_invoked_by.py
tzhanl/azure-sdk-for-python
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
[ "MIT" ]
2
2019-10-02T23:37:38.000Z
2020-10-02T01:17:31.000Z
sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/pipeline_run_invoked_by.py
tzhanl/azure-sdk-for-python
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
[ "MIT" ]
1
2019-06-17T22:18:23.000Z
2019-06-17T22:18:23.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class PipelineRunInvokedBy(Model):
    """Provides entity name and id that started the pipeline run.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: Name of the entity that started the pipeline run.
    :vartype name: str
    :ivar id: The ID of the entity that started the run.
    :vartype id: str
    :ivar invoked_by_type: The type of the entity that started the run.
    :vartype invoked_by_type: str
    """

    _validation = {
        'name': {'readonly': True},
        'id': {'readonly': True},
        'invoked_by_type': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'invoked_by_type': {'key': 'invokedByType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PipelineRunInvokedBy, self).__init__(**kwargs)
        self.name = None
        self.id = None
        self.invoked_by_type = None
32.804348
76
0.584493
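The model in the record above maps Python attribute names to wire keys through _attribute_map; msrest's serializer performs the renaming. A hand-rolled sketch of that mapping idea (the serialize function below is illustrative and is not the SDK's serializer):

class PipelineRunInvokedBy:
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'invoked_by_type': {'key': 'invokedByType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        self.name = kwargs.get('name')
        self.id = kwargs.get('id')
        self.invoked_by_type = kwargs.get('invoked_by_type')

def serialize(model) -> dict:
    # Rename each populated attribute to its wire key, dropping Nones.
    return {meta['key']: getattr(model, attr)
            for attr, meta in model._attribute_map.items()
            if getattr(model, attr) is not None}

run = PipelineRunInvokedBy(name='trigger1', invoked_by_type='ScheduleTrigger')
assert serialize(run) == {'name': 'trigger1', 'invokedByType': 'ScheduleTrigger'}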
acefb92009c91ee531373337c4072f630802b64e
3,386
py
Python
labelme2coco/utils.py
Mostro-Complexity/pytorch-YOLOv4
08512845a6824e8dc8fdd7e4976e2f4eca1daf7b
[ "Apache-2.0" ]
null
null
null
labelme2coco/utils.py
Mostro-Complexity/pytorch-YOLOv4
08512845a6824e8dc8fdd7e4976e2f4eca1daf7b
[ "Apache-2.0" ]
null
null
null
labelme2coco/utils.py
Mostro-Complexity/pytorch-YOLOv4
08512845a6824e8dc8fdd7e4976e2f4eca1daf7b
[ "Apache-2.0" ]
null
null
null
import os import json import jsonschema image_schema = { "type": "object", "properties": { "file_name": { "type": "string" }, "id": { "type": "integer" } }, "required": ["file_name", "id"] } segmentation_schema = { "type": "array", "items": { "type": "array", "items": { "type": "number", }, "additionalItems": False }, "additionalItems": False } annotation_schema = { "type": "object", "properties": { "image_id": { "type": "integer" }, "category_id": { "type": "integer" }, "segmentation": segmentation_schema }, "required": ["image_id", "category_id", "segmentation"] } category_schema = { "type": "object", "properties": { "name": { "type": "string" }, "id": { "type": "integer" } }, "required": ["name", "id"] } coco_schema = { "type": "object", "properties": { "images": { "type": "array", "items": image_schema, "additionalItems": False }, "annotations": { "type": "array", "items": annotation_schema, "additionalItems": False }, "categories": { "type": "array", "items": category_schema, "additionalItems": False } }, "required": ["images", "annotations", "categories"] } def read_and_validate_coco_annotation( coco_annotation_path: str) -> (dict, bool): """ Reads coco formatted annotation file and validates its fields. """ try: with open(coco_annotation_path) as json_file: coco_dict = json.load(json_file) jsonschema.validate(coco_dict, coco_schema) response = True except jsonschema.exceptions.ValidationError as e: print("well-formed but invalid JSON:", e) response = False except json.decoder.JSONDecodeError as e: print("poorly-formed text, not JSON:", e) response = False return coco_dict, response def create_dir(_dir): """ Creates given directory if it is not present. """ if not os.path.exists(_dir): os.makedirs(_dir) def list_jsons_recursively(directory, silent=True): """ Accepts a folder directory containing json files. Returns a list of json file paths present in given directory. """ target_extension_list = ["json"] # walk directories recursively and find json files abs_filepath_list = [] relative_filepath_list = [] # r=root, d=directories, f=files for r, _, f in os.walk(directory): for file in f: if file.split(".")[-1] in target_extension_list: abs_filepath = os.path.join(r, file) abs_filepath_list.append(abs_filepath) relative_filepath = abs_filepath.split(directory)[-1] relative_filepath_list.append(relative_filepath) number_of_files = len(relative_filepath_list) folder_name = directory.split(os.sep)[-1] if not silent: print("There are {} json files in folder {}.".format(number_of_files, folder_name)) return relative_filepath_list, abs_filepath_list
25.081481
91
0.550502
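read_and_validate_coco_annotation in the record above distinguishes two failure modes: text that is not JSON at all, and well-formed JSON that violates the COCO schema. A minimal demonstration against a schema of the same shape (requires the jsonschema package, as the file itself does):

import jsonschema

category_schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}, "id": {"type": "integer"}},
    "required": ["name", "id"],
}

jsonschema.validate({"name": "cat", "id": 1}, category_schema)  # passes

try:
    jsonschema.validate({"name": "cat"}, category_schema)       # missing id
except jsonschema.exceptions.ValidationError as e:
    print("well-formed but invalid JSON:", e.message)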
acefb92e507c025f1f4488431c545c3c6db9df25
14,846
py
Python
contrib/gitian-build.py
litecoinz-core/bitcoin
73867bf443a309fad2a81491758527a80eb75c8c
[ "MIT" ]
null
null
null
contrib/gitian-build.py
litecoinz-core/bitcoin
73867bf443a309fad2a81491758527a80eb75c8c
[ "MIT" ]
null
null
null
contrib/gitian-build.py
litecoinz-core/bitcoin
73867bf443a309fad2a81491758527a80eb75c8c
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) 2016-2019 The Bitcoin Core developers # Copyright (c) 2017-2020 The LitecoinZ Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. import argparse import os import subprocess import sys def setup(): global args, workdir programs = ['ruby', 'git', 'make', 'wget', 'curl'] if args.kvm: programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils'] elif args.docker and not os.path.isfile('/lib/systemd/system/docker.service'): dockers = ['docker.io', 'docker-ce'] for i in dockers: return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i]) if return_code == 0: break if return_code != 0: print('Cannot find any way to install Docker.', file=sys.stderr) sys.exit(1) else: programs += ['apt-cacher-ng', 'lxc', 'debootstrap'] subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs) if not os.path.isdir('gitian.sigs'): subprocess.check_call(['git', 'clone', 'https://github.com/litecoinz-core/gitian.sigs.git']) if not os.path.isdir('litecoinz-detached-sigs'): subprocess.check_call(['git', 'clone', 'https://github.com/litecoinz-core/litecoinz-detached-sigs.git']) if not os.path.isdir('gitian-builder'): subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git']) if not os.path.isdir('litecoinz'): subprocess.check_call(['git', 'clone', 'https://github.com/litecoinz-core/litecoinz.git']) os.chdir('gitian-builder') make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64', '--disksize', '20480'] if args.docker: make_image_prog += ['--docker'] elif not args.kvm: make_image_prog += ['--lxc'] subprocess.check_call(make_image_prog) os.chdir(workdir) if args.is_bionic and not args.kvm and not args.docker: subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net']) print('Reboot is required') sys.exit(0) def build(): global args, workdir os.makedirs('litecoinz-binaries/' + args.version, exist_ok=True) print('\nBuilding Dependencies\n') os.chdir('gitian-builder') os.makedirs('inputs', exist_ok=True) subprocess.check_call(['wget', '-O', 'inputs/osslsigncode-2.0.tar.gz', 'https://github.com/mtrojnar/osslsigncode/archive/2.0.tar.gz']) subprocess.check_call(["echo '5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f inputs/osslsigncode-2.0.tar.gz' | sha256sum -c"], shell=True) subprocess.check_call(['make', '-C', '../litecoinz/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common']) if args.linux: print('\nCompiling ' + args.version + ' Linux') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'litecoinz='+args.commit, '--url', 'litecoinz='+args.url, '../litecoinz/contrib/gitian-descriptors/gitian-linux.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../litecoinz/contrib/gitian-descriptors/gitian-linux.yml']) subprocess.check_call('mv build/out/litecoinz-*.tar.gz build/out/src/litecoinz-*.tar.gz ../litecoinz-binaries/'+args.version, shell=True) if args.windows: print('\nCompiling ' + args.version + ' Windows') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'litecoinz='+args.commit, '--url', 'litecoinz='+args.url, '../litecoinz/contrib/gitian-descriptors/gitian-win.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, 
'--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../litecoinz/contrib/gitian-descriptors/gitian-win.yml']) subprocess.check_call('mv build/out/litecoinz-*-win-unsigned.tar.gz inputs/', shell=True) subprocess.check_call('mv build/out/litecoinz-*.zip build/out/litecoinz-*.exe build/out/src/litecoinz-*.tar.gz ../litecoinz-binaries/'+args.version, shell=True) if args.macos: print('\nCompiling ' + args.version + ' MacOS') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'litecoinz='+args.commit, '--url', 'litecoinz='+args.url, '../litecoinz/contrib/gitian-descriptors/gitian-osx.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../litecoinz/contrib/gitian-descriptors/gitian-osx.yml']) subprocess.check_call('mv build/out/litecoinz-*-osx-unsigned.tar.gz inputs/', shell=True) subprocess.check_call('mv build/out/litecoinz-*.tar.gz build/out/litecoinz-*.dmg build/out/src/litecoinz-*.tar.gz ../litecoinz-binaries/'+args.version, shell=True) os.chdir(workdir) if args.commit_files: print('\nCommitting '+args.version+' Unsigned Sigs\n') os.chdir('gitian.sigs') subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer]) subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer]) os.chdir(workdir) def sign(): global args, workdir os.chdir('gitian-builder') if args.windows: print('\nSigning ' + args.version + ' Windows') subprocess.check_call('cp inputs/litecoinz-' + args.version + '-win-unsigned.tar.gz inputs/litecoinz-win-unsigned.tar.gz', shell=True) subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../litecoinz/contrib/gitian-descriptors/gitian-win-signer.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../litecoinz/contrib/gitian-descriptors/gitian-win-signer.yml']) subprocess.check_call('mv build/out/litecoinz-*win64-setup.exe ../litecoinz-binaries/'+args.version, shell=True) if args.macos: print('\nSigning ' + args.version + ' MacOS') subprocess.check_call('cp inputs/litecoinz-' + args.version + '-osx-unsigned.tar.gz inputs/litecoinz-osx-unsigned.tar.gz', shell=True) subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../litecoinz/contrib/gitian-descriptors/gitian-osx-signer.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../litecoinz/contrib/gitian-descriptors/gitian-osx-signer.yml']) subprocess.check_call('mv build/out/litecoinz-osx-signed.dmg ../litecoinz-binaries/'+args.version+'/litecoinz-'+args.version+'-osx.dmg', shell=True) os.chdir(workdir) if args.commit_files: print('\nCommitting '+args.version+' Signed Sigs\n') os.chdir('gitian.sigs') subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer]) subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer]) os.chdir(workdir) def verify(): global args, workdir 
rc = 0 os.chdir('gitian-builder') print('\nVerifying v'+args.version+' Linux\n') if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../litecoinz/contrib/gitian-descriptors/gitian-linux.yml']): print('Verifying v'+args.version+' Linux FAILED\n') rc = 1 print('\nVerifying v'+args.version+' Windows\n') if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../litecoinz/contrib/gitian-descriptors/gitian-win.yml']): print('Verifying v'+args.version+' Windows FAILED\n') rc = 1 print('\nVerifying v'+args.version+' MacOS\n') if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../litecoinz/contrib/gitian-descriptors/gitian-osx.yml']): print('Verifying v'+args.version+' MacOS FAILED\n') rc = 1 print('\nVerifying v'+args.version+' Signed Windows\n') if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../litecoinz/contrib/gitian-descriptors/gitian-win-signer.yml']): print('Verifying v'+args.version+' Signed Windows FAILED\n') rc = 1 print('\nVerifying v'+args.version+' Signed MacOS\n') if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../litecoinz/contrib/gitian-descriptors/gitian-osx-signer.yml']): print('Verifying v'+args.version+' Signed MacOS FAILED\n') rc = 1 os.chdir(workdir) return rc def main(): global args, workdir parser = argparse.ArgumentParser(description='Script for running full Gitian builds.') parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch') parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request') parser.add_argument('-u', '--url', dest='url', default='https://github.com/litecoinz-core/litecoinz', help='Specify the URL of the repository. Default is %(default)s') parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build') parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build') parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS') parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries') parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS') parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s') parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s') parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC') parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC') parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)') parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. 
    parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
    parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
    parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
    parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')

    args = parser.parse_args()
    workdir = os.getcwd()

    args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])

    if args.kvm and args.docker:
        raise Exception('Error: cannot have both kvm and docker')

    # Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
    # can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
    os.environ['USE_LXC'] = ''
    os.environ['USE_VBOX'] = ''
    os.environ['USE_DOCKER'] = ''
    if args.docker:
        os.environ['USE_DOCKER'] = '1'
    elif not args.kvm:
        os.environ['USE_LXC'] = '1'
        if 'GITIAN_HOST_IP' not in os.environ.keys():
            os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
        if 'LXC_GUEST_IP' not in os.environ.keys():
            os.environ['LXC_GUEST_IP'] = '10.0.3.5'

    if args.setup:
        setup()

    if args.buildsign:
        args.build = True
        args.sign = True

    if not args.build and not args.sign and not args.verify:
        sys.exit(0)

    args.linux = 'l' in args.os
    args.windows = 'w' in args.os
    args.macos = 'm' in args.os

    # Disable for MacOS if no SDK found
    if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.14.sdk.tar.gz'):
        print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
        args.macos = False

    args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'

    script_name = os.path.basename(sys.argv[0])
    if not args.signer:
        print(script_name+': Missing signer')
        print('Try '+script_name+' --help for more information')
        sys.exit(1)
    if not args.version:
        print(script_name+': Missing version')
        print('Try '+script_name+' --help for more information')
        sys.exit(1)

    # Add leading 'v' for tags
    if args.commit and args.pull:
        raise Exception('Cannot have both commit and pull')
    args.commit = ('' if args.commit else 'v') + args.version

    os.chdir('litecoinz')
    if args.pull:
        subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
        os.chdir('../gitian-builder/inputs/litecoinz')
        subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
        args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
        args.version = 'pull-' + args.version
    print(args.commit)
    subprocess.check_call(['git', 'fetch'])
    subprocess.check_call(['git', 'checkout', args.commit])
    os.chdir(workdir)

    os.chdir('gitian-builder')
    subprocess.check_call(['git', 'pull'])
    os.chdir(workdir)

    if args.build:
        build()

    if args.sign:
        sign()

    if args.verify:
        os.chdir('gitian.sigs')
        subprocess.check_call(['git', 'pull'])
        os.chdir(workdir)
        sys.exit(verify())

if __name__ == '__main__':
    main()
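A minimal sketch of how this script might be driven end to end. The filename, signer identity, and version tag below are illustrative assumptions, not values taken from the source:

# Hypothetical invocation of the gitian build script above; "gitian-build.py",
# the signer name, and the version are assumptions for illustration only.
import subprocess

signer = "builder@example.org"   # assumed GPG identity
version = "0.18.0"               # assumed release tag (the script prepends 'v')

# Build unsigned binaries for Linux and Windows with 3 jobs and 4000 MiB RAM,
# then verify the gitian signatures that other builders have committed.
subprocess.check_call(["python3", "gitian-build.py", "-b", "-o", "lw",
                       "-j", "3", "-m", "4000", signer, version])
subprocess.check_call(["python3", "gitian-build.py", "-v", signer, version])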
56.234848
233
0.653981
acefb98175627d9bbd314fd559f16eb33ce6406d
11,315
py
Python
lib/spack/spack/test/git_fetch.py
patrickb314/spack
9030033cc0d9729f82a8681f26d5e53f74919c70
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
lib/spack/spack/test/git_fetch.py
patrickb314/spack
9030033cc0d9729f82a8681f26d5e53f74919c70
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
5
2021-11-29T17:23:20.000Z
2022-03-02T17:29:12.000Z
lib/spack/spack/test/git_fetch.py
patrickb314/spack
9030033cc0d9729f82a8681f26d5e53f74919c70
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import copy
import os
import shutil

import pytest

from llnl.util.filesystem import mkdirp, touch, working_dir

import spack.config
import spack.repo
from spack.fetch_strategy import GitFetchStrategy
from spack.spec import Spec
from spack.stage import Stage
from spack.util.executable import which
from spack.version import ver

pytestmark = pytest.mark.skipif(
    not which('git'), reason='requires git to be installed')


_mock_transport_error = 'Mock HTTP transport error'


@pytest.fixture(params=[None, '1.8.5.2', '1.8.5.1', '1.7.10', '1.7.1', '1.7.0'])
def git_version(request, monkeypatch):
    """Tests GitFetchStrategy behavior for different git versions.

    GitFetchStrategy tries to optimize using features of newer git
    versions, but needs to work with older git versions. To ensure code
    paths for old versions still work, we fake it out here and make it
    use the backward-compatibility code paths with newer git versions.
    """
    git = which('git', required=True)
    real_git_version = (
        spack.fetch_strategy.GitFetchStrategy.version_from_git(git))

    if request.param is None:
        # Don't patch; run with the real git_version method.
        yield real_git_version
    else:
        test_git_version = ver(request.param)
        if test_git_version > real_git_version:
            pytest.skip("Can't test clone logic for newer version of git.")

        # Patch the fetch strategy to think it's using a lower git version.
        # we use this to test what we'd need to do with older git versions
        # using a newer git installation.
        monkeypatch.setattr(GitFetchStrategy, 'git_version', test_git_version)
        yield test_git_version


@pytest.fixture
def mock_bad_git(monkeypatch):
    """
    Test GitFetchStrategy behavior with a bad git command for git >= 1.7.1
    to trigger a SpackError.
    """
    def bad_git(*args, **kwargs):
        """Raise a SpackError with the transport message."""
        raise spack.error.SpackError(_mock_transport_error)

    # Patch the fetch strategy to think it's using a git version that
    # will error out when git is called.
    monkeypatch.setattr(GitFetchStrategy, 'git', bad_git)
    monkeypatch.setattr(GitFetchStrategy, 'git_version', ver('1.7.1'))
    yield


def test_bad_git(tmpdir, mock_bad_git):
    """Trigger a SpackError when attempting a fetch with a bad git."""
    testpath = str(tmpdir)

    with pytest.raises(spack.error.SpackError):
        fetcher = GitFetchStrategy(git='file:///not-a-real-git-repo')
        with Stage(fetcher, path=testpath):
            fetcher.fetch()


@pytest.mark.parametrize("type_of_test",
                         ['master', 'branch', 'tag', 'commit'])
@pytest.mark.parametrize("secure", [True, False])
def test_fetch(type_of_test,
               secure,
               mock_git_repository,
               config,
               mutable_mock_repo,
               git_version,
               monkeypatch):
    """Tries to:

    1. Fetch the repo using a fetch strategy constructed with
       supplied args (they depend on type_of_test).
    2. Check if the test_file is in the checked out repository.
    3. Assert that the repository is at the revision supplied.
    4. Add and remove some files, then reset the repo, and ensure
       it's all there again.
""" # Retrieve the right test parameters t = mock_git_repository.checks[type_of_test] h = mock_git_repository.hash # Construct the package under test spec = Spec('git-test') spec.concretize() pkg = spack.repo.get(spec) monkeypatch.setitem(pkg.versions, ver('git'), t.args) # Enter the stage directory and check some properties with pkg.stage: with spack.config.override('config:verify_ssl', secure): pkg.do_stage() with working_dir(pkg.stage.source_path): assert h('HEAD') == h(t.revision) file_path = os.path.join(pkg.stage.source_path, t.file) assert os.path.isdir(pkg.stage.source_path) assert os.path.isfile(file_path) os.unlink(file_path) assert not os.path.isfile(file_path) untracked_file = 'foobarbaz' touch(untracked_file) assert os.path.isfile(untracked_file) pkg.do_restage() assert not os.path.isfile(untracked_file) assert os.path.isdir(pkg.stage.source_path) assert os.path.isfile(file_path) assert h('HEAD') == h(t.revision) @pytest.mark.skipif(str(spack.platforms.host()) == 'windows', reason=('Git fails to clone because the src/dst paths' ' are too long: the name of the staging directory' ' for ad-hoc Git commit versions is longer than' ' other staged sources')) @pytest.mark.disable_clean_stage_check def test_adhoc_version_submodules( mock_git_repository, config, mutable_mock_repo, monkeypatch, mock_stage): t = mock_git_repository.checks['tag'] # Construct the package under test pkg_class = spack.repo.path.get_pkg_class('git-test') monkeypatch.setitem(pkg_class.versions, ver('git'), t.args) monkeypatch.setattr(pkg_class, 'git', 'file://%s' % mock_git_repository.path, raising=False) spec = Spec('git-test@{0}'.format(mock_git_repository.unversioned_commit)) spec.concretize() spec.package.do_stage() collected_fnames = set() for root, dirs, files in os.walk(spec.package.stage.source_path): collected_fnames.update(files) # The submodules generate files with the prefix "r0_file_" assert set(['r0_file_0', 'r0_file_1']) < collected_fnames @pytest.mark.parametrize("type_of_test", ['branch', 'commit']) def test_debug_fetch( mock_packages, type_of_test, mock_git_repository, config, monkeypatch ): """Fetch the repo with debug enabled.""" # Retrieve the right test parameters t = mock_git_repository.checks[type_of_test] # Construct the package under test spec = Spec('git-test') spec.concretize() pkg = spack.repo.get(spec) monkeypatch.setitem(pkg.versions, ver('git'), t.args) # Fetch then ensure source path exists with pkg.stage: with spack.config.override('config:debug', True): pkg.do_fetch() assert os.path.isdir(pkg.stage.source_path) def test_git_extra_fetch(tmpdir): """Ensure a fetch after 'expanding' is effectively a no-op.""" testpath = str(tmpdir) fetcher = GitFetchStrategy(git='file:///not-a-real-git-repo') with Stage(fetcher, path=testpath) as stage: mkdirp(stage.source_path) fetcher.fetch() # Use fetcher to fetch for code coverage shutil.rmtree(stage.source_path) def test_needs_stage(): """Trigger a NoStageError when attempt a fetch without a stage.""" with pytest.raises(spack.fetch_strategy.NoStageError, match=r"set_stage.*before calling fetch"): fetcher = GitFetchStrategy(git='file:///not-a-real-git-repo') fetcher.fetch() @pytest.mark.parametrize("get_full_repo", [True, False]) def test_get_full_repo(get_full_repo, git_version, mock_git_repository, config, mutable_mock_repo, monkeypatch): """Ensure that we can clone a full repository.""" if git_version < ver('1.7.1'): pytest.skip('Not testing get_full_repo for older git {0}'. 
        pytest.skip('Not testing get_full_repo for older git {0}'
                    .format(git_version))

    secure = True
    type_of_test = 'tag-branch'

    t = mock_git_repository.checks[type_of_test]

    spec = Spec('git-test')
    spec.concretize()
    pkg = spack.repo.get(spec)
    args = copy.copy(t.args)
    args['get_full_repo'] = get_full_repo
    monkeypatch.setitem(pkg.versions, ver('git'), args)

    with pkg.stage:
        with spack.config.override('config:verify_ssl', secure):
            pkg.do_stage()
            with working_dir(pkg.stage.source_path):
                branches = mock_git_repository.git_exe(
                    'branch', '-a', output=str).splitlines()
                nbranches = len(branches)
                commits = mock_git_repository.git_exe(
                    'log', '--graph',
                    '--pretty=format:%h -%d %s (%ci) <%an>',
                    '--abbrev-commit',
                    output=str).splitlines()
                ncommits = len(commits)

    if get_full_repo:
        assert nbranches >= 5
        assert ncommits == 2
    else:
        assert nbranches == 2
        assert ncommits == 1


@pytest.mark.disable_clean_stage_check
@pytest.mark.parametrize("submodules", [True, False])
def test_gitsubmodule(submodules, mock_git_repository, config,
                      mutable_mock_repo, monkeypatch):
    """
    Test GitFetchStrategy behavior with submodules. This package
    has a `submodules` property which is always True: when a specific
    version also indicates to include submodules, this should not
    interfere; if the specific version explicitly requests that
    submodules *not* be initialized, this should override the
    Package-level request.
    """
    type_of_test = 'tag-branch'
    t = mock_git_repository.checks[type_of_test]

    # Construct the package under test
    spec = Spec('git-test')
    spec.concretize()
    pkg = spack.repo.get(spec)
    args = copy.copy(t.args)
    args['submodules'] = submodules
    monkeypatch.setitem(pkg.versions, ver('git'), args)
    pkg.do_stage()
    with working_dir(pkg.stage.source_path):
        for submodule_count in range(2):
            file_path = os.path.join(pkg.stage.source_path,
                                     'third_party/submodule{0}/r0_file_{0}'
                                     .format(submodule_count))
            if submodules:
                assert os.path.isfile(file_path)
            else:
                assert not os.path.isfile(file_path)


@pytest.mark.disable_clean_stage_check
def test_gitsubmodules_delete(
        mock_git_repository, config, mutable_mock_repo, monkeypatch
):
    """
    Test GitFetchStrategy behavior with submodules_delete
    """
    type_of_test = 'tag-branch'
    t = mock_git_repository.checks[type_of_test]

    # Construct the package under test
    spec = Spec('git-test')
    spec.concretize()
    pkg = spack.repo.get(spec)
    args = copy.copy(t.args)
    args['submodules'] = True
    args['submodules_delete'] = ['third_party/submodule0',
                                 'third_party/submodule1']
    monkeypatch.setitem(pkg.versions, ver('git'), args)
    pkg.do_stage()
    with working_dir(pkg.stage.source_path):
        file_path = os.path.join(pkg.stage.source_path,
                                 'third_party/submodule0')
        assert not os.path.isdir(file_path)
        file_path = os.path.join(pkg.stage.source_path,
                                 'third_party/submodule1')
        assert not os.path.isdir(file_path)
35.806962
81
0.64251
acefba1c91bb85796900e6c94feec0ce8633d890
441
py
Python
setup.py
dboczek/django-admin-webapi
7fbe992473ea42eae5f673625411b231b50ba23c
[ "Unlicense" ]
1
2018-11-21T12:39:45.000Z
2018-11-21T12:39:45.000Z
setup.py
dboczek/django-admin-webapi
7fbe992473ea42eae5f673625411b231b50ba23c
[ "Unlicense" ]
null
null
null
setup.py
dboczek/django-admin-webapi
7fbe992473ea42eae5f673625411b231b50ba23c
[ "Unlicense" ]
null
null
null
from setuptools import setup

# before loosening requirements it should be tested first
install_requires = [
    'djangorestframework==3.7.7',
]

setup(
    name="django-admin-webapi",
    version="0.1",
    license='Unlicense',
    description="Admin WebAPI based on Django Rest Framework",
    author='Daniel Boczek',
    packages=['admin_webapi'],
    package_dir={'': '.'},
    install_requires=install_requires,
    zip_safe=False,
)
22.05
62
0.69161
acefbba6733113fbd0c50a70c5ca461b4c1adf15
10,053
py
Python
contrib/spendfrom/spendfrom.py
Frigyes06/peacecoin
341c0a275dded85638e3e9435679fe1ba1c5deca
[ "MIT" ]
null
null
null
contrib/spendfrom/spendfrom.py
Frigyes06/peacecoin
341c0a275dded85638e3e9435679fe1ba1c5deca
[ "MIT" ]
null
null
null
contrib/spendfrom/spendfrom.py
Frigyes06/peacecoin
341c0a275dded85638e3e9435679fe1ba1c5deca
[ "MIT" ]
null
null
null
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
#  spendfrom.py  # Lists available funds
#  spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#

from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json

BASE_FEE=Decimal("0.001")

def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    n = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")

def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    if platform.system() == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    elif platform.system() == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    return os.path.expanduser("~/.bitcoin")

def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                try:
                    return self.sechead
                finally:
                    self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() + "\n"
                return s

    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    return dict(config_parser.items("all"))

def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server"""
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 14847 if testnet else 4847
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)

def unlock_wallet(bitcoind):
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True  # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            bitcoind.walletpassphrase(passphrase, 5)
        except:
            sys.stderr.write("Wrong passphrase\n")

    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()

def list_available(bitcoind):
    address_summary = dict()

    address_to_account = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]

    unspent = bitcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash": continue address = pk["addresses"][0] if address in address_summary: address_summary[address]["total"] += vout["value"] address_summary[address]["outputs"].append(output) else: address_summary[address] = { "total" : vout["value"], "outputs" : [output], "account" : address_to_account.get(address, "") } return address_summary def select_coins(needed, inputs): # Feel free to improve this, this is good enough for my simple needs: outputs = [] have = Decimal("0.0") n = 0 while have < needed and n < len(inputs): outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]}) have += inputs[n]["amount"] n += 1 return (outputs, have-needed) def create_tx(bitcoind, fromaddresses, toaddress, amount, fee): all_coins = list_available(bitcoind) total_available = Decimal("0.0") needed = amount+fee potential_inputs = [] for addr in fromaddresses: if addr not in all_coins: continue potential_inputs.extend(all_coins[addr]["outputs"]) total_available += all_coins[addr]["total"] if total_available < needed: sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed)); sys.exit(1) # # Note: # Python's json/jsonrpc modules have inconsistent support for Decimal numbers. # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode # Decimals, I'm casting amounts to float before sending them to bitcoind. # outputs = { toaddress : float(amount) } (inputs, change_amount) = select_coins(needed, potential_inputs) if change_amount > BASE_FEE: # don't bother with zero or tiny change change_address = fromaddresses[-1] if change_address in outputs: outputs[change_address] += float(change_amount) else: outputs[change_address] = float(change_amount) rawtx = bitcoind.createrawtransaction(inputs, outputs) signed_rawtx = bitcoind.signrawtransaction(rawtx) if not signed_rawtx["complete"]: sys.stderr.write("signrawtransaction failed\n") sys.exit(1) txdata = signed_rawtx["hex"] return txdata def compute_amount_in(bitcoind, txinfo): result = Decimal("0.0") for vin in txinfo['vin']: in_info = bitcoind.getrawtransaction(vin['txid'], 1) vout = in_info['vout'][vin['vout']] result = result + vout['value'] return result def compute_amount_out(txinfo): result = Decimal("0.0") for vout in txinfo['vout']: result = result + vout['value'] return result def sanity_test_fee(bitcoind, txdata_hex, max_fee): class FeeError(RuntimeError): pass try: txinfo = bitcoind.decoderawtransaction(txdata_hex) total_in = compute_amount_in(bitcoind, txinfo) total_out = compute_amount_out(txinfo) if total_in-total_out > max_fee: raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out)) tx_size = len(txdata_hex)/2 kb = tx_size/1000 # integer division rounds down if kb > 1 and fee < BASE_FEE: raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes") if total_in < 0.01 and fee < BASE_FEE: raise FeeError("Rejecting no-fee, tiny-amount transaction") # Exercise for the reader: compute transaction priority, and # warn if this is a very-low-priority transaction except FeeError as err: sys.stderr.write((str(err)+"\n")) sys.exit(1) def main(): import optparse parser = optparse.OptionParser(usage="%prog [options]") parser.add_option("--from", dest="fromaddresses", default=None, help="addresses to get bitcoins from") parser.add_option("--to", dest="to", default=None, help="address to get send bitcoins to") parser.add_option("--amount", dest="amount", default=None, help="amount to send") parser.add_option("--fee", dest="fee", 
default="0.0", help="fee to include") parser.add_option("--datadir", dest="datadir", default=determine_db_dir(), help="location of bitcoin.conf file with RPC username/password (default: %default)") parser.add_option("--testnet", dest="testnet", default=False, action="store_true", help="Use the test network") parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true", help="Don't broadcast the transaction, just create and print the transaction data") (options, args) = parser.parse_args() check_json_precision() config = read_bitcoin_config(options.datadir) if options.testnet: config['testnet'] = True bitcoind = connect_JSON(config) if options.amount is None: address_summary = list_available(bitcoind) for address,info in address_summary.iteritems(): n_transactions = len(info['outputs']) if n_transactions > 1: print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions)) else: print("%s %.8f %s"%(address, info['total'], info['account'])) else: fee = Decimal(options.fee) amount = Decimal(options.amount) while unlock_wallet(bitcoind) == False: pass # Keep asking for passphrase until they get it right txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee) sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01")) if options.dry_run: print(txdata) else: txid = bitcoind.sendrawtransaction(txdata) print(txid) if __name__ == '__main__': main()
37.511194
111
0.632249
acefbbf930b4d71d8b1cb50aa90373740b0665e4
9,064
py
Python
setup.py
birchmd/cryptography
87b660d5802e22d56a8d17a18bdadfc1082739fd
[ "PSF-2.0", "Apache-2.0", "BSD-3-Clause" ]
null
null
null
setup.py
birchmd/cryptography
87b660d5802e22d56a8d17a18bdadfc1082739fd
[ "PSF-2.0", "Apache-2.0", "BSD-3-Clause" ]
null
null
null
setup.py
birchmd/cryptography
87b660d5802e22d56a8d17a18bdadfc1082739fd
[ "PSF-2.0", "Apache-2.0", "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python

# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

import os
import platform
import sys
from distutils.command.build import build

import pkg_resources

import setuptools
from setuptools import find_packages, setup
from setuptools.command.install import install


if (
    pkg_resources.parse_version(setuptools.__version__)
    < pkg_resources.parse_version("18.5")
):
    raise RuntimeError(
        "cryptography requires setuptools 18.5 or newer, please upgrade to a "
        "newer version of setuptools"
    )

base_dir = os.path.dirname(__file__)
src_dir = os.path.join(base_dir, "src")

# When executing the setup.py, we need to be able to import ourselves, this
# means that we need to add the src/ directory to the sys.path.
sys.path.insert(0, src_dir)

about = {}
with open(os.path.join(src_dir, "cryptography", "__about__.py")) as f:
    exec(f.read(), about)

# `setup_requirements` must be kept in sync with `pyproject.toml`
setup_requirements = ["cffi>=1.8,!=1.11.3"]

if platform.python_implementation() == "PyPy":
    if sys.pypy_version_info < (5, 4):
        raise RuntimeError(
            "cryptography is not compatible with PyPy < 5.4. Please upgrade "
            "PyPy to use this library."
        )


def keywords_with_side_effects(argv):
    """
    Get a dictionary with setup keywords that (can) have side effects.

    :param argv: A list of strings with command line arguments.

    :returns: A dictionary with keyword arguments for the ``setup()`` function.

    This setup.py script uses the setuptools 'setup_requires' feature because
    this is required by the cffi package to compile extension modules. The
    purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi
    build process as a result of setup.py invocations that don't need the cffi
    module to be built (setup.py serves the dual purpose of exposing package
    metadata).

    All of the options listed by ``python setup.py --help`` that print
    information should be recognized here. The commands ``clean``,
    ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.
    Any combination of these options and commands is also supported.

    This function was originally based on the `setup.py script`_ of SciPy
    (see also the discussion in `pip issue #25`_).

    .. _pip issue #25: https://github.com/pypa/pip/issues/25
    .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py
    """
    no_setup_requires_arguments = (
        '-h', '--help',
        '-n', '--dry-run',
        '-q', '--quiet',
        '-v', '--verbose',
        '-V', '--version',
        '--author',
        '--author-email',
        '--classifiers',
        '--contact',
        '--contact-email',
        '--description',
        '--egg-base',
        '--fullname',
        '--help-commands',
        '--keywords',
        '--licence',
        '--license',
        '--long-description',
        '--maintainer',
        '--maintainer-email',
        '--name',
        '--no-user-cfg',
        '--obsoletes',
        '--platforms',
        '--provides',
        '--requires',
        '--url',
        'clean',
        'egg_info',
        'register',
        'sdist',
        'upload',
    )

    def is_short_option(argument):
        """Check whether a command line argument is a short option."""
        return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'

    def expand_short_options(argument):
        """Expand combined short options into canonical short options."""
        return ('-' + char for char in argument[1:])

    def argument_without_setup_requirements(argv, i):
        """Check whether a command line argument needs setup requirements."""
        if argv[i] in no_setup_requires_arguments:
            # Simple case: An argument which is either an option or a command
            # which doesn't need setup requirements.
            return True
        elif (is_short_option(argv[i]) and
              all(option in no_setup_requires_arguments
                  for option in expand_short_options(argv[i]))):
            # Not so simple case: Combined short options none of which need
            # setup requirements.
            return True
        elif argv[i - 1:i] == ['--egg-base']:
            # Tricky case: --egg-info takes an argument which should not make
            # us use setup_requires (defeating the purpose of this code).
            return True
        else:
            return False

    if all(argument_without_setup_requirements(argv, i)
           for i in range(1, len(argv))):
        return {
            "cmdclass": {
                "build": DummyBuild,
                "install": DummyInstall,
            }
        }
    else:
        cffi_modules = [
            "src/_cffi_src/build_openssl.py:ffi",
            "src/_cffi_src/build_padding.py:ffi",
        ]

        return {
            "setup_requires": setup_requirements,
            "cffi_modules": cffi_modules
        }


setup_requires_error = ("Requested setup command that needs 'setup_requires' "
                        "while command line arguments implied a side effect "
                        "free command or option.")


class DummyBuild(build):
    """
    This class makes it very obvious when ``keywords_with_side_effects()`` has
    incorrectly interpreted the command line arguments to ``setup.py build`` as
    one of the 'side effect free' commands or options.
    """

    def run(self):
        raise RuntimeError(setup_requires_error)


class DummyInstall(install):
    """
    This class makes it very obvious when ``keywords_with_side_effects()`` has
    incorrectly interpreted the command line arguments to ``setup.py install``
    as one of the 'side effect free' commands or options.
""" def run(self): raise RuntimeError(setup_requires_error) with open(os.path.join(base_dir, "README.rst")) as f: long_description = f.read() setup( name=about["__title__"], version=about["__version__"], description=about["__summary__"], long_description=long_description, long_description_content_type="text/x-rst", license=about["__license__"], url=about["__uri__"], author=about["__author__"], author_email=about["__email__"], classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "License :: OSI Approved :: BSD License", "Natural Language :: English", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX", "Operating System :: POSIX :: BSD", "Operating System :: POSIX :: Linux", "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Security :: Cryptography", ], package_dir={"": "src"}, packages=find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"]), include_package_data=True, python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*', install_requires=[ "six >= 1.4.1", ] + setup_requirements, extras_require={ ":python_version < '3'": ["enum34", "ipaddress"], "test": [ "pytest>=3.6.0,!=3.9.0,!=3.9.1,!=3.9.2", "pretend", "iso8601", "pytz", "hypothesis>=1.11.4,!=3.79.2", ], "docs": [ "sphinx >= 1.6.5,!=1.8.0,!=3.1.0,!=3.1.1", "sphinx_rtd_theme", ], "docstest": [ "doc8", "pyenchant >= 1.6.11", "twine >= 1.12.0", "sphinxcontrib-spelling >= 4.0.1", ], "pep8test": [ "flake8", "flake8-import-order", "pep8-naming", ], # This extra is for OpenSSH private keys that use bcrypt KDF # Versions: v3.1.3 - ignore_few_rounds, v3.1.5 - abi3 "ssh": ["bcrypt >= 3.1.5"], # This extra is for the U-label support that was deprecated in # cryptography 2.1. If you need this deprecated path install with # pip install cryptography[idna] "idna": [ "idna >= 2.1", ] }, # for cffi zip_safe=False, ext_package="cryptography.hazmat.bindings", **keywords_with_side_effects(sys.argv) )
32.604317
79
0.600066
acefbcf788f285dfc908b3e8d9ae2224a7c81211
568
py
Python
network-analysis/bcc/save.py
Dunateo/Menga
724057051091b93c342cd3c51705e04e298a43da
[ "Apache-2.0" ]
1
2022-03-17T18:27:11.000Z
2022-03-17T18:27:11.000Z
network-analysis/bcc/save.py
Dunateo/Menga
724057051091b93c342cd3c51705e04e298a43da
[ "Apache-2.0" ]
null
null
null
network-analysis/bcc/save.py
Dunateo/Menga
724057051091b93c342cd3c51705e04e298a43da
[ "Apache-2.0" ]
null
null
null
import shutil
import os

# add content to a file
def add_content(filename, content, mode):
    fname = "network-" + filename
    finalfile = open(fname, mode)
    finalfile.write(content)
    finalfile.close()

# delete complete directory with files
def delete_directory(path):
    try:
        shutil.rmtree(path)
    except OSError as e:
        print("Error on delete: %s : %s" % (path, e.strerror))

# create directory
def create_directory(path):
    try:
        os.mkdir(path)
    except OSError as e:
        print("Error on creation: %s : %s" % (path, e.strerror))
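A minimal usage sketch of the helpers above; the file and directory names are illustrative assumptions. Note that add_content() prefixes "network-" to whatever filename it is given:

create_directory("bcc-results")                    # creates ./bcc-results/
add_content("capture.log", "first line\n", "w")    # writes ./network-capture.log
add_content("capture.log", "second line\n", "a")   # appends to the same file
delete_directory("bcc-results")                    # removes the directory again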
23.666667
64
0.65669
acefbd12d28ef14f1849b8f19e3898db9f59cccf
4,365
py
Python
simulation/gym/env.py
ShawK91/ProjectApollo
9b37b40f4a5959c3c732d93d093b9584f2ac338d
[ "MIT" ]
null
null
null
simulation/gym/env.py
ShawK91/ProjectApollo
9b37b40f4a5959c3c732d93d093b9584f2ac338d
[ "MIT" ]
null
null
null
simulation/gym/env.py
ShawK91/ProjectApollo
9b37b40f4a5959c3c732d93d093b9584f2ac338d
[ "MIT" ]
null
null
null
"""Apollo task""" import numpy as np from gym import core, spaces from gym.utils import seeding import psa2 as psa import importlib """ ACTIONS   'real_cycle_time': [Range =10, 20] Discrete   'vent_time_fract': [Range = 0.60, 0.95] Continous STATE ['P','T','Tw','xA','xB','yA','yB'] """ class ApolloEnv(core.Env): def __init__(self): self.viewer = None #TODO VERIFY THAT THIS CONSTRAINT IS TRUE OR IF WE HAVE A LARGER RANGE TO CHOOSE FROM #Action Ranges self.min_action = np.array([10, 0.6]) self.max_action = np.array([20, 0.95]) self.action_space = spaces.Box( low=self.min_action, high=self.max_action, shape=(2,), dtype=np.float32 ) #State/Observation Varible Range self.low_state = -float('inf') self.high_state = float('inf') self.observation_space = spaces.Box( low=self.low_state, high=self.high_state, shape = (7,), dtype=np.float32 ) #TODO Wrap PSA in a class such as to avoid the following hack #Defult values for params and norm needed to init_state self.params=importlib.import_module('params') dummy_action = get_dummy_action() self.params = self.params.create_param(dummy_action, lambda *args : None) self.norm = create_norm(self.params) def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def reset(self): state = psa.init(self.params, self.norm) return state def step(self, action, outdir='test'): """ Action is a list of 6 inputs that is in the continous range [-1.0, 1.0] This is constrained so that we can use a hyperbolic tangent output activation for our opolicy network. This is particualarly helpful for stochastic policies (Gaussain) often used in SAC """ #TODO Clean this routine #Translate action (list) to override params dict #Translation is min + ([-1,1] --> [0,1]) * (mx - min) mods = AttrDict() mods.real_cycle_time= self.min_action[0] + (0.5 * (action[0]+1)) * (self.max_action[0] - self.min_action[0]) mods.vent_time_fract= self.min_action[1] + (0.5 * (action[1]+1)) * (self.max_action[1] - self.min_action[1]) #Hardcoded mods.cycles=31 mods.real_vent_time=mods.vent_time_fract*mods.real_cycle_time mods.input_orifice=2.78 mods.vent_orifice=1.56 mods.blowdown_orifice=3.5 #Simulate data, ret, param, pickle_name,out_place= psa.simulate(mods, outdir=outdir, params_file='params', verbose=False) #TODO Verify if we care about prod_y or counter_y? Also [-1] refers to the last timestep but why [1]? We care about the middle cylinder? reward = ret.container_y[-1][1] #New state state = psa.init(self.params, self.norm) return state, reward, True, {} def render(self, mode='human'): #TODO Add a viz pass def close(self): pass class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self def create_norm(param): norm=AttrDict() #The normalization values, from Table 1 in Supplementary Info for the Ref paper norm.P0=param.PH # 300kPa [Pa] norm.T0=293.15 #Room temp = 20C, [K] norm.qs0=param.qBs*1000 #The normalization amount mol/kg norm.xA=norm.qs0 # qs,0 [mol/kg] norm.xB=norm.qs0 # qs,0 [mol/kg] norm.v0=param.norm_v0 # [m/s] norm.z0=param.L # Length of cylinder [m] norm.t0=param.norm_t0 # so t=time/norm.t0 #norm.t0=norm.z0/norm.v0 # so t=time/norm.t0 #norm.alphaA=param.kA*param.L/norm.v0 #norm.alphaB=param.kB*param.L/norm.v0 return norm def get_dummy_action(): mods=AttrDict() mods.cycles=31 mods.input_orifice=2.78 mods.vent_orifice=1.56 mods.blowdown_orifice=3.5 mods.vent_time_fract=0.75 mods.real_cycle_time=15 mods.real_vent_time=mods.vent_time_fract*mods.real_cycle_time return mods
31.402878
144
0.603895
acefbd761e3cc2597b50c17b309514dc09406c67
2,204
py
Python
Reverse_Shell_Controller/Scripts/soundRecorder.py
Ricky-001/Awesome-Python-Scripts
03f168c5ac03c6044d346f0fbb5f877b6c09e284
[ "MIT" ]
1
2020-10-02T09:43:35.000Z
2020-10-02T09:43:35.000Z
Reverse_Shell_Controller/Scripts/soundRecorder.py
Ricky-001/Awesome-Python-Scripts
03f168c5ac03c6044d346f0fbb5f877b6c09e284
[ "MIT" ]
null
null
null
Reverse_Shell_Controller/Scripts/soundRecorder.py
Ricky-001/Awesome-Python-Scripts
03f168c5ac03c6044d346f0fbb5f877b6c09e284
[ "MIT" ]
null
null
null
import pyaudio
import wave
import os, sys
import threading

chunk = 1024
Format = pyaudio.paInt16
channel = 2
RATE = 44100

# THIS FUNCTION IS MAINLY RESPONSIBLE FOR RECORDING THE AUDIO
# AND WRITING THE DATA IN CHUNKS OF 1024 BYTES AND ADDING THEM
# TO THE frames ARRAY WHICH WILL THEN BE WRITTEN INTO THE FILE
# MADE BY THE makeFile() FUNCTION THAT CALLS THIS METHOD
# THE OPERATIONS IN THIS FUNCTION RUNS AS LONG AS THE stop_thread EVENT
# IS NOT SET BY THE PARENT THREAD SCRIPT
# ONCE SET, THE WHILE LOOP BREAKS AND THE RECORDED DATA IS THEN RETURNED
# TO THE makeFile() FUNCTION TO BE WRITTEN TO THE FILE
def record(stop_thread):
    p = pyaudio.PyAudio()
    stream = p.open(format=Format,
                    channels=channel,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=chunk)
    frames = []
    try:
        while not stop_thread.is_set():
            data = stream.read(chunk)
            frames.append(data)
    except Exception as e:
        print(str(e))

    sample_width = p.get_sample_size(Format)
    stream.stop_stream()
    stream.close()
    p.terminate()
    return sample_width, frames

# THIS FUNCTION TAKES IN THE file_path AND THE STOPPING EVENT AS ARGUMENTS
# IT CREATES THE FILE IN file_path ACCORDING TO THE PROPERTIES OF THE RECORDED AUDIO FILE
# AND SIMPLY PASSES ON THE EVENT TO THE record() FUNCTION
# WHERE IT IS HANDLED
def makeFile(file_path, stop_thread):
    # opens the output file as named in file_path
    wf = wave.open(file_path, 'wb')
    wf.setnchannels(channel)
    # get the audio output generated by the record() function
    sample_width, frames = record(stop_thread)
    wf.setsampwidth(sample_width)
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()

# THIS FUNCTION IS CALLED FROM THE MAIN THREAD (reverse_shell SCRIPT)
# TAKES IN stop_thread AS AN ARGUMENT
# OF TYPE threading.Event
# AND PASSES IT ON TO THE makeFile() FUNCTION,
# WHICH AGAIN PASSES IT TO THE record() FUNCTION WHICH HANDLES THIS EVENT
# WHEN THIS EVENT IS SET, THIS SCRIPT STOPS RUNNING
# HANDLED IN THE record() METHOD
def startRecording(stop_thread):
    try:
        rec_path = os.environ["appdata"] + "\\Record.wav"
    except:
        rec_path = os.environ["HOME"] + "/Record.wav"
    makeFile(rec_path, stop_thread)

#startRecording()
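A minimal sketch of how a controller thread might drive startRecording() via the threading.Event protocol the comments above describe; the five-second duration is an illustrative assumption:

import threading
import time

stop_event = threading.Event()
recorder = threading.Thread(target=startRecording, args=(stop_event,))
recorder.start()    # record() loops while the event stays unset
time.sleep(5)       # ...capture roughly five seconds of audio...
stop_event.set()    # breaks the read loop; makeFile() then writes Record.wav
recorder.join()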
25.045455
89
0.744102
acefbe0fb442513c3622226f5d31d14f691d98fb
1,232
py
Python
pwdlearn/__main__.py
ghadd/password_learner
310e5f37661c52d1ba265f2b68d49932c0a299c2
[ "MIT" ]
null
null
null
pwdlearn/__main__.py
ghadd/password_learner
310e5f37661c52d1ba265f2b68d49932c0a299c2
[ "MIT" ]
null
null
null
pwdlearn/__main__.py
ghadd/password_learner
310e5f37661c52d1ba265f2b68d49932c0a299c2
[ "MIT" ]
null
null
null
#!/usr/bin/env python
from pwdgen.password import Password
from pwdgen import PasswordGenerator as PGen

from argparse import ArgumentParser
from termcolor import colored
from pathlib import Path

parser = ArgumentParser(description="Learn passwords!")
parser.add_argument("--new", action="store_true", dest="create_new")
parser.add_argument("--allowed-chars", dest="allowed_chars", default=None)
parser.add_argument(
    "-e", "--exclude-ambigous", dest="exclude_ambigous_chars", action="store_true"
)
parser.add_argument("-l", "--length", dest="length", default=16)

if __name__ == "__main__":
    # expanduser() is required here: pathlib does not expand "~" on its own,
    # so the original call would have created a literal "./~" directory.
    Path("~/.pwdlearner").expanduser().mkdir(parents=True, exist_ok=True)
    args = parser.parse_args()

    if not args.create_new:
        try:
            pwd = Password.load()
        except IOError:
            print("Please, specify tag --new to create a new password.")
            exit(1)
    else:
        try:
            gen = PGen(
                int(args.length),
                allowed_chars=args.allowed_chars,
                exclude_ambigous_chars=args.exclude_ambigous_chars,
            )
        except ValueError as e:
            print(colored(e, "red"))
            exit(1)

        pwd = gen.generate()

    pwd.learn()
28.651163
82
0.637175
acefc08cef461d22cea6c4b5f12d02959f5a0ff9
3,985
py
Python
sdk/python/kfp/deprecated/dsl/_metadata.py
RonsenbergVI/pipelines
a85dc4f5f1f65f14bd807dec9ab25d8dafb34379
[ "Apache-2.0" ]
1
2022-03-30T05:22:19.000Z
2022-03-30T05:22:19.000Z
sdk/python/kfp/deprecated/dsl/_metadata.py
RonsenbergVI/pipelines
a85dc4f5f1f65f14bd807dec9ab25d8dafb34379
[ "Apache-2.0" ]
1
2020-02-06T12:53:44.000Z
2020-02-06T12:53:44.000Z
sdk/python/kfp/deprecated/dsl/_metadata.py
RonsenbergVI/pipelines
a85dc4f5f1f65f14bd807dec9ab25d8dafb34379
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings

from .types import BaseType, _check_valid_type_dict
from ..components._data_passing import serialize_value
from ..components.structures import ComponentSpec, InputSpec, OutputSpec


def _annotation_to_typemeta(annotation):
    """_annotation_to_type_meta converts an annotation to a type structure.

    Args:
      annotation(BaseType/str/dict): input/output annotations
        BaseType: registered in kfp.dsl.types
        str: either a string of a dict serialization or a string of the type name
        dict: type name and properties. note that the properties values can be dict.

    Returns:
      dict or string representing the type
    """
    if isinstance(annotation, BaseType):
        arg_type = annotation.to_dict()
    elif isinstance(annotation, str):
        arg_type = annotation
    elif isinstance(annotation, dict):
        if not _check_valid_type_dict(annotation):
            raise ValueError('Annotation ' + str(annotation) +
                             ' is not a valid type dictionary.')
        arg_type = annotation
    else:
        return None
    return arg_type


def _extract_pipeline_metadata(func):
    """Creates pipeline metadata structure instance based on the function
    signature."""

    # Most of this code is only needed for verifying the default values against
    # "openapi_schema_validator" type properties.
    # TODO: Move the value verification code to some other place

    from ._pipeline_param import PipelineParam

    import inspect
    fullargspec = inspect.getfullargspec(func)
    args = fullargspec.args
    annotations = fullargspec.annotations

    # defaults
    arg_defaults = {}
    if fullargspec.defaults:
        for arg, default in zip(
                reversed(fullargspec.args), reversed(fullargspec.defaults)):
            arg_defaults[arg] = default

    for arg in args:
        arg_type = None
        arg_default = arg_defaults[arg] if arg in arg_defaults else None
        if isinstance(arg_default, PipelineParam):
            warnings.warn(
                'Explicit creation of `kfp.dsl.PipelineParam`s by the users is '
                'deprecated. The users should define the parameter type and default '
                'values using standard pythonic constructs: '
                'def my_func(a: int = 1, b: str = "default"):')
            arg_default = arg_default.value
        if arg in annotations:
            arg_type = _annotation_to_typemeta(annotations[arg])
        arg_type_properties = list(arg_type.values())[0] if isinstance(
            arg_type, dict) else {}
        if 'openapi_schema_validator' in arg_type_properties and (arg_default is not None):
            from jsonschema import validate
            import json
            schema_object = arg_type_properties['openapi_schema_validator']
            if isinstance(schema_object, str):
                # In case the property value for the schema validator is a string
                # instead of a dict.
                schema_object = json.loads(schema_object)
            # Only validating non-serialized values
            validate(instance=arg_default, schema=schema_object)

    from kfp.deprecated.components._python_op import _extract_component_interface
    component_spec = _extract_component_interface(func)
    return component_spec
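A small sketch of the default-value check performed above: a parameter's default is validated against the type's "openapi_schema_validator" property using jsonschema.validate. The type dictionary below is an illustrative assumption, not one taken from the source:

import json
from jsonschema import validate

arg_type = {
    "Integer": {
        "openapi_schema_validator": json.dumps({"type": "integer", "minimum": 0})
    }
}
# Mirror the string-vs-dict handling above: decode the schema if it is a string.
schema = json.loads(list(arg_type.values())[0]["openapi_schema_validator"])

validate(instance=3, schema=schema)      # passes silently
# validate(instance=-1, schema=schema)   # would raise jsonschema.ValidationError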
40.252525
85
0.677792
acefc0a31d7a9774a9ed1fe9dda5f2a3b8f42f61
2,011
py
Python
catalyst/contrib/models/segmentation/fpn.py
162/catalyst
b4ba36be52c51160e0fabecdcb084a8d5cd96cb7
[ "MIT" ]
null
null
null
catalyst/contrib/models/segmentation/fpn.py
162/catalyst
b4ba36be52c51160e0fabecdcb084a8d5cd96cb7
[ "MIT" ]
null
null
null
catalyst/contrib/models/segmentation/fpn.py
162/catalyst
b4ba36be52c51160e0fabecdcb084a8d5cd96cb7
[ "MIT" ]
null
null
null
from typing import Dict

from .blocks import EncoderDownsampleBlock
from .encoder import UnetEncoder, ResnetEncoder
from .bridge import UnetBridge
from .decoder import FPNDecoder
from .head import FPNHead
from .core import UnetSpec, ResnetUnetSpec


class FPNUnet(UnetSpec):

    def _get_components(
        self,
        encoder: UnetEncoder,
        num_classes: int,
        bridge_params: Dict,
        decoder_params: Dict,
        head_params: Dict,
    ):
        bridge = UnetBridge(
            in_channels=encoder.out_channels,
            in_strides=encoder.out_strides,
            out_channels=encoder.out_channels[-1] * 2,
            block_fn=EncoderDownsampleBlock,
            **bridge_params
        )
        decoder = FPNDecoder(
            in_channels=bridge.out_channels,
            in_strides=bridge.out_strides,
            **decoder_params
        )
        head = FPNHead(
            in_channels=decoder.out_channels,
            in_strides=decoder.out_strides,
            out_channels=num_classes,
            upsample_scale=decoder.out_strides[-1],
            interpolation_mode="bilinear",
            align_corners=True,
            **head_params
        )
        return encoder, bridge, decoder, head


class ResnetFPNUnet(ResnetUnetSpec):

    def _get_components(
        self,
        encoder: ResnetEncoder,
        num_classes: int,
        bridge_params: Dict,
        decoder_params: Dict,
        head_params: Dict,
    ):
        bridge = None
        decoder = FPNDecoder(
            in_channels=encoder.out_channels,
            in_strides=encoder.out_strides,
            **decoder_params
        )
        head = FPNHead(
            in_channels=decoder.out_channels,
            in_strides=decoder.out_strides,
            out_channels=num_classes,
            upsample_scale=decoder.out_strides[-1],
            interpolation_mode="bilinear",
            align_corners=True,
            **head_params
        )
        return encoder, bridge, decoder, head
28.323944
54
0.60915
acefc15164ad37efd7e257f232c5131d06890214
1,952
py
Python
tests/test_registry_local.py
cincanproject/cincan-registry
ec606acaf7920ce808398dbd0286a8ca8ea49928
[ "MIT" ]
null
null
null
tests/test_registry_local.py
cincanproject/cincan-registry
ec606acaf7920ce808398dbd0286a8ca8ea49928
[ "MIT" ]
null
null
null
tests/test_registry_local.py
cincanproject/cincan-registry
ec606acaf7920ce808398dbd0286a8ca8ea49928
[ "MIT" ]
null
null
null
import docker
import requests
from cincanregistry.daemon import DaemonRegistry
from cincanregistry.utils import parse_file_time
from unittest import mock
from .fake_instances import FAKE_IMAGE_ATTRS, TEST_REPOSITORY, FAKE_IMAGE, FAKE_IMAGE2, FAKE_IMAGE3


def test_get_version_by_image_id(mocker, config):
    mock_image = mock.Mock(spec=docker.models.images.Image)
    mock_image.attrs = FAKE_IMAGE_ATTRS
    reg = DaemonRegistry(configuration=config)
    reg.client = mock.Mock()
    mocker.patch.object(
        reg.client, "ping", return_value=True, autospec=True,
    )
    assert reg._is_docker_running()
    mocker.patch.object(
        reg.client.images,
        "get",
        return_value=mock_image,
        autospec=docker.models.images.Image,
        create=True,
        spec_set=True,
    )
    assert reg.get_version_by_image_id("test_id") == "1.0"


def test_create_local_tool_info_by_name(mocker, config):
    reg = DaemonRegistry(configuration=config)
    reg.client = mock.Mock()
    mocker.patch.object(
        reg.client,
        "ping",
        return_value=False,
        side_effect=requests.exceptions.ConnectionError(),
    )
    assert not reg.create_local_tool_info_by_name(TEST_REPOSITORY)
    mocker.patch.object(
        reg.client, "ping", return_value=True,
    )
    mocker.patch.object(reg.client.images, "list",
                        return_value=[FAKE_IMAGE, FAKE_IMAGE2, FAKE_IMAGE3],
                        create=True)
    tool_info = reg.create_local_tool_info_by_name(TEST_REPOSITORY)
    assert tool_info.name == TEST_REPOSITORY
    assert len(tool_info.versions) == 2
    assert tool_info.versions[0].version == "1.0"
    assert tool_info.versions[0].tags == {"cincan/test:latest"}
    assert tool_info.versions[0].size == "5.59 MB"
    assert tool_info.versions[0].updated == parse_file_time("2020-05-23T19:43:14.106177342Z")

    mocker.patch.object(reg.client.images, "list", return_value=[], create=True)
    assert not reg.create_local_tool_info_by_name(TEST_REPOSITORY)
39.836735
116
0.733607
acefc1c4658b9761d407e2287e7e2b1e1acf3b2a
1,832
py
Python
src/api/store/tag.py
massenergize/api
0df3368cb763e9160229f48138b7706a9d0569aa
[ "MIT" ]
2
2020-07-24T12:58:17.000Z
2020-12-17T02:26:13.000Z
src/api/store/tag.py
massenergize/api
0df3368cb763e9160229f48138b7706a9d0569aa
[ "MIT" ]
214
2019-06-26T17:33:54.000Z
2022-03-26T00:02:34.000Z
src/api/store/tag.py
massenergize/api
0df3368cb763e9160229f48138b7706a9d0569aa
[ "MIT" ]
6
2020-03-13T20:29:06.000Z
2021-08-20T16:15:08.000Z
from database.models import Tag, UserProfile
from _main_.utils.massenergize_errors import MassEnergizeAPIError, InvalidResourceError, ServerError, CustomMassenergizeError
from _main_.utils.massenergize_response import MassenergizeResponse
from _main_.utils.context import Context
from sentry_sdk import capture_message
from typing import Tuple

class TagStore:
  def __init__(self):
    self.name = "Tag Store/DB"

  def get_tag_info(self, tag_id) -> Tuple[dict, MassEnergizeAPIError]:
    tag = Tag.objects.filter(id=tag_id)
    if not tag:
      return None, InvalidResourceError()
    return tag, None

  def list_tags(self, community_id) -> Tuple[list, MassEnergizeAPIError]:
    tags = Tag.objects.filter(community__id=community_id)
    if not tags:
      return [], None
    return tags, None

  def create_tag(self, args) -> Tuple[dict, MassEnergizeAPIError]:
    try:
      # fixed: the original called Tag.objects.objects.create(**args),
      # which would raise an AttributeError
      new_tag = Tag.objects.create(**args)
      new_tag.save()
      return new_tag, None
    except Exception:
      return None, ServerError()

  def update_tag(self, tag_id, args) -> Tuple[dict, MassEnergizeAPIError]:
    tag = Tag.objects.filter(id=tag_id)
    if not tag:
      return None, InvalidResourceError()
    tag.update(**args)
    return tag, None

  def delete_tag(self, tag_id) -> Tuple[dict, MassEnergizeAPIError]:
    tags = Tag.objects.filter(id=tag_id)
    if not tags:
      return None, InvalidResourceError()
    tags.delete()
    # return a (result, error) pair like the other methods do
    return tags.first(), None

  def list_tags_for_community_admin(self, community_id) -> Tuple[list, MassEnergizeAPIError]:
    return self.list_tags_for_super_admin()

  def list_tags_for_super_admin(self):
    try:
      tags = Tag.objects.all()
      return tags, None
    except Exception as e:
      capture_message(str(e), level="error")
      return None, CustomMassenergizeError(str(e))
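A minimal sketch of how a caller might consume the (result, error) pairs this store returns; the community id is an illustrative assumption:

store = TagStore()

tags, err = store.list_tags(community_id=1)
if err:
    # every method reports failure through the second slot instead of raising
    print("lookup failed:", err)
else:
    for tag in tags:
        print(tag)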
29.548387
125
0.721616
acefc1d215b2843310e885b4b9027d62ae05a73f
4,466
py
Python
moe/tests/optimal_learning/python/cpp_wrappers/expected_improvement_test.py
dstoeckel/MOE
5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c
[ "Apache-2.0" ]
966
2015-01-10T05:27:30.000Z
2022-03-26T21:04:36.000Z
moe/tests/optimal_learning/python/cpp_wrappers/expected_improvement_test.py
dstoeckel/MOE
5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c
[ "Apache-2.0" ]
46
2015-01-16T22:33:08.000Z
2019-09-04T16:33:27.000Z
moe/tests/optimal_learning/python/cpp_wrappers/expected_improvement_test.py
dstoeckel/MOE
5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c
[ "Apache-2.0" ]
143
2015-01-07T03:57:19.000Z
2022-02-28T01:10:45.000Z
# -*- coding: utf-8 -*-
"""Test the C++ implementation of expected improvement against the Python implementation."""
import numpy

import pytest

import moe.optimal_learning.python.cpp_wrappers.covariance
import moe.optimal_learning.python.cpp_wrappers.expected_improvement
import moe.optimal_learning.python.cpp_wrappers.gaussian_process
from moe.optimal_learning.python.geometry_utils import ClosedInterval
import moe.optimal_learning.python.python_version.covariance
import moe.optimal_learning.python.python_version.domain
import moe.optimal_learning.python.python_version.expected_improvement
import moe.optimal_learning.python.python_version.gaussian_process
from moe.tests.optimal_learning.python.gaussian_process_test_case import GaussianProcessTestCase, GaussianProcessTestEnvironmentInput


class TestExpectedImprovement(GaussianProcessTestCase):

    """Test C++ vs Python implementations of Expected Improvement.

    Currently only checks that the 1D, analytic EI & gradient match.
    Checking monte carlo would be very expensive (b/c of the need to converge the MC)
    or very difficult (to make python & C++ use the exact same sequence of random numbers).

    """

    precompute_gaussian_process_data = True

    noise_variance_base = 0.0002
    dim = 3
    num_hyperparameters = dim + 1

    gp_test_environment_input = GaussianProcessTestEnvironmentInput(
        dim,
        num_hyperparameters,
        0,
        noise_variance_base=noise_variance_base,
        hyperparameter_interval=ClosedInterval(0.1, 0.3),
        lower_bound_interval=ClosedInterval(-1.0, 0.5),
        upper_bound_interval=ClosedInterval(2.0, 3.5),
        covariance_class=moe.optimal_learning.python.python_version.covariance.SquareExponential,
        spatial_domain_class=moe.optimal_learning.python.python_version.domain.TensorProductDomain,
        hyperparameter_domain_class=moe.optimal_learning.python.python_version.domain.TensorProductDomain,
        gaussian_process_class=moe.optimal_learning.python.python_version.gaussian_process.GaussianProcess,
    )

    num_sampled_list = (1, 2, 5, 10, 16, 20, 42, 50)

    @classmethod
    @pytest.fixture(autouse=True, scope='class')
    def base_setup(cls):
        """Run the standard setup but seed the RNG first (for repeatability).

        It is easy to stumble into test cases where EI is very small (e.g., < 1.e-20),
        which makes it difficult to set meaningful tolerances for the checks.
""" numpy.random.seed(8794) super(TestExpectedImprovement, cls).base_setup() def test_python_and_cpp_return_same_1d_analytic_ei_and_gradient(self): """Compare the 1D analytic EI/grad EI results from Python & C++, checking several random points per test case.""" num_tests_per_case = 10 ei_tolerance = 6.0e-14 # TODO(GH-240): set RNG seed for this case and restore toleranace to 6.0e-14 or better grad_ei_tolerance = 6.0e-13 for test_case in self.gp_test_environments: domain, python_gp = test_case python_cov, historical_data = python_gp.get_core_data_copy() points_to_sample = domain.generate_random_point_in_domain() python_ei_eval = moe.optimal_learning.python.python_version.expected_improvement.ExpectedImprovement(python_gp, points_to_sample) cpp_cov = moe.optimal_learning.python.cpp_wrappers.covariance.SquareExponential(python_cov.hyperparameters) cpp_gp = moe.optimal_learning.python.cpp_wrappers.gaussian_process.GaussianProcess(cpp_cov, historical_data) cpp_ei_eval = moe.optimal_learning.python.cpp_wrappers.expected_improvement.ExpectedImprovement(cpp_gp, points_to_sample) for _ in xrange(num_tests_per_case): points_to_sample = domain.generate_random_point_in_domain() cpp_ei_eval.current_point = points_to_sample python_ei_eval.current_point = points_to_sample cpp_ei = cpp_ei_eval.compute_expected_improvement() python_ei = python_ei_eval.compute_expected_improvement(force_1d_ei=True) self.assert_scalar_within_relative(python_ei, cpp_ei, ei_tolerance) cpp_grad_ei = cpp_ei_eval.compute_grad_expected_improvement() python_grad_ei = python_ei_eval.compute_grad_expected_improvement() self.assert_vector_within_relative(python_grad_ei, cpp_grad_ei, grad_ei_tolerance)
48.543478
141
0.751679
acefc20d0e6335f45cb7b8ba7bccb8250cd3fa93
426
py
Python
excel_sheet_column_number.py
tusharsadhwani/leetcode
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
[ "MIT" ]
6
2021-05-21T01:10:42.000Z
2021-12-16T16:12:30.000Z
excel_sheet_column_number.py
tusharsadhwani/leetcode
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
[ "MIT" ]
null
null
null
excel_sheet_column_number.py
tusharsadhwani/leetcode
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
[ "MIT" ]
null
null
null
import string


class Solution:
    def titleToNumber(self, columnTitle: str) -> int:
        total = 0
        for char in columnTitle:
            total = total * 26 + (1 + string.ascii_uppercase.index(char))
        return total


tests = [
    (
        ('A',),
        1,
    ),
    (
        ('AB',),
        28,
    ),
    (
        ('ZY',),
        701,
    ),
    (
        ('FXSHRXW',),
        2147483647,
    ),
]
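The tests list above ships without a runner; a minimal sketch of one follows. The conversion is plain base-26 with digits 1..26: 'AB' -> 1*26 + 2 = 28, and 'ZY' -> 26*26 + 25 = 701.

if __name__ == "__main__":
    solution = Solution()
    for args, expected in tests:
        result = solution.titleToNumber(*args)
        assert result == expected, f"{args}: got {result}, expected {expected}"
    print(f"all {len(tests)} cases pass")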
14.2
73
0.410798
acefc2dc874459ff95cc41ab2c241245b695eadd
3,755
py
Python
tests/vegeta_stress.py
wintersteiger/CCF
27febea56ff6923fbc02231de06e432b4f6b2511
[ "Apache-2.0" ]
null
null
null
tests/vegeta_stress.py
wintersteiger/CCF
27febea56ff6923fbc02231de06e432b4f6b2511
[ "Apache-2.0" ]
47
2021-11-15T06:19:01.000Z
2022-03-30T06:24:22.000Z
tests/vegeta_stress.py
beejones/CCF
335fc3613c2dd4a3bda38e10e8e8196dba52465e
[ "Apache-2.0" ]
1
2021-11-08T09:33:34.000Z
2021-11-08T09:33:34.000Z
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.

import infra.network
import infra.e2e_args
import subprocess
import threading
import time
import generate_vegeta_targets as TargetGenerator

from loguru import logger as LOG

VEGETA_BIN = "/opt/vegeta/vegeta"


def print_memory_stats(node, shutdown_event):
    with node.client() as c:
        while not shutdown_event.is_set():
            r = c.get("/node/memory")
            LOG.warning(r.body.json())
            time.sleep(10)


def run(args, additional_attack_args):
    # Test that vegeta is available
    subprocess.run([VEGETA_BIN, "-version"], capture_output=True, check=True)

    with infra.network.network(
        args.nodes,
        args.binary_dir,
        args.debug_nodes,
        args.perf_nodes,
        pdb=args.pdb,
    ) as network:
        network.start_and_join(args)

        primary, _ = network.find_primary()
        primary_hostname = f"{primary.pubhost}:{primary.pubport}"

        vegeta_targets = "vegeta_targets"
        with open(vegeta_targets, "w", encoding="utf-8") as f:
            for i in range(10):
                TargetGenerator.write_vegeta_target_line(
                    f,
                    primary_hostname,
                    "/app/log/private",
                    body={"id": i, "msg": f"Private message: {i}"},
                )

            for i in range(10):
                TargetGenerator.write_vegeta_target_line(
                    f, primary_hostname, f"/app/log/private?id={i}", method="GET"
                )

            for i in range(10):
                TargetGenerator.write_vegeta_target_line(
                    f,
                    primary_hostname,
                    "/app/log/public",
                    body={"id": i, "msg": f"Public message: {i}"},
                )

            for i in range(10):
                TargetGenerator.write_vegeta_target_line(
                    f, primary_hostname, f"/app/log/public?id={i}", method="GET"
                )

        attack_cmd = [VEGETA_BIN, "attack"]
        attack_cmd += ["--targets", vegeta_targets]
        attack_cmd += ["--format", "json"]
        attack_cmd += ["--duration", "10s"]
        sa = primary.session_auth("user0")
        attack_cmd += ["--cert", sa["session_auth"].cert]
        attack_cmd += ["--key", sa["session_auth"].key]
        attack_cmd += ["--root-certs", primary.session_ca(False)["ca"]]
        attack_cmd += additional_attack_args
        attack_cmd_s = " ".join(attack_cmd)
        LOG.warning(f"Starting: {attack_cmd_s}")
        vegeta_run = subprocess.Popen(attack_cmd, stdout=subprocess.PIPE)

        tee_split = subprocess.Popen(
            ["tee", "vegeta_results.bin"],
            stdin=vegeta_run.stdout,
            stdout=subprocess.PIPE,
        )

        report_cmd = [VEGETA_BIN, "report", "--every", "5s"]
        vegeta_report = subprocess.Popen(report_cmd, stdin=tee_split.stdout)

        # Start a second thread which will print the primary's memory stats at regular intervals
        shutdown_event = threading.Event()
        memory_thread = threading.Thread(
            target=print_memory_stats, args=(primary, shutdown_event)
        )
        memory_thread.start()

        LOG.info("Waiting for completion...")
        vegeta_report.communicate()
        LOG.info("Shutting down...")
        shutdown_event.set()
        memory_thread.join()
        LOG.success("Done!")


if __name__ == "__main__":

    def add(parser):
        pass

    args, unknown_args = infra.e2e_args.cli_args(add=add, accept_unknown=True)
    args.package = "samples/apps/logging/liblogging"
    args.nodes = infra.e2e_args.min_nodes(args, f=1)
    run(args, unknown_args)
32.652174
96
0.588282
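vegeta_stress.py wires three processes into a shell-style pipeline (vegeta attack | tee vegeta_results.bin | vegeta report) by handing each Popen's stdout to the next one's stdin and waiting only on the last stage. A minimal sketch of the same pattern; seq/tee/sort are placeholder POSIX commands standing in for the vegeta binaries:

import subprocess

producer = subprocess.Popen(["seq", "1", "5"], stdout=subprocess.PIPE)
tee = subprocess.Popen(["tee", "copy.txt"], stdin=producer.stdout, stdout=subprocess.PIPE)
consumer = subprocess.Popen(["sort", "-r"], stdin=tee.stdout)
# Close our copies of the pipe ends so only the children hold them.
producer.stdout.close()
tee.stdout.close()
consumer.communicate()  # block until the last stage drains, as the script does with vegeta_report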
acefc3f2e20e04b4366383dc6c1e95a4ce885cb3
3,991
py
Python
open_discussions/envs_test.py
mitodl/open-discussions
ab6e9fac70b8a1222a84e78ba778a7a065c20541
[ "BSD-3-Clause" ]
12
2017-09-27T21:23:27.000Z
2020-12-25T04:31:30.000Z
open_discussions/envs_test.py
mitodl/open-discussions
ab6e9fac70b8a1222a84e78ba778a7a065c20541
[ "BSD-3-Clause" ]
3,293
2017-06-30T18:16:01.000Z
2022-03-31T18:01:34.000Z
open_discussions/envs_test.py
mitodl/open-discussions
ab6e9fac70b8a1222a84e78ba778a7a065c20541
[ "BSD-3-Clause" ]
1
2020-04-13T12:19:57.000Z
2020-04-13T12:19:57.000Z
"""Tests for environment variable parsing functions""" from unittest.mock import patch import pytest from open_discussions.envs import ( EnvironmentVariableParseException, get_any, get_bool, get_int, get_string, get_list_of_str, ) FAKE_ENVIRONS = { "true": "True", "false": "False", "positive": "123", "negative": "-456", "zero": "0", "float": "1.1", "expression": "123-456", "none": "None", "string": "a b c d e f g", "list_of_int": "[3,4,5]", "list_of_str": '["x", "y", \'z\']', } def test_get_any(): """ get_any should parse an environment variable into a bool, int, or a string """ expected = { "true": True, "false": False, "positive": 123, "negative": -456, "zero": 0, "float": "1.1", "expression": "123-456", "none": "None", "string": "a b c d e f g", "list_of_int": "[3,4,5]", "list_of_str": '["x", "y", \'z\']', } with patch("open_discussions.envs.os", environ=FAKE_ENVIRONS): for key, value in expected.items(): assert get_any(key, "default") == value assert get_any("missing", "default") == "default" def test_get_string(): """ get_string should get the string from the environment variable """ with patch("open_discussions.envs.os", environ=FAKE_ENVIRONS): for key, value in FAKE_ENVIRONS.items(): assert get_string(key, "default") == value assert get_string("missing", "default") == "default" assert get_string("missing", "default") == "default" def test_get_int(): """ get_int should get the int from the environment variable, or raise an exception if it's not parseable as an int """ with patch("open_discussions.envs.os", environ=FAKE_ENVIRONS): assert get_int("positive", 1234) == 123 assert get_int("negative", 1234) == -456 assert get_int("zero", 1234) == 0 for key, value in FAKE_ENVIRONS.items(): if key not in ("positive", "negative", "zero"): with pytest.raises(EnvironmentVariableParseException) as ex: get_int(key, 1234) assert ex.value.args[ 0 ] == "Expected value in {key}={value} to be an int".format( key=key, value=value ) assert get_int("missing", "default") == "default" def test_get_bool(): """ get_bool should get the bool from the environment variable, or raise an exception if it's not parseable as a bool """ with patch("open_discussions.envs.os", environ=FAKE_ENVIRONS): assert get_bool("true", 1234) is True assert get_bool("false", 1234) is False for key, value in FAKE_ENVIRONS.items(): if key not in ("true", "false"): with pytest.raises(EnvironmentVariableParseException) as ex: get_bool(key, 1234) assert ex.value.args[ 0 ] == "Expected value in {key}={value} to be a boolean".format( key=key, value=value ) assert get_int("missing", "default") == "default" def test_get_list_of_str(): """ get_list_of_str should parse a list of strings """ with patch("open_discussions.envs.os", environ=FAKE_ENVIRONS): assert get_list_of_str("list_of_str", ["noth", "ing"]) == ["x", "y", "z"] for key, value in FAKE_ENVIRONS.items(): if key != "list_of_str": with pytest.raises(EnvironmentVariableParseException) as ex: get_list_of_str(key, ["noth", "ing"]) assert ex.value.args[ 0 ] == "Expected value in {key}={value} to be a list of str".format( key=key, value=value ) assert get_list_of_str("missing", "default") == "default"
31.674603
117
0.559258
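The tests above pin down get_any's coercion rules without showing the implementation: "True"/"False" become booleans, strings that parse as integers become ints, and everything else (including "1.1" and "123-456") stays a string. A sketch consistent with those tests; the real open_discussions.envs module is not shown here, so treat this as an assumption:

import os

def get_any(key, default):
    # Sketch only: behaviour inferred from test_get_any above.
    if key not in os.environ:
        return default
    value = os.environ[key]
    if value == "True":
        return True
    if value == "False":
        return False
    try:
        return int(value)  # "1.1" and "123-456" raise ValueError and fall through as strings
    except ValueError:
        return value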
acefc402cd2fa36f565106c0f4e1f060a095488e
3,971
py
Python
web_project/Board/views.py
nosy0411/Object_Oriented_Programming
e6713b5131c125ac50814d375057f06da43e958e
[ "MIT" ]
null
null
null
web_project/Board/views.py
nosy0411/Object_Oriented_Programming
e6713b5131c125ac50814d375057f06da43e958e
[ "MIT" ]
null
null
null
web_project/Board/views.py
nosy0411/Object_Oriented_Programming
e6713b5131c125ac50814d375057f06da43e958e
[ "MIT" ]
null
null
null
from django.shortcuts import render, get_object_or_404, redirect from .models import Post from .forms import PostForm from django.utils import timezone # pulls in Django's built-in user model from django.contrib.auth import get_user_model from django.contrib.auth.decorators import login_required # Create your views here. @login_required def board(request, pg): user = request.user talkable = True if user.handle.skku: if user.handle.line_t.all().filter(alive=True): talkable = False else: if user.handle.line_s.all().filter(alive=True): talkable = False keyw = request.GET.get('keyword', '') posts = Post.objects.all().order_by('date').reverse().filter(content__contains=keyw) page_count = int(posts.count()/10)+1 po = [] onl = timezone.now().today() pag = 0 if keyw == '': # no page split when searching by keyword for p in posts: p.page = int(pag / 10) + 1 p.save() pag += 1 posts = posts.filter(page=pg) else: page_count = 1 for p in posts: p.page = 1 p.save() for p in posts: if p.date.date().year == onl.year and p.date.date().month == onl.month and p.date.date().day == onl.day: po.append([(p.date.strftime('%H:%M')), p]) else: po.append([(p.date.strftime('%y-%m-%d')), p]) return render(request, 'Board/post_list.html', {'po': po, 'user': user, 'talkable': talkable,'allpage': range(1, page_count+1)}) @login_required def new_post(request): user = request.user talkable = True if user.handle.skku: if user.handle.line_t.all().filter(alive=True): talkable = False else: if user.handle.line_s.all().filter(alive=True): talkable = False if request.method == "POST": form = PostForm(request.POST) if form.is_valid(): post = form.save(commit=False) post.author = request.user post.date = timezone.now() post.state = "대기" # state value meaning "pending" post.save() return redirect('br', pg=1) else: form = PostForm() return render(request, 'Board/post_edit.html', {'form': form, 'user': user, 'talkable': talkable}) @login_required def my_posts(request): user = request.user talkable = True if user.handle.skku: if user.handle.line_t.all().filter(alive=True): talkable = False else: if user.handle.line_s.all().filter(alive=True): talkable = False posts = Post.objects.all().order_by('date').reverse().filter(author=user) po = [] onl = timezone.now().today() for p in posts: if p.date.date().year == onl.year and p.date.date().month == onl.month and p.date.date().day == onl.day: po.append([(p.date.strftime('%H:%M')), p]) else: po.append([(p.date.strftime('%y-%m-%d')), p]) return render(request, 'Board/my_post_list.html', {'po': po, 'user': user, 'talkable': talkable}) @login_required def edit_post(request, pk): user = request.user talkable = True if user.handle.skku: if user.handle.line_t.all().filter(alive=True): talkable = False else: if user.handle.line_s.all().filter(alive=True): talkable = False post = get_object_or_404(Post, pk=pk) if request.method == "POST": form = PostForm(request.POST, instance=post) if form.is_valid(): post = form.save(commit=False) post.author = request.user post.date = timezone.now() post.save() return redirect('my_post') else: form = PostForm(instance=post) return render(request, 'Board/post_edit.html', {'form': form, 'user': user, 'talkable': talkable}) @login_required def del_post(request, pk): post = get_object_or_404(Post, pk=pk) post.delete() return redirect('my_post') # TODO: add a confirmation prompt in the HTML before deleting
34.232759
132
0.592546
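All four views in the record above recompute the same `talkable` flag from the user's handle. A hedged refactor sketch the views could share, assuming the handle/line_t/line_s relations used above:

def is_talkable(user):
    # A user with a live line cannot start a new talk; mirrors the repeated check in each view.
    lines = user.handle.line_t if user.handle.skku else user.handle.line_s
    return not lines.filter(alive=True).exists()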
acefc46bc7fe59577992541dc4262104deabb9d1
30,024
py
Python
DS_ETL/bin/py3/Standardization_20210112.py
joonwoo8395/joonwoo
b84a7881a87424bd1557bf83c7a71da295b83ecb
[ "MIT" ]
null
null
null
DS_ETL/bin/py3/Standardization_20210112.py
joonwoo8395/joonwoo
b84a7881a87424bd1557bf83c7a71da295b83ecb
[ "MIT" ]
null
null
null
DS_ETL/bin/py3/Standardization_20210112.py
joonwoo8395/joonwoo
b84a7881a87424bd1557bf83c7a71da295b83ecb
[ "MIT" ]
null
null
null
#!/bin/env python3 # -*- coding: utf-8 -*- #- needed import ---------------------------------------------------- #import $PYTHON_LIB$ import os import sys import time import datetime import signal import subprocess import glob import configparser as ConfigParser import csv import copy import json #import $MOBIGEN_LIB$ import Mobigen.Common.Log_PY3 as Log; Log.Init() import Mobigen.Utils.FileProgress_py3 as FileProgress import Mobigen.Utils.LogClient as SendLog #import $PROJECT_LIB$ #- shutdown ---------------------------------------------------- SHUTDOWN = False def shutdown(signalnum, handler): global SHUTDOWN SHUTDOWN = True sys.stderr.write('Catch Signal: %s \n' % signalnum) sys.stderr.flush() signal.signal(signal.SIGTERM, shutdown) # sigNum 15 : Terminate signal.signal(signal.SIGINT, shutdown) # sigNum 2 : Keyboard Interrupt signal.signal(signal.SIGHUP, shutdown) # sigNum 1 : Hangup detected signal.signal(signal.SIGPIPE, shutdown) # sigNum 13 : Broken Pipe ''' On Windows, signal() can only be called with SIGABRT, SIGFPE,SIGILL, SIGINT, SIGSEGV, or SIGTERM. A ValueError will be raised in any other case. ''' #- def global setting ---------------------------------------------------- def stderr(msg) : sys.stderr.write(msg + '\n') sys.stderr.flush() __LOG__.Trace('Std ERR : %s' % msg) def stdout(msg) : sys.stdout.write(msg + '\n') sys.stdout.flush() __LOG__.Trace('Std OUT : %s' % msg) def makedirs(path) : try : os.makedirs(path) __LOG__.Trace( path ) except : pass #- Class ---------------------------------------------------- class Standardization: def __init__(self, conf, debug_mode, log_arg) : section = 'STANDARD' self.debug_mode = debug_mode self.log_arg = log_arg self.center_id = conf.get('GENERAL', 'CENTER_ID') self.center_name = conf.get('GENERAL', 'CENTER_NAME') self.ref_path = conf.get(section, 'REF_PATH') self.ref_column_name = conf.get(section, 'REF_COLUMN_NAME') self.ref_code_name = conf.get(section, 'REF_CODE_NAME') #self.ref_hdong_name = conf.get(section, 'REF_HDONG_NAME') #self.ref_hbdong_name = conf.get(section, 'REF_HBDONG_NAME') self.ref_sep = conf.get(section, 'REF_SEP') self.date_column_name_list = conf.get(section, 'DATE_COLUMN_NAME').split(';') self.date_column_length_list = conf.get(section, 'DATE_COLUMN_LENGTH').split(';') if len(self.date_column_name_list) != len(self.date_column_length_list) : raise Exception('Please check configure [DATE_COLUMN_NAME|DATE_COLUMN_LENGTH]') self.json_path = conf.get(section, 'JSON_PATH') self.leave_error_log = conf.getint(section, 'LEAVE_ERROR_LOG') try: self.csv_read_sep = conf.get(section, 'CSV_READ_SEP') except: self.csv_read_sep = ',' self.colum_file_ctime = None self.code_file_ctime = None #self.hdong_file_ctime = None #self.hbdong_file_ctime = None self.get_colum_dict() self.get_code_dict() self.SendLogInit() def __del__(self): pass def SendLogInit(self) : self.center_name = self.center_name self.center_id = self.center_id self.start_time = '' self.std_in = '' self.in_file_size = '' self.in_file_row_cnt = '' self.result_flag = '' self.success_cnt = '' self.fail_reason = '' self.header_cnt = '' self.comp_row_cnt = '' self.error_column_length = '' self.error_check_notnull = '' self.error_check_type_legth = '' self.error_check_format = '' self.error_change_cont = '' self.product_cd = '' self.product_nm = '' self.dataset_cd = '' self.ps_duration = '' self.min_period = '' self.max_period = '' self.fail_type = '' # def dump_dict(self, dicts, path): # # try: # with open(path, 'w') as fd: # json.dump(dicts, fd) # # 
__LOG__.Trace("dump_dict : %s " % (path)) # # except: # __LOG__.Exception() # def get_lastest_file(self, glob_name): list_of_files = glob.glob(glob_name) #latest_file = max(list_of_files, key=os.path.getctime) latest_file = max(list_of_files) #latest_file_ctime = os.path.getctime(latest_file) latest_file_ctime = os.path.basename(latest_file).rsplit('_',1)[-1].split('.')[0] return [ latest_file, latest_file_ctime ] def get_colum_dict(self): file_name, file_ctime = self.get_lastest_file( os.path.join(self.ref_path, '%s*.dat' % self.ref_column_name) ) if self.colum_file_ctime == None or file_ctime > self.colum_file_ctime: __LOG__.Trace("!! NEW === COLUMN REF FILE CHANGED [ %s ]" % file_name) self.colum_file_ctime = file_ctime self.dataset_dict = {} self.table_dict = {} self.colum_dict = {} self.table_header_dict = {} self.new_table_header_dict = {} self.column_func_dict = {} line_count = 0 with open(file_name) as fd: for line in fd: line_list = line.strip().split( self.ref_sep ) if line_count == 0 : header_list = line_list line_count += 1 else: #CNTR_NM^CNTR_ID^PRD_CD^PRD_NM^PREV_TB_NM^DTST_CD^NW_TB_NM^IDX^PREV_CLMN_NM^KOR_CLMN_NM^NW_CLMN_NM^DMN_LRG_CTGRY^DMN_MDDL_CTGRY^ #KOR_DMN_NM^TYPE_LNTH^DATA_FRMT^NOTNULL_YN^MINM_VL^MAX_VL^XCPTN_VL^CLMN_FUNC^DATA_CONT_FUNC^RMKS^CREATE_TIME^UPDATE_TIME if line_list[header_list.index('CNTR_ID')].upper().strip() == self.center_id.upper().strip(): #try: self.dataset_dict.setdefault(line_list[header_list.index('DTST_CD')].upper().strip(), \ [ line_list[header_list.index('PRD_CD')].upper().strip(), line_list[header_list.index('PRD_NM')].strip()] ) self.table_dict.setdefault(line_list[header_list.index('DTST_CD')].upper().strip(), \ [ line_list[header_list.index('PRD_CD')].upper().strip(), line_list[header_list.index('NW_TB_NM')].upper().strip() ] ) self.colum_dict.setdefault(line_list[header_list.index('DTST_CD')].upper().strip(), {}).setdefault( line_list[header_list.index('PREV_CLMN_NM')].upper().strip(), \ [ line_list[header_list.index('TYPE_LNTH')].upper().strip(), \ line_list[header_list.index('NOTNULL_YN')].upper().strip(), \ line_list[header_list.index('DATA_FRMT')].strip(), \ line_list[header_list.index('DATA_CONT_FUNC')].strip(), \ line_list[header_list.index('MINM_VL')].strip(), \ line_list[header_list.index('MAX_VL')].strip(), \ line_list[header_list.index('XCPTN_VL')].strip() \ ]) self.table_header_dict.setdefault(line_list[header_list.index('DTST_CD')].upper().strip(), []).append(\ [int(line_list[header_list.index('IDX')].strip()), line_list[header_list.index('PREV_CLMN_NM')].upper().strip()]) self.new_table_header_dict.setdefault(line_list[header_list.index('DTST_CD')].upper().strip(), []).append(\ [int(line_list[header_list.index('IDX')].strip()), line_list[header_list.index('NW_CLMN_NM')].upper().strip()]) self.column_func_dict.setdefault(line_list[header_list.index('DTST_CD')].upper().strip(), []).append(\ [int(line_list[header_list.index('IDX')].strip()), line_list[header_list.index('CLMN_FUNC')].strip()]) #except: # __LOG__.Exception() # __LOG__.Trace(line) #__LOG__.Watch(self.table_dict) #self.dump_dict( self.table_dict, os.path.join( self.json_path, 'sd_table_dict%s.json' % self.log_arg)) #__LOG__.Watch(self.colum_dict) #self.dump_dict( self.colum_dict, os.path.join( self.json_path, 'sd_colum_dict%s.json' % self.log_arg)) for keys in self.table_header_dict: self.table_header_dict[keys] = [ x[1] for x in sorted(self.table_header_dict[keys], key=lambda x:x[0]) ] #__LOG__.Watch(self.table_header_dict) #self.dump_dict( self.table_header_dict, 
os.path.join( self.json_path, 'sd_table_header_dict%s.json' % self.log_arg)) for keys in self.new_table_header_dict: self.new_table_header_dict[keys] = [ x[1] for x in sorted(self.new_table_header_dict[keys], key=lambda x:x[0]) ] #__LOG__.Watch(self.new_table_header_dict) #self.dump_dict( self.new_table_header_dict, os.path.join( self.json_path, 'sd_new_table_header_dict%s.json' % self.log_arg)) for keys in self.column_func_dict: self.column_func_dict[keys] = [ x[1] for x in sorted(self.column_func_dict[keys], key=lambda x:x[0]) ] #__LOG__.Watch(self.column_func_dict) #self.dump_dict( self.column_func_dict, os.path.join( self.json_path, 'sd_column_func_dict%s.json' % self.log_arg)) def get_code_dict(self): afile_name, afile_ctime = self.get_lastest_file( os.path.join(self.ref_path, '%s*.dat' % self.ref_code_name) ) if self.code_file_ctime == None or afile_ctime > self.code_file_ctime: self.code_dict = {} __LOG__.Trace("!!! NEW === CODE REF FILE CHANGED [ %s ]" % afile_name) self.code_file_ctime = afile_ctime line_count = 0 with open(afile_name) as fd: for line in fd: line_list = line.strip().split( self.ref_sep ) if line_count == 0 : header_list = line_list line_count += 1 else: #CNTR_ID^CNTR_NM^CD_NM^CD_VL^CD_KOR_MNNG^CD_ENG_MNNG^CD_EXPLN^CREATE_TIME^UPDATE_TIME if line_list[header_list.index('CNTR_ID')].upper() == self.center_id.upper() or line_list[header_list.index('CNTR_ID')].upper() == 'CM' : self.code_dict.setdefault(line_list[header_list.index('CNTR_ID')].upper(), {}).setdefault(line_list[header_list.index('CD_NM')], {}).setdefault(\ line_list[header_list.index('CD_VL')], line_list[header_list.index('CD_KOR_MNNG')] ) #self.dump_dict( self.code_dict, os.path.join( self.json_path, 'sd_code_dict%s.json' % self.log_arg)) def check_type_legth( self, in_data, ty_len): result = False try: types, length = ty_len.split('|') # 글자일때 if types.lower() == 'text': if len(in_data) <= int(length) : result = True # 정수일때 elif types.lower() == 'integer': if in_data.startswith('-'): in_data = in_data[1:] if type(eval(in_data)).__name__ == 'int' and len(in_data) <= int(length) : result = True # 소수점 있을때 elif types.lower() == 'real' : if in_data.startswith('-'): in_data = in_data[1:] tot_len, decimal_place = length.split(',') if type(eval(in_data)).__name__ == 'float' and len(in_data.split('.')[0]) <= int(tot_len) - int(decimal_place) and len(in_data.split('.')[1]) <= int(decimal_place) : result = True #else: # raise Exception('TYPES ERROR : types must in [integer/text/real] : %s' % types) except SyntaxError: pass #except: # __LOG__.Exception() #if not result : __LOG__.Watch([in_data, ty_len]) return result def check_notnull( self, in_data, notnull ): result = False #try: if notnull.lower() in ( 'not null', 'notnull', 'y' ): if len(in_data) > 0 : result = True elif notnull.lower() in ( '', 'n' ): result = True #except: #__LOG__.Exception() #if not result : __LOG__.Watch([in_data, notnull]) return result def check_format( self, in_data, formats ): result = False if formats.lower() == 'yyyymm': try: datetime.datetime.strptime(in_data,'%Y%m') result = True except: pass #__LOG__.Exception() elif formats.lower() == 'yyyymmdd': try: datetime.datetime.strptime(in_data,'%Y%m%d') result = True except: pass #__LOG__.Exception() elif formats.lower() == 'yyyymmddhhmm': try: datetime.datetime.strptime(in_data,'%Y%m%d%H%M') result = True except: pass #__LOG__.Exception() elif formats.lower() == 'yyyymmddhhmmss': try: datetime.datetime.strptime(in_data,'%Y%m%d%H%M%S') result = True except: pass #__LOG__.Exception() elif 
formats.lower() == 'yyyymmddhhmmss.ffffff': try: datetime.datetime.strptime(in_data,'%Y%m%d%H%M%S.%f') result = True except: pass #__LOG__.Exception() elif formats.lower() == 'hhmm': try: datetime.datetime.strptime(in_data,'%H%M') result = True except: pass #__LOG__.Exception() elif formats.lower().startswith('in_'): key_list = formats.lower().split('in_')[1].split(',') if in_data.lower() not in key_list : result = False else: result = True elif formats.lower().startswith('code_'): key = formats.split('code_')[1] try: if in_data in self.code_dict['CM'][key].keys(): result = True except: try: if 'NOT CHECK' in self.code_dict[self.center_id][key].keys() : result = True elif in_data in self.code_dict[self.center_id][key].keys() : result = True except: pass #__LOG__.Exception() #if not result : __LOG__.Watch([in_data, formats]) return result def change_data_cont( self, in_data, change_rule): #표준화 CM으로 치환 def change_to_cmcode(in_data, cm_name): #self.code_dict <'dict> = {'KC': { '성별구분코드': {'1': '남성', '2': '여성'} , CM': {'성별구분코드': {'M': '남성', 'F': '여성'}}} in_meaning = self.code_dict[self.center_id][cm_name][in_data] return next(( k for k, v in self.code_dict['CM'][cm_name].items() if v == in_meaning), None) def codemean_to_codekey(in_data, code_name): if in_data in self.code_dict[self.center_id][code_name] : return in_data else: return next((k for k, v in self.code_dict[self.center_id][code_name].items() if v == in_data), None) #행정동코드 자리수 맞추기 = 뒤에서 부터 0 채우기 def op_zfill(in_data, length): if in_data == '99999999': return in_data + '9'*( length - len(in_data)) else: return in_data + '0'*( length - len(in_data)) def zfill(in_data, length): return in_data.zfill(length) def strstrip(in_data): return in_data.strip() def division(in_data, length): return float(in_data)/length def image_to_alphabet(in_data): if in_data == '○' : return 'N' elif in_data == '●' : return 'Y' elif in_data == '-' : return 'X' elif in_data == '.' : return 'E' else: return in_data def letter_change(in_data, obj, obj_true): if obj == '' : obj = 'None' if in_data == obj: return obj_true else: return in_data #리눅스timestamp to date def timestamp_to_dt(in_data): return datetime.datetime.fromtimestamp(int(in_data)).strftime('%Y%m%d%H%M%S') #a = datetime.datetime.fromtimestamp(int(in_data)).strftime('%Y%m%d%H%M%S') #if not a.startswith('202006'): __LOG__.Trace("!!!ERROR!!!!!: %s %s" % (in_data, a)) #return a def tran_to_decimal(in_data): try: if type(eval(in_data)).__name__ != 'float': return in_data + '.0' else: return in_data except: return None def float_round(in_data): if type(eval(in_data)).__name__ == 'float': return str(round(eval(in_data))) else : return in_data def time_to_time(in_format, out_format, in_data): return datetime.datetime.strptime(in_data, in_format).strftime(out_format) try: return eval(change_rule) except: #__LOG__.Exception() return None def processing(self, in_file) : __LOG__.Trace( ' === processing ===' ) self.SendLogInit() __LOG__.Watch( in_file ) ### input 과 output file 명, etc 에 대하여 out_data_dict = {} #/home/ds_center/DATA/DATA_COMP/KCB_T003_DEMO_RES3_202008.CSV/LineSplit/T003_DEMO_RES3:KCB_T003_DEMO_RES3_202008.CSV_19_20_1945746 dataset_code, rest_file_name = os.path.basename(in_file).split(':') self.dataset_cd = dataset_code = dataset_code.upper() try: save_base_name = '-'.join( self.table_dict[dataset_code] ) except KeyError: #__LOG__.Exception() __LOG__.Trace("NOT IN TABLE_DICT !!! 
[%s] : %s" % (dataset_code, in_file)) self.result_flag = 'FA' self.fail_type = '미등록' return #rest_file_name.rsplit('_',3)[-3] = split file index #rest_file_name.rsplit('_',3)[-2] = split file 총 갯수 #rest_file_name.rsplit('_',3)[-1] = 원본 총 row수 #- csv 파일 이 LineSplit 을 탔니 안탔니에 따라 달라지는 디렉토리 depth 를 output 통일로 맞추기 if not rest_file_name.rsplit('_',3)[-1].isdigit() or not rest_file_name.rsplit('_',3)[-2].isdigit() or not rest_file_name.rsplit('_',3)[-3].isdigit(): #/home/ds_center/DATA/DATA_COMP/KCB_T003_DEMO_RES3_202008.CSV/T003_DEMO_RES3:KCB_T003_DEMO_RES3_202008.CSV save_path = os.path.join(os.path.dirname(in_file), 'Standardization') rest_name = '.csv' split_flag = False else: #/home/ds_center/DATA/DATA_COMP/KCB_T003_DEMO_RES3_202008.CSV/LineSplit/T003_DEMO_RES3:KCB_T003_DEMO_RES3_202008.CSV_19_20_1945746 save_path = os.path.join(os.path.abspath(os.path.join(os.path.dirname(in_file), os.pardir)), 'Standardization') rest_name = '.csv_%s_%s_%s' % (rest_file_name.rsplit('_',3)[-3], rest_file_name.rsplit('_',2)[-2], rest_file_name.rsplit('_',2)[-1]) split_flag = True makedirs(save_path) ### input 컬럼 명세 header_list = self.table_header_dict[dataset_code] __LOG__.Watch( [dataset_code , len(header_list), header_list ]) ### output 컬럼 명세, 삭제 컬럼 대상 확인 new_header_list = copy.deepcopy( self.new_table_header_dict[dataset_code] ) #__LOG__.Watch( [ dataset_code, len(new_header_list), new_header_list ]) column_remove_idx = [] column_rename_list = [] for i, func in enumerate(self.column_func_dict[dataset_code]): if func == 'column_remove()': column_remove_idx.append(i) elif func.startswith('column_rename'): column_rename_list.append([i, func]) __LOG__.Watch(column_remove_idx) __LOG__.Watch(column_rename_list) def column_rename(new_name): return new_name for idx, func in column_rename_list : new_header_list[idx] = eval(func) for i in column_remove_idx: del new_header_list[i] __LOG__.Watch( [ dataset_code, len(new_header_list), new_header_list ]) ### csv 파일 열어서 데이터 확인 시작 progress = FileProgress.Progress( in_file=in_file ) with open( in_file, newline='', encoding='utf-8') as csvread: #for row in csv.reader(csvread, delimiter=' ', quotechar='|') total_row_cnt = 0 error_column_length = 0 error_check_type_legth = 0 error_check_notnull = 0 error_check_format = 0 error_change_cont = 0 comp_row_cnt = 0 header_cnt = 0 ### 파일의 로우별로 읽어서 for index, row in enumerate(csv.reader(csvread, delimiter=self.csv_read_sep),1): ## 컬럼 갯수 비교 if len(header_list) != len(row): out_data_dict.setdefault('error_column_length', []).append('|^|'.join(row)) error_column_length += 1 if error_column_length < self.leave_error_log : #__LOG__.Watch(['!!!ERROR : COLUMN CNT!!!', index, len(header_list), len(row)]) self.erSendLog(index, '컬럼갯수', '등록갯수[%s개] 들어온갯수[%s개] 데이터[%s]' % (len(header_list), len(row), row)) continue #헤더가 들어가있을 경우 무시 row_compare = [x.upper() for x in row] if header_list == row_compare : header_cnt += 1 continue #if self.debug_mode: __LOG__.Watch(row) ### 로우의 데이터별 확인 row_comp_flag = True break_flag = False for i, in_data in enumerate(row): rules = self.colum_dict[dataset_code][header_list[i]] ########## 기능 추가 필요 rules[6] XCPTN_VL(;구분자) ## 예외데이터count 추가, 예외일 경우 아래의 rule 무시 if rules[3] != '' : #DATA_CONT_FUNC 변경 #if self.debug_mode: __LOG__.Watch([ in_data, row[i] ]) for rule in [ x.strip() for x in rules[3].split('->') ]: if in_data != '': c_value = self.change_data_cont( in_data, rule) if c_value == None: out_data_dict.setdefault('error_change_cont', []).append('|^|'.join(row)) error_change_cont += 1 row_comp_flag = False if 
error_change_cont < self.leave_error_log : #__LOG__.Watch(['!!!ERROR : DATA_CONT_FUNC!!!', header_list[i], i, in_data, rule ]) self.erSendLog(index, '데이터변형', '변형룰[%s] 컬럼명[%s] 데이터[%s]' % (rule, header_list[i], in_data) ) break_flag = True break else: in_data = row[i] = c_value if break_flag: break #if self.debug_mode: __LOG__.Watch([in_data, row[i], c_value]) #NOTNULL 여부에 체크 if not self.check_notnull( in_data, rules[1] ): out_data_dict.setdefault('error_check_notnull', []).append('|^|'.join(row)) error_check_notnull += 1 row_comp_flag = False if error_check_notnull < self.leave_error_log : #__LOG__.Watch(['!!!ERROR : NOT_NULL!!!', header_list[i], i, in_data, rules[1]]) self.erSendLog(index, '빈값여부', 'NOTNULL여부[%s] 컬럼명[%s] 데이터[%s]' % (rules[1], header_list[i], in_data) ) break #타입과 글자수 체크 if not self.check_type_legth( in_data, rules[0] ) and in_data != '': out_data_dict.setdefault('error_check_type_legth', []).append('|^|'.join(row)) error_check_type_legth += 1 row_comp_flag = False if error_check_type_legth < self.leave_error_log : #__LOG__.Watch(['!!!ERROR : TYPE AND LENGTH!!!', header_list[i], i, in_data, rules[0]]) self.erSendLog(index, '데이터타입', '타입|길이[%s] 컬럼명[%s] 데이터[%s]' % (rules[0], header_list[i], in_data) ) break if rules[2] != '' and in_data != '': #FORMAT 체크 if not self.check_format( in_data, rules[2] ): out_data_dict.setdefault('error_check_format', []).append('|^|'.join(row)) error_check_format += 1 row_comp_flag = False # if error_check_format < self.leave_error_log : #__LOG__.Watch(['!!!ERROR : FORMAT!!!', header_list[i], i, in_data, rules[2]]) self.erSendLog(index, '데이터형식', '형식[%s] 컬럼명[%s] 데이터[%s]' % (rules[2], header_list[i], in_data) ) break ########## 기능 추가 필요 rules[4] MINM_VL rules[5] MAX_VL ##데이터범위 #row의 모든 데이터가 OK 통과하면 if row_comp_flag: for x,y in zip(self.date_column_name_list, self.date_column_length_list): if x.upper() in header_list : #date_key = row[header_list.index(self.date_column_name)][:self.date_column_length] date_key = row[header_list.index(x.upper())][:int(y)] elif x.upper() == 'SDTMAKEDATE': date_key = datetime.datetime.now().strftime('%Y%m%d%H%M%S')[:int(y)] else: continue out_data_dict.setdefault(date_key, []).append('|^|'.join(row)) comp_row_cnt += 1 total_row_cnt += 1 progress.getStatus(index) ####### test 용 ####### #if total_row_cnt == 5: # break # __LOG__.Watch(out_data_dict) # __LOG__.Watch([total_row_cnt, error_check_type_legth, error_check_notnull, error_check_format, error_change_cont, comp_row_cnt]) # os._exit(1) __LOG__.Trace('< total_row_cnt = %s, header_cnt = %s >:[comp_row_cnt = %s, error_column_length = %s, error_check_notnull = %s, error_check_type_legth = %s, error_check_format = %s, error_change_cont = %s]' % \ (total_row_cnt, header_cnt, comp_row_cnt, error_column_length, error_check_notnull, error_check_type_legth, error_check_format, error_change_cont)) if total_row_cnt == 0 : #__LOG__.Trace('!!!!!!!!!!!!!! 
0 row DATA !!!!!!!!!!!!!!!!!') self.result_flag = 'FA' self.fail_type = '빈데이터' return self.result_flag = 'SC' self.success_cnt = total_row_cnt self.fail_reason = rest_file_name.rsplit('_',3)[0] self.header_cnt = header_cnt self.comp_row_cnt = comp_row_cnt self.error_column_length = error_column_length self.error_check_notnull = error_check_notnull self.error_check_type_legth = error_check_type_legth self.error_check_format = error_check_format self.error_change_cont = error_change_cont period_list = [int(i) for i in out_data_dict.keys() if not i.startswith('error_')] if len(period_list) == 0 : self.min_period = '' self.max_period = '' else: self.min_period = min(period_list) self.max_period = max(period_list) ##csv file 쓰기 (삭제할 컬럼은 쓸때 정리) if split_flag : rest_name += '_%s' % header_cnt for keys in out_data_dict: save_file_name = os.path.join(save_path, '%s-%s%s' % (save_base_name, keys, rest_name)) ctl_file_name = os.path.join(save_path, '%s-%s.ctl' % (save_base_name, keys)) with open(save_file_name, mode='w') as csvwrite: #error if keys.startswith('error'): prefix = 'error' for line in out_data_dict[keys]: csv_writer = csv.writer(csvwrite, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL) line_list = line.split('|^|') csv_writer.writerow(line_list) with open(ctl_file_name, 'w') as erf: erf_writer = csv.writer(erf, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL) erf_writer.writerow(header_list) #표준화 완료 else: prefix = 'csv' for line in out_data_dict[keys]: csv_writer = csv.writer(csvwrite, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL) line_list = line.split('|^|') for i in column_remove_idx: del line_list[i] csv_writer.writerow(line_list) with open(ctl_file_name, 'w') as cpf: cpf_writer = csv.writer(cpf, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL) cpf_writer.writerow(new_header_list) stdout( '%s://%s' % ('ctl', ctl_file_name) ) stdout( '%s://%s' % (prefix, save_file_name) ) def psSendLog(self): if self.result_flag == 'SC' : self.product_cd = self.dataset_dict[self.dataset_cd][0] self.product_nm = self.dataset_dict[self.dataset_cd][1] elif self.result_flag == 'FA' : pass # self.product_cd = '' # self.product_nm = '' # self.success_cnt = '' # self.header_cnt = '' # self.comp_row_cnt = '' # self.error_column_length = '' # self.error_check_notnull = '' # self.error_check_type_legth = '' # self.error_check_format = '' # self.error_change_cont = '' # self.min_period = '' # self.max_period = '' # self.ps_duration = '' else: __LOG__.Trace("self.result_flag NOT EXIST !!!!!") sendLogData = '|^|'.join(map(str, [ self.center_name , self.center_id , self.product_cd , self.product_nm , self.dataset_cd , self.min_period , self.max_period , self.start_time , self.ps_duration , self.std_in , self.in_file_size , self.in_file_row_cnt , self.result_flag , self.success_cnt , self.fail_type , self.fail_reason , self.header_cnt , self.comp_row_cnt , self.error_column_length , self.error_check_notnull , self.error_check_type_legth , self.error_check_format , self.error_change_cont ])) __LOG__.Trace("PS_LOG://%s" % sendLogData) SendLog.irisLogClient().log("PS_LOG://%s\n" % sendLogData) def erSendLog(self, error_index, error_type, error_reason): sendLogData = '|^|'.join(map(str, [ self.center_name , self.center_id , self.dataset_dict[self.dataset_cd][0] #self.product_cd , self.dataset_dict[self.dataset_cd][1] #self.product_nm , self.dataset_cd , self.start_time , self.std_in , error_index , error_type , error_reason ])) __LOG__.Trace("ER_LOG://%s" % sendLogData) 
SendLog.irisLogClient().log("ER_LOG://%s\n" % sendLogData) def run(self): while not SHUTDOWN : std_in = None is_std_err = False try: #csv:///home/ds_center/DATA/DATA_COMP/KCB_T003_DEMO_RES3_202008.CSV/LineSplit/T003_DEMO_RES3:KCB_T003_DEMO_RES3_202008.CSV_19_20_1945746 std_in = sys.stdin.readline().strip() self.std_in = std_in if not std_in : is_std_err = True continue __LOG__.Trace('STD IN : %s' % std_in ) try : prefix, in_file = std_in.split('://', 1) except : is_std_err = True __LOG__.Trace( 'Input format error : %s' % std_in ) continue if prefix != 'csv' : is_std_err = True __LOG__.Trace('Prefix is not match : %s' % prefix) continue if not os.path.exists( in_file ) : is_std_err = True __LOG__.Trace('Not found file : %s' % in_file) continue stime = time.time() self.start_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S') self.in_file_size = os.path.getsize(in_file) self.in_file_row_cnt = subprocess.getstatusoutput('/usr/bin/wc -l %s' % in_file)[-1].split()[0] self.get_colum_dict() self.get_code_dict() self.processing(in_file) etime = time.time() self.ps_duration = etime - stime __LOG__.Trace( 'Duration %s sec' % self.ps_duration ) self.psSendLog() is_std_err = True except: if not SHUTDOWN : __LOG__.Exception() finally : if std_in != None and is_std_err : stderr( std_in ) time.sleep(1) #- main function ---------------------------------------------------- def main(): module = os.path.basename(sys.argv[0]) if len(sys.argv) < 2: sys.stderr.write('Usage : %s conf {option:[[log_arg]-d]}\n' % module ) sys.stderr.write('Usage : %s conf {option:[[log_arg]-d]}\n' % module ) #python3 /home/test/Project_name/bin/py3/BaseModule.py /home/test/Project_name/conf/BaseModule.conf #python3 /home/test/Project_name/bin/py3/BaseModule.py /home/test/Project_name/conf/BaseModule.conf 0 #python3 /home/test/Project_name/bin/py3/BaseModule.py /home/test/Project_name/conf/BaseModule.conf -d sys.stderr.flush() os._exit(1) config_file = sys.argv[1] conf = ConfigParser.ConfigParser() conf.read(config_file) debug_mode = False log_arg = '' if '-d' not in sys.argv : etc_argv = sys.argv[2:] if len(sys.argv[2:]) > 0 : log_arg = '_' + sys.argv[2] log_path = conf.get('GENERAL', 'LOG_PATH') makedirs( log_path ) log_file = os.path.join(log_path, '%s%s.log' % (os.path.splitext(module)[0], log_arg )) Log.Init(Log.CRotatingLog(log_file, 10240000, 9)) else: Log.Init() debug_mode = True pid = os.getpid() __LOG__.Trace('============= %s START [pid:%s]==================' % ( module, pid )) Standardization(conf, debug_mode, log_arg).run() __LOG__.Trace('============= %s END [pid:%s]====================' % (module, pid )) #- if name start ---------------------------------------------- if __name__ == "__main__" : main()
32.777293
211
0.647349
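Standardization_20210112.py drives its per-column transforms through eval(): the DATA_CONT_FUNC spec is a '->'-chained string of calls against small local helpers, with in_data rebound between stages. A toy demonstration of that mechanism; the rule string here is hypothetical, and zfill/strstrip mirror the helpers defined inside change_data_cont:

def zfill(in_data, length):
    return in_data.zfill(length)

def strstrip(in_data):
    return in_data.strip()

in_data = " 42 "
for rule in [x.strip() for x in "strstrip(in_data) -> zfill(in_data, 8)".split("->")]:
    in_data = eval(rule)  # each stage's output feeds the next, as in change_data_cont
print(in_data)  # 00000042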
acefc4c7af50da8fa625dd6111397ae621addce4
38
py
Python
tests/__init__.py
pathwayforte/PathwayForte
07775f3e174bfc756f7cf8f03efe49ef95a1cfd9
[ "Apache-2.0" ]
10
2019-03-31T14:53:05.000Z
2021-01-16T07:33:41.000Z
tests/__init__.py
pathwayforte/PathwayForte
07775f3e174bfc756f7cf8f03efe49ef95a1cfd9
[ "Apache-2.0" ]
16
2019-03-31T07:25:43.000Z
2019-08-21T09:47:26.000Z
tests/__init__.py
pathwayforte/PathwayForte
07775f3e174bfc756f7cf8f03efe49ef95a1cfd9
[ "Apache-2.0" ]
3
2020-04-23T13:55:29.000Z
2020-08-28T16:10:27.000Z
# -*- coding: utf-8 -*- """Tests."""
9.5
23
0.394737
acefc51c60f7aa666b408116e154def65f104b6b
519
py
Python
epi_judge_python/delete_kth_last_from_list.py
shobhitmishra/CodingProblems
0fc8c5037eef95b3ec9826b3a6e48885fc86659e
[ "MIT" ]
null
null
null
epi_judge_python/delete_kth_last_from_list.py
shobhitmishra/CodingProblems
0fc8c5037eef95b3ec9826b3a6e48885fc86659e
[ "MIT" ]
null
null
null
epi_judge_python/delete_kth_last_from_list.py
shobhitmishra/CodingProblems
0fc8c5037eef95b3ec9826b3a6e48885fc86659e
[ "MIT" ]
null
null
null
from typing import Optional from list_node import ListNode from test_framework import generic_test # Assumes L has at least k nodes, deletes the k-th last node in L. def remove_kth_last(L: ListNode, k: int) -> Optional[ListNode]: # TODO - you fill in here. return None if __name__ == '__main__': exit( generic_test.generic_test_main('delete_kth_last_from_list.py', 'delete_kth_last_from_list.tsv', remove_kth_last))
28.833333
71
0.635838
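The judge skeleton above leaves remove_kth_last unimplemented by design. A sketch of the standard two-pointer answer it expects, assuming the EPI ListNode exposes next and accepts a (data, next) constructor:

def remove_kth_last(L, k):
    dummy = ListNode(0, L)
    first = dummy.next
    for _ in range(k):              # put k nodes between the two pointers
        first = first.next
    second = dummy
    while first:                    # when first falls off the end...
        first, second = first.next, second.next
    second.next = second.next.next  # ...second sits just before the k-th last node
    return dummy.next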
acefc51e6beea8aa98a90f1683a4833b9b1f5005
1,353
py
Python
argus/migrations/0005_switch_to_numerator_denominator.py
littleweaver/django-argus
3d60d788d14e3b19c404a5dfa372e7bbc73ac916
[ "BSD-3-Clause" ]
null
null
null
argus/migrations/0005_switch_to_numerator_denominator.py
littleweaver/django-argus
3d60d788d14e3b19c404a5dfa372e7bbc73ac916
[ "BSD-3-Clause" ]
1
2018-02-07T22:17:31.000Z
2018-02-08T18:32:12.000Z
argus/migrations/0005_switch_to_numerator_denominator.py
littleweaver/django-argus
3d60d788d14e3b19c404a5dfa372e7bbc73ac916
[ "BSD-3-Clause" ]
null
null
null
# encoding: utf8 from django.db import models, migrations def percent_to_numdenom(apps, schema_editor): Share = apps.get_model("argus", "Share") for share in Share.objects.all(): share.denominator = 10000 share.numerator = share.portion * 10000 share.fraction_is_manual = share.portion_is_manual share.save() class Migration(migrations.Migration): dependencies = [ ('argus', '0004_auto_20140301_0358'), ] operations = [ migrations.AddField( model_name='share', name='denominator', field=models.PositiveIntegerField(default=1), preserve_default=False, ), migrations.AddField( model_name='share', name='numerator', field=models.PositiveIntegerField(default=1), preserve_default=False, ), migrations.AddField( model_name='share', name='fraction_is_manual', field=models.BooleanField(default=False), preserve_default=True, ), migrations.RunPython(percent_to_numdenom), migrations.RemoveField( model_name='share', name='portion_is_manual', ), migrations.RemoveField( model_name='share', name='portion', ), ]
27.612245
58
0.586105
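Since Share.numerator is a PositiveIntegerField while portion is presumably a decimal fraction, the forward function relies on portion * 10000 landing on an integer. A hedged variant with explicit rounding, plus a hypothetical reverse function that would make the RunPython step reversible (field names follow the migration above):

def percent_to_numdenom(apps, schema_editor):
    Share = apps.get_model("argus", "Share")
    for share in Share.objects.all():
        share.denominator = 10000
        share.numerator = int(round(share.portion * 10000))  # integer field wants an int
        share.fraction_is_manual = share.portion_is_manual
        share.save()

def numdenom_to_percent(apps, schema_editor):
    # Runs while the portion fields still exist, because Django unwinds
    # the surrounding operations in reverse order.
    Share = apps.get_model("argus", "Share")
    for share in Share.objects.all():
        share.portion = share.numerator / share.denominator
        share.portion_is_manual = share.fraction_is_manual
        share.save()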
acefc5a3b26660718b74b40b808e009c89876f0f
8,489
py
Python
homeassistant/components/bayesian/binary_sensor.py
petewill/home-assistant
5859dba4344f05fb8774aa1207e47ac28f627a67
[ "Apache-2.0" ]
3
2020-01-21T18:09:09.000Z
2022-01-17T08:06:03.000Z
homeassistant/components/bayesian/binary_sensor.py
petewill/home-assistant
5859dba4344f05fb8774aa1207e47ac28f627a67
[ "Apache-2.0" ]
39
2016-12-16T12:40:34.000Z
2017-02-13T17:53:42.000Z
homeassistant/components/bayesian/binary_sensor.py
petewill/home-assistant
5859dba4344f05fb8774aa1207e47ac28f627a67
[ "Apache-2.0" ]
3
2020-01-11T15:44:13.000Z
2022-01-17T08:06:09.000Z
"""Use Bayesian Inference to trigger a binary sensor.""" from collections import OrderedDict import voluptuous as vol from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice from homeassistant.const import ( CONF_ABOVE, CONF_BELOW, CONF_DEVICE_CLASS, CONF_ENTITY_ID, CONF_NAME, CONF_PLATFORM, CONF_STATE, CONF_VALUE_TEMPLATE, STATE_UNKNOWN, ) from homeassistant.core import callback from homeassistant.helpers import condition import homeassistant.helpers.config_validation as cv from homeassistant.helpers.event import async_track_state_change ATTR_OBSERVATIONS = "observations" ATTR_PROBABILITY = "probability" ATTR_PROBABILITY_THRESHOLD = "probability_threshold" CONF_OBSERVATIONS = "observations" CONF_PRIOR = "prior" CONF_TEMPLATE = "template" CONF_PROBABILITY_THRESHOLD = "probability_threshold" CONF_P_GIVEN_F = "prob_given_false" CONF_P_GIVEN_T = "prob_given_true" CONF_TO_STATE = "to_state" DEFAULT_NAME = "Bayesian Binary Sensor" DEFAULT_PROBABILITY_THRESHOLD = 0.5 NUMERIC_STATE_SCHEMA = vol.Schema( { CONF_PLATFORM: "numeric_state", vol.Required(CONF_ENTITY_ID): cv.entity_id, vol.Optional(CONF_ABOVE): vol.Coerce(float), vol.Optional(CONF_BELOW): vol.Coerce(float), vol.Required(CONF_P_GIVEN_T): vol.Coerce(float), vol.Optional(CONF_P_GIVEN_F): vol.Coerce(float), }, required=True, ) STATE_SCHEMA = vol.Schema( { CONF_PLATFORM: CONF_STATE, vol.Required(CONF_ENTITY_ID): cv.entity_id, vol.Required(CONF_TO_STATE): cv.string, vol.Required(CONF_P_GIVEN_T): vol.Coerce(float), vol.Optional(CONF_P_GIVEN_F): vol.Coerce(float), }, required=True, ) TEMPLATE_SCHEMA = vol.Schema( { CONF_PLATFORM: CONF_TEMPLATE, vol.Required(CONF_VALUE_TEMPLATE): cv.template, vol.Required(CONF_P_GIVEN_T): vol.Coerce(float), vol.Optional(CONF_P_GIVEN_F): vol.Coerce(float), }, required=True, ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_DEVICE_CLASS): cv.string, vol.Required(CONF_OBSERVATIONS): vol.Schema( vol.All( cv.ensure_list, [vol.Any(NUMERIC_STATE_SCHEMA, STATE_SCHEMA, TEMPLATE_SCHEMA)], ) ), vol.Required(CONF_PRIOR): vol.Coerce(float), vol.Optional( CONF_PROBABILITY_THRESHOLD, default=DEFAULT_PROBABILITY_THRESHOLD ): vol.Coerce(float), } ) def update_probability(prior, prob_true, prob_false): """Update probability using Bayes' rule.""" numerator = prob_true * prior denominator = numerator + prob_false * (1 - prior) probability = numerator / denominator return probability async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the Bayesian Binary sensor.""" name = config.get(CONF_NAME) observations = config.get(CONF_OBSERVATIONS) prior = config.get(CONF_PRIOR) probability_threshold = config.get(CONF_PROBABILITY_THRESHOLD) device_class = config.get(CONF_DEVICE_CLASS) async_add_entities( [ BayesianBinarySensor( name, prior, observations, probability_threshold, device_class ) ], True, ) class BayesianBinarySensor(BinarySensorDevice): """Representation of a Bayesian sensor.""" def __init__(self, name, prior, observations, probability_threshold, device_class): """Initialize the Bayesian sensor.""" self._name = name self._observations = observations self._probability_threshold = probability_threshold self._device_class = device_class self._deviation = False self.prior = prior self.probability = prior self.current_obs = OrderedDict({}) to_observe = set() for obs in self._observations: if "entity_id" in obs: to_observe.update(set([obs.get("entity_id")])) if "value_template" in obs: 
to_observe.update(set(obs.get(CONF_VALUE_TEMPLATE).extract_entities())) # one independent list per entity; dict.fromkeys(to_observe, []) would make every key share a single list self.entity_obs = {entity_id: [] for entity_id in to_observe} for ind, obs in enumerate(self._observations): obs["id"] = ind if "entity_id" in obs: self.entity_obs[obs["entity_id"]].append(obs) if "value_template" in obs: for ent in obs.get(CONF_VALUE_TEMPLATE).extract_entities(): self.entity_obs[ent].append(obs) self.watchers = { "numeric_state": self._process_numeric_state, "state": self._process_state, "template": self._process_template, } async def async_added_to_hass(self): """Call when entity about to be added.""" @callback def async_threshold_sensor_state_listener(entity, old_state, new_state): """Handle sensor state changes.""" if new_state.state == STATE_UNKNOWN: return entity_obs_list = self.entity_obs[entity] for entity_obs in entity_obs_list: platform = entity_obs["platform"] self.watchers[platform](entity_obs) prior = self.prior for obs in self.current_obs.values(): prior = update_probability(prior, obs["prob_true"], obs["prob_false"]) self.probability = prior self.hass.async_add_job(self.async_update_ha_state, True) async_track_state_change( self.hass, self.entity_obs, async_threshold_sensor_state_listener ) def _update_current_obs(self, entity_observation, should_trigger): """Update current observation.""" obs_id = entity_observation["id"] if should_trigger: prob_true = entity_observation["prob_given_true"] prob_false = entity_observation.get("prob_given_false", 1 - prob_true) self.current_obs[obs_id] = { "prob_true": prob_true, "prob_false": prob_false, } else: self.current_obs.pop(obs_id, None) def _process_numeric_state(self, entity_observation): """Add entity to current_obs if numeric state conditions are met.""" entity = entity_observation["entity_id"] should_trigger = condition.async_numeric_state( self.hass, entity, entity_observation.get("below"), entity_observation.get("above"), None, entity_observation, ) self._update_current_obs(entity_observation, should_trigger) def _process_state(self, entity_observation): """Add entity to current observations if state conditions are met.""" entity = entity_observation["entity_id"] should_trigger = condition.state( self.hass, entity, entity_observation.get("to_state") ) self._update_current_obs(entity_observation, should_trigger) def _process_template(self, entity_observation): """Add entity to current_obs if template is true.""" template = entity_observation.get(CONF_VALUE_TEMPLATE) template.hass = self.hass should_trigger = condition.async_template( self.hass, template, entity_observation ) self._update_current_obs(entity_observation, should_trigger) @property def name(self): """Return the name of the sensor.""" return self._name @property def is_on(self): """Return true if sensor is on.""" return self._deviation @property def should_poll(self): """No polling needed.""" return False @property def device_class(self): """Return the sensor class of the sensor.""" return self._device_class @property def device_state_attributes(self): """Return the state attributes of the sensor.""" return { ATTR_OBSERVATIONS: [val for val in self.current_obs.values()], ATTR_PROBABILITY: round(self.probability, 2), ATTR_PROBABILITY_THRESHOLD: self._probability_threshold, } async def async_update(self): """Get the latest data and update the states.""" self._deviation = bool(self.probability >= self._probability_threshold)
32.524904
87
0.65579
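update_probability in the record above is a single application of Bayes' rule: P(H|E) = P(E|H)P(H) / (P(E|H)P(H) + P(E|~H)P(~H)). Worked numbers (the observation values are hypothetical):

prior = 0.2
prob_given_true, prob_given_false = 0.9, 0.1
numerator = prob_given_true * prior                        # 0.18
denominator = numerator + prob_given_false * (1 - prior)   # 0.18 + 0.08 = 0.26
posterior = numerator / denominator                        # ~0.692, above the 0.5 default threshold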
acefc613a013330ace1bca6e59c3e8959a4cb6fe
802
py
Python
src/main.py
ithihasmadala/gene-to-signal
96efc1253604c0b5b46a0643e10bd1f0c9a7801c
[ "MIT" ]
null
null
null
src/main.py
ithihasmadala/gene-to-signal
96efc1253604c0b5b46a0643e10bd1f0c9a7801c
[ "MIT" ]
null
null
null
src/main.py
ithihasmadala/gene-to-signal
96efc1253604c0b5b46a0643e10bd1f0c9a7801c
[ "MIT" ]
null
null
null
from utils.dna2signal import toNumeric from utils.signal2spec import toSpectrogram import numpy as np import matplotlib.pyplot as plt from librosa import stft from PIL import Image seq = 'GTTAATGTAGCTTACATAAAGTGTGGCACTGAAAATGCTAAGACAGATTTTAAAATATCTCATAAACACACAGGTTTGGTCCTGACCTTGCTATTAATTTTTACTACGCTTACACATGCAAGTATCTGCATACCCGTGAAAATGCCCTTTACTACCCGTAAGTAGAACAGGAGCAGATATCAGGCACTTATAATGCCAAAGACATCTTGTTTAACCACACCCCTAAGGGAGCTCAGCAGTGATAAACATTGAATATAAGCGACACAAGCTTGAATCAGCGATAGTTAACAGAGTCGGTAAATCTCGTGCCAGCCACCGCGGTTATACGAGAGACTCAAATTAATATAATCGGCCCAAAGAGTGGTTAGGAGCGTAAATCAAATAGGGTTAAAAACTAACCCCGCTGTCGTACGCAGAGGTTAAAAAAAGCACAACACCGAAAGTAACCCTATAAAAACACCACTGAACCCACGACAGCTAGGACACAAACTGGGATTAGATACCCCACTATGCCTAGCCATAA' X = toNumeric(seq) Spec = toSpectrogram(X.Tetrahedron()) print(Spec.inNumpy()[4][30])
57.285714
531
0.92394
acefc83ba074084c5a4398a61a327469f9397b8c
794
py
Python
src/data/caching.py
silasbrack/special-course
47dc396f97b2027d366e90add115d4ed2bc0f1de
[ "MIT" ]
null
null
null
src/data/caching.py
silasbrack/special-course
47dc396f97b2027d366e90add115d4ed2bc0f1de
[ "MIT" ]
null
null
null
src/data/caching.py
silasbrack/special-course
47dc396f97b2027d366e90add115d4ed2bc0f1de
[ "MIT" ]
null
null
null
import logging import torch from tqdm import tqdm def cache_dataset(dataset, verbose=False): logging.info("Caching dataset.") dataset.set_use_cache(False) if verbose: for _ in tqdm(dataset): pass else: for _ in iter(dataset): pass dataset.set_use_cache(True) def save_cached(dataset, path): torch.save( { "cached_data": dataset.cached_data, "cached_indices": dataset.cached_indices, "n_cached": dataset.n_cached, }, path, ) def load_cached(dataset, path): cache = torch.load(path) dataset.cached_data = cache["cached_data"] dataset.cached_indices = cache["cached_indices"] dataset.n_cached = cache["n_cached"] dataset.set_use_cache(True)
21.459459
53
0.629723
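cache_dataset/save_cached above assume a dataset that exposes set_use_cache plus cached_data/cached_indices/n_cached and records items as they are first read. The dataset class itself is not in this file, so the shape below is an assumption:

from torch.utils.data import Dataset

class CachingDataset(Dataset):
    # Sketch only: minimal interface compatible with cache_dataset/save_cached/load_cached.
    def __init__(self, base):
        self.base = base
        self.cached_data = [None] * len(base)
        self.cached_indices = set()
        self.n_cached = 0
        self.use_cache = False

    def set_use_cache(self, use_cache):
        self.use_cache = use_cache

    def __len__(self):
        return len(self.base)

    def __getitem__(self, i):
        if self.use_cache and i in self.cached_indices:
            return self.cached_data[i]
        item = self.base[i]            # first read populates the cache
        if i not in self.cached_indices:
            self.cached_indices.add(i)
            self.n_cached += 1
        self.cached_data[i] = item
        return item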
acefc89205af08d3433e3aadb07606dd8e5dd68b
4,906
py
Python
{{cookiecutter.project_name}}/packages/{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/processing/preprocessors.py
Cawiess/data_project_template
b1b141473a0dd3bd9152e3bf6d24d1ff8cdfb92c
[ "MIT" ]
null
null
null
{{cookiecutter.project_name}}/packages/{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/processing/preprocessors.py
Cawiess/data_project_template
b1b141473a0dd3bd9152e3bf6d24d1ff8cdfb92c
[ "MIT" ]
null
null
null
{{cookiecutter.project_name}}/packages/{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/processing/preprocessors.py
Cawiess/data_project_template
b1b141473a0dd3bd9152e3bf6d24d1ff8cdfb92c
[ "MIT" ]
null
null
null
import numpy as np import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin from typing import List ''' Rationale for BaseEstimator and TransformerMixin: Each project is unique in its details, similar to others in terms of overarching functionality. Sklearn offers well established functionality which is highly compatible with other services. Useful core functionalities of Sklearn can be implemented project specific classes through inheritance of methods from the BaseEstimator and TransformerMixin classes. By inheriting the methods of the BaseEstimator and TransformerMixin, the preprocessors defined here can be directly integrated into a Sklearn pipeline. The user can find a template for a Preprocessing unit which is ready to be extended with purpose-specific code, as well as a simple usecase below. ''' ######################################### PREPROCESSING UNIT TEMPLATE ############################ class PreprocessorUnitTemplate(BaseEstimator, TransformerMixin): def __init__(self, variables: List[str]) -> None: ''' Specify variable input as list of strings representing column names of a pd.DataFrame. ''' # YOUR CODE HERE return None def fit(self, X: pd.DataFrame, y: pd.Series = None): ''' Required method for Sklearn TransformerMixin class. Remains inactive and performs no action for now. Leave as is. ''' return self def transform(self, X: pd.DataFrame, y: pd.Series = None): ''' Creates a copy of input dataframe, which is passed to method via the sklearn Pipeline. This method performs changes to the dataframe with reference to specified variables. The modified copy of the original dataframe is then returned and passed to the next step in pipeline. Define your specific transformation here. ''' X = X.copy() # YOUR CODE HERE return X ####################################################################################################### class ColumnLabelNormalizer(BaseEstimator, TransformerMixin): def __init__(self): ''' Input: None, iterates over all dataframe columns. Normalizes column labels: - remove whitespace - spaces to _ - full lowercase - remove clutter (ex. ;,.) ''' def fit(self, X: pd.DataFrame, y: pd.Series = None): return self def transform(self, X: pd.DataFrame): X = X.copy() X.columns = [var.lower() for var in X.columns] X.columns = [var.replace(';', '') for var in X.columns] X.columns = [var.replace(' ', '_') for var in X.columns] return X class ColumnValueNormalizer(BaseEstimator, TransformerMixin): def __init__(self): ''' Input: None, iterates over all dataframe columns. Normalizes column labels: - remove whitespace - spaces to _ - full lowercase - remove clutter (ex. ;,.) - Check if all remaining column values are numeric, if True; type --> float ''' def fit(self, X: pd.DataFrame, y: pd.Series = None): return self def transform(self, X: pd.DataFrame): X = X.copy() special_characters = ';:' #TODO: move this to config file for variable in X.columns: if X[variable].dtype=='O': if X[variable].str.contains('|'.join(special_characters)).any(): X[variable] = X[variable].str.strip(special_characters)#.astype(bool).any() if X[variable].str.isnumeric().all() == True: X[variable] = X[variable].astype(float) return X class ExtractSubsetVariables(BaseEstimator, TransformerMixin): def __init__(self, variables: List[str]): ''' Extracts selected variables only. 
''' if not isinstance(variables, list): raise ValueError("variables must be given as elements of list.") self.variables = variables def fit(self, X: pd.DataFrame, y: pd.Series = None): return self def transform(self, X: pd.DataFrame): X = X.copy() X = X[self.variables] return X
33.37415
109
0.601916
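Because every unit in preprocessors.py inherits BaseEstimator and TransformerMixin, the transformers compose directly in an sklearn Pipeline, as the file's docstring says. A small usage sketch; the column names and data are placeholders:

import pandas as pd
from sklearn.pipeline import Pipeline

pipe = Pipeline([
    ("labels", ColumnLabelNormalizer()),   # "Age" -> "age", "Income;" -> "income"
    ("values", ColumnValueNormalizer()),   # "3;" -> 3.0 once the clutter is stripped
    ("subset", ExtractSubsetVariables(variables=["age", "income"])),
])
df = pd.DataFrame({"Age": ["1", "2"], "Income;": ["3;", "4"]})
clean = pipe.fit_transform(df)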
acefc8d99a53efc2f6a8a8b4680967555d26fd31
7,935
py
Python
dynamic_scraper/spiders/django_checker.py
jsdelivrbot/xscraper
5f097b4e21a5cc57eea2b30ede6983caf1391742
[ "BSD-3-Clause" ]
null
null
null
dynamic_scraper/spiders/django_checker.py
jsdelivrbot/xscraper
5f097b4e21a5cc57eea2b30ede6983caf1391742
[ "BSD-3-Clause" ]
null
null
null
dynamic_scraper/spiders/django_checker.py
jsdelivrbot/xscraper
5f097b4e21a5cc57eea2b30ede6983caf1391742
[ "BSD-3-Clause" ]
null
null
null
#Stage 2 Update (Python 3) from __future__ import unicode_literals from builtins import str import datetime, json, logging, os from jsonpath_rw import jsonpath, parse from jsonpath_rw.lexer import JsonPathLexerError from scrapy import signals from scrapy.exceptions import CloseSpider from scrapy.http import FormRequest, Request from pydispatch import dispatcher from dynamic_scraper.spiders.django_base_spider import DjangoBaseSpider from dynamic_scraper.models import ScraperElem from dynamic_scraper.utils.scheduler import Scheduler class DjangoChecker(DjangoBaseSpider): name = "django_checker" mandatory_vars = ['ref_object', 'scraper',] def __init__(self, *args, **kwargs): super(DjangoChecker, self).__init__(*args, **kwargs) self._set_config(**kwargs) self._check_checker_config() self._set_request_kwargs() self._set_meta_splash_args() self.scheduler = Scheduler(self.scraper.scraped_obj_class.checker_scheduler_conf) dispatcher.connect(self.response_received, signal=signals.response_received) msg = "Checker for " + self.ref_object.__class__.__name__ + " \"" + str(self.ref_object) + "\" (" + str(self.ref_object.pk) + ") initialized." self.log(msg, logging.INFO) def _set_config(self, **kwargs): log_msg = "" #output_response_body if 'output_response_body' in kwargs and kwargs['output_response_body'] == 'yes': self.conf['OUTPUT_RESPONSE_BODY'] = True if len(log_msg) > 0: log_msg += ", " log_msg += "output_response_body " + str(self.conf['OUTPUT_RESPONSE_BODY']) else: self.conf['OUTPUT_RESPONSE_BODY'] = False super(DjangoChecker, self)._set_config(log_msg, **kwargs) def _check_checker_config(self): if self.scraper.checker_set.count() == 0: msg = 'No checkers defined for scraper!' logging.warning(msg) raise CloseSpider(msg) def _del_ref_object(self): if self.action_successful: self.log("Item already deleted, skipping.", logging.INFO) return from scrapy.utils.project import get_project_settings settings = get_project_settings() try: img_elem = self.scraper.get_image_elem() if hasattr(self.ref_object, img_elem.scraped_obj_attr.name): img_name = getattr(self.ref_object, img_elem.scraped_obj_attr.name) thumb_paths = [] if settings.get('IMAGES_THUMBS') and len(settings.get('IMAGES_THUMBS')) > 0: for key in settings.get('IMAGES_THUMBS').keys(): thumb_paths.append(('thumbnail, {k}'.format(k=key), os.path.join(settings.get('IMAGES_STORE'), 'thumbs', key, img_name),)) del_paths = [] if self.conf['IMAGES_STORE_FORMAT'] == 'FLAT': del_paths.append(('original, flat path', os.path.join(settings.get('IMAGES_STORE'), img_name),)) if self.conf['IMAGES_STORE_FORMAT'] == 'ALL': del_paths.append(('original, full/ path', os.path.join(settings.get('IMAGES_STORE'), 'full' , img_name),)) del_paths += thumb_paths if self.conf['IMAGES_STORE_FORMAT'] == 'THUMBS': del_paths += thumb_paths for path in del_paths: if os.access(path[1], os.F_OK): try: os.unlink(path[1]) self.log("Associated image ({n}, {p}) deleted.".format(n=img_name, p=path[0]), logging.INFO) except Exception: self.log("Associated image ({n}, {p}) could not be deleted!".format(n=img_name, p=path[0]), logging.ERROR) else: self.log("Associated image ({n}, {p}) could not be found!".format(n=img_name, p=path[0]), logging.WARNING) except ScraperElem.DoesNotExist: pass self.ref_object.delete() self.scraper.last_checker_delete = datetime.datetime.now() self.scraper.save() self.action_successful = True self.log("Item deleted.", logging.INFO) def start_requests(self): for checker in self.scraper.checker_set.all(): url = getattr(self.ref_object, checker.scraped_obj_attr.name)
rpt = self.scraper.get_rpt_for_scraped_obj_attr(checker.scraped_obj_attr) kwargs = self.dp_request_kwargs[rpt.page_type].copy() if 'meta' not in kwargs: kwargs['meta'] = {} kwargs['meta']['checker'] = checker kwargs['meta']['rpt'] = rpt self._set_meta_splash_args() if url: if rpt.request_type == 'R': yield Request(url, callback=self.parse, method=rpt.method, dont_filter=True, **kwargs) else: yield FormRequest(url, callback=self.parse, method=rpt.method, formdata=self.dp_form_data[rpt.page_type], dont_filter=True, **kwargs) def response_received(self, **kwargs): checker = kwargs['response'].request.meta['checker'] rpt = kwargs['response'].request.meta['rpt'] # 404 test if kwargs['response'].status == 404: if self.scheduler_runtime.num_zero_actions == 0: self.log("Checker test returned second 404 ({c}). Delete reason.".format(c=str(checker)), logging.INFO) if self.conf['DO_ACTION']: self._del_ref_object() else: self.log("Checker test returned first 404 ({c}).".format(c=str(checker)), logging.INFO) self.action_successful = True def parse(self, response): # x_path test checker = response.request.meta['checker'] rpt = response.request.meta['rpt'] if self.conf['OUTPUT_RESPONSE_BODY']: self.log("Response body ({url})\n\n***** RP_START *****\n{resp_body}\n***** RP_END *****\n\n".format( url=response.url, resp_body=response.body.decode('utf-8')), logging.INFO) if checker.checker_type == '4': self.log("No 404 ({c}).".format(c=str(checker)), logging.INFO) return if rpt.content_type == 'J': json_resp = json.loads(response.body_as_unicode()) try: jsonpath_expr = parse(checker.checker_x_path) except JsonPathLexerError: raise CloseSpider("Invalid checker JSONPath ({c})!".format(c=str(checker))) test_select = [match.value for match in jsonpath_expr.find(json_resp)] #self.log(unicode(test_select), logging.INFO) else: try: test_select = response.xpath(checker.checker_x_path).extract() except ValueError: self.log("Invalid checker XPath ({c})!".format(c=str(checker)), logging.ERROR) return if len(test_select) > 0 and checker.checker_x_path_result == '': self.log("Elements for XPath found on page (no result string defined) ({c}). Delete reason.".format(c=str(checker)), logging.INFO) if self.conf['DO_ACTION']: self._del_ref_object() return elif len(test_select) > 0 and test_select[0] == checker.checker_x_path_result: self.log("XPath result string '{s}' found on page ({c}). Delete reason.".format(s=checker.checker_x_path_result, c=str(checker)), logging.INFO) if self.conf['DO_ACTION']: self._del_ref_object() return else: self.log("XPath result string not found ({c}).".format(c=str(checker)), logging.INFO) return
44.083333
155
0.593825