hexsha
stringlengths
40
40
size
int64
2
1.02M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
245
max_stars_repo_name
stringlengths
6
130
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
245
max_issues_repo_name
stringlengths
6
130
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
245
max_forks_repo_name
stringlengths
6
130
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
2
1.02M
avg_line_length
float64
1
417k
max_line_length
int64
1
987k
alphanum_fraction
float64
0
1
content_no_comment
stringlengths
0
1.01M
is_comment_constant_removed
bool
1 class
is_sharp_comment_removed
bool
1 class
f72c7eba9adcee72dd6bb67f5e168999469e292c
1,058
py
Python
XXBDailyFresh/apps/user/urls.py
sixTiger/XXBDailyFresh
5c6976eff8e073f79b50e7829e10332ccd8df43d
[ "MIT" ]
null
null
null
XXBDailyFresh/apps/user/urls.py
sixTiger/XXBDailyFresh
5c6976eff8e073f79b50e7829e10332ccd8df43d
[ "MIT" ]
null
null
null
XXBDailyFresh/apps/user/urls.py
sixTiger/XXBDailyFresh
5c6976eff8e073f79b50e7829e10332ccd8df43d
[ "MIT" ]
null
null
null
"""XXBDailyFresh URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ # url(r'^register$', RegisterView.as_view(), name='register'), # 注册 # url(r'^active/(?P<token>.*)$', ActiveView.as_view(), name='active'), # 用户激活 # url(r'^login$', LoginView.as_view(), name='login'), # 登录 from django.urls import path from apps.user.views import RegisterView urlpatterns = [ path('register/', RegisterView.as_view(), name='register'), # 首页 # path('', views.index, name='index'), # 首页 ]
39.185185
81
0.680529
.urls import path from apps.user.views import RegisterView urlpatterns = [ path('register/', RegisterView.as_view(), name='register'),
true
true
f72c7fa8536014c7a70bc5bf40a892ab8804afca
4,820
py
Python
Tsukihime/nscript_parser.py
Samyuth/LomandoCrawler
2d6bc7bd79678b78ac7c30e88b72127134e99b91
[ "MIT" ]
null
null
null
Tsukihime/nscript_parser.py
Samyuth/LomandoCrawler
2d6bc7bd79678b78ac7c30e88b72127134e99b91
[ "MIT" ]
1
2022-03-31T09:40:48.000Z
2022-03-31T09:44:48.000Z
Tsukihime/nscript_parser.py
Samyuth/LomandoCrawler
2d6bc7bd79678b78ac7c30e88b72127134e99b91
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Wed Mar 16 02:05:23 2022 @author: Sagi """ ''' Sample choice node text: ;-BLOCK------------------------------------------------------------------------- *f20 # Label gosub *regard_update !sd if %sceneskip==1 && %1020==1 skip 4 gosub *s20 mov %1020,1 skip 9 `You have already viewed this scene. `Would you like to skip? br selgosub `1. Skip`, *skip20, `2. Don't skip`, *s20 skip 3 *skip20 return ;gosub *s20 select `1. There's only a few minutes until homeroom. I have to head there right away.`, *f21, `2. || I'm curious, so I'll go take a look.`, *f22 ''' import re from Graph import * class TextNode(): def __init__(self, label=None, text=None, children=None): if label is not None: self.label = label else: self.label = None if text is not None: self.text = text else: self.text = "" if children is not None: self.children = children else: self.children = [] def get_text(self): if self.text: return self.text else: return None def get_label(self): if self.label: return self.label else: return None def add_text(self, text): self.text += text def change_label(self, label): self.label = label def add_children(self, children): self.children += children class ChoiceNode(TextNode): def add_choices(self, choices): self.choices = choices def get_choices(self): if self.choices: return self.choices else: return None class TsukihimeNode(TextNode): def get_labels(self, string): return re.findall("\*.*(?=,)|\*.*(?=\s)|\*.*", string) def parse_text(self): if self.text is None: print("No text to parse") return -1 line_ctr = 0 lines = self.text.splitlines() no_lines = len(lines) while (line_ctr < no_lines): if lines[line_ctr].find("select") != -1: children = [] while (line_ctr < no_lines and re.search("`[0-9].*`", lines[line_ctr])): children += self.get_labels(lines[line_ctr]) line_ctr += 1 self.add_children(children) elif lines[line_ctr].find("goto") != -1: self.add_children(self.get_labels(lines[line_ctr])) line_ctr += 1 class NscriptParser(Graph): # 
method to parse the script def parse(self): nscript = open("./nsdec/NSDEC/result.txt", encoding="cp932") line = nscript.readline() header = open("./parsed_texts/header.txt", "w", encoding="cp932") remaining = open("./parsed_texts/remaining.txt", "w", encoding="cp932") choices = open("./parsed_texts/choices.txt", "w", encoding="cp932") choice_nodes = [] nodes = [] nodes_present = False while (line and line.strip() != "*start"): header.writelines(line) line = nscript.readline() while (line and line.strip() != "; $Id: 4.txt 1282 2006-08-04 18:12:29Z chendo $"): if re.match("\*f.*", line): nodes_present = True choice_nodes.append(TsukihimeNode(text="")) if nodes_present: choice_nodes[-1].add_text(line) if re.match("^\*f", line): choice_nodes[-1].change_label(line.strip()) choices.writelines(line) line = nscript.readline() while (line): if re.match("^\*", line): nodes.append(TextNode(line)) remaining.writelines(line) line = nscript.readline() nscript.close() header.close() remaining.close() choices.close() choice_nodes = list(filter(lambda x: x.get_label() is not None, choice_nodes)) for node in choice_nodes: node.parse_text() for node in choice_nodes: self.graph.add_node(node.label) for child in node.children: if child not in self.graph: self.graph.add_node(child) self.graph.add_edge(node.label, child) return choice_nodes if __name__ == "__main__": parser = NscriptParser() choice_nodes = parser.parse() leveled_tree = parser.get_leveled_tree() output = parser.output_tree_sideways() with open("ouput.txt", "w") as outfile: outfile.write(output) #parser.plot() #parser.plot_pretty()
27.542857
94
0.527386
import re from Graph import * class TextNode(): def __init__(self, label=None, text=None, children=None): if label is not None: self.label = label else: self.label = None if text is not None: self.text = text else: self.text = "" if children is not None: self.children = children else: self.children = [] def get_text(self): if self.text: return self.text else: return None def get_label(self): if self.label: return self.label else: return None def add_text(self, text): self.text += text def change_label(self, label): self.label = label def add_children(self, children): self.children += children class ChoiceNode(TextNode): def add_choices(self, choices): self.choices = choices def get_choices(self): if self.choices: return self.choices else: return None class TsukihimeNode(TextNode): def get_labels(self, string): return re.findall("\*.*(?=,)|\*.*(?=\s)|\*.*", string) def parse_text(self): if self.text is None: print("No text to parse") return -1 line_ctr = 0 lines = self.text.splitlines() no_lines = len(lines) while (line_ctr < no_lines): if lines[line_ctr].find("select") != -1: children = [] while (line_ctr < no_lines and re.search("`[0-9].*`", lines[line_ctr])): children += self.get_labels(lines[line_ctr]) line_ctr += 1 self.add_children(children) elif lines[line_ctr].find("goto") != -1: self.add_children(self.get_labels(lines[line_ctr])) line_ctr += 1 class NscriptParser(Graph): def parse(self): nscript = open("./nsdec/NSDEC/result.txt", encoding="cp932") line = nscript.readline() header = open("./parsed_texts/header.txt", "w", encoding="cp932") remaining = open("./parsed_texts/remaining.txt", "w", encoding="cp932") choices = open("./parsed_texts/choices.txt", "w", encoding="cp932") choice_nodes = [] nodes = [] nodes_present = False while (line and line.strip() != "*start"): header.writelines(line) line = nscript.readline() while (line and line.strip() != "; $Id: 4.txt 1282 2006-08-04 18:12:29Z chendo $"): if re.match("\*f.*", line): nodes_present = True 
choice_nodes.append(TsukihimeNode(text="")) if nodes_present: choice_nodes[-1].add_text(line) if re.match("^\*f", line): choice_nodes[-1].change_label(line.strip()) choices.writelines(line) line = nscript.readline() while (line): if re.match("^\*", line): nodes.append(TextNode(line)) remaining.writelines(line) line = nscript.readline() nscript.close() header.close() remaining.close() choices.close() choice_nodes = list(filter(lambda x: x.get_label() is not None, choice_nodes)) for node in choice_nodes: node.parse_text() for node in choice_nodes: self.graph.add_node(node.label) for child in node.children: if child not in self.graph: self.graph.add_node(child) self.graph.add_edge(node.label, child) return choice_nodes if __name__ == "__main__": parser = NscriptParser() choice_nodes = parser.parse() leveled_tree = parser.get_leveled_tree() output = parser.output_tree_sideways() with open("ouput.txt", "w") as outfile: outfile.write(output)
true
true
f72c80155df71399c41c13f3793341aca06db318
2,677
py
Python
plugins/cylance_protect/unit_test/test_update_agent.py
lukaszlaszuk/insightconnect-plugins
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
[ "MIT" ]
46
2019-06-05T20:47:58.000Z
2022-03-29T10:18:01.000Z
plugins/cylance_protect/unit_test/test_update_agent.py
lukaszlaszuk/insightconnect-plugins
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
[ "MIT" ]
386
2019-06-07T20:20:39.000Z
2022-03-30T17:35:01.000Z
plugins/cylance_protect/unit_test/test_update_agent.py
lukaszlaszuk/insightconnect-plugins
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
[ "MIT" ]
43
2019-07-09T14:13:58.000Z
2022-03-28T12:04:46.000Z
import sys import os sys.path.append(os.path.abspath("../")) from unittest import TestCase from icon_cylance_protect.connection.connection import Connection from icon_cylance_protect.actions.update_agent import UpdateAgent import json import logging class TestUpdateAgent(TestCase): def test_integration_update_agent(self): """ TODO: Implement assertions at the end of this test case This is an integration test that will connect to the services your plugin uses. It should be used as the basis for tests below that can run independent of a "live" connection. This test assumes a normal plugin structure with a /tests directory. In that /tests directory should be json samples that contain all the data needed to run this test. To generate samples run: icon-plugin generate samples """ log = logging.getLogger("Test") test_conn = Connection() test_action = UpdateAgent() test_conn.logger = log test_action.logger = log try: with open("../tests/update_agent.json") as file: test_json = json.loads(file.read()).get("body") connection_params = test_json.get("connection") action_params = test_json.get("input") except Exception as e: message = """ Could not find or read sample tests from /tests directory An exception here likely means you didn't fill out your samples correctly in the /tests directory Please use 'icon-plugin generate samples', and fill out the resulting test files in the /tests directory """ self.fail(message) test_conn.connect(connection_params) test_action.connection = test_conn results = test_action.run(action_params) # TODO: Remove this line self.fail("Unimplemented test case") # TODO: The following assert should be updated to look for data from your action # For example: self.assertEquals({"success": True}, results) self.assertEquals({}, results) def test_update_agent(self): """ TODO: Implement test cases here Here you can mock the connection with data returned from the above integration test. 
For information on mocking and unit testing please go here: https://docs.google.com/document/d/1PifePDG1-mBcmNYE8dULwGxJimiRBrax5BIDG_0TFQI/edit?usp=sharing You can either create a formal Mock for this, or you can create a fake connection class to pass to your action for testing. """ self.fail("Unimplemented Test Case")
36.671233
116
0.668285
import sys import os sys.path.append(os.path.abspath("../")) from unittest import TestCase from icon_cylance_protect.connection.connection import Connection from icon_cylance_protect.actions.update_agent import UpdateAgent import json import logging class TestUpdateAgent(TestCase): def test_integration_update_agent(self): log = logging.getLogger("Test") test_conn = Connection() test_action = UpdateAgent() test_conn.logger = log test_action.logger = log try: with open("../tests/update_agent.json") as file: test_json = json.loads(file.read()).get("body") connection_params = test_json.get("connection") action_params = test_json.get("input") except Exception as e: message = """ Could not find or read sample tests from /tests directory An exception here likely means you didn't fill out your samples correctly in the /tests directory Please use 'icon-plugin generate samples', and fill out the resulting test files in the /tests directory """ self.fail(message) test_conn.connect(connection_params) test_action.connection = test_conn results = test_action.run(action_params) # TODO: Remove this line self.fail("Unimplemented test case") # TODO: The following assert should be updated to look for data from your action # For example: self.assertEquals({"success": True}, results) self.assertEquals({}, results) def test_update_agent(self): self.fail("Unimplemented Test Case")
true
true
f72c80b9bc4510e5476205c3adf1bfd5dea678af
1,931
py
Python
model-optimizer/extensions/front/onnx/detectionoutput_ext.py
Andruxin52rus/openvino
d824e371fe7dffb90e6d3d58e4e34adecfce4606
[ "Apache-2.0" ]
2
2020-11-18T14:14:06.000Z
2020-11-28T04:55:57.000Z
model-optimizer/extensions/front/onnx/detectionoutput_ext.py
Andruxin52rus/openvino
d824e371fe7dffb90e6d3d58e4e34adecfce4606
[ "Apache-2.0" ]
30
2020-11-13T11:44:07.000Z
2022-02-21T13:03:16.000Z
model-optimizer/extensions/front/onnx/detectionoutput_ext.py
mmakridi/openvino
769bb7709597c14debdaa356dd60c5a78bdfa97e
[ "Apache-2.0" ]
3
2021-03-09T08:27:29.000Z
2021-04-07T04:58:54.000Z
""" Copyright (C) 2018-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from math import log import numpy as np from extensions.ops.detectionoutput_onnx import ExperimentalDetectronDetectionOutput from mo.front.extractor import FrontExtractorOp from mo.front.onnx.extractors.utils import onnx_attr class ExperimentalDetectronDetectionOutputFrontExtractor(FrontExtractorOp): op = 'ExperimentalDetectronDetectionOutput' enabled = True @classmethod def extract(cls, node): attrs = dict(class_agnostic_box_regression=onnx_attr(node, 'class_agnostic_box_regression', 'i', 0), max_detections_per_image=onnx_attr(node, 'max_detections_per_image', 'i', 100), nms_threshold=onnx_attr(node, 'nms_threshold', 'f', 0.5), num_classes=onnx_attr(node, 'num_classes', 'i', 81), post_nms_count=onnx_attr(node, 'post_nms_count', 'i', 2000), score_threshold=onnx_attr(node, 'score_threshold', 'f', 0.05), max_delta_log_wh=onnx_attr(node, 'max_delta_log_wh', 'f', log(1000. / 16.)), deltas_weights=np.array(onnx_attr(node, 'deltas_weights', 'floats', [10., 10., 5., 5.]), dtype=np.float32) ) ExperimentalDetectronDetectionOutput.update_node_stat(node, attrs) return cls.enabled
43.886364
109
0.684619
from math import log import numpy as np from extensions.ops.detectionoutput_onnx import ExperimentalDetectronDetectionOutput from mo.front.extractor import FrontExtractorOp from mo.front.onnx.extractors.utils import onnx_attr class ExperimentalDetectronDetectionOutputFrontExtractor(FrontExtractorOp): op = 'ExperimentalDetectronDetectionOutput' enabled = True @classmethod def extract(cls, node): attrs = dict(class_agnostic_box_regression=onnx_attr(node, 'class_agnostic_box_regression', 'i', 0), max_detections_per_image=onnx_attr(node, 'max_detections_per_image', 'i', 100), nms_threshold=onnx_attr(node, 'nms_threshold', 'f', 0.5), num_classes=onnx_attr(node, 'num_classes', 'i', 81), post_nms_count=onnx_attr(node, 'post_nms_count', 'i', 2000), score_threshold=onnx_attr(node, 'score_threshold', 'f', 0.05), max_delta_log_wh=onnx_attr(node, 'max_delta_log_wh', 'f', log(1000. / 16.)), deltas_weights=np.array(onnx_attr(node, 'deltas_weights', 'floats', [10., 10., 5., 5.]), dtype=np.float32) ) ExperimentalDetectronDetectionOutput.update_node_stat(node, attrs) return cls.enabled
true
true
f72c811a9b903e14fbd5d11e5e45b9449c6237b3
2,159
py
Python
test/chemistry/test_driver_gaussian_extra.py
hushaohan/aqua
8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d
[ "Apache-2.0" ]
2
2020-06-29T16:08:12.000Z
2020-08-07T22:42:13.000Z
test/chemistry/test_driver_gaussian_extra.py
hushaohan/aqua
8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d
[ "Apache-2.0" ]
null
null
null
test/chemistry/test_driver_gaussian_extra.py
hushaohan/aqua
8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d
[ "Apache-2.0" ]
1
2022-01-25T07:09:10.000Z
2022-01-25T07:09:10.000Z
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ Test Driver Gaussian internals - does not require Gaussian installed """ import unittest from test.chemistry import QiskitChemistryTestCase from qiskit.chemistry.drivers import GaussianDriver # We need to have an instance so we can test function but constructor calls # an internal method to check G16 installed. We need to replace that with # the following dummy for things to work and we do it for each test so the # class ends up as it was def _check_valid(): pass class TestDriverGaussianExtra(QiskitChemistryTestCase): """Gaussian Driver extra tests for driver specifics, errors etc """ def setUp(self): super().setUp() self.good_check = GaussianDriver._check_valid GaussianDriver._check_valid = _check_valid # We can now create a driver without the installed (check valid) test failing def tearDown(self): GaussianDriver._check_valid = self.good_check def test_cfg_augment(self): """ test input configuration augmentation """ cfg = '# rhf/sto-3g scf(conventional)\n\n' \ 'h2 molecule\n\n0 1\nH 0.0 0.0 0.0\nH 0.0 0.0 0.735\n\n' g16 = GaussianDriver(cfg) aug_cfg = g16._augment_config("mymatfile.mat", cfg) expected = '# rhf/sto-3g scf(conventional)\n' \ '# Window=Full Int=NoRaff Symm=(NoInt,None)' \ ' output=(matrix,i4labels,mo2el) tran=full\n\n' \ 'h2 molecule\n\n0 1\nH 0.0 0.0 0.0\nH 0.0 0.0 0.735' \ '\n\nmymatfile.mat\n\n' self.assertEqual(aug_cfg, expected) if __name__ == '__main__': unittest.main()
36.59322
85
0.672534
import unittest from test.chemistry import QiskitChemistryTestCase from qiskit.chemistry.drivers import GaussianDriver def _check_valid(): pass class TestDriverGaussianExtra(QiskitChemistryTestCase): def setUp(self): super().setUp() self.good_check = GaussianDriver._check_valid GaussianDriver._check_valid = _check_valid def tearDown(self): GaussianDriver._check_valid = self.good_check def test_cfg_augment(self): cfg = '# rhf/sto-3g scf(conventional)\n\n' \ 'h2 molecule\n\n0 1\nH 0.0 0.0 0.0\nH 0.0 0.0 0.735\n\n' g16 = GaussianDriver(cfg) aug_cfg = g16._augment_config("mymatfile.mat", cfg) expected = '# rhf/sto-3g scf(conventional)\n' \ '# Window=Full Int=NoRaff Symm=(NoInt,None)' \ ' output=(matrix,i4labels,mo2el) tran=full\n\n' \ 'h2 molecule\n\n0 1\nH 0.0 0.0 0.0\nH 0.0 0.0 0.735' \ '\n\nmymatfile.mat\n\n' self.assertEqual(aug_cfg, expected) if __name__ == '__main__': unittest.main()
true
true
f72c82f4acdaefe35bcc5d195dbe520974fda99d
1,269
py
Python
qiskit_nature/algorithms/excited_states_solvers/__init__.py
divshacker/qiskit-nature
08f6dcec5e4ac8c08f5b84e764ee78cc3d12facb
[ "Apache-2.0" ]
132
2021-01-28T14:51:11.000Z
2022-03-25T21:10:47.000Z
qiskit_nature/algorithms/excited_states_solvers/__init__.py
divshacker/qiskit-nature
08f6dcec5e4ac8c08f5b84e764ee78cc3d12facb
[ "Apache-2.0" ]
449
2021-01-28T19:57:43.000Z
2022-03-31T17:01:50.000Z
qiskit_nature/algorithms/excited_states_solvers/__init__.py
divshacker/qiskit-nature
08f6dcec5e4ac8c08f5b84e764ee78cc3d12facb
[ "Apache-2.0" ]
109
2021-01-28T13:17:46.000Z
2022-03-30T23:53:39.000Z
# This code is part of Qiskit. # # (C) Copyright IBM 2020, 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ Excited State Solving Algorithms (:mod:`qiskit_nature.algorithms.excited_states_solvers`) ========================================================================================= .. currentmodule:: qiskit_nature.algorithms.excited_states_solvers .. autosummary:: :toctree: ../stubs/ eigensolver_factories .. autosummary:: :toctree: ../stubs/ :nosignatures: ExcitedStatesEigensolver QEOM """ from .excited_states_solver import ExcitedStatesSolver from .qeom import QEOM from .eigensolver_factories import EigensolverFactory, NumPyEigensolverFactory from .excited_states_eigensolver import ExcitedStatesEigensolver __all__ = [ "ExcitedStatesSolver", "ExcitedStatesEigensolver", "EigensolverFactory", "NumPyEigensolverFactory", "QEOM", ]
28.840909
89
0.711584
from .excited_states_solver import ExcitedStatesSolver from .qeom import QEOM from .eigensolver_factories import EigensolverFactory, NumPyEigensolverFactory from .excited_states_eigensolver import ExcitedStatesEigensolver __all__ = [ "ExcitedStatesSolver", "ExcitedStatesEigensolver", "EigensolverFactory", "NumPyEigensolverFactory", "QEOM", ]
true
true
f72c838e66c47527c0a178012ed8acbdbcfe18e4
827
gyp
Python
ui/aura_extra/aura_extra.gyp
hefen1/chromium
52f0b6830e000ca7c5e9aa19488af85be792cc88
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
ui/aura_extra/aura_extra.gyp
hefen1/chromium
52f0b6830e000ca7c5e9aa19488af85be792cc88
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
ui/aura_extra/aura_extra.gyp
hefen1/chromium
52f0b6830e000ca7c5e9aa19488af85be792cc88
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
2
2020-04-04T13:34:56.000Z
2020-11-04T07:17:52.000Z
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'variables': { 'chromium_code': 1, }, 'targets': [ { # GN version: //ui/aura_extra 'target_name': 'aura_extra', 'type': '<(component)', 'dependencies': [ '../../base/base.gyp:base', '../../skia/skia.gyp:skia', '../aura/aura.gyp:aura', '../base/ui_base.gyp:ui_base', '../events/events.gyp:events', '../gfx/gfx.gyp:gfx', '../gfx/gfx.gyp:gfx_geometry', ], 'defines': [ 'AURA_EXTRA_IMPLEMENTATION', ], 'sources': [ 'aura_extra_export.h', 'image_window_delegate.cc', 'image_window_delegate.h', ], }, ], }
24.323529
72
0.53688
{ 'variables': { 'chromium_code': 1, }, 'targets': [ { 'target_name': 'aura_extra', 'type': '<(component)', 'dependencies': [ '../../base/base.gyp:base', '../../skia/skia.gyp:skia', '../aura/aura.gyp:aura', '../base/ui_base.gyp:ui_base', '../events/events.gyp:events', '../gfx/gfx.gyp:gfx', '../gfx/gfx.gyp:gfx_geometry', ], 'defines': [ 'AURA_EXTRA_IMPLEMENTATION', ], 'sources': [ 'aura_extra_export.h', 'image_window_delegate.cc', 'image_window_delegate.h', ], }, ], }
true
true
f72c844451481add20eff334fb82624c5d7efbe7
1,662
py
Python
lab-05-1-logistic_regression.py
KANG91/Deep_Learning
e3e9de769ab835215d0ebeee79ff869afbe64ebf
[ "MIT" ]
null
null
null
lab-05-1-logistic_regression.py
KANG91/Deep_Learning
e3e9de769ab835215d0ebeee79ff869afbe64ebf
[ "MIT" ]
null
null
null
lab-05-1-logistic_regression.py
KANG91/Deep_Learning
e3e9de769ab835215d0ebeee79ff869afbe64ebf
[ "MIT" ]
null
null
null
# Lab 5 Logistic Regression Classifier import tensorflow as tf tf.set_random_seed(777) # for reproducibility x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]] y_data = [[0], [0], [0], [1], [1], [1]] # placeholders for a tensor that will be always fed. X = tf.placeholder(tf.float32, shape=[None, 2]) Y = tf.placeholder(tf.float32, shape=[None, 1]) W = tf.Variable(tf.random_normal([2, 1]), name='weight') b = tf.Variable(tf.random_normal([1]), name='bias') # Hypothesis using sigmoid: tf.div(1., 1. + tf.exp(tf.matmul(X, W))) hypothesis = tf.sigmoid(tf.matmul(X, W) + b) # Cost function cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis)) train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost) # Accuracy computation # True if hypothesis>0.5 else False predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32) accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32)) # Launch graph with tf.Session() as sess: # Initialize TensorFlow variables sess.run(tf.global_variables_initializer()) feed = {X: x_data, Y: y_data} for step in range(10001): sess.run(train, feed_dict=feed) if step % 200 == 0: print(step, sess.run(cost, feed_dict=feed), sess.run(W)) # Accuracy report h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict=feed) print("\nHypothesis: ", h, "\nCorrect (Y): ", c, "\nAccuracy: ", a) ''' Hypothesis: [[ 0.03074029] [ 0.15884677] [ 0.30486736] [ 0.78138196] [ 0.93957496] [ 0.98016882]] Correct (Y): [[ 0.] [ 0.] [ 0.] [ 1.] [ 1.] [ 1.]] Accuracy: 1.0 '''
28.169492
76
0.628159
import tensorflow as tf tf.set_random_seed(777) x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]] y_data = [[0], [0], [0], [1], [1], [1]] X = tf.placeholder(tf.float32, shape=[None, 2]) Y = tf.placeholder(tf.float32, shape=[None, 1]) W = tf.Variable(tf.random_normal([2, 1]), name='weight') b = tf.Variable(tf.random_normal([1]), name='bias') hypothesis = tf.sigmoid(tf.matmul(X, W) + b) cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis)) train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost) predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32) accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32)) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) feed = {X: x_data, Y: y_data} for step in range(10001): sess.run(train, feed_dict=feed) if step % 200 == 0: print(step, sess.run(cost, feed_dict=feed), sess.run(W)) h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict=feed) print("\nHypothesis: ", h, "\nCorrect (Y): ", c, "\nAccuracy: ", a)
true
true
f72c8477a6936f3991993793141885a0bb21af12
4,436
py
Python
tests/unit/models/physics/MeniscusTest.py
edgargmartinez/OpenPNM
c68745993b3e9895f53938164a9cf6305500748e
[ "MIT" ]
3
2019-07-05T22:07:21.000Z
2019-07-05T22:07:30.000Z
tests/unit/models/physics/MeniscusTest.py
edgargmartinez/OpenPNM
c68745993b3e9895f53938164a9cf6305500748e
[ "MIT" ]
null
null
null
tests/unit/models/physics/MeniscusTest.py
edgargmartinez/OpenPNM
c68745993b3e9895f53938164a9cf6305500748e
[ "MIT" ]
null
null
null
import openpnm as op import openpnm.models.physics as pm import scipy as sp class MeniscusTest: def setup_class(self): sp.random.seed(1) self.net = op.network.Cubic(shape=[5, 1, 5], spacing=5e-5) self.geo = op.geometry.StickAndBall(network=self.net, pores=self.net.pores(), throats=self.net.throats()) self.phase = op.phases.Water(network=self.net) self.phys = op.physics.Standard(network=self.net, phase=self.phase, geometry=self.geo) def test_toroidal_touch(self): phys = self.phys r_tor = 1e-6 self.geo['throat.touch_length'] = 2e-6 phys.add_model(propname='throat.tor_max', model=pm.meniscus.purcell, mode='max', r_toroid=r_tor) phys.add_model(propname='throat.tor_touch', model=pm.meniscus.purcell, mode='touch', r_toroid=r_tor) assert sp.any(phys['throat.tor_touch'] < phys['throat.tor_max']) def test_sinusoidal_touch(self): phys = self.phys self.geo['throat.amplitude'] = 5e-6 self.geo['throat.touch_length'] = 1e-6 phys.add_model(propname='throat.sin_pressure_max', model=pm.meniscus.sinusoidal, mode='max') phys.add_model(propname='throat.sin_pressure_touch', model=pm.meniscus.sinusoidal, mode='touch') h = phys.check_data_health() for check in h.values(): if len(check) > 0: assert 1 == 2 assert sp.any((phys['throat.sin_pressure_touch'] < phys['throat.sin_pressure_max'])) def test_sinusoidal(self): phys = self.phys self.geo['throat.amplitude'] = 5e-6 phys.add_model(propname='throat.sin_pressure', model=pm.meniscus.sinusoidal, mode='max') phys.add_model(propname='throat.sin_meniscus', model=pm.meniscus.sinusoidal, mode='men', target_Pc=5000) h = phys.check_data_health() for check in h.values(): if len(check) > 0: assert 1 == 2 def test_toroidal(self): phys = self.phys r_tor = 1e-6 phys.add_model(propname='throat.purcell_pressure', model=pm.capillary_pressure.purcell, r_toroid=r_tor) phys.add_model(propname='throat.tor_pressure', model=pm.meniscus.purcell, mode='max', r_toroid=r_tor, num_points=1000) phys.add_model(propname='throat.tor_meniscus', model=pm.meniscus.purcell, 
mode='men', r_toroid=r_tor, target_Pc=5000) a = sp.around(phys['throat.purcell_pressure'], 10) b = sp.around(phys['throat.tor_pressure'], 10) assert sp.allclose(a, b) h = phys.check_data_health() for check in h.values(): if len(check) > 0: assert 1 == 2 def test_general_toroidal(self): phys = self.phys r_tor = 1e-6 phys.add_model(propname='throat.purcell_pressure', model=pm.capillary_pressure.purcell, r_toroid=r_tor) phys['throat.scale_a'] = r_tor phys['throat.scale_b'] = r_tor phys.add_model(propname='throat.general_pressure', model=pm.meniscus.general_toroidal, mode='max', num_points=1000) a = sp.around(phys['throat.purcell_pressure'], 10) b = sp.around(phys['throat.general_pressure'], 10) assert sp.allclose(a, b) h = phys.check_data_health() for check in h.values(): if len(check) > 0: assert 1 == 2 if __name__ == '__main__': t = MeniscusTest() self = t t.setup_class() for item in t.__dir__(): if item.startswith('test'): print('running test: '+item) t.__getattribute__(item)()
37.277311
72
0.511046
import openpnm as op import openpnm.models.physics as pm import scipy as sp class MeniscusTest: def setup_class(self): sp.random.seed(1) self.net = op.network.Cubic(shape=[5, 1, 5], spacing=5e-5) self.geo = op.geometry.StickAndBall(network=self.net, pores=self.net.pores(), throats=self.net.throats()) self.phase = op.phases.Water(network=self.net) self.phys = op.physics.Standard(network=self.net, phase=self.phase, geometry=self.geo) def test_toroidal_touch(self): phys = self.phys r_tor = 1e-6 self.geo['throat.touch_length'] = 2e-6 phys.add_model(propname='throat.tor_max', model=pm.meniscus.purcell, mode='max', r_toroid=r_tor) phys.add_model(propname='throat.tor_touch', model=pm.meniscus.purcell, mode='touch', r_toroid=r_tor) assert sp.any(phys['throat.tor_touch'] < phys['throat.tor_max']) def test_sinusoidal_touch(self): phys = self.phys self.geo['throat.amplitude'] = 5e-6 self.geo['throat.touch_length'] = 1e-6 phys.add_model(propname='throat.sin_pressure_max', model=pm.meniscus.sinusoidal, mode='max') phys.add_model(propname='throat.sin_pressure_touch', model=pm.meniscus.sinusoidal, mode='touch') h = phys.check_data_health() for check in h.values(): if len(check) > 0: assert 1 == 2 assert sp.any((phys['throat.sin_pressure_touch'] < phys['throat.sin_pressure_max'])) def test_sinusoidal(self): phys = self.phys self.geo['throat.amplitude'] = 5e-6 phys.add_model(propname='throat.sin_pressure', model=pm.meniscus.sinusoidal, mode='max') phys.add_model(propname='throat.sin_meniscus', model=pm.meniscus.sinusoidal, mode='men', target_Pc=5000) h = phys.check_data_health() for check in h.values(): if len(check) > 0: assert 1 == 2 def test_toroidal(self): phys = self.phys r_tor = 1e-6 phys.add_model(propname='throat.purcell_pressure', model=pm.capillary_pressure.purcell, r_toroid=r_tor) phys.add_model(propname='throat.tor_pressure', model=pm.meniscus.purcell, mode='max', r_toroid=r_tor, num_points=1000) phys.add_model(propname='throat.tor_meniscus', model=pm.meniscus.purcell, 
mode='men', r_toroid=r_tor, target_Pc=5000) a = sp.around(phys['throat.purcell_pressure'], 10) b = sp.around(phys['throat.tor_pressure'], 10) assert sp.allclose(a, b) h = phys.check_data_health() for check in h.values(): if len(check) > 0: assert 1 == 2 def test_general_toroidal(self): phys = self.phys r_tor = 1e-6 phys.add_model(propname='throat.purcell_pressure', model=pm.capillary_pressure.purcell, r_toroid=r_tor) phys['throat.scale_a'] = r_tor phys['throat.scale_b'] = r_tor phys.add_model(propname='throat.general_pressure', model=pm.meniscus.general_toroidal, mode='max', num_points=1000) a = sp.around(phys['throat.purcell_pressure'], 10) b = sp.around(phys['throat.general_pressure'], 10) assert sp.allclose(a, b) h = phys.check_data_health() for check in h.values(): if len(check) > 0: assert 1 == 2 if __name__ == '__main__': t = MeniscusTest() self = t t.setup_class() for item in t.__dir__(): if item.startswith('test'): print('running test: '+item) t.__getattribute__(item)()
true
true
f72c85576b8389695f555dc9f2032aaaf2f1f2df
19,881
py
Python
plugins/modules/oci_network_drg.py
LaudateCorpus1/oci-ansible-collection
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
[ "Apache-2.0" ]
null
null
null
plugins/modules/oci_network_drg.py
LaudateCorpus1/oci-ansible-collection
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
[ "Apache-2.0" ]
null
null
null
plugins/modules/oci_network_drg.py
LaudateCorpus1/oci-ansible-collection
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # Copyright (c) 2020, 2022 Oracle and/or its affiliates. # This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Apache License v2.0 # See LICENSE.TXT for details. # GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], "supported_by": "community", } DOCUMENTATION = """ --- module: oci_network_drg short_description: Manage a Drg resource in Oracle Cloud Infrastructure description: - This module allows the user to create, update and delete a Drg resource in Oracle Cloud Infrastructure - For I(state=present), creates a new dynamic routing gateway (DRG) in the specified compartment. For more information, see L(Dynamic Routing Gateways (DRGs),https://docs.cloud.oracle.com/iaas/Content/Network/Tasks/managingDRGs.htm). - For the purposes of access control, you must provide the L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment where you want the DRG to reside. Notice that the DRG doesn't have to be in the same compartment as the VCN, the DRG attachment, or other Networking Service components. If you're not sure which compartment to use, put the DRG in the same compartment as the VCN. For more information about compartments and access control, see L(Overview of the IAM Service,https://docs.cloud.oracle.com/iaas/Content/Identity/Concepts/overview.htm). For information about OCIDs, see L(Resource Identifiers,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm). - "You may optionally specify a *display name* for the DRG, otherwise a default is provided. It does not have to be unique, and you can change it. Avoid entering confidential information." 
- "This resource has the following action operations in the M(oracle.oci.oci_network_drg_actions) module: change_compartment, get_all_drg_attachments, upgrade." version_added: "2.9.0" author: Oracle (@oracle) options: compartment_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment to contain the DRG. - Required for create using I(state=present). - Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set. - Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set. type: str defined_tags: description: - Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`" - This parameter is updatable. type: dict display_name: description: - A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information. - Required for create, update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set. - This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set. type: str aliases: ["name"] freeform_tags: description: - Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Department\\": \\"Finance\\"}`" - This parameter is updatable. type: dict drg_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the DRG. - Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set. - Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set. 
type: str aliases: ["id"] default_drg_route_tables: description: - "" - This parameter is updatable. type: dict suboptions: vcn: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the default DRG route table to be assigned to DRG attachments of type VCN on creation. - This parameter is updatable. type: str ipsec_tunnel: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the default DRG route table assigned to DRG attachments of type IPSEC_TUNNEL on creation. - This parameter is updatable. type: str virtual_circuit: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the default DRG route table to be assigned to DRG attachments of type VIRTUAL_CIRCUIT on creation. - This parameter is updatable. type: str remote_peering_connection: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the default DRG route table to be assigned to DRG attachments of type REMOTE_PEERING_CONNECTION on creation. - This parameter is updatable. type: str state: description: - The state of the Drg. - Use I(state=present) to create or update a Drg. - Use I(state=absent) to delete a Drg. 
type: str required: false default: 'present' choices: ["present", "absent"] extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ] """ EXAMPLES = """ - name: Create drg oci_network_drg: # required compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx" # optional defined_tags: {'Operations': {'CostCenter': 'US'}} display_name: display_name_example freeform_tags: {'Department': 'Finance'} - name: Update drg oci_network_drg: # required drg_id: "ocid1.drg.oc1..xxxxxxEXAMPLExxxxxx" # optional defined_tags: {'Operations': {'CostCenter': 'US'}} display_name: display_name_example freeform_tags: {'Department': 'Finance'} default_drg_route_tables: # optional vcn: vcn_example ipsec_tunnel: ipsec_tunnel_example virtual_circuit: virtual_circuit_example remote_peering_connection: remote_peering_connection_example - name: Update drg using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set) oci_network_drg: # required compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx" display_name: display_name_example # optional defined_tags: {'Operations': {'CostCenter': 'US'}} freeform_tags: {'Department': 'Finance'} default_drg_route_tables: # optional vcn: vcn_example ipsec_tunnel: ipsec_tunnel_example virtual_circuit: virtual_circuit_example remote_peering_connection: remote_peering_connection_example - name: Delete drg oci_network_drg: # required drg_id: "ocid1.drg.oc1..xxxxxxEXAMPLExxxxxx" state: absent - name: Delete drg using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set) oci_network_drg: # required compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx" display_name: display_name_example state: absent """ RETURN = """ drg: description: - Details of the Drg resource acted upon by the current operation returned: on success type: complex contains: compartment_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the 
compartment containing the DRG. returned: on success type: str sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx" defined_tags: description: - Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`" returned: on success type: dict sample: {'Operations': {'CostCenter': 'US'}} display_name: description: - A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information. returned: on success type: str sample: display_name_example freeform_tags: description: - Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Department\\": \\"Finance\\"}`" returned: on success type: dict sample: {'Department': 'Finance'} id: description: - The DRG's Oracle ID (L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)). returned: on success type: str sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx" lifecycle_state: description: - The DRG's current state. returned: on success type: str sample: PROVISIONING time_created: description: - The date and time the DRG was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339). - "Example: `2016-08-25T21:10:29.600Z`" returned: on success type: str sample: "2013-10-20T19:20:30+01:00" default_drg_route_tables: description: - "" returned: on success type: complex contains: vcn: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the default DRG route table to be assigned to DRG attachments of type VCN on creation. 
returned: on success type: str sample: vcn_example ipsec_tunnel: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the default DRG route table assigned to DRG attachments of type IPSEC_TUNNEL on creation. returned: on success type: str sample: ipsec_tunnel_example virtual_circuit: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the default DRG route table to be assigned to DRG attachments of type VIRTUAL_CIRCUIT on creation. returned: on success type: str sample: virtual_circuit_example remote_peering_connection: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the default DRG route table to be assigned to DRG attachments of type REMOTE_PEERING_CONNECTION on creation. returned: on success type: str sample: remote_peering_connection_example default_export_drg_route_distribution_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of this DRG's default export route distribution for the DRG attachments. 
returned: on success type: str sample: "ocid1.defaultexportdrgroutedistribution.oc1..xxxxxxEXAMPLExxxxxx" sample: { "compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx", "defined_tags": {'Operations': {'CostCenter': 'US'}}, "display_name": "display_name_example", "freeform_tags": {'Department': 'Finance'}, "id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx", "lifecycle_state": "PROVISIONING", "time_created": "2013-10-20T19:20:30+01:00", "default_drg_route_tables": { "vcn": "vcn_example", "ipsec_tunnel": "ipsec_tunnel_example", "virtual_circuit": "virtual_circuit_example", "remote_peering_connection": "remote_peering_connection_example" }, "default_export_drg_route_distribution_id": "ocid1.defaultexportdrgroutedistribution.oc1..xxxxxxEXAMPLExxxxxx" } """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.oracle.oci.plugins.module_utils import ( oci_common_utils, oci_wait_utils, ) from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import ( OCIResourceHelperBase, get_custom_class, ) try: from oci.core import VirtualNetworkClient from oci.core.models import CreateDrgDetails from oci.core.models import UpdateDrgDetails HAS_OCI_PY_SDK = True except ImportError: HAS_OCI_PY_SDK = False class DrgHelperGen(OCIResourceHelperBase): """Supported operations: create, update, get, list and delete""" def get_possible_entity_types(self): return super(DrgHelperGen, self).get_possible_entity_types() + [ "drg", "drgs", "coredrg", "coredrgs", "drgresource", "drgsresource", "core", ] def get_module_resource_id_param(self): return "drg_id" def get_module_resource_id(self): return self.module.params.get("drg_id") def get_get_fn(self): return self.client.get_drg def get_resource(self): return oci_common_utils.call_with_backoff( self.client.get_drg, drg_id=self.module.params.get("drg_id"), ) def get_required_kwargs_for_list(self): required_list_method_params = [ "compartment_id", ] return dict( (param, self.module.params[param]) for 
param in required_list_method_params ) def get_optional_kwargs_for_list(self): return dict() def list_resources(self): required_kwargs = self.get_required_kwargs_for_list() optional_kwargs = self.get_optional_kwargs_for_list() kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs) return oci_common_utils.list_all_resources(self.client.list_drgs, **kwargs) def get_create_model_class(self): return CreateDrgDetails def create_resource(self): create_details = self.get_create_model() return oci_wait_utils.call_and_wait( call_fn=self.client.create_drg, call_fn_args=(), call_fn_kwargs=dict(create_drg_details=create_details,), waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY, operation=oci_common_utils.CREATE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=self.get_wait_for_states_for_operation( oci_common_utils.CREATE_OPERATION_KEY, ), ) def get_update_model_class(self): return UpdateDrgDetails def update_resource(self): update_details = self.get_update_model() return oci_wait_utils.call_and_wait( call_fn=self.client.update_drg, call_fn_args=(), call_fn_kwargs=dict( drg_id=self.module.params.get("drg_id"), update_drg_details=update_details, ), waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY, operation=oci_common_utils.UPDATE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=self.get_wait_for_states_for_operation( oci_common_utils.UPDATE_OPERATION_KEY, ), ) def delete_resource(self): return oci_wait_utils.call_and_wait( call_fn=self.client.delete_drg, call_fn_args=(), call_fn_kwargs=dict(drg_id=self.module.params.get("drg_id"),), waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY, operation=oci_common_utils.DELETE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=self.get_wait_for_states_for_operation( oci_common_utils.DELETE_OPERATION_KEY, ), ) DrgHelperCustom = get_custom_class("DrgHelperCustom") class 
ResourceHelper(DrgHelperCustom, DrgHelperGen): pass def main(): module_args = oci_common_utils.get_common_arg_spec( supports_create=True, supports_wait=True ) module_args.update( dict( compartment_id=dict(type="str"), defined_tags=dict(type="dict"), display_name=dict(aliases=["name"], type="str"), freeform_tags=dict(type="dict"), drg_id=dict(aliases=["id"], type="str"), default_drg_route_tables=dict( type="dict", options=dict( vcn=dict(type="str"), ipsec_tunnel=dict(type="str"), virtual_circuit=dict(type="str"), remote_peering_connection=dict(type="str"), ), ), state=dict(type="str", default="present", choices=["present", "absent"]), ) ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if not HAS_OCI_PY_SDK: module.fail_json(msg="oci python sdk required for this module.") resource_helper = ResourceHelper( module=module, resource_type="drg", service_client_class=VirtualNetworkClient, namespace="core", ) result = dict(changed=False) if resource_helper.is_delete_using_name(): result = resource_helper.delete_using_name() elif resource_helper.is_delete(): result = resource_helper.delete() elif resource_helper.is_update_using_name(): result = resource_helper.update_using_name() elif resource_helper.is_update(): result = resource_helper.update() elif resource_helper.is_create(): result = resource_helper.create() module.exit_json(**result) if __name__ == "__main__": main()
41.076446
160
0.632866
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], "supported_by": "community", } DOCUMENTATION = """ --- module: oci_network_drg short_description: Manage a Drg resource in Oracle Cloud Infrastructure description: - This module allows the user to create, update and delete a Drg resource in Oracle Cloud Infrastructure - For I(state=present), creates a new dynamic routing gateway (DRG) in the specified compartment. For more information, see L(Dynamic Routing Gateways (DRGs),https://docs.cloud.oracle.com/iaas/Content/Network/Tasks/managingDRGs.htm). - For the purposes of access control, you must provide the L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment where you want the DRG to reside. Notice that the DRG doesn't have to be in the same compartment as the VCN, the DRG attachment, or other Networking Service components. If you're not sure which compartment to use, put the DRG in the same compartment as the VCN. For more information about compartments and access control, see L(Overview of the IAM Service,https://docs.cloud.oracle.com/iaas/Content/Identity/Concepts/overview.htm). For information about OCIDs, see L(Resource Identifiers,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm). - "You may optionally specify a *display name* for the DRG, otherwise a default is provided. It does not have to be unique, and you can change it. Avoid entering confidential information." - "This resource has the following action operations in the M(oracle.oci.oci_network_drg_actions) module: change_compartment, get_all_drg_attachments, upgrade." version_added: "2.9.0" author: Oracle (@oracle) options: compartment_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment to contain the DRG. - Required for create using I(state=present). 
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set. - Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set. type: str defined_tags: description: - Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`" - This parameter is updatable. type: dict display_name: description: - A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information. - Required for create, update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set. - This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set. type: str aliases: ["name"] freeform_tags: description: - Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Department\\": \\"Finance\\"}`" - This parameter is updatable. type: dict drg_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the DRG. - Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set. - Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set. type: str aliases: ["id"] default_drg_route_tables: description: - "" - This parameter is updatable. type: dict suboptions: vcn: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the default DRG route table to be assigned to DRG attachments of type VCN on creation. - This parameter is updatable. 
type: str ipsec_tunnel: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the default DRG route table assigned to DRG attachments of type IPSEC_TUNNEL on creation. - This parameter is updatable. type: str virtual_circuit: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the default DRG route table to be assigned to DRG attachments of type VIRTUAL_CIRCUIT on creation. - This parameter is updatable. type: str remote_peering_connection: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the default DRG route table to be assigned to DRG attachments of type REMOTE_PEERING_CONNECTION on creation. - This parameter is updatable. type: str state: description: - The state of the Drg. - Use I(state=present) to create or update a Drg. - Use I(state=absent) to delete a Drg. type: str required: false default: 'present' choices: ["present", "absent"] extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ] """ EXAMPLES = """ - name: Create drg oci_network_drg: # required compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx" # optional defined_tags: {'Operations': {'CostCenter': 'US'}} display_name: display_name_example freeform_tags: {'Department': 'Finance'} - name: Update drg oci_network_drg: # required drg_id: "ocid1.drg.oc1..xxxxxxEXAMPLExxxxxx" # optional defined_tags: {'Operations': {'CostCenter': 'US'}} display_name: display_name_example freeform_tags: {'Department': 'Finance'} default_drg_route_tables: # optional vcn: vcn_example ipsec_tunnel: ipsec_tunnel_example virtual_circuit: virtual_circuit_example remote_peering_connection: remote_peering_connection_example - name: Update drg using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set) oci_network_drg: # required compartment_id: 
"ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx" display_name: display_name_example # optional defined_tags: {'Operations': {'CostCenter': 'US'}} freeform_tags: {'Department': 'Finance'} default_drg_route_tables: # optional vcn: vcn_example ipsec_tunnel: ipsec_tunnel_example virtual_circuit: virtual_circuit_example remote_peering_connection: remote_peering_connection_example - name: Delete drg oci_network_drg: # required drg_id: "ocid1.drg.oc1..xxxxxxEXAMPLExxxxxx" state: absent - name: Delete drg using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set) oci_network_drg: # required compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx" display_name: display_name_example state: absent """ RETURN = """ drg: description: - Details of the Drg resource acted upon by the current operation returned: on success type: complex contains: compartment_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment containing the DRG. returned: on success type: str sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx" defined_tags: description: - Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`" returned: on success type: dict sample: {'Operations': {'CostCenter': 'US'}} display_name: description: - A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information. returned: on success type: str sample: display_name_example freeform_tags: description: - Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). 
- "Example: `{\\"Department\\": \\"Finance\\"}`" returned: on success type: dict sample: {'Department': 'Finance'} id: description: - The DRG's Oracle ID (L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)). returned: on success type: str sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx" lifecycle_state: description: - The DRG's current state. returned: on success type: str sample: PROVISIONING time_created: description: - The date and time the DRG was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339). - "Example: `2016-08-25T21:10:29.600Z`" returned: on success type: str sample: "2013-10-20T19:20:30+01:00" default_drg_route_tables: description: - "" returned: on success type: complex contains: vcn: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the default DRG route table to be assigned to DRG attachments of type VCN on creation. returned: on success type: str sample: vcn_example ipsec_tunnel: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the default DRG route table assigned to DRG attachments of type IPSEC_TUNNEL on creation. returned: on success type: str sample: ipsec_tunnel_example virtual_circuit: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the default DRG route table to be assigned to DRG attachments of type VIRTUAL_CIRCUIT on creation. returned: on success type: str sample: virtual_circuit_example remote_peering_connection: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the default DRG route table to be assigned to DRG attachments of type REMOTE_PEERING_CONNECTION on creation. 
returned: on success type: str sample: remote_peering_connection_example default_export_drg_route_distribution_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of this DRG's default export route distribution for the DRG attachments. returned: on success type: str sample: "ocid1.defaultexportdrgroutedistribution.oc1..xxxxxxEXAMPLExxxxxx" sample: { "compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx", "defined_tags": {'Operations': {'CostCenter': 'US'}}, "display_name": "display_name_example", "freeform_tags": {'Department': 'Finance'}, "id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx", "lifecycle_state": "PROVISIONING", "time_created": "2013-10-20T19:20:30+01:00", "default_drg_route_tables": { "vcn": "vcn_example", "ipsec_tunnel": "ipsec_tunnel_example", "virtual_circuit": "virtual_circuit_example", "remote_peering_connection": "remote_peering_connection_example" }, "default_export_drg_route_distribution_id": "ocid1.defaultexportdrgroutedistribution.oc1..xxxxxxEXAMPLExxxxxx" } """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.oracle.oci.plugins.module_utils import ( oci_common_utils, oci_wait_utils, ) from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import ( OCIResourceHelperBase, get_custom_class, ) try: from oci.core import VirtualNetworkClient from oci.core.models import CreateDrgDetails from oci.core.models import UpdateDrgDetails HAS_OCI_PY_SDK = True except ImportError: HAS_OCI_PY_SDK = False class DrgHelperGen(OCIResourceHelperBase): def get_possible_entity_types(self): return super(DrgHelperGen, self).get_possible_entity_types() + [ "drg", "drgs", "coredrg", "coredrgs", "drgresource", "drgsresource", "core", ] def get_module_resource_id_param(self): return "drg_id" def get_module_resource_id(self): return self.module.params.get("drg_id") def get_get_fn(self): return self.client.get_drg def get_resource(self): return 
oci_common_utils.call_with_backoff( self.client.get_drg, drg_id=self.module.params.get("drg_id"), ) def get_required_kwargs_for_list(self): required_list_method_params = [ "compartment_id", ] return dict( (param, self.module.params[param]) for param in required_list_method_params ) def get_optional_kwargs_for_list(self): return dict() def list_resources(self): required_kwargs = self.get_required_kwargs_for_list() optional_kwargs = self.get_optional_kwargs_for_list() kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs) return oci_common_utils.list_all_resources(self.client.list_drgs, **kwargs) def get_create_model_class(self): return CreateDrgDetails def create_resource(self): create_details = self.get_create_model() return oci_wait_utils.call_and_wait( call_fn=self.client.create_drg, call_fn_args=(), call_fn_kwargs=dict(create_drg_details=create_details,), waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY, operation=oci_common_utils.CREATE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=self.get_wait_for_states_for_operation( oci_common_utils.CREATE_OPERATION_KEY, ), ) def get_update_model_class(self): return UpdateDrgDetails def update_resource(self): update_details = self.get_update_model() return oci_wait_utils.call_and_wait( call_fn=self.client.update_drg, call_fn_args=(), call_fn_kwargs=dict( drg_id=self.module.params.get("drg_id"), update_drg_details=update_details, ), waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY, operation=oci_common_utils.UPDATE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=self.get_wait_for_states_for_operation( oci_common_utils.UPDATE_OPERATION_KEY, ), ) def delete_resource(self): return oci_wait_utils.call_and_wait( call_fn=self.client.delete_drg, call_fn_args=(), call_fn_kwargs=dict(drg_id=self.module.params.get("drg_id"),), waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY, 
operation=oci_common_utils.DELETE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=self.get_wait_for_states_for_operation( oci_common_utils.DELETE_OPERATION_KEY, ), ) DrgHelperCustom = get_custom_class("DrgHelperCustom") class ResourceHelper(DrgHelperCustom, DrgHelperGen): pass def main(): module_args = oci_common_utils.get_common_arg_spec( supports_create=True, supports_wait=True ) module_args.update( dict( compartment_id=dict(type="str"), defined_tags=dict(type="dict"), display_name=dict(aliases=["name"], type="str"), freeform_tags=dict(type="dict"), drg_id=dict(aliases=["id"], type="str"), default_drg_route_tables=dict( type="dict", options=dict( vcn=dict(type="str"), ipsec_tunnel=dict(type="str"), virtual_circuit=dict(type="str"), remote_peering_connection=dict(type="str"), ), ), state=dict(type="str", default="present", choices=["present", "absent"]), ) ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if not HAS_OCI_PY_SDK: module.fail_json(msg="oci python sdk required for this module.") resource_helper = ResourceHelper( module=module, resource_type="drg", service_client_class=VirtualNetworkClient, namespace="core", ) result = dict(changed=False) if resource_helper.is_delete_using_name(): result = resource_helper.delete_using_name() elif resource_helper.is_delete(): result = resource_helper.delete() elif resource_helper.is_update_using_name(): result = resource_helper.update_using_name() elif resource_helper.is_update(): result = resource_helper.update() elif resource_helper.is_create(): result = resource_helper.create() module.exit_json(**result) if __name__ == "__main__": main()
true
true
f72c8559dfd7a6014d9c69e946707586ad068801
2,817
py
Python
src/dashboard/pages/visualization/visualization.py
ddlatumalea/disease_and_life
aa8c84fdd4a0b41bc0ee275538ac70a362eb26ba
[ "Apache-2.0" ]
null
null
null
src/dashboard/pages/visualization/visualization.py
ddlatumalea/disease_and_life
aa8c84fdd4a0b41bc0ee275538ac70a362eb26ba
[ "Apache-2.0" ]
null
null
null
src/dashboard/pages/visualization/visualization.py
ddlatumalea/disease_and_life
aa8c84fdd4a0b41bc0ee275538ac70a362eb26ba
[ "Apache-2.0" ]
null
null
null
from pathlib import Path import panel as pn import pandas as pd import plotly.express as px from models.pages import Page from models.utils.paths import get_prepared_data_path, get_standardized_data_file from dashboard.widgets import heatmap PREPARED_DATA_DIR = get_prepared_data_path() PREPARED_DATA_FILE = get_standardized_data_file() COLUMNS = ['non-communicable chronic disease [deaths]', 'cancer [deaths]', 'cardiovascular disease [deaths]', 'diabetes mellitus [deaths]', 'chronic respiratory diseases [deaths]', 'diseases of digestive system [deaths]', 'life expectancy [age]'] def get_correlation_heatmap(df, columns): corr = df[columns].corr() z = corr.values.round(decimals=2) x = corr.index y = corr.index return heatmap(z, x, y, labels=dict(color='Correlation')) def get_line_plot(df, x_col, y_col, index, title, width=500): if width is None: fig = px.line(df, x=x_col, y=y_col, color=index, title=title) return pn.pane.Plotly(fig) else: fig = px.line(df, x=x_col, y=y_col, color=index, title=title, width=width) return pn.pane.Plotly(fig) data = pd.read_csv(Path(PREPARED_DATA_DIR, PREPARED_DATA_FILE)) df = data[data['sex'] == 3] class VisualizationPage(Page): def __init__(self): super().__init__() self.df = df self.checkbutton = pn.widgets.CheckButtonGroup(name='Countries', value=['Netherlands'], options=['Netherlands', 'Japan', 'Canada']) self.pane = pn.Column(self.checkbutton, self.get_plot(self.checkbutton)) self.button = pn.widgets.Button(name='Visualization') self.checkbutton.param.watch(self.update, 'value') def get_plot(self, checkbutton): gspec = pn.GridSpec(ncols=2, nrows=4, width=1200, height=1800) selection = df.loc[df['country'].isin(checkbutton.value)] # life expectancy plot life_exp_plot = pn.pane.Plotly( px.line(selection, x='year', y='life expectancy [age]', color='country', title='life expectancy')) # plots about disease plots = [] for col in COLUMNS[:-1]: plots.append(pn.pane.Plotly( px.line(selection, x='year', y=col, labels={col: 'Deaths per 100.000 
people'}, color='country', title=col.replace('[deaths]', '')))) gspec[0, :] = life_exp_plot gspec[1, 0] = plots[0] gspec[1, 1] = plots[1] gspec[2, 0] = plots[2] gspec[2, 1] = plots[3] gspec[3, 0] = plots[4] gspec[3, 1] = plots[5] return gspec def update(self, event): self.pane[1] = self.get_plot(self.checkbutton) def get_contents(self): return self.pane, self.button
32.011364
111
0.624778
from pathlib import Path import panel as pn import pandas as pd import plotly.express as px from models.pages import Page from models.utils.paths import get_prepared_data_path, get_standardized_data_file from dashboard.widgets import heatmap PREPARED_DATA_DIR = get_prepared_data_path() PREPARED_DATA_FILE = get_standardized_data_file() COLUMNS = ['non-communicable chronic disease [deaths]', 'cancer [deaths]', 'cardiovascular disease [deaths]', 'diabetes mellitus [deaths]', 'chronic respiratory diseases [deaths]', 'diseases of digestive system [deaths]', 'life expectancy [age]'] def get_correlation_heatmap(df, columns): corr = df[columns].corr() z = corr.values.round(decimals=2) x = corr.index y = corr.index return heatmap(z, x, y, labels=dict(color='Correlation')) def get_line_plot(df, x_col, y_col, index, title, width=500): if width is None: fig = px.line(df, x=x_col, y=y_col, color=index, title=title) return pn.pane.Plotly(fig) else: fig = px.line(df, x=x_col, y=y_col, color=index, title=title, width=width) return pn.pane.Plotly(fig) data = pd.read_csv(Path(PREPARED_DATA_DIR, PREPARED_DATA_FILE)) df = data[data['sex'] == 3] class VisualizationPage(Page): def __init__(self): super().__init__() self.df = df self.checkbutton = pn.widgets.CheckButtonGroup(name='Countries', value=['Netherlands'], options=['Netherlands', 'Japan', 'Canada']) self.pane = pn.Column(self.checkbutton, self.get_plot(self.checkbutton)) self.button = pn.widgets.Button(name='Visualization') self.checkbutton.param.watch(self.update, 'value') def get_plot(self, checkbutton): gspec = pn.GridSpec(ncols=2, nrows=4, width=1200, height=1800) selection = df.loc[df['country'].isin(checkbutton.value)] life_exp_plot = pn.pane.Plotly( px.line(selection, x='year', y='life expectancy [age]', color='country', title='life expectancy')) plots = [] for col in COLUMNS[:-1]: plots.append(pn.pane.Plotly( px.line(selection, x='year', y=col, labels={col: 'Deaths per 100.000 people'}, color='country', 
title=col.replace('[deaths]', '')))) gspec[0, :] = life_exp_plot gspec[1, 0] = plots[0] gspec[1, 1] = plots[1] gspec[2, 0] = plots[2] gspec[2, 1] = plots[3] gspec[3, 0] = plots[4] gspec[3, 1] = plots[5] return gspec def update(self, event): self.pane[1] = self.get_plot(self.checkbutton) def get_contents(self): return self.pane, self.button
true
true
f72c85a0942c0540d2abee2d9180bd484b5864a7
6,141
py
Python
main.py
akashrchandran/pokeowo
0b2621494ef56f350239817546b843814fe3448e
[ "MIT" ]
null
null
null
main.py
akashrchandran/pokeowo
0b2621494ef56f350239817546b843814fe3448e
[ "MIT" ]
null
null
null
main.py
akashrchandran/pokeowo
0b2621494ef56f350239817546b843814fe3448e
[ "MIT" ]
null
null
null
import datetime import json import multiprocessing import os import random import re import time import discum version = 'v0.01' config_path = 'data/config.json' logo = f''' ###### ### ### ## ####### ### ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##### ## ## #### #### ## ## ## # ## ## ## ## ## ## ## ## ## ## ## ####### ## ## ## ## ## ## ## ## ## ## ## ### ### ## ## #### ### ### ## ####### ### ## ## ### ~ Pokétwo Autocatcher {version} ''' num_pokemon = 0 shiny = 0 legendary = 0 mythical = 0 poketwo_id = '716390085896962058' def auto_config(): global user_token, channel_id if not os.path.exists(config_path): with open(config_path, "a") as file: auth_token = input("Enter you Discord auth token: ") channel_id = input("Enter the preferred Channel ID for spamming and catching: ") file.write("{\n") file.write(f' "user_token" : "{auth_token}",\n') file.write(f' "channel_id" : "{channel_id}"\n') file.write("}") os.system('cls' if os.name=='nt' else 'clear') with open(config_path,'r') as file: info = json.loads(file.read()) user_token = info['user_token'] channel_id = info['channel_id'] with open('data/pokemon.txt', 'r', encoding='utf8') as file: pokemon_list = file.read() with open('data/legendary.txt','r') as file: legendary_list = file.read() with open('data/mythical.txt','r') as file: mythical_list = file.read() auto_config() print(logo) bot = discum.Client(token=user_token, log=False) def solve(message): hint = [message[i] for i in range(15, len(message) - 1) if message[i] != '\\'] hint_string = ''.join(hint) return re.findall( '^' + hint_string.replace('_', '.') + '$', pokemon_list, re.MULTILINE ) def spam(): while True: random_number = random.getrandbits(128) bot.sendMessage(channel_id, random_number) intervals = [2.0,2.1,2.2,2.3,2.4,2.5] time.sleep(random.choice(intervals)) def start_spam(): new_process = multiprocessing.Process(target=spam) new_process.start() return new_process def stop(process): process.terminate() 
def log(string): now = datetime.datetime.now() current_time = now.strftime('%H:%M:%S') print(f'[{current_time}]', string) @bot.gateway.command def on_ready(resp): if resp.event.ready_supplemental: user = bot.gateway.session.user log(f'Logged into account: {user["username"]}#{user["discriminator"]}') @bot.gateway.command def on_message(resp): global spam_process if resp.event.message: m = resp.parsed.auto() if m['channel_id'] == channel_id and m['author']['id'] == poketwo_id: if m['embeds']: embed_title = m['embeds'][0]['title'] if 'wild pokémon has appeared!' in embed_title: stop(spam_process) time.sleep(2) bot.sendMessage(channel_id, '<@716390085896962058> h') elif "Congratulations" in embed_title: embed_content = m['embeds'][0]['description'] if 'now level' in embed_content: stop(spam_process) split = embed_content.split(' ') a = embed_content.count(' ') level = int(split[a].replace('!', '')) if level == 100: #wait will implement in next update pass spam_process = start_spam() else: content = m['content'] if 'The pokémon is ' in content: if len(solve(content)) == 0: log('Pokemon not found.') else: for i in solve(content): stop(spam_process) time.sleep(2) bot.sendMessage(channel_id, f'<@716390085896962058> c {i}') time.sleep(2) spam_process = start_spam() elif 'Congratulations' in content: global shiny global legendary global num_pokemon global mythical num_pokemon += 1 split = content.split(' ') pokemon = split[7].replace('!','') if 'These colors seem unusual...' in content: shiny += 1 log(f'A shiny Pokémon was caught! Pokémon: {pokemon}') log(f'Shiny: {shiny} | Legendary: {legendary} | Mythical: {mythical}') elif re.findall( f'^{pokemon}$', legendary_list, re.MULTILINE ): legendary += 1 log(f'A legendary Pokémon was caught! Pokémon: {pokemon}') log(f'Shiny: {shiny} | Legendary: {legendary} | Mythical: {mythical}') elif re.findall(f'^{pokemon}$', mythical_list, re.MULTILINE): mythical += 1 log(f'A mythical Pokémon was caught! 
Pokémon: {pokemon}') log(f'Shiny: {shiny} | Legendary: {legendary} | Mythical: {mythical}') else: print(f'Total Pokémon Caught: {num_pokemon}') elif 'human' in content: stop(spam_process) log('Captcha Detected; Autocatcher Paused. Press enter to restart.') input() bot.sendMessage(channel_id, '<@716390085896962058> h') if __name__ == '__main__': print('\nEvent Log:') spam_process = start_spam() bot.gateway.run(auto_reconnect=True)
37.218182
96
0.485752
import datetime import json import multiprocessing import os import random import re import time import discum version = 'v0.01' config_path = 'data/config.json' logo = f''' ###### ### ### ## ####### ### ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##### ## ## #### #### ## ## ## # ## ## ## ## ## ## ## ## ## ## ## ####### ## ## ## ## ## ## ## ## ## ## ## ### ### ## ## #### ### ### ## ####### ### ## ## ### ~ Pokétwo Autocatcher {version} ''' num_pokemon = 0 shiny = 0 legendary = 0 mythical = 0 poketwo_id = '716390085896962058' def auto_config(): global user_token, channel_id if not os.path.exists(config_path): with open(config_path, "a") as file: auth_token = input("Enter you Discord auth token: ") channel_id = input("Enter the preferred Channel ID for spamming and catching: ") file.write("{\n") file.write(f' "user_token" : "{auth_token}",\n') file.write(f' "channel_id" : "{channel_id}"\n') file.write("}") os.system('cls' if os.name=='nt' else 'clear') with open(config_path,'r') as file: info = json.loads(file.read()) user_token = info['user_token'] channel_id = info['channel_id'] with open('data/pokemon.txt', 'r', encoding='utf8') as file: pokemon_list = file.read() with open('data/legendary.txt','r') as file: legendary_list = file.read() with open('data/mythical.txt','r') as file: mythical_list = file.read() auto_config() print(logo) bot = discum.Client(token=user_token, log=False) def solve(message): hint = [message[i] for i in range(15, len(message) - 1) if message[i] != '\\'] hint_string = ''.join(hint) return re.findall( '^' + hint_string.replace('_', '.') + '$', pokemon_list, re.MULTILINE ) def spam(): while True: random_number = random.getrandbits(128) bot.sendMessage(channel_id, random_number) intervals = [2.0,2.1,2.2,2.3,2.4,2.5] time.sleep(random.choice(intervals)) def start_spam(): new_process = multiprocessing.Process(target=spam) new_process.start() return new_process def stop(process): process.terminate() 
def log(string): now = datetime.datetime.now() current_time = now.strftime('%H:%M:%S') print(f'[{current_time}]', string) @bot.gateway.command def on_ready(resp): if resp.event.ready_supplemental: user = bot.gateway.session.user log(f'Logged into account: {user["username"]}#{user["discriminator"]}') @bot.gateway.command def on_message(resp): global spam_process if resp.event.message: m = resp.parsed.auto() if m['channel_id'] == channel_id and m['author']['id'] == poketwo_id: if m['embeds']: embed_title = m['embeds'][0]['title'] if 'wild pokémon has appeared!' in embed_title: stop(spam_process) time.sleep(2) bot.sendMessage(channel_id, '<@716390085896962058> h') elif "Congratulations" in embed_title: embed_content = m['embeds'][0]['description'] if 'now level' in embed_content: stop(spam_process) split = embed_content.split(' ') a = embed_content.count(' ') level = int(split[a].replace('!', '')) if level == 100: pass spam_process = start_spam() else: content = m['content'] if 'The pokémon is ' in content: if len(solve(content)) == 0: log('Pokemon not found.') else: for i in solve(content): stop(spam_process) time.sleep(2) bot.sendMessage(channel_id, f'<@716390085896962058> c {i}') time.sleep(2) spam_process = start_spam() elif 'Congratulations' in content: global shiny global legendary global num_pokemon global mythical num_pokemon += 1 split = content.split(' ') pokemon = split[7].replace('!','') if 'These colors seem unusual...' in content: shiny += 1 log(f'A shiny Pokémon was caught! Pokémon: {pokemon}') log(f'Shiny: {shiny} | Legendary: {legendary} | Mythical: {mythical}') elif re.findall( f'^{pokemon}$', legendary_list, re.MULTILINE ): legendary += 1 log(f'A legendary Pokémon was caught! Pokémon: {pokemon}') log(f'Shiny: {shiny} | Legendary: {legendary} | Mythical: {mythical}') elif re.findall(f'^{pokemon}$', mythical_list, re.MULTILINE): mythical += 1 log(f'A mythical Pokémon was caught! 
Pokémon: {pokemon}') log(f'Shiny: {shiny} | Legendary: {legendary} | Mythical: {mythical}') else: print(f'Total Pokémon Caught: {num_pokemon}') elif 'human' in content: stop(spam_process) log('Captcha Detected; Autocatcher Paused. Press enter to restart.') input() bot.sendMessage(channel_id, '<@716390085896962058> h') if __name__ == '__main__': print('\nEvent Log:') spam_process = start_spam() bot.gateway.run(auto_reconnect=True)
true
true
f72c86f6141b9e5ce714030b5766cf7cff25194c
1,327
py
Python
isolated_functions.py
wonabru/chainnet
f8ec1e2b580af837cba3322ffe69b95156b1b9a1
[ "MIT" ]
5
2019-04-20T18:54:55.000Z
2019-08-23T09:17:20.000Z
isolated_functions.py
wonabru/chainnet
f8ec1e2b580af837cba3322ffe69b95156b1b9a1
[ "MIT" ]
null
null
null
isolated_functions.py
wonabru/chainnet
f8ec1e2b580af837cba3322ffe69b95156b1b9a1
[ "MIT" ]
null
null
null
import ast import re import pickle from Crypto.PublicKey import RSA from base64 import b64decode,b64encode from tkinter import messagebox def str2obj(s): return ast.literal_eval(s.replace('true', 'True').replace('false', 'False')) def trim_name(name): return name.replace('@','').replace('#','') def remove_special_char(in_seq): """ Function is responsible for normalize strings to defined format (UPPERCASE with '_' replacing any special character) :param in_seq: list of strings :return: list of strings """ _sub = re.sub(" {1,5}", "_", in_seq.strip()).lower() _chars = ['*', '\\', '&', '/', '+'] for x in _chars: _sub = _sub.replace(x, '_') return _sub class CFinish: finish = False def serialize(message): return pickle.dumps(message) def unserialize(ser_message): return pickle.loads(ser_message) def encode(n): b = bytearray() while n: b.append(n & 0xFF) n >>= 8 return b64encode(b).decode('utf-8') def decode(s): b = bytearray(b64decode(s.encode('utf-8'))) # in case you're passing in a bytes/str return sum((1 << (bi * 8)) * bb for (bi, bb) in enumerate(b)) class rsa_temp: key = RSA.generate(1024) def showError(ex): if len(ex.args) > 1: _title, _err = ex.args else: _title, _err = 'Other error', ex.args messagebox.showerror(title=str(_title), message=str(_err))
21.063492
117
0.679729
import ast import re import pickle from Crypto.PublicKey import RSA from base64 import b64decode,b64encode from tkinter import messagebox def str2obj(s): return ast.literal_eval(s.replace('true', 'True').replace('false', 'False')) def trim_name(name): return name.replace('@','').replace('#','') def remove_special_char(in_seq): _sub = re.sub(" {1,5}", "_", in_seq.strip()).lower() _chars = ['*', '\\', '&', '/', '+'] for x in _chars: _sub = _sub.replace(x, '_') return _sub class CFinish: finish = False def serialize(message): return pickle.dumps(message) def unserialize(ser_message): return pickle.loads(ser_message) def encode(n): b = bytearray() while n: b.append(n & 0xFF) n >>= 8 return b64encode(b).decode('utf-8') def decode(s): b = bytearray(b64decode(s.encode('utf-8'))) return sum((1 << (bi * 8)) * bb for (bi, bb) in enumerate(b)) class rsa_temp: key = RSA.generate(1024) def showError(ex): if len(ex.args) > 1: _title, _err = ex.args else: _title, _err = 'Other error', ex.args messagebox.showerror(title=str(_title), message=str(_err))
true
true
f72c87804c39b074934faddfa6a15a81e1a36cb8
4,587
py
Python
robot/hsin_agent.py
kanokkorn/watering_robot
b39fed532519e2b89a9f1ae1a3d1b72bb550cc1b
[ "MIT" ]
5
2020-04-01T13:55:12.000Z
2022-03-04T03:32:25.000Z
robot/hsin_agent.py
kanokkorn/watering_robot
b39fed532519e2b89a9f1ae1a3d1b72bb550cc1b
[ "MIT" ]
7
2019-12-21T10:26:40.000Z
2021-06-25T15:15:05.000Z
robot/hsin_agent.py
kanokkorn/watering_robot
b39fed532519e2b89a9f1ae1a3d1b72bb550cc1b
[ "MIT" ]
1
2020-06-03T07:41:21.000Z
2020-06-03T07:41:21.000Z
# import modules from gps3 import gps3 import serial import math import time import csv import os # setup gps socket ser = serial.Serial("/dev/ttyUSB0", 9600) gps_socket = gps3.GPSDSocket() data_stream = gps3.DataStream() gps_socket.connect() gps_socket.watch() # read csv files def track(): # prefix parameter distance = 10 earth_radius = 6371e3 k = 1 with open("robot/lat_lon.csv", newline="") as f: read = csv.reader(f) for gps_row in read: # print(gps_row) # check if gps read properly try: lat_b = float(gps_row[0]) # unpack list to float lon_b = float(gps_row[1]) except IndexError: os.system("clear") raise Exception("Indexing error...Program terminated.") ser.write(str.encode("S")) break # main function for new_data in gps_socket: while new_data and distance > 5: data_stream.unpack(new_data) # print('Altitude = ', data_stream.TPV['lat'], 'Latitude = ', data_stream.TPV['lon']) if (data_stream.TPV["lat"] == "n/a") or ( data_stream.TPV["lon"] != "n/a" ): pass if (data_stream.TPV["lat"] != "n/a") or ( data_stream.TPV["lon"] != "n/a" ): try: in_lat = float(data_stream.TPV["lat"]) except ValueError: print("lat N/A value") in_lat = 10.712709 try: in_lon = float(data_stream.TPV["lon"]) except ValueError: print("lon N/A value") in_lon = 99.378788 lat_A = math.radians(in_lat) lat_B = math.radians(lat_b) del_lat = math.radians(lat_b - (in_lat)) del_lon = math.radians(lon_b - (in_lon)) a = (math.sin(del_lat / 2) * math.sin(del_lat / 2)) + math.cos( lat_A ) * math.cos(lat_B) * ( math.sin(del_lon / 2) * math.sin(del_lon / 2) ) # check if equal zero try: c = 2 * math.atan2(math.sqrt(a), math.sqrt((1 - a))) except ValueError as identifier: print("No Value") distance = earth_radius * c # os.system('clear') print("Distance: ", distance, " Status : Running") ser.write(str.encode("M")) else: ser.write(str.encode("S")) os.system("clear") print("\n==== Checkpoint ", k, " start ====") time.sleep(0.3) print("\nDistance: ", distance, " Status : Stop") time.sleep(0.3) print("Serial_STOP") 
time.sleep(0.3) for target in range(10): ser.write(str.encode("O")) print("watering" + "." * target, end="\r") ser.write(str.encode("P")) time.sleep(0.8) time.sleep(0.3) print("\nClassification palm Tree :" + str(k)) time.sleep(0.3) # classify_edit.main() for target in range(10): print("writing csv files" + "." * target, end="\r") time.sleep(0.8) distance = 10 in_lat = lat_b in_lon = lon_b print("\n==== Checkpoint", k, " done ====\n") k += 1 time.sleep(1) print("Start Moving to next checkpoint\n") time.sleep(1) else: ser.write(str.encode("S")) os.system("clear") print("\n==== End of lines ====") time.sleep(1) print("\nFinished\n") if __name__ == "__main__": try: track() except KeyboardInterrupt: print("Serial_STOP") ser.write(str.encode("S")) raise Exception("Interrupt...Program terminated.")
36.404762
105
0.419228
from gps3 import gps3 import serial import math import time import csv import os ser = serial.Serial("/dev/ttyUSB0", 9600) gps_socket = gps3.GPSDSocket() data_stream = gps3.DataStream() gps_socket.connect() gps_socket.watch() def track(): distance = 10 earth_radius = 6371e3 k = 1 with open("robot/lat_lon.csv", newline="") as f: read = csv.reader(f) for gps_row in read: lat_b = float(gps_row[0]) lon_b = float(gps_row[1]) except IndexError: os.system("clear") raise Exception("Indexing error...Program terminated.") ser.write(str.encode("S")) break for new_data in gps_socket: while new_data and distance > 5: data_stream.unpack(new_data) if (data_stream.TPV["lat"] == "n/a") or ( data_stream.TPV["lon"] != "n/a" ): pass if (data_stream.TPV["lat"] != "n/a") or ( data_stream.TPV["lon"] != "n/a" ): try: in_lat = float(data_stream.TPV["lat"]) except ValueError: print("lat N/A value") in_lat = 10.712709 try: in_lon = float(data_stream.TPV["lon"]) except ValueError: print("lon N/A value") in_lon = 99.378788 lat_A = math.radians(in_lat) lat_B = math.radians(lat_b) del_lat = math.radians(lat_b - (in_lat)) del_lon = math.radians(lon_b - (in_lon)) a = (math.sin(del_lat / 2) * math.sin(del_lat / 2)) + math.cos( lat_A ) * math.cos(lat_B) * ( math.sin(del_lon / 2) * math.sin(del_lon / 2) ) try: c = 2 * math.atan2(math.sqrt(a), math.sqrt((1 - a))) except ValueError as identifier: print("No Value") distance = earth_radius * c print("Distance: ", distance, " Status : Running") ser.write(str.encode("M")) else: ser.write(str.encode("S")) os.system("clear") print("\n==== Checkpoint ", k, " start ====") time.sleep(0.3) print("\nDistance: ", distance, " Status : Stop") time.sleep(0.3) print("Serial_STOP") time.sleep(0.3) for target in range(10): ser.write(str.encode("O")) print("watering" + "." * target, end="\r") ser.write(str.encode("P")) time.sleep(0.8) time.sleep(0.3) print("\nClassification palm Tree :" + str(k)) time.sleep(0.3) for target in range(10): print("writing csv files" + "." 
* target, end="\r") time.sleep(0.8) distance = 10 in_lat = lat_b in_lon = lon_b print("\n==== Checkpoint", k, " done ====\n") k += 1 time.sleep(1) print("Start Moving to next checkpoint\n") time.sleep(1) else: ser.write(str.encode("S")) os.system("clear") print("\n==== End of lines ====") time.sleep(1) print("\nFinished\n") if __name__ == "__main__": try: track() except KeyboardInterrupt: print("Serial_STOP") ser.write(str.encode("S")) raise Exception("Interrupt...Program terminated.")
true
true
f72c8854af948f34376eadc837477a9b431ff2c9
2,138
py
Python
app/core/radiofrequency/__init__.py
FHellmann/MLWTF
582c3505d638907a848d5a6c739ee99981300f17
[ "Apache-2.0" ]
null
null
null
app/core/radiofrequency/__init__.py
FHellmann/MLWTF
582c3505d638907a848d5a6c739ee99981300f17
[ "Apache-2.0" ]
null
null
null
app/core/radiofrequency/__init__.py
FHellmann/MLWTF
582c3505d638907a848d5a6c739ee99981300f17
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python """ Author: Fabio Hellmann <info@fabio-hellmann.de> This is a layer between the raw execution unit and the database. """ import logging from datetime import datetime from . import rf_rpi from .models import Protocol, Signal from ..gpio import RaspberryPi3 as GPIO_PI from app.database import db from app.database.models import DataSource, DataSourceType from app.database.converter import converter _LOGGER = logging.getLogger(__name__) class RfDatabase(object): def __init__(self): self.db = db def save_received(self, signal : Signal): return self.db.add_event(converter.unstructure(signal), DataSource.LOW_RADIO_FREQUENCY, DataSourceType.SENSOR) def save_send(self, signal : Signal): return self.db.add_event(converter.unstructure(signal), DataSource.LOW_RADIO_FREQUENCY, DataSourceType.ACTUATOR) def get_received_signals_since(self, since : datetime): result_events = self.db.get_events_by(DataSource.LOW_RADIO_FREQUENCY, DataSourceType.SENSOR, since) result = [] for event in result_events: result.append(converter.structure(event.data, Signal)) return result class RfController(object): def __init__(self): self._db = RfDatabase() self._tx_device = rf_rpi.Device(GPIO_PI.GPIO_17.value) self._tx_device.enable_tx() self._rx_device = rf_rpi.Device(GPIO_PI.GPIO_27.value) self._rx_device.enable_rx() self._rx_device.add_rx_listener(self._receive) def __del__(self): self._tx_device.cleanup() self._rx_device.cleanup() def get_received_signals_since(self, since : datetime): return self._db.get_received_signals_since(since) def send(self, signal : Signal): _LOGGER.info("Sending radiofrequency signal: " + str(signal)) success = self._tx_device.tx_code(signal) self._db.save_send(signal) return success def _receive(self, signal : Signal): _LOGGER.info("Receiving radiofrequency signal: " + str(signal)) self._db.save_received(signal) rf_controller = RfController()
30.985507
120
0.713751
import logging from datetime import datetime from . import rf_rpi from .models import Protocol, Signal from ..gpio import RaspberryPi3 as GPIO_PI from app.database import db from app.database.models import DataSource, DataSourceType from app.database.converter import converter _LOGGER = logging.getLogger(__name__) class RfDatabase(object): def __init__(self): self.db = db def save_received(self, signal : Signal): return self.db.add_event(converter.unstructure(signal), DataSource.LOW_RADIO_FREQUENCY, DataSourceType.SENSOR) def save_send(self, signal : Signal): return self.db.add_event(converter.unstructure(signal), DataSource.LOW_RADIO_FREQUENCY, DataSourceType.ACTUATOR) def get_received_signals_since(self, since : datetime): result_events = self.db.get_events_by(DataSource.LOW_RADIO_FREQUENCY, DataSourceType.SENSOR, since) result = [] for event in result_events: result.append(converter.structure(event.data, Signal)) return result class RfController(object): def __init__(self): self._db = RfDatabase() self._tx_device = rf_rpi.Device(GPIO_PI.GPIO_17.value) self._tx_device.enable_tx() self._rx_device = rf_rpi.Device(GPIO_PI.GPIO_27.value) self._rx_device.enable_rx() self._rx_device.add_rx_listener(self._receive) def __del__(self): self._tx_device.cleanup() self._rx_device.cleanup() def get_received_signals_since(self, since : datetime): return self._db.get_received_signals_since(since) def send(self, signal : Signal): _LOGGER.info("Sending radiofrequency signal: " + str(signal)) success = self._tx_device.tx_code(signal) self._db.save_send(signal) return success def _receive(self, signal : Signal): _LOGGER.info("Receiving radiofrequency signal: " + str(signal)) self._db.save_received(signal) rf_controller = RfController()
true
true
f72c886994bd9fb0a5722665191651370d918e92
2,908
py
Python
tests/riscv/APIs/State_force.py
Wlgen/force-riscv
9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8
[ "Apache-2.0" ]
111
2020-06-12T22:31:30.000Z
2022-03-19T03:45:20.000Z
tests/riscv/APIs/State_force.py
Wlgen/force-riscv
9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8
[ "Apache-2.0" ]
34
2020-06-12T20:23:40.000Z
2022-03-15T20:04:31.000Z
tests/riscv/APIs/State_force.py
Wlgen/force-riscv
9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8
[ "Apache-2.0" ]
32
2020-06-12T19:15:26.000Z
2022-02-20T11:38:31.000Z
# # Copyright (C) [2020] Futurewei Technologies, Inc. # # FORCE-RISCV is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES # OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. # See the License for the specific language governing permissions and # limitations under the License. # import RandomUtils from Enums import EStateElementDuplicateMode from State import State from base.Sequence import Sequence from riscv.EnvRISCV import EnvRISCV from riscv.GenThreadRISCV import GenThreadRISCV # This test attempts to add StateElements to a State. There is no direct # mechanism for retrieving the StateElements after they have been added, so # this test merely ensures the method calls don't crash or fail. 
class MainSequence(Sequence): def generate(self, **kargs): state = State() state.setDuplicateMode(EStateElementDuplicateMode.Replace) mem_start_addr = (RandomUtils.random64(0, 0xFFFFFFFFFFFF) >> 3) << 3 mem_val = RandomUtils.random64() state.addMemoryStateElement(mem_start_addr, 8, mem_val) mem_values = [] for _ in range(RandomUtils.random32(1, 64)): mem_values.append(RandomUtils.random32(0, 0xFF)) mem_start_addr = RandomUtils.random64(0, 0xFFFFFFFFFFFF) state.addMemoryStateElementsAsBytes(mem_start_addr, mem_values) gpr_name = "x%d" % RandomUtils.random32(0, 31) state.addRegisterStateElement(gpr_name, (RandomUtils.random64(),)) fp_reg_name = "D%d" % RandomUtils.random32(0, 31) state.addRegisterStateElement(fp_reg_name, (RandomUtils.random64(),)) state.addSystemRegisterStateElementByField("sstatus", "FS", 0x3) state.addVmContextStateElement("mstatus", "MPRV", 0x1) state.addPcStateElement(RandomUtils.random64(0, 0xFFFFFFFFFFFF)) # Test creating duplicate StateElements state.addVmContextStateElement("mstatus", "MPRV", 0x0) state.setDuplicateMode(EStateElementDuplicateMode.Ignore) state.addRegisterStateElement("sstatus", (RandomUtils.random64(),)) # Test merging two StateElements mem_start_addr = (RandomUtils.random64(0, 0xFFFFFFFFFFFF) >> 3) << 3 mem_val = RandomUtils.random32() state.addMemoryStateElement(mem_start_addr, 4, mem_val) mem_start_addr += 4 mem_values = [] for _ in range(4): mem_values.append(RandomUtils.random32(0, 0xFF)) state.addMemoryStateElementsAsBytes(mem_start_addr, mem_values) MainSequenceClass = MainSequence GenThreadClass = GenThreadRISCV EnvClass = EnvRISCV
38.263158
77
0.725241
import RandomUtils from Enums import EStateElementDuplicateMode from State import State from base.Sequence import Sequence from riscv.EnvRISCV import EnvRISCV from riscv.GenThreadRISCV import GenThreadRISCV class MainSequence(Sequence): def generate(self, **kargs): state = State() state.setDuplicateMode(EStateElementDuplicateMode.Replace) mem_start_addr = (RandomUtils.random64(0, 0xFFFFFFFFFFFF) >> 3) << 3 mem_val = RandomUtils.random64() state.addMemoryStateElement(mem_start_addr, 8, mem_val) mem_values = [] for _ in range(RandomUtils.random32(1, 64)): mem_values.append(RandomUtils.random32(0, 0xFF)) mem_start_addr = RandomUtils.random64(0, 0xFFFFFFFFFFFF) state.addMemoryStateElementsAsBytes(mem_start_addr, mem_values) gpr_name = "x%d" % RandomUtils.random32(0, 31) state.addRegisterStateElement(gpr_name, (RandomUtils.random64(),)) fp_reg_name = "D%d" % RandomUtils.random32(0, 31) state.addRegisterStateElement(fp_reg_name, (RandomUtils.random64(),)) state.addSystemRegisterStateElementByField("sstatus", "FS", 0x3) state.addVmContextStateElement("mstatus", "MPRV", 0x1) state.addPcStateElement(RandomUtils.random64(0, 0xFFFFFFFFFFFF)) # Test creating duplicate StateElements state.addVmContextStateElement("mstatus", "MPRV", 0x0) state.setDuplicateMode(EStateElementDuplicateMode.Ignore) state.addRegisterStateElement("sstatus", (RandomUtils.random64(),)) # Test merging two StateElements mem_start_addr = (RandomUtils.random64(0, 0xFFFFFFFFFFFF) >> 3) << 3 mem_val = RandomUtils.random32() state.addMemoryStateElement(mem_start_addr, 4, mem_val) mem_start_addr += 4 mem_values = [] for _ in range(4): mem_values.append(RandomUtils.random32(0, 0xFF)) state.addMemoryStateElementsAsBytes(mem_start_addr, mem_values) MainSequenceClass = MainSequence GenThreadClass = GenThreadRISCV EnvClass = EnvRISCV
true
true
f72c88bad07b64edf6012e96a4a6af0ebf4b41c8
12,698
py
Python
mai_version/trees/TILDEQueryScorer.py
joschout/tilde
1403b50842b83f2edd6b16b1fbe24b9bec2d0048
[ "Apache-2.0" ]
16
2019-03-06T06:11:33.000Z
2022-02-07T21:30:25.000Z
mai_version/trees/TILDEQueryScorer.py
joschout/tilde
1403b50842b83f2edd6b16b1fbe24b9bec2d0048
[ "Apache-2.0" ]
4
2019-10-08T14:48:23.000Z
2020-03-26T00:31:57.000Z
mai_version/trees/TILDEQueryScorer.py
krishnangovindraj/tilde
5243a02d92f375d56ffc49ab8c3d1a87e31e99b9
[ "Apache-2.0" ]
4
2019-08-14T05:40:47.000Z
2020-08-05T13:21:16.000Z
import math from typing import Iterable, Set, List, Optional import problog import time from problog.logic import And, Term from mai_version.classification.example_partitioning import ExamplePartitioner from mai_version.representation.TILDE_query import TILDEQuery from mai_version.representation.example import ExampleWrapper from mai_version.representation.example import Label from mai_version.trees.scoring import entropy, information_gain2 class QueryScoreInfo: """Wrapper around the information about best scoring query""" def __init__(self, best_query: TILDEQuery, score_of_best_query: float, examples_satisfying_best_query: Set[ExampleWrapper], examples_not_satisfying_best_query: Set[ExampleWrapper]): self.best_query = best_query # type: TILDEQuery self.score_of_best_query = score_of_best_query # type: float self.examples_satisfying_best_query = examples_satisfying_best_query # type: Set[ExampleWrapper] self.examples_not_satisfying_best_query = examples_not_satisfying_best_query # type: Set[ExampleWrapper] class TILDEQueryScorer: @staticmethod def get_best_refined_query(refined_queries: Iterable[TILDEQuery], examples: Set[ExampleWrapper], example_partitioner: ExamplePartitioner, possible_targets: List[Label], probabilistic: Optional[bool] = False) -> QueryScoreInfo: # Tuple[Optional[TILDEQuery], float, Optional[Set[ExampleWrapper]], Optional[Set[ExampleWrapper]]]: best_query = None # type: Optional[TILDEQuery] score_best_query = - math.inf # type: float examples_satisfying_best_query = None # type: Optional[Set[ExampleWrapper]] examples_not_satisfying_best_query = None # type: Optional[Set[ExampleWrapper]] entropy_complete_set = entropy(examples, possible_targets) nb_of_examples_complete_set = len(examples) for q in refined_queries: # type: TILDEQuery print(q) # compute the score of the queries conj_of_tilde_query = q.to_conjunction() # type: And examples_satisfying_q, examples_not_satisfying_q = example_partitioner.get_examples_satisfying_query( examples, 
conj_of_tilde_query) # type: Set[ExampleWrapper] # examples_not_satisfying_q = examples - examples_satisfying_q # type: Set[ExampleWrapper] #TODO: no longer probabilistic! score = information_gain2(examples_satisfying_q, examples_not_satisfying_q, possible_targets, nb_of_examples_complete_set, entropy_complete_set) if score > score_best_query: best_query = q score_best_query = score examples_satisfying_best_query = examples_satisfying_q examples_not_satisfying_best_query = examples_not_satisfying_q return QueryScoreInfo(best_query, score_best_query, examples_satisfying_best_query, examples_not_satisfying_best_query) class TILDEQueryScorer2: @staticmethod def get_best_refined_query(refined_queries: Iterable[TILDEQuery], examples: Set[ExampleWrapper], example_partitioner: ExamplePartitioner, possible_targets: List[Label], probabilistic: Optional[bool] = False) -> QueryScoreInfo: # Tuple[Optional[TILDEQuery], float, Optional[Set[ExampleWrapper]], Optional[Set[ExampleWrapper]]]: best_query = None # type: Optional[TILDEQuery] score_best_query = - math.inf # type: float # examples_satisfying_best_query = None # type: Optional[Set[ExampleWrapper]] # examples_not_satisfying_best_query = None # type: Optional[Set[ExampleWrapper]] entropy_complete_set = entropy(examples, possible_targets) nb_of_examples_complete_set = len(examples) # ided_queries = list(zip(range(0,len(refined_queries)), refined_queries)) entropy_dict = {label: 0 for label in possible_targets} query_entropy_dicts = [(entropy_dict.copy(), entropy_dict.copy()) for q in refined_queries] for clause_db_ex in examples: db_to_query = clause_db_ex.extend() # type: ClauseDB if clause_db_ex.classification_term is not None: db_to_query += clause_db_ex.classification_term for id, q in zip(range(0,len(refined_queries)), refined_queries): to_query = Term('q' + str(id)) db_to_query += Term('query')(to_query) db_to_query += (to_query << q.to_conjunction()) start_time = time.time() evaluatable = problog.get_evaluatable() 
mid_time1 = time.time() something = evaluatable.create_from(db_to_query, engine=example_partitioner.engine) mid_time2 = time.time() results = something.evaluate() end_time = time.time() example_partitioner.nb_partitions_calculated += 1 get_evaluatable_duration = mid_time1 - start_time example_partitioner.sum_get_evaluatable += get_evaluatable_duration structure_creation_duration = mid_time2 - mid_time1 example_partitioner.sum_structure_creation_duration += structure_creation_duration if structure_creation_duration > example_partitioner.max_structure_creation_duration: example_partitioner.max_structure_creation_duration = structure_creation_duration if structure_creation_duration < example_partitioner.min_structure_creation_duration: example_partitioner.min_structure_creation_duration = structure_creation_duration if structure_creation_duration < 0.000001: example_partitioner.nb_structure_creation_zero += 1 evalutation_duration = end_time - mid_time2 example_partitioner.sum_evaluation_duration += evalutation_duration if evalutation_duration > example_partitioner.max_evaluation_duration: example_partitioner.max_evaluation_duration = evalutation_duration if evalutation_duration < example_partitioner.min_evaluation_duration: example_partitioner.min_evaluation_duration = evalutation_duration if evalutation_duration < 0.000001: example_partitioner.nb_evaluation_zero += 1 # results = problog.get_evaluatable().create_from(db_to_query, engine=example_partitioner.engine).evaluate() for to_query, prob in results.items(): id = int(to_query.functor[1:]) if prob > 0.5: query_entropy_dicts[id][0][clause_db_ex.get_label()] = query_entropy_dicts[id][0][clause_db_ex.get_label()] + 1 else: query_entropy_dicts[id][1][clause_db_ex.get_label()] = query_entropy_dicts[id][1][ clause_db_ex.get_label()] + 1 for query, (left_dic, right_dic) in zip(refined_queries, query_entropy_dicts): # -- ig -- ig = 0 if nb_of_examples_complete_set != 0: ig = entropy_complete_set nb_examples_left = 
sum(left_dic.values()) if nb_examples_left > 0: entropy_left = 0 for label in left_dic.keys(): label_value = left_dic[label] if label_value != 0: entropy_left -= label_value / nb_examples_left \ * math.log2(label_value / nb_examples_left) ig -= nb_examples_left / nb_of_examples_complete_set * entropy_left # ------ nb_examples_right = sum(right_dic.values()) if nb_examples_right > 0: entropy_right = 0 for label in right_dic.keys(): label_value = right_dic[label] if label_value != 0: entropy_right -= label_value / nb_examples_right \ * math.log2(label_value / nb_examples_right) ig -= nb_examples_right / nb_of_examples_complete_set * entropy_right if ig > score_best_query: best_query = query score_best_query = ig # --- we now know the best query, so create the partition again: examples_satisfying_best_query = set() # type: Optional[Set[ExampleWrapper]] examples_not_satisfying_best_query = set() # type: Optional[Set[ExampleWrapper]] to_query = Term('to_query') to_add1 = Term('query')(to_query) to_add2 = (to_query << best_query.to_conjunction()) for clause_db_ex in examples: db_to_query = clause_db_ex.extend() # type: ClauseDB if clause_db_ex.classification_term is not None: db_to_query += clause_db_ex.classification_term # db_to_query = example_db.extend() db_to_query += to_add1 db_to_query += to_add2 start_time = time.time() evaluatable = problog.get_evaluatable() mid_time1 = time.time() something = evaluatable.create_from(db_to_query, engine=example_partitioner.engine) mid_time2 = time.time() query_result = something.evaluate() end_time = time.time() example_partitioner.nb_partitions_calculated += 1 get_evaluatable_duration = mid_time1 - start_time example_partitioner.sum_get_evaluatable += get_evaluatable_duration structure_creation_duration = mid_time2 - mid_time1 example_partitioner.sum_structure_creation_duration += structure_creation_duration if structure_creation_duration > example_partitioner.max_structure_creation_duration: 
example_partitioner.max_structure_creation_duration = structure_creation_duration if structure_creation_duration < example_partitioner.min_structure_creation_duration: example_partitioner.min_structure_creation_duration = structure_creation_duration if structure_creation_duration < 0.000001: example_partitioner.nb_structure_creation_zero += 1 evalutation_duration = end_time - mid_time2 example_partitioner.sum_evaluation_duration += evalutation_duration if evalutation_duration > example_partitioner.max_evaluation_duration: example_partitioner.max_evaluation_duration = evalutation_duration if evalutation_duration < example_partitioner.min_evaluation_duration: example_partitioner.min_evaluation_duration = evalutation_duration if evalutation_duration < 0.000001: example_partitioner.nb_evaluation_zero += 1 # query_result = problog.get_evaluatable().create_from(db_to_query, # engine=example_partitioner.engine).evaluate() if query_result[to_query] > 0.5: examples_satisfying_best_query.add(clause_db_ex) else: examples_not_satisfying_best_query.add(clause_db_ex) # for qid, q in enumerate(refined_queries): # type: TILDEQuery # # compute the score of the queries # conj_of_tilde_query = q.to_conjunction() # type: And # # examples_satisfying_q, examples_not_satisfying_q = example_partitioner.get_examples_satisfying_query( # examples, conj_of_tilde_query) # type: Set[ExampleWrapper] # # examples_not_satisfying_q = examples - examples_satisfying_q # type: Set[ExampleWrapper] # # #TODO: no longer probabilistic! # score = information_gain2(examples_satisfying_q, examples_not_satisfying_q, possible_targets, nb_of_examples_complete_set, entropy_complete_set) # # if score > score_best_query: # best_query = q # score_best_query = score # examples_satisfying_best_query = examples_satisfying_q # examples_not_satisfying_best_query = examples_not_satisfying_q return QueryScoreInfo(best_query, score_best_query, examples_satisfying_best_query, examples_not_satisfying_best_query)
52.040984
158
0.654198
import math from typing import Iterable, Set, List, Optional import problog import time from problog.logic import And, Term from mai_version.classification.example_partitioning import ExamplePartitioner from mai_version.representation.TILDE_query import TILDEQuery from mai_version.representation.example import ExampleWrapper from mai_version.representation.example import Label from mai_version.trees.scoring import entropy, information_gain2 class QueryScoreInfo: def __init__(self, best_query: TILDEQuery, score_of_best_query: float, examples_satisfying_best_query: Set[ExampleWrapper], examples_not_satisfying_best_query: Set[ExampleWrapper]): self.best_query = best_query self.score_of_best_query = score_of_best_query self.examples_satisfying_best_query = examples_satisfying_best_query self.examples_not_satisfying_best_query = examples_not_satisfying_best_query class TILDEQueryScorer: @staticmethod def get_best_refined_query(refined_queries: Iterable[TILDEQuery], examples: Set[ExampleWrapper], example_partitioner: ExamplePartitioner, possible_targets: List[Label], probabilistic: Optional[bool] = False) -> QueryScoreInfo: best_query = None score_best_query = - math.inf examples_satisfying_best_query = None examples_not_satisfying_best_query = None entropy_complete_set = entropy(examples, possible_targets) nb_of_examples_complete_set = len(examples) for q in refined_queries: print(q) conj_of_tilde_query = q.to_conjunction() examples_satisfying_q, examples_not_satisfying_q = example_partitioner.get_examples_satisfying_query( examples, conj_of_tilde_query) score = information_gain2(examples_satisfying_q, examples_not_satisfying_q, possible_targets, nb_of_examples_complete_set, entropy_complete_set) if score > score_best_query: best_query = q score_best_query = score examples_satisfying_best_query = examples_satisfying_q examples_not_satisfying_best_query = examples_not_satisfying_q return QueryScoreInfo(best_query, score_best_query, examples_satisfying_best_query, 
examples_not_satisfying_best_query) class TILDEQueryScorer2: @staticmethod def get_best_refined_query(refined_queries: Iterable[TILDEQuery], examples: Set[ExampleWrapper], example_partitioner: ExamplePartitioner, possible_targets: List[Label], probabilistic: Optional[bool] = False) -> QueryScoreInfo: best_query = None score_best_query = - math.inf ts) nb_of_examples_complete_set = len(examples) entropy_dict = {label: 0 for label in possible_targets} query_entropy_dicts = [(entropy_dict.copy(), entropy_dict.copy()) for q in refined_queries] for clause_db_ex in examples: db_to_query = clause_db_ex.extend() if clause_db_ex.classification_term is not None: db_to_query += clause_db_ex.classification_term for id, q in zip(range(0,len(refined_queries)), refined_queries): to_query = Term('q' + str(id)) db_to_query += Term('query')(to_query) db_to_query += (to_query << q.to_conjunction()) start_time = time.time() evaluatable = problog.get_evaluatable() mid_time1 = time.time() something = evaluatable.create_from(db_to_query, engine=example_partitioner.engine) mid_time2 = time.time() results = something.evaluate() end_time = time.time() example_partitioner.nb_partitions_calculated += 1 get_evaluatable_duration = mid_time1 - start_time example_partitioner.sum_get_evaluatable += get_evaluatable_duration structure_creation_duration = mid_time2 - mid_time1 example_partitioner.sum_structure_creation_duration += structure_creation_duration if structure_creation_duration > example_partitioner.max_structure_creation_duration: example_partitioner.max_structure_creation_duration = structure_creation_duration if structure_creation_duration < example_partitioner.min_structure_creation_duration: example_partitioner.min_structure_creation_duration = structure_creation_duration if structure_creation_duration < 0.000001: example_partitioner.nb_structure_creation_zero += 1 evalutation_duration = end_time - mid_time2 example_partitioner.sum_evaluation_duration += evalutation_duration if 
evalutation_duration > example_partitioner.max_evaluation_duration: example_partitioner.max_evaluation_duration = evalutation_duration if evalutation_duration < example_partitioner.min_evaluation_duration: example_partitioner.min_evaluation_duration = evalutation_duration if evalutation_duration < 0.000001: example_partitioner.nb_evaluation_zero += 1 for to_query, prob in results.items(): id = int(to_query.functor[1:]) if prob > 0.5: query_entropy_dicts[id][0][clause_db_ex.get_label()] = query_entropy_dicts[id][0][clause_db_ex.get_label()] + 1 else: query_entropy_dicts[id][1][clause_db_ex.get_label()] = query_entropy_dicts[id][1][ clause_db_ex.get_label()] + 1 for query, (left_dic, right_dic) in zip(refined_queries, query_entropy_dicts): ig = 0 if nb_of_examples_complete_set != 0: ig = entropy_complete_set nb_examples_left = sum(left_dic.values()) if nb_examples_left > 0: entropy_left = 0 for label in left_dic.keys(): label_value = left_dic[label] if label_value != 0: entropy_left -= label_value / nb_examples_left \ * math.log2(label_value / nb_examples_left) ig -= nb_examples_left / nb_of_examples_complete_set * entropy_left nb_examples_right = sum(right_dic.values()) if nb_examples_right > 0: entropy_right = 0 for label in right_dic.keys(): label_value = right_dic[label] if label_value != 0: entropy_right -= label_value / nb_examples_right \ * math.log2(label_value / nb_examples_right) ig -= nb_examples_right / nb_of_examples_complete_set * entropy_right if ig > score_best_query: best_query = query score_best_query = ig examples_satisfying_best_query = set() examples_not_satisfying_best_query = set() to_query = Term('to_query') to_add1 = Term('query')(to_query) to_add2 = (to_query << best_query.to_conjunction()) for clause_db_ex in examples: db_to_query = clause_db_ex.extend() if clause_db_ex.classification_term is not None: db_to_query += clause_db_ex.classification_term db_to_query += to_add1 db_to_query += to_add2 start_time = time.time() evaluatable = 
problog.get_evaluatable() mid_time1 = time.time() something = evaluatable.create_from(db_to_query, engine=example_partitioner.engine) mid_time2 = time.time() query_result = something.evaluate() end_time = time.time() example_partitioner.nb_partitions_calculated += 1 get_evaluatable_duration = mid_time1 - start_time example_partitioner.sum_get_evaluatable += get_evaluatable_duration structure_creation_duration = mid_time2 - mid_time1 example_partitioner.sum_structure_creation_duration += structure_creation_duration if structure_creation_duration > example_partitioner.max_structure_creation_duration: example_partitioner.max_structure_creation_duration = structure_creation_duration if structure_creation_duration < example_partitioner.min_structure_creation_duration: example_partitioner.min_structure_creation_duration = structure_creation_duration if structure_creation_duration < 0.000001: example_partitioner.nb_structure_creation_zero += 1 evalutation_duration = end_time - mid_time2 example_partitioner.sum_evaluation_duration += evalutation_duration if evalutation_duration > example_partitioner.max_evaluation_duration: example_partitioner.max_evaluation_duration = evalutation_duration if evalutation_duration < example_partitioner.min_evaluation_duration: example_partitioner.min_evaluation_duration = evalutation_duration if evalutation_duration < 0.000001: example_partitioner.nb_evaluation_zero += 1 if query_result[to_query] > 0.5: examples_satisfying_best_query.add(clause_db_ex) else: examples_not_satisfying_best_query.add(clause_db_ex) examples_not_satisfying_best_query)
true
true
f72c8a7510c49c3ae446f48e397b061791a320e4
13,771
py
Python
logreg.py
naver/cog
5b34ca90757116b9cfae11d8838927ba73e1ede8
[ "BSD-3-Clause" ]
13
2021-10-13T11:13:55.000Z
2022-03-11T04:41:41.000Z
logreg.py
naver/cog
5b34ca90757116b9cfae11d8838927ba73e1ede8
[ "BSD-3-Clause" ]
null
null
null
logreg.py
naver/cog
5b34ca90757116b9cfae11d8838927ba73e1ede8
[ "BSD-3-Clause" ]
null
null
null
# ImageNet-CoG Benchmark # Copyright 2021-present NAVER Corp. # 3-Clause BSD License​ import argparse import copy import logging import math import os import shutil import time import optuna import torch as th import feature_ops import metrics import utils from iterators import TorchIterator from meters import AverageMeter, ProgressMeter logger = logging.getLogger() class LogReg: """ Logistic regression classifier with mini-batch SGD. """ def __init__(self, args, cfg): self.args = args self.cfg = cfg # load the training set features trainset = feature_ops.load_feature_set( args.train_features_path, "train", cfg.CLF.NORM_FTS ) if args.val: # randomly split the training set into train + val logger.info("Splitting the training set into train and val") trainset, testset = feature_ops.split_trainset(trainset, cfg.CLF.VAL_PERC) else: # load the test set testset = feature_ops.load_feature_set(args.test_features_path, "test", cfg.CLF.NORM_FTS) if cfg.CLF.N_SHOT > 0: logger.info( "Simulating few-shot learning setting, {} images per class.".format( cfg.CLF.N_SHOT ) ) trainset = feature_ops.make_fewshot_dataset(trainset, cfg.CLF.N_SHOT) self.trainset = trainset self.testset = testset self.trainset.print_info() self.testset.print_info() # determine number of cases if len(list(self.trainset.y.shape)) == 1: classes = th.unique(self.trainset.y) assert th.all(classes == th.unique(self.testset.y)) args.n_classes = classes.size(0) # move all features to the device if args.device == "cuda": feature_ops.move_data_to_cuda([self.trainset, self.testset]) def __call__(self, trial=None): """ The function called by Optuna. 
""" # empty the cache allocated in the previous call th.cuda.empty_cache() args = copy.deepcopy(self.args) cfg = self.cfg x_train = self.trainset.x y_train = self.trainset.y x_test = self.testset.x y_test = self.testset.y # create training and test set iterators train_iter = TorchIterator((x_train, y_train), cfg.CLF.BATCH_SIZE, shuffle=True) test_iter = TorchIterator((x_test, y_test), cfg.CLF.BATCH_SIZE, shuffle=False) # define logistic classifier model = th.nn.Linear(x_train.size(1), args.n_classes).to(args.device) crit = th.nn.CrossEntropyLoss().to(args.device) # sample a learning rate and weight decay if trial is not None: lr_intv = cfg.CLF.LR_INTV wd_intv = cfg.CLF.WD_INTV args.lr = trial.suggest_loguniform("lr", lr_intv[0], lr_intv[1]) args.wd = trial.suggest_loguniform("wd", wd_intv[0], wd_intv[1]) optim = th.optim.SGD( model.parameters(), lr=args.lr, momentum=args.mom, weight_decay=args.wd ) args.exp_dir = os.path.join( args.output_dir, "{}-lr-{}_wd-{}".format("val" if args.val else "final", args.lr, args.wd), ) os.makedirs(args.exp_dir, exist_ok=True) # write the model definition into exp_dir utils.write_to_file(str(model), os.path.join(args.exp_dir, "model.txt")) # logs computed during training / evaluation args.logs = { "train/loss": [], "train/top1": [], "train/top5": [], "test/loss": [], "test/top1": [], "test/top5": [], "lr": [], } # predictions over the evaluation sets args.preds = [] for epoch in range(cfg.CLF.N_EPOCHS): if not args.val: logger.info(f"**Epoch:{epoch}**") args.epoch = epoch train_stat = train(train_iter, model, crit, optim, epoch, args) validate(test_iter, model, crit, args) adjust_learning_rate(optim, args, cfg) # if something went wrong during training # e.g. 
SGD diverged if train_stat == -1: break # save the logs utils.save_pickle(args.logs, f"{args.exp_dir}/logs.pkl") # save the predictions utils.save_pickle(args.preds, f"{args.exp_dir}/preds.pkl") # save the whole args, for ease of access utils.save_pickle(vars(args), f"{args.exp_dir}/args.pkl") # save also the final model th.save( { "model": model.state_dict(), }, f"{args.exp_dir}/model.pth", ) # return the last test accuracy return args.logs["test/top1"][-1] def train(train_loader, model, criterion, optimizer, epoch, args): """ Train the classifier for one epoch. """ batch_time = AverageMeter("Time", ":6.3f") losses = AverageMeter("Loss", ":.4e") top1 = AverageMeter("Acc@1", ":6.2f") top5 = AverageMeter("Acc@5", ":6.2f") progress = ProgressMeter( len(train_loader), [batch_time, losses, top1, top5], prefix="Epoch: [{}]".format(epoch), ) # switch to train mode model.train() end = time.time() for i, (fts, lbls) in enumerate(train_loader): fts = fts.to(args.device) lbls = lbls.to(args.device) # compute output output = model(fts) loss = criterion(output, lbls) if not th.isfinite(loss): logger.info("Loss ({}) is not finite, terminating".format(loss.item())) optimizer.zero_grad() return -1 # measure accuracy and record loss acc1, acc5 = metrics.accuracy(output, lbls, topk=(1, 5)) losses.update(loss.item(), fts.size(0)) top1.update(acc1.item(), fts.size(0)) top5.update(acc5.item(), fts.size(0)) # compute gradient and do SGD step optimizer.zero_grad() loss.backward() optimizer.step() # measure elapsed time batch_time.update(time.time() - end) end = time.time() if (not args.val) and (i % args.print_freq == 0): progress.display(i) args.logs["train/loss"].append(losses.avg) args.logs["train/top1"].append(top1.avg) args.logs["train/top5"].append(top5.avg) return 0 def validate(val_loader, model, criterion, args): losses = AverageMeter("Loss", ":.4e") top1 = AverageMeter("Acc@1", ":6.2f") top5 = AverageMeter("Acc@5", ":6.2f") # switch to evaluate mode model.eval() # keep 
predictions per class preds = th.ones(len(val_loader.tensors[0]), dtype=th.int32, device=args.device) * -1. six = 0 with th.no_grad(): for i, (fts, lbls) in enumerate(val_loader): fts = fts.to(args.device) lbls = lbls.to(args.device) bs = fts.size(0) # compute output output = model(fts) loss = criterion(output, lbls) # store the predicted classes preds[six:six + bs] = th.argmax(output, dim=1) six += bs # measure accuracy and record loss acc1, acc5 = metrics.accuracy(output, lbls, topk=(1, 5)) losses.update(loss.item(), bs) top1.update(acc1[0].item(), bs) top5.update(acc5[0].item(), bs) # make sure that there is no invalid prediction assert th.all(preds >= 0).item() args.preds.append(preds.detach().cpu()) args.logs["test/loss"].append(losses.avg) args.logs["test/top1"].append(top1.avg) args.logs["test/top5"].append(top5.avg) if not args.val: logger.info( " * Acc@1:{top1.avg:.3f} - Acc@5:{top5.avg:.3f}".format( top1=top1, top5=top5 ) ) def adjust_learning_rate(optimizer, args, cfg): """Decay the learning rate based on cosine schedule""" lr = args.lr lr *= 0.5 * (1.0 + math.cos(math.pi * args.epoch / cfg.CLF.N_EPOCHS)) for param_group in optimizer.param_groups: param_group["lr"] = lr args.logs["lr"].append(lr) def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"): th.save(state, filename) if is_best: shutil.copyfile(filename, "model_best.pth.tar") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--model', type=utils.none_or_string_flag, help='Name of the model in the <model_title>_<architecture_name> form.' 'See the table of models in ./prepare_models/README.md for all the model names we support.' 'This is an optional argument that needs to be set along with --models_root_dir and --dataset.' 'When these three arguments are set, the script will load features from:' '<models_root_dir>/<model_title>/<architecture_name>/<dataset>/features_*/X_Y.pth.' 
'If you would like to load pre-extracted features from somewhere else' 'then ignore this argument and provide the --train_features_dir and --test_features_dir arguments accordingly') parser.add_argument('--models_root_dir', type=utils.none_or_string_flag, help='Root directory for all models, see prepare_models/README.md for a detailed explanation.' 'This is an optional argument that needs to be set along with --model and --dataset.' 'Please see the help message for the --model argument as well.') parser.add_argument("--dataset", type=utils.none_or_string_flag, help="On which dataset to learn classifiers" 'Possible values are ("in1k", "cog_l1", "cog_l2", "cog_l3", "cog_l4", "cog_l5")' 'This is an optional argument that needs to be set along with --models_root_dir and --model.' 'Please see the help message for the --model argument as well.') parser.add_argument('--train_features_dir', type=utils.none_or_string_flag, help='Path to the directory containing pre-extracted training set features.' 'We expect a features file "X_Y.pth" under <train_features_dir>.' 'This is an optional argument that needs to be set if --models_root_dir, --model and --dataset are not set.') parser.add_argument('--test_features_dir', type=utils.none_or_string_flag, help='Path to the directory containing pre-extracted test set features.' 'We expect a features file "X_Y.pth" under <test_features_dir>.' 'This is an optional argument that needs to be set if --models_root_dir, --model and --dataset are not set.') parser.add_argument('--output_dir', type=utils.none_or_string_flag, help='Where to log program logs.' 'This is an optional argument that needs to be set if --models_root_dir is not set.' 'If not provided, we try to save the logs under' '<models_root_dir>/<model_title>/<architecture_name>/<dataset>/eval_logreg/seed*') # learning rate and momentum are tuned in this program, do not manually set. 
parser.add_argument("--lr", type=float, default=0.0, help="initial learning rate") parser.add_argument("--wd", type=float, default=0.0, help="weight decay") parser.add_argument("--mom", type=float, default=0.9, help="momentum") # program-related options parser.add_argument("--print_freq", default=100, type=int, help="print frequency (default: 10)") parser.add_argument("--device", type=str, default="cuda") # optionally to overwrite the default config parser.add_argument("opts", default=None, help="see configs/default.py for all options", nargs=argparse.REMAINDER) args = parser.parse_args() if args.device == "cuda" and not th.cuda.is_available(): print("CUDA is not available, I will run on CPU.") args.device = "cpu" # load the config file # create output directory, # locate pre-extracted features, # initialize program logger, # save args and cfg # this function sets the following arg variables: # - train_features_path, type=str # - test_features_path, type=str # - output_dir, type=str args, cfg = utils.init_program(args, _for="logreg") # tune hyper-parameters with optuna logger.info("Running Optuna...") hps_sampler = optuna.samplers.TPESampler(multivariate=True, seed=cfg.EVAL.SEED) study = optuna.create_study(sampler=hps_sampler, direction="maximize") args.val = True logreg = LogReg(args, cfg) study.optimize(logreg, n_trials=cfg.CLF.N_TRIALS, n_jobs=1, show_progress_bar=False) utils.save_pickle(study, os.path.join(args.output_dir, "study.pkl")) logger.info("") logger.info("*" * 50) logger.info("Hyper-parameter search ended") logger.info("best_trial:") logger.info(str(study.best_trial)) logger.info("best_params:") logger.info(str(study.best_params)) logger.info("*" * 50) logger.info("") # train the final classifier with the tuned hyper-parameters del logreg th.cuda.empty_cache() args.lr = study.best_params["lr"] args.wd = study.best_params["wd"] args.val = False logreg = LogReg(args, cfg) logreg()
37.625683
140
0.600392
import argparse import copy import logging import math import os import shutil import time import optuna import torch as th import feature_ops import metrics import utils from iterators import TorchIterator from meters import AverageMeter, ProgressMeter logger = logging.getLogger() class LogReg: def __init__(self, args, cfg): self.args = args self.cfg = cfg trainset = feature_ops.load_feature_set( args.train_features_path, "train", cfg.CLF.NORM_FTS ) if args.val: logger.info("Splitting the training set into train and val") trainset, testset = feature_ops.split_trainset(trainset, cfg.CLF.VAL_PERC) else: testset = feature_ops.load_feature_set(args.test_features_path, "test", cfg.CLF.NORM_FTS) if cfg.CLF.N_SHOT > 0: logger.info( "Simulating few-shot learning setting, {} images per class.".format( cfg.CLF.N_SHOT ) ) trainset = feature_ops.make_fewshot_dataset(trainset, cfg.CLF.N_SHOT) self.trainset = trainset self.testset = testset self.trainset.print_info() self.testset.print_info() if len(list(self.trainset.y.shape)) == 1: classes = th.unique(self.trainset.y) assert th.all(classes == th.unique(self.testset.y)) args.n_classes = classes.size(0) if args.device == "cuda": feature_ops.move_data_to_cuda([self.trainset, self.testset]) def __call__(self, trial=None): th.cuda.empty_cache() args = copy.deepcopy(self.args) cfg = self.cfg x_train = self.trainset.x y_train = self.trainset.y x_test = self.testset.x y_test = self.testset.y train_iter = TorchIterator((x_train, y_train), cfg.CLF.BATCH_SIZE, shuffle=True) test_iter = TorchIterator((x_test, y_test), cfg.CLF.BATCH_SIZE, shuffle=False) model = th.nn.Linear(x_train.size(1), args.n_classes).to(args.device) crit = th.nn.CrossEntropyLoss().to(args.device) if trial is not None: lr_intv = cfg.CLF.LR_INTV wd_intv = cfg.CLF.WD_INTV args.lr = trial.suggest_loguniform("lr", lr_intv[0], lr_intv[1]) args.wd = trial.suggest_loguniform("wd", wd_intv[0], wd_intv[1]) optim = th.optim.SGD( model.parameters(), lr=args.lr, 
momentum=args.mom, weight_decay=args.wd ) args.exp_dir = os.path.join( args.output_dir, "{}-lr-{}_wd-{}".format("val" if args.val else "final", args.lr, args.wd), ) os.makedirs(args.exp_dir, exist_ok=True) utils.write_to_file(str(model), os.path.join(args.exp_dir, "model.txt")) args.logs = { "train/loss": [], "train/top1": [], "train/top5": [], "test/loss": [], "test/top1": [], "test/top5": [], "lr": [], } args.preds = [] for epoch in range(cfg.CLF.N_EPOCHS): if not args.val: logger.info(f"**Epoch:{epoch}**") args.epoch = epoch train_stat = train(train_iter, model, crit, optim, epoch, args) validate(test_iter, model, crit, args) adjust_learning_rate(optim, args, cfg) if train_stat == -1: break utils.save_pickle(args.logs, f"{args.exp_dir}/logs.pkl") utils.save_pickle(args.preds, f"{args.exp_dir}/preds.pkl") utils.save_pickle(vars(args), f"{args.exp_dir}/args.pkl") th.save( { "model": model.state_dict(), }, f"{args.exp_dir}/model.pth", ) return args.logs["test/top1"][-1] def train(train_loader, model, criterion, optimizer, epoch, args): batch_time = AverageMeter("Time", ":6.3f") losses = AverageMeter("Loss", ":.4e") top1 = AverageMeter("Acc@1", ":6.2f") top5 = AverageMeter("Acc@5", ":6.2f") progress = ProgressMeter( len(train_loader), [batch_time, losses, top1, top5], prefix="Epoch: [{}]".format(epoch), ) model.train() end = time.time() for i, (fts, lbls) in enumerate(train_loader): fts = fts.to(args.device) lbls = lbls.to(args.device) output = model(fts) loss = criterion(output, lbls) if not th.isfinite(loss): logger.info("Loss ({}) is not finite, terminating".format(loss.item())) optimizer.zero_grad() return -1 acc1, acc5 = metrics.accuracy(output, lbls, topk=(1, 5)) losses.update(loss.item(), fts.size(0)) top1.update(acc1.item(), fts.size(0)) top5.update(acc5.item(), fts.size(0)) optimizer.zero_grad() loss.backward() optimizer.step() batch_time.update(time.time() - end) end = time.time() if (not args.val) and (i % args.print_freq == 0): progress.display(i) 
args.logs["train/loss"].append(losses.avg) args.logs["train/top1"].append(top1.avg) args.logs["train/top5"].append(top5.avg) return 0 def validate(val_loader, model, criterion, args): losses = AverageMeter("Loss", ":.4e") top1 = AverageMeter("Acc@1", ":6.2f") top5 = AverageMeter("Acc@5", ":6.2f") model.eval() preds = th.ones(len(val_loader.tensors[0]), dtype=th.int32, device=args.device) * -1. six = 0 with th.no_grad(): for i, (fts, lbls) in enumerate(val_loader): fts = fts.to(args.device) lbls = lbls.to(args.device) bs = fts.size(0) output = model(fts) loss = criterion(output, lbls) preds[six:six + bs] = th.argmax(output, dim=1) six += bs acc1, acc5 = metrics.accuracy(output, lbls, topk=(1, 5)) losses.update(loss.item(), bs) top1.update(acc1[0].item(), bs) top5.update(acc5[0].item(), bs) assert th.all(preds >= 0).item() args.preds.append(preds.detach().cpu()) args.logs["test/loss"].append(losses.avg) args.logs["test/top1"].append(top1.avg) args.logs["test/top5"].append(top5.avg) if not args.val: logger.info( " * Acc@1:{top1.avg:.3f} - Acc@5:{top5.avg:.3f}".format( top1=top1, top5=top5 ) ) def adjust_learning_rate(optimizer, args, cfg): lr = args.lr lr *= 0.5 * (1.0 + math.cos(math.pi * args.epoch / cfg.CLF.N_EPOCHS)) for param_group in optimizer.param_groups: param_group["lr"] = lr args.logs["lr"].append(lr) def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"): th.save(state, filename) if is_best: shutil.copyfile(filename, "model_best.pth.tar") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--model', type=utils.none_or_string_flag, help='Name of the model in the <model_title>_<architecture_name> form.' 'See the table of models in ./prepare_models/README.md for all the model names we support.' 'This is an optional argument that needs to be set along with --models_root_dir and --dataset.' 
'When these three arguments are set, the script will load features from:' '<models_root_dir>/<model_title>/<architecture_name>/<dataset>/features_*/X_Y.pth.' 'If you would like to load pre-extracted features from somewhere else' 'then ignore this argument and provide the --train_features_dir and --test_features_dir arguments accordingly') parser.add_argument('--models_root_dir', type=utils.none_or_string_flag, help='Root directory for all models, see prepare_models/README.md for a detailed explanation.' 'This is an optional argument that needs to be set along with --model and --dataset.' 'Please see the help message for the --model argument as well.') parser.add_argument("--dataset", type=utils.none_or_string_flag, help="On which dataset to learn classifiers" 'Possible values are ("in1k", "cog_l1", "cog_l2", "cog_l3", "cog_l4", "cog_l5")' 'This is an optional argument that needs to be set along with --models_root_dir and --model.' 'Please see the help message for the --model argument as well.') parser.add_argument('--train_features_dir', type=utils.none_or_string_flag, help='Path to the directory containing pre-extracted training set features.' 'We expect a features file "X_Y.pth" under <train_features_dir>.' 'This is an optional argument that needs to be set if --models_root_dir, --model and --dataset are not set.') parser.add_argument('--test_features_dir', type=utils.none_or_string_flag, help='Path to the directory containing pre-extracted test set features.' 'We expect a features file "X_Y.pth" under <test_features_dir>.' 'This is an optional argument that needs to be set if --models_root_dir, --model and --dataset are not set.') parser.add_argument('--output_dir', type=utils.none_or_string_flag, help='Where to log program logs.' 'This is an optional argument that needs to be set if --models_root_dir is not set.' 
'If not provided, we try to save the logs under' '<models_root_dir>/<model_title>/<architecture_name>/<dataset>/eval_logreg/seed*') parser.add_argument("--lr", type=float, default=0.0, help="initial learning rate") parser.add_argument("--wd", type=float, default=0.0, help="weight decay") parser.add_argument("--mom", type=float, default=0.9, help="momentum") parser.add_argument("--print_freq", default=100, type=int, help="print frequency (default: 10)") parser.add_argument("--device", type=str, default="cuda") parser.add_argument("opts", default=None, help="see configs/default.py for all options", nargs=argparse.REMAINDER) args = parser.parse_args() if args.device == "cuda" and not th.cuda.is_available(): print("CUDA is not available, I will run on CPU.") args.device = "cpu" args, cfg = utils.init_program(args, _for="logreg") logger.info("Running Optuna...") hps_sampler = optuna.samplers.TPESampler(multivariate=True, seed=cfg.EVAL.SEED) study = optuna.create_study(sampler=hps_sampler, direction="maximize") args.val = True logreg = LogReg(args, cfg) study.optimize(logreg, n_trials=cfg.CLF.N_TRIALS, n_jobs=1, show_progress_bar=False) utils.save_pickle(study, os.path.join(args.output_dir, "study.pkl")) logger.info("") logger.info("*" * 50) logger.info("Hyper-parameter search ended") logger.info("best_trial:") logger.info(str(study.best_trial)) logger.info("best_params:") logger.info(str(study.best_params)) logger.info("*" * 50) logger.info("") del logreg th.cuda.empty_cache() args.lr = study.best_params["lr"] args.wd = study.best_params["wd"] args.val = False logreg = LogReg(args, cfg) logreg()
true
true
f72c8ab58a23d585b39a3037e18747d52bcb4b75
1,132
py
Python
Chapter02/Ch02_Code/GUI_tabbed_two_mighty_labels.py
mr4dsd43/Python-GUI-Programming-Cookbook-Second-Edition
18e4632106169991e9b75680bdd7250c9d77c3be
[ "MIT" ]
2
2021-01-12T03:13:29.000Z
2021-01-12T03:13:31.000Z
Chapter02/Ch02_Code/GUI_tabbed_two_mighty_labels.py
mr4dsd43/Python-GUI-Programming-Cookbook-Second-Edition
18e4632106169991e9b75680bdd7250c9d77c3be
[ "MIT" ]
null
null
null
Chapter02/Ch02_Code/GUI_tabbed_two_mighty_labels.py
mr4dsd43/Python-GUI-Programming-Cookbook-Second-Edition
18e4632106169991e9b75680bdd7250c9d77c3be
[ "MIT" ]
1
2022-02-22T02:06:32.000Z
2022-02-22T02:06:32.000Z
''' May 2017 @author: Burkhard A. Meier ''' #====================== # imports #====================== import tkinter as tk from tkinter import ttk # Create instance win = tk.Tk() # Add a title win.title("Python GUI") tabControl = ttk.Notebook(win) # Create Tab Control tab1 = ttk.Frame(tabControl) # Create a tab tabControl.add(tab1, text='Tab 1') # Add the tab tab2 = ttk.Frame(tabControl) # Add a second tab tabControl.add(tab2, text='Tab 2') # Make second tab visible tabControl.pack(expand=1, fill="both") # Pack to make visible # LabelFrame using tab1 as the parent mighty = ttk.LabelFrame(tab1, text=' Mighty Python ') mighty.grid(column=0, row=0, padx=8, pady=4) # Label using mighty as the parent a_label = ttk.Label(mighty, text="Enter a name:") a_label.grid(column=0, row=0, sticky='W') # Add another label ttk.Label(mighty, text="Choose a number:").grid(column=1, row=0) # Add some space around each label for child in mighty.winfo_children(): child.grid_configure(padx=8) #====================== # Start GUI #====================== win.mainloop()
25.155556
65
0.620141
import tkinter as tk from tkinter import ttk win = tk.Tk() win.title("Python GUI") tabControl = ttk.Notebook(win) tab1 = ttk.Frame(tabControl) tabControl.add(tab1, text='Tab 1') tab2 = ttk.Frame(tabControl) tabControl.add(tab2, text='Tab 2') tabControl.pack(expand=1, fill="both") mighty = ttk.LabelFrame(tab1, text=' Mighty Python ') mighty.grid(column=0, row=0, padx=8, pady=4) a_label = ttk.Label(mighty, text="Enter a name:") a_label.grid(column=0, row=0, sticky='W') ttk.Label(mighty, text="Choose a number:").grid(column=1, row=0) for child in mighty.winfo_children(): child.grid_configure(padx=8) win.mainloop()
true
true
f72c8ecfbd321747538079c852e41d9f1f85d700
3,446
py
Python
sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_validate.py
kashifkhan/azure-sdk-for-python
9c28b76e89b0855e41bd12d5b4a59b51acd47eec
[ "MIT" ]
null
null
null
sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_validate.py
kashifkhan/azure-sdk-for-python
9c28b76e89b0855e41bd12d5b4a59b51acd47eec
[ "MIT" ]
null
null
null
sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_validate.py
kashifkhan/azure-sdk-for-python
9c28b76e89b0855e41bd12d5b4a59b51acd47eec
[ "MIT" ]
null
null
null
# ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ import functools from ._version import VERSIONS_SUPPORTED def check_for_unsupported_actions_types(*args, **kwargs): client = args[0] # this assumes the client has an _api_version attribute selected_api_version = client._api_version # pylint: disable=protected-access if "actions" not in kwargs: actions = args[2] else: actions = kwargs.get("actions") if actions is None: return actions_version_mapping = { "2022-03-01-preview": [ "ExtractSummaryAction", "RecognizeCustomEntitiesAction", "SingleCategoryClassifyAction", "MultiCategoryClassifyAction" ] } unsupported = { arg: version for version, args in actions_version_mapping.items() for arg in args if arg in [action.__class__.__name__ for action in actions] and selected_api_version != version and VERSIONS_SUPPORTED.index(selected_api_version) < VERSIONS_SUPPORTED.index(version) } if unsupported: error_strings = [ f"'{param}' is only available for API version {version} and up.\n" for param, version in unsupported.items() ] raise ValueError("".join(error_strings)) def validate_multiapi_args(**kwargs): args_mapping = kwargs.pop("args_mapping", None) version_method_added = kwargs.pop("version_method_added", None) custom_wrapper = kwargs.pop("custom_wrapper", None) def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): try: # this assumes the client has an _api_version attribute client = args[0] selected_api_version = client._api_version # pylint: disable=protected-access except AttributeError: return func(*args, **kwargs) # the latest version is selected, we assume all features supported if selected_api_version == VERSIONS_SUPPORTED[-1]: return func(*args, **kwargs) if version_method_added and version_method_added != selected_api_version and \ VERSIONS_SUPPORTED.index(selected_api_version) < VERSIONS_SUPPORTED.index(version_method_added): raise 
ValueError(f"'{func.__name__}' is only available for API version {version_method_added} and up.") if args_mapping: unsupported = { arg: version for version, args in args_mapping.items() for arg in args if arg in kwargs.keys() and selected_api_version != version and VERSIONS_SUPPORTED.index(selected_api_version) < VERSIONS_SUPPORTED.index(version) } if unsupported: error_strings = [ f"'{param}' is only available for API version {version} and up.\n" for param, version in unsupported.items() ] raise ValueError("".join(error_strings)) if custom_wrapper: custom_wrapper(*args, **kwargs) return func(*args, **kwargs) return wrapper return decorator
35.163265
119
0.589089
import functools from ._version import VERSIONS_SUPPORTED def check_for_unsupported_actions_types(*args, **kwargs): client = args[0] selected_api_version = client._api_version if "actions" not in kwargs: actions = args[2] else: actions = kwargs.get("actions") if actions is None: return actions_version_mapping = { "2022-03-01-preview": [ "ExtractSummaryAction", "RecognizeCustomEntitiesAction", "SingleCategoryClassifyAction", "MultiCategoryClassifyAction" ] } unsupported = { arg: version for version, args in actions_version_mapping.items() for arg in args if arg in [action.__class__.__name__ for action in actions] and selected_api_version != version and VERSIONS_SUPPORTED.index(selected_api_version) < VERSIONS_SUPPORTED.index(version) } if unsupported: error_strings = [ f"'{param}' is only available for API version {version} and up.\n" for param, version in unsupported.items() ] raise ValueError("".join(error_strings)) def validate_multiapi_args(**kwargs): args_mapping = kwargs.pop("args_mapping", None) version_method_added = kwargs.pop("version_method_added", None) custom_wrapper = kwargs.pop("custom_wrapper", None) def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): try: client = args[0] selected_api_version = client._api_version except AttributeError: return func(*args, **kwargs) if selected_api_version == VERSIONS_SUPPORTED[-1]: return func(*args, **kwargs) if version_method_added and version_method_added != selected_api_version and \ VERSIONS_SUPPORTED.index(selected_api_version) < VERSIONS_SUPPORTED.index(version_method_added): raise ValueError(f"'{func.__name__}' is only available for API version {version_method_added} and up.") if args_mapping: unsupported = { arg: version for version, args in args_mapping.items() for arg in args if arg in kwargs.keys() and selected_api_version != version and VERSIONS_SUPPORTED.index(selected_api_version) < VERSIONS_SUPPORTED.index(version) } if unsupported: error_strings = [ f"'{param}' is only 
available for API version {version} and up.\n" for param, version in unsupported.items() ] raise ValueError("".join(error_strings)) if custom_wrapper: custom_wrapper(*args, **kwargs) return func(*args, **kwargs) return wrapper return decorator
true
true
f72c8ed99253eaa655d08778cb9bf6fa834191af
10,956
py
Python
google/ads/google_ads/v0/proto/resources/shared_criterion_pb2.py
jwygoda/google-ads-python
863892b533240cb45269d9c2cceec47e2c5a8b68
[ "Apache-2.0" ]
null
null
null
google/ads/google_ads/v0/proto/resources/shared_criterion_pb2.py
jwygoda/google-ads-python
863892b533240cb45269d9c2cceec47e2c5a8b68
[ "Apache-2.0" ]
null
null
null
google/ads/google_ads/v0/proto/resources/shared_criterion_pb2.py
jwygoda/google-ads-python
863892b533240cb45269d9c2cceec47e2c5a8b68
[ "Apache-2.0" ]
null
null
null
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/ads/googleads_v0/proto/resources/shared_criterion.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.ads.google_ads.v0.proto.common import criteria_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2 from google.ads.google_ads.v0.proto.enums import criterion_type_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_enums_dot_criterion__type__pb2 from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='google/ads/googleads_v0/proto/resources/shared_criterion.proto', package='google.ads.googleads.v0.resources', syntax='proto3', serialized_options=_b('\n%com.google.ads.googleads.v0.resourcesB\024SharedCriterionProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V0.Resources\312\002!Google\\Ads\\GoogleAds\\V0\\Resources\352\002%Google::Ads::GoogleAds::V0::Resources'), serialized_pb=_b('\n>google/ads/googleads_v0/proto/resources/shared_criterion.proto\x12!google.ads.googleads.v0.resources\x1a\x33google/ads/googleads_v0/proto/common/criteria.proto\x1a\x38google/ads/googleads_v0/proto/enums/criterion_type.proto\x1a\x1egoogle/protobuf/wrappers.proto\"\xdc\x04\n\x0fSharedCriterion\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x30\n\nshared_set\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0c\x63riterion_id\x18\x1a \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12L\n\x04type\x18\x04 
\x01(\x0e\x32>.google.ads.googleads.v0.enums.CriterionTypeEnum.CriterionType\x12>\n\x07keyword\x18\x03 \x01(\x0b\x32+.google.ads.googleads.v0.common.KeywordInfoH\x00\x12I\n\ryoutube_video\x18\x05 \x01(\x0b\x32\x30.google.ads.googleads.v0.common.YouTubeVideoInfoH\x00\x12M\n\x0fyoutube_channel\x18\x06 \x01(\x0b\x32\x32.google.ads.googleads.v0.common.YouTubeChannelInfoH\x00\x12\x42\n\tplacement\x18\x07 \x01(\x0b\x32-.google.ads.googleads.v0.common.PlacementInfoH\x00\x12T\n\x13mobile_app_category\x18\x08 \x01(\x0b\x32\x35.google.ads.googleads.v0.common.MobileAppCategoryInfoH\x00\x42\x0b\n\tcriterionB\x81\x02\n%com.google.ads.googleads.v0.resourcesB\x14SharedCriterionProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V0.Resources\xca\x02!Google\\Ads\\GoogleAds\\V0\\Resources\xea\x02%Google::Ads::GoogleAds::V0::Resourcesb\x06proto3') , dependencies=[google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v0_dot_proto_dot_enums_dot_criterion__type__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,]) _SHAREDCRITERION = _descriptor.Descriptor( name='SharedCriterion', full_name='google.ads.googleads.v0.resources.SharedCriterion', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='resource_name', full_name='google.ads.googleads.v0.resources.SharedCriterion.resource_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='shared_set', full_name='google.ads.googleads.v0.resources.SharedCriterion.shared_set', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='criterion_id', full_name='google.ads.googleads.v0.resources.SharedCriterion.criterion_id', index=2, number=26, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='type', full_name='google.ads.googleads.v0.resources.SharedCriterion.type', index=3, number=4, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='keyword', full_name='google.ads.googleads.v0.resources.SharedCriterion.keyword', index=4, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='youtube_video', full_name='google.ads.googleads.v0.resources.SharedCriterion.youtube_video', index=5, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='youtube_channel', full_name='google.ads.googleads.v0.resources.SharedCriterion.youtube_channel', index=6, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='placement', 
full_name='google.ads.googleads.v0.resources.SharedCriterion.placement', index=7, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mobile_app_category', full_name='google.ads.googleads.v0.resources.SharedCriterion.mobile_app_category', index=8, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='criterion', full_name='google.ads.googleads.v0.resources.SharedCriterion.criterion', index=0, containing_type=None, fields=[]), ], serialized_start=245, serialized_end=849, ) _SHAREDCRITERION.fields_by_name['shared_set'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE _SHAREDCRITERION.fields_by_name['criterion_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE _SHAREDCRITERION.fields_by_name['type'].enum_type = google_dot_ads_dot_googleads__v0_dot_proto_dot_enums_dot_criterion__type__pb2._CRITERIONTYPEENUM_CRITERIONTYPE _SHAREDCRITERION.fields_by_name['keyword'].message_type = google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2._KEYWORDINFO _SHAREDCRITERION.fields_by_name['youtube_video'].message_type = google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2._YOUTUBEVIDEOINFO _SHAREDCRITERION.fields_by_name['youtube_channel'].message_type = google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2._YOUTUBECHANNELINFO _SHAREDCRITERION.fields_by_name['placement'].message_type = 
google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2._PLACEMENTINFO _SHAREDCRITERION.fields_by_name['mobile_app_category'].message_type = google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2._MOBILEAPPCATEGORYINFO _SHAREDCRITERION.oneofs_by_name['criterion'].fields.append( _SHAREDCRITERION.fields_by_name['keyword']) _SHAREDCRITERION.fields_by_name['keyword'].containing_oneof = _SHAREDCRITERION.oneofs_by_name['criterion'] _SHAREDCRITERION.oneofs_by_name['criterion'].fields.append( _SHAREDCRITERION.fields_by_name['youtube_video']) _SHAREDCRITERION.fields_by_name['youtube_video'].containing_oneof = _SHAREDCRITERION.oneofs_by_name['criterion'] _SHAREDCRITERION.oneofs_by_name['criterion'].fields.append( _SHAREDCRITERION.fields_by_name['youtube_channel']) _SHAREDCRITERION.fields_by_name['youtube_channel'].containing_oneof = _SHAREDCRITERION.oneofs_by_name['criterion'] _SHAREDCRITERION.oneofs_by_name['criterion'].fields.append( _SHAREDCRITERION.fields_by_name['placement']) _SHAREDCRITERION.fields_by_name['placement'].containing_oneof = _SHAREDCRITERION.oneofs_by_name['criterion'] _SHAREDCRITERION.oneofs_by_name['criterion'].fields.append( _SHAREDCRITERION.fields_by_name['mobile_app_category']) _SHAREDCRITERION.fields_by_name['mobile_app_category'].containing_oneof = _SHAREDCRITERION.oneofs_by_name['criterion'] DESCRIPTOR.message_types_by_name['SharedCriterion'] = _SHAREDCRITERION _sym_db.RegisterFileDescriptor(DESCRIPTOR) SharedCriterion = _reflection.GeneratedProtocolMessageType('SharedCriterion', (_message.Message,), dict( DESCRIPTOR = _SHAREDCRITERION, __module__ = 'google.ads.googleads_v0.proto.resources.shared_criterion_pb2' , __doc__ = """A criterion belonging to a shared set. Attributes: resource_name: The resource name of the shared criterion. Shared set resource names have the form: ``customers/{customer_id}/sharedCriteria /{shared_set_id}_{criterion_id}`` shared_set: The shared set to which the shared criterion belongs. 
criterion_id: The ID of the criterion. This field is ignored for mutates. type: The type of the criterion. criterion: The criterion. Exactly one must be set. keyword: Keyword. youtube_video: YouTube Video. youtube_channel: YouTube Channel. placement: Placement. mobile_app_category: Mobile App Category. """, # @@protoc_insertion_point(class_scope:google.ads.googleads.v0.resources.SharedCriterion) )) _sym_db.RegisterMessage(SharedCriterion) DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)
59.221622
1,457
0.793264
import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database _sym_db = _symbol_database.Default() from google.ads.google_ads.v0.proto.common import criteria_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2 from google.ads.google_ads.v0.proto.enums import criterion_type_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_enums_dot_criterion__type__pb2 from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='google/ads/googleads_v0/proto/resources/shared_criterion.proto', package='google.ads.googleads.v0.resources', syntax='proto3', serialized_options=_b('\n%com.google.ads.googleads.v0.resourcesB\024SharedCriterionProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V0.Resources\312\002!Google\\Ads\\GoogleAds\\V0\\Resources\352\002%Google::Ads::GoogleAds::V0::Resources'), serialized_pb=_b('\n>google/ads/googleads_v0/proto/resources/shared_criterion.proto\x12!google.ads.googleads.v0.resources\x1a\x33google/ads/googleads_v0/proto/common/criteria.proto\x1a\x38google/ads/googleads_v0/proto/enums/criterion_type.proto\x1a\x1egoogle/protobuf/wrappers.proto\"\xdc\x04\n\x0fSharedCriterion\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x30\n\nshared_set\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0c\x63riterion_id\x18\x1a \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12L\n\x04type\x18\x04 \x01(\x0e\x32>.google.ads.googleads.v0.enums.CriterionTypeEnum.CriterionType\x12>\n\x07keyword\x18\x03 \x01(\x0b\x32+.google.ads.googleads.v0.common.KeywordInfoH\x00\x12I\n\ryoutube_video\x18\x05 
\x01(\x0b\x32\x30.google.ads.googleads.v0.common.YouTubeVideoInfoH\x00\x12M\n\x0fyoutube_channel\x18\x06 \x01(\x0b\x32\x32.google.ads.googleads.v0.common.YouTubeChannelInfoH\x00\x12\x42\n\tplacement\x18\x07 \x01(\x0b\x32-.google.ads.googleads.v0.common.PlacementInfoH\x00\x12T\n\x13mobile_app_category\x18\x08 \x01(\x0b\x32\x35.google.ads.googleads.v0.common.MobileAppCategoryInfoH\x00\x42\x0b\n\tcriterionB\x81\x02\n%com.google.ads.googleads.v0.resourcesB\x14SharedCriterionProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V0.Resources\xca\x02!Google\\Ads\\GoogleAds\\V0\\Resources\xea\x02%Google::Ads::GoogleAds::V0::Resourcesb\x06proto3') , dependencies=[google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v0_dot_proto_dot_enums_dot_criterion__type__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,]) _SHAREDCRITERION = _descriptor.Descriptor( name='SharedCriterion', full_name='google.ads.googleads.v0.resources.SharedCriterion', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='resource_name', full_name='google.ads.googleads.v0.resources.SharedCriterion.resource_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='shared_set', full_name='google.ads.googleads.v0.resources.SharedCriterion.shared_set', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='criterion_id', 
full_name='google.ads.googleads.v0.resources.SharedCriterion.criterion_id', index=2, number=26, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='type', full_name='google.ads.googleads.v0.resources.SharedCriterion.type', index=3, number=4, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='keyword', full_name='google.ads.googleads.v0.resources.SharedCriterion.keyword', index=4, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='youtube_video', full_name='google.ads.googleads.v0.resources.SharedCriterion.youtube_video', index=5, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='youtube_channel', full_name='google.ads.googleads.v0.resources.SharedCriterion.youtube_channel', index=6, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='placement', full_name='google.ads.googleads.v0.resources.SharedCriterion.placement', index=7, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mobile_app_category', full_name='google.ads.googleads.v0.resources.SharedCriterion.mobile_app_category', index=8, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='criterion', full_name='google.ads.googleads.v0.resources.SharedCriterion.criterion', index=0, containing_type=None, fields=[]), ], serialized_start=245, serialized_end=849, ) _SHAREDCRITERION.fields_by_name['shared_set'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE _SHAREDCRITERION.fields_by_name['criterion_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE _SHAREDCRITERION.fields_by_name['type'].enum_type = google_dot_ads_dot_googleads__v0_dot_proto_dot_enums_dot_criterion__type__pb2._CRITERIONTYPEENUM_CRITERIONTYPE _SHAREDCRITERION.fields_by_name['keyword'].message_type = google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2._KEYWORDINFO _SHAREDCRITERION.fields_by_name['youtube_video'].message_type = google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2._YOUTUBEVIDEOINFO _SHAREDCRITERION.fields_by_name['youtube_channel'].message_type = google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2._YOUTUBECHANNELINFO _SHAREDCRITERION.fields_by_name['placement'].message_type = google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2._PLACEMENTINFO _SHAREDCRITERION.fields_by_name['mobile_app_category'].message_type = google_dot_ads_dot_googleads__v0_dot_proto_dot_common_dot_criteria__pb2._MOBILEAPPCATEGORYINFO 
_SHAREDCRITERION.oneofs_by_name['criterion'].fields.append( _SHAREDCRITERION.fields_by_name['keyword']) _SHAREDCRITERION.fields_by_name['keyword'].containing_oneof = _SHAREDCRITERION.oneofs_by_name['criterion'] _SHAREDCRITERION.oneofs_by_name['criterion'].fields.append( _SHAREDCRITERION.fields_by_name['youtube_video']) _SHAREDCRITERION.fields_by_name['youtube_video'].containing_oneof = _SHAREDCRITERION.oneofs_by_name['criterion'] _SHAREDCRITERION.oneofs_by_name['criterion'].fields.append( _SHAREDCRITERION.fields_by_name['youtube_channel']) _SHAREDCRITERION.fields_by_name['youtube_channel'].containing_oneof = _SHAREDCRITERION.oneofs_by_name['criterion'] _SHAREDCRITERION.oneofs_by_name['criterion'].fields.append( _SHAREDCRITERION.fields_by_name['placement']) _SHAREDCRITERION.fields_by_name['placement'].containing_oneof = _SHAREDCRITERION.oneofs_by_name['criterion'] _SHAREDCRITERION.oneofs_by_name['criterion'].fields.append( _SHAREDCRITERION.fields_by_name['mobile_app_category']) _SHAREDCRITERION.fields_by_name['mobile_app_category'].containing_oneof = _SHAREDCRITERION.oneofs_by_name['criterion'] DESCRIPTOR.message_types_by_name['SharedCriterion'] = _SHAREDCRITERION _sym_db.RegisterFileDescriptor(DESCRIPTOR) SharedCriterion = _reflection.GeneratedProtocolMessageType('SharedCriterion', (_message.Message,), dict( DESCRIPTOR = _SHAREDCRITERION, __module__ = 'google.ads.googleads_v0.proto.resources.shared_criterion_pb2' , __doc__ = """A criterion belonging to a shared set. Attributes: resource_name: The resource name of the shared criterion. Shared set resource names have the form: ``customers/{customer_id}/sharedCriteria /{shared_set_id}_{criterion_id}`` shared_set: The shared set to which the shared criterion belongs. criterion_id: The ID of the criterion. This field is ignored for mutates. type: The type of the criterion. criterion: The criterion. Exactly one must be set. keyword: Keyword. youtube_video: YouTube Video. youtube_channel: YouTube Channel. 
placement: Placement. mobile_app_category: Mobile App Category. """, # @@protoc_insertion_point(class_scope:google.ads.googleads.v0.resources.SharedCriterion) )) _sym_db.RegisterMessage(SharedCriterion) DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)
true
true
f72c907b1f918fdf342d234b59f8c92fc6aa1d93
2,070
py
Python
cows_bulls.py
hmlewis-astro/coding_practice
a781443399766bf13df0d2de93f0ce3acda0c77d
[ "MIT" ]
null
null
null
cows_bulls.py
hmlewis-astro/coding_practice
a781443399766bf13df0d2de93f0ce3acda0c77d
[ "MIT" ]
null
null
null
cows_bulls.py
hmlewis-astro/coding_practice
a781443399766bf13df0d2de93f0ce3acda0c77d
[ "MIT" ]
null
null
null
''' File name: pythonpractice.py Author: Hannah Lewis Date created: 08/03/2020 Date last modified: 08/03/2020 Python Version: 3.7 ''' import random def main(): ''' Create a program that will play the “cows and bulls” game with the user. ''' print("You will try to guess a random 4-digit number.") print("A 'cow' is a correct digit in the correct place.") print("A 'bull' is a correct digit in the wrong place.") print("The game ends when you get 4 cows!\n") print("You can type 'exit' at any time to end the game.\n") num = str(random.randint(10000, 99999))[1:5] # Get random number, remove first digit so that first digit can be 0 guess = input("Give me your best guess: ") # Get first guess count = 0 cow = 0 bull = 0 guessing = True while guessing: assert len(guess) == 4, "Input must be 4-digits long." if guess == 'exit': # Player can exit at any time print("The number was " + str(num) + ".") print("Better luck next time.") guessing = False break count += 1 for i in range(0,4): # Compare digits if num[i] == guess[i]: cow+=1 elif num[i] in guess: bull+=1 print("You got {} cows, and {} bulls.".format(cow,bull)) # How many cows and bulls if cow == 4: # If all digits are correct if count == 1: print("You got it on the first try!") guessing = False if count > 1: print("You got it! It took you", count, "tries.") print("The number was " + str(num) + ".") guessing = False else: # Guess again cow = bull = 0 guess = input("Guess again: ") #TODO: ask if they want to play another game return if __name__ == '__main__': print("Ready to Cows and Bulls?") main() # Runs exercise
27.972973
117
0.522705
import random def main(): print("You will try to guess a random 4-digit number.") print("A 'cow' is a correct digit in the correct place.") print("A 'bull' is a correct digit in the wrong place.") print("The game ends when you get 4 cows!\n") print("You can type 'exit' at any time to end the game.\n") num = str(random.randint(10000, 99999))[1:5] guess = input("Give me your best guess: ") count = 0 cow = 0 bull = 0 guessing = True while guessing: assert len(guess) == 4, "Input must be 4-digits long." if guess == 'exit': print("The number was " + str(num) + ".") print("Better luck next time.") guessing = False break count += 1 for i in range(0,4): if num[i] == guess[i]: cow+=1 elif num[i] in guess: bull+=1 print("You got {} cows, and {} bulls.".format(cow,bull)) if cow == 4: if count == 1: print("You got it on the first try!") guessing = False if count > 1: print("You got it! It took you", count, "tries.") print("The number was " + str(num) + ".") guessing = False else: cow = bull = 0 guess = input("Guess again: ") return if __name__ == '__main__': print("Ready to Cows and Bulls?") main()
true
true
f72c90b37b41d597ef1c839e1131577727a7329a
227
py
Python
game/admin.py
zxalif/simpleapi
89d9f1c81b7c8e46d9764573fc1070a453751b4a
[ "MIT" ]
null
null
null
game/admin.py
zxalif/simpleapi
89d9f1c81b7c8e46d9764573fc1070a453751b4a
[ "MIT" ]
8
2020-06-05T23:34:44.000Z
2022-02-10T09:11:05.000Z
game/admin.py
zxalif/simpleapi
89d9f1c81b7c8e46d9764573fc1070a453751b4a
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import ( Category, Game, Thread, ThreadImage ) admin.site.register(Category) admin.site.register(Game) admin.site.register(Thread) admin.site.register(ThreadImage)
17.461538
32
0.748899
from django.contrib import admin from .models import ( Category, Game, Thread, ThreadImage ) admin.site.register(Category) admin.site.register(Game) admin.site.register(Thread) admin.site.register(ThreadImage)
true
true
f72c9168c0692e02e3f54b61d9c5f5e6399fc4d3
867
py
Python
blog/pelican-plugins/headerid/headerid.py
lemonsong/lemonsong.github.io
14a65b8c2506c95bab64f50143f3850be3edadc1
[ "MIT" ]
null
null
null
blog/pelican-plugins/headerid/headerid.py
lemonsong/lemonsong.github.io
14a65b8c2506c95bab64f50143f3850be3edadc1
[ "MIT" ]
1
2022-01-10T04:39:05.000Z
2022-01-10T04:39:05.000Z
blog/pelican-plugins/headerid/headerid.py
lemonsong/lemonsong.github.io
14a65b8c2506c95bab64f50143f3850be3edadc1
[ "MIT" ]
null
null
null
from pelican import readers from pelican.readers import PelicanHTMLTranslator from pelican import signals from docutils import nodes def register(): class HeaderIDPatchedPelicanHTMLTranslator(PelicanHTMLTranslator): def depart_title(self, node): close_tag = self.context[-1] parent = node.parent if isinstance(parent, nodes.section) and parent.hasattr('ids') and parent['ids']: anchor_name = parent['ids'][0] # add permalink anchor if close_tag.startswith('</h'): self.body.append( '<a class="headerlink" href="#%s" title="Permalink to this headline">*</a>' % anchor_name ) PelicanHTMLTranslator.depart_title(self, node) readers.PelicanHTMLTranslator = HeaderIDPatchedPelicanHTMLTranslator
43.35
113
0.635525
from pelican import readers from pelican.readers import PelicanHTMLTranslator from pelican import signals from docutils import nodes def register(): class HeaderIDPatchedPelicanHTMLTranslator(PelicanHTMLTranslator): def depart_title(self, node): close_tag = self.context[-1] parent = node.parent if isinstance(parent, nodes.section) and parent.hasattr('ids') and parent['ids']: anchor_name = parent['ids'][0] if close_tag.startswith('</h'): self.body.append( '<a class="headerlink" href="#%s" title="Permalink to this headline">*</a>' % anchor_name ) PelicanHTMLTranslator.depart_title(self, node) readers.PelicanHTMLTranslator = HeaderIDPatchedPelicanHTMLTranslator
true
true
f72c916ef8e95900c5ab3a87d685611c982bda39
2,960
py
Python
linsae/cogs/Events.py
drakedeveloper/Linsae
1a866fbb95df3a7270e446dca18e9dca8beb2c3a
[ "Apache-2.0" ]
1
2019-06-27T00:47:21.000Z
2019-06-27T00:47:21.000Z
linsae/cogs/Events.py
drakedeveloper/Linsae
1a866fbb95df3a7270e446dca18e9dca8beb2c3a
[ "Apache-2.0" ]
null
null
null
linsae/cogs/Events.py
drakedeveloper/Linsae
1a866fbb95df3a7270e446dca18e9dca8beb2c3a
[ "Apache-2.0" ]
null
null
null
import discord import time import asyncio from datetime import datetime import time from discord.ext import tasks, commands from tinydb import TinyDB, Query import re class Events(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_guild_join(self, guild): role = await guild.create_role(name="Muted", colour=discord.Colour.dark_grey()) for channel in guild.channels: await channel.set_permissions(role, send_messages = False) await asyncio.sleep(delay=5) for member in guild.members: if member.guild_permissions.administrator and member.id != self.bot.user.id: join_message = discord.Embed(title="__**Linsae!**__", description=f"**Hello, {member.mention}, This is me linsae and in order for me to work you need to do some configuration, sooo let's get started!**", colour=0x4298f4, timestamp=datetime.utcnow()) join_message.add_field(name="__Knowledge__", value=f"""**First of all, {member.mention} let me introduce my self: - My name as you know is Linsae and i'm glad to meet you. - My developer is Ɗrake#7418 and if you need any help with bots or something feel free to contact him! - My birthday is 6/25/2019.**""") join_message.add_field(name="__Configuration__", value=""" Alright so i'm a support bot that helps moderators and make their lifes easier, so what do i do ? .If a member needs help with something he can just type ***?support*** in a specific channel that i will menion later. .i have many moderator commands like ban, warn, kick, mute and more.... 
--> Now in order to do all that the i need to config somethings in the server and don't worry i won't do harm to it!i will just create some channels and roles and ask you things but for that to work you need to type ***?ticketconfig*** in any channel and i will give you instructions!""") join_message.set_footer( text="For more help just try to read this embed again or contact the developer!", icon_url=self.bot.user.avatar_url) join_message.set_author(name=self.bot.user) join_message.set_thumbnail(url=guild.icon_url) await member.send(embed=join_message) @commands.Cog.listener() async def on_message(self, message): if str(message.channel) == "ticket-request": if message.content != "?support": await message.delete() if message.content == "nigga" or message.content == "nigger" or message.content == "nigro": await message.delete() await message.channel.send("You can't say that!") def setup(bot): bot.add_cog(Events(bot))
55.849057
294
0.631419
import discord import time import asyncio from datetime import datetime import time from discord.ext import tasks, commands from tinydb import TinyDB, Query import re class Events(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_guild_join(self, guild): role = await guild.create_role(name="Muted", colour=discord.Colour.dark_grey()) for channel in guild.channels: await channel.set_permissions(role, send_messages = False) await asyncio.sleep(delay=5) for member in guild.members: if member.guild_permissions.administrator and member.id != self.bot.user.id: join_message = discord.Embed(title="__**Linsae!**__", description=f"**Hello, {member.mention}, This is me linsae and in order for me to work you need to do some configuration, sooo let's get started!**", colour=0x4298f4, timestamp=datetime.utcnow()) join_message.add_field(name="__Knowledge__", value=f"""**First of all, {member.mention} let me introduce my self: - My name as you know is Linsae and i'm glad to meet you. - My developer is Ɗrake#7418 and if you need any help with bots or something feel free to contact him! - My birthday is 6/25/2019.**""") join_message.add_field(name="__Configuration__", value=""" Alright so i'm a support bot that helps moderators and make their lifes easier, so what do i do ? .If a member needs help with something he can just type ***?support*** in a specific channel that i will menion later. .i have many moderator commands like ban, warn, kick, mute and more.... 
--> Now in order to do all that the i need to config somethings in the server and don't worry i won't do harm to it!i will just create some channels and roles and ask you things but for that to work you need to type ***?ticketconfig*** in any channel and i will give you instructions!""") join_message.set_footer( text="For more help just try to read this embed again or contact the developer!", icon_url=self.bot.user.avatar_url) join_message.set_author(name=self.bot.user) join_message.set_thumbnail(url=guild.icon_url) await member.send(embed=join_message) @commands.Cog.listener() async def on_message(self, message): if str(message.channel) == "ticket-request": if message.content != "?support": await message.delete() if message.content == "nigga" or message.content == "nigger" or message.content == "nigro": await message.delete() await message.channel.send("You can't say that!") def setup(bot): bot.add_cog(Events(bot))
true
true
f72c919a5fbacff307b79548546b94830a8d5ed5
26,995
py
Python
kivymd/uix/list.py
akaminetzkyp/KivyMD
940791ee1217e09184d8916c0eccc7534f097a48
[ "MIT" ]
1
2020-07-01T12:39:51.000Z
2020-07-01T12:39:51.000Z
kivymd/uix/list.py
ayo6706/KivyMD
c67850fd9f505d20a9e86ab89a39918daf34cd43
[ "MIT" ]
null
null
null
kivymd/uix/list.py
ayo6706/KivyMD
c67850fd9f505d20a9e86ab89a39918daf34cd43
[ "MIT" ]
null
null
null
""" Components/List =============== .. seealso:: `Material Design spec, Lists <https://material.io/components/lists>`_ .. rubric:: Lists are continuous, vertical indexes of text or images. .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/lists.png :align: center The class :class:`~MDList` in combination with a :class:`~BaseListItem` like :class:`~OneLineListItem` will create a list that expands as items are added to it, working nicely with `Kivy's` :class:`~kivy.uix.scrollview.ScrollView`. Due to the variety in sizes and controls in the `Material Design spec`, this module suffers from a certain level of complexity to keep the widgets compliant, flexible and performant. For this `KivyMD` provides list items that try to cover the most common usecases, when those are insufficient, there's a base class called :class:`~BaseListItem` which you can use to create your own list items. This documentation will only cover the provided ones, for custom implementations please refer to this module's source code. `KivyMD` provides the following list items classes for use: Text only ListItems ------------------- - OneLineListItem_ - TwoLineListItem_ - ThreeLineListItem_ ListItems with widget containers -------------------------------- These widgets will take other widgets that inherit from :class:`~ILeftBody`, :class:`ILeftBodyTouch`, :class:`~IRightBody` or :class:`~IRightBodyTouch` and put them in their corresponding container. As the name implies, :class:`~ILeftBody` and :class:`~IRightBody` will signal that the widget goes into the left or right container, respectively. :class:`~ILeftBodyTouch` and :class:`~IRightBodyTouch` do the same thing, except these widgets will also receive touch events that occur within their surfaces. `KivyMD` provides base classes such as :class:`~ImageLeftWidget`, :class:`~ImageRightWidget`, :class:`~IconRightWidget`, :class:`~IconLeftWidget`, based on the above classes. .. 
rubric:: Allows the use of items with custom widgets on the left. - OneLineAvatarListItem_ - TwoLineAvatarListItem_ - ThreeLineAvatarListItem_ - OneLineIconListItem_ - TwoLineIconListItem_ - ThreeLineIconListItem_ .. rubric:: It allows the use of elements with custom widgets on the left and the right. - OneLineAvatarIconListItem_ - TwoLineAvatarIconListItem_ - ThreeLineAvatarIconListItem_ Usage ----- .. code-block:: python from kivy.lang import Builder from kivymd.app import MDApp from kivymd.uix.list import OneLineListItem KV = ''' ScrollView: MDList: id: container ''' class Test(MDApp): def build(self): return Builder.load_string(KV) def on_start(self): for i in range(20): self.root.ids.container.add_widget( OneLineListItem(text=f"Single-line item {i}") ) Test().run() .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/lists.gif :align: center .. OneLineListItem: OneLineListItem --------------- .. code-block:: kv OneLineListItem: text: "Single-line item" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/OneLineListItem.png :align: center .. TwoLineListItem: TwoLineListItem --------------- .. code-block:: kv TwoLineListItem: text: "Two-line item" secondary_text: "Secondary text here" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/TwoLineListItem.png :align: center .. ThreeLineListItem: ThreeLineListItem ----------------- .. code-block:: kv ThreeLineListItem: text: "Three-line item" secondary_text: "This is a multi-line label where you can" tertiary_text: "fit more text than usual" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/ThreeLineListItem.png :align: center .. OneLineAvatarListItem: OneLineAvatarListItem --------------------- .. code-block:: kv OneLineAvatarListItem: text: "Single-line item with avatar" ImageLeftWidget: source: "data/logo/kivy-icon-256.png" .. 
image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/lists-map.png :align: center .. TwoLineAvatarListItem: TwoLineAvatarListItem --------------------- .. code-block:: kv TwoLineAvatarListItem: text: "Two-line item with avatar" secondary_text: "Secondary text here" ImageLeftWidget: source: "data/logo/kivy-icon-256.png" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/TwoLineAvatarListItem.png :align: center .. ThreeLineAvatarListItem: ThreeLineAvatarListItem ----------------------- .. code-block:: kv ThreeLineAvatarListItem: text: "Three-line item with avatar" secondary_text: "Secondary text here" tertiary_text: "fit more text than usual" ImageLeftWidget: source: "data/logo/kivy-icon-256.png" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/ThreeLineAvatarListItem.png :align: center .. OneLineIconListItem: OneLineIconListItem ------------------- .. code-block:: kv OneLineAvatarListItem: text: "Single-line item with avatar" IconLeftWidget: icon: "language-python" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/OneLineIconListItem.png :align: center .. TwoLineIconListItem: TwoLineIconListItem ------------------- .. code-block:: kv TwoLineIconListItem: text: "Two-line item with avatar" secondary_text: "Secondary text here" IconLeftWidget: icon: "language-python" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/TwoLineIconListItem.png :align: center .. ThreeLineIconListItem: ThreeLineIconListItem --------------------- .. code-block:: kv ThreeLineIconListItem: text: "Three-line item with avatar" secondary_text: "Secondary text here" tertiary_text: "fit more text than usual" IconLeftWidget: icon: "language-python" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/ThreeLineIconListItem.png :align: center .. 
OneLineAvatarIconListItem: OneLineAvatarIconListItem ------------------------- .. code-block:: kv OneLineAvatarIconListItem: text: "One-line item with avatar" IconLeftWidget: icon: "plus" IconRightWidget: icon: "minus" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/OneLineAvatarIconListItem.png :align: center .. TwoLineAvatarIconListItem: TwoLineAvatarIconListItem ------------------------- .. code-block:: kv TwoLineAvatarIconListItem: text: "Two-line item with avatar" secondary_text: "Secondary text here" IconLeftWidget: icon: "plus" IconRightWidget: icon: "minus" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/TwoLineAvatarIconListItem.png :align: center .. ThreeLineAvatarIconListItem: ThreeLineAvatarIconListItem --------------------------- .. code-block:: kv ThreeLineAvatarIconListItem: text: "Three-line item with avatar" secondary_text: "Secondary text here" tertiary_text: "fit more text than usual" IconLeftWidget: icon: "plus" IconRightWidget: icon: "minus" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/ThreeLineAvatarIconListItem.png :align: center Custom list item ---------------- .. 
code-block:: python from kivy.lang import Builder from kivy.properties import StringProperty from kivymd.app import MDApp from kivymd.uix.list import IRightBodyTouch, OneLineAvatarIconListItem from kivymd.uix.selectioncontrol import MDCheckbox from kivymd.icon_definitions import md_icons KV = ''' <ListItemWithCheckbox>: IconLeftWidget: icon: root.icon RightCheckbox: BoxLayout: ScrollView: MDList: id: scroll ''' class ListItemWithCheckbox(OneLineAvatarIconListItem): '''Custom list item.''' icon = StringProperty("android") class RightCheckbox(IRightBodyTouch, MDCheckbox): '''Custom right container.''' class MainApp(MDApp): def build(self): return Builder.load_string(KV) def on_start(self): icons = list(md_icons.keys()) for i in range(30): self.root.ids.scroll.add_widget( ListItemWithCheckbox(text=f"Item {i}", icon=icons[i]) ) MainApp().run() .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/custom-list-item.png :align: center .. code-block:: python from kivy.lang import Builder from kivymd.app import MDApp from kivymd.uix.boxlayout import MDBoxLayout from kivymd.uix.list import IRightBodyTouch KV = ''' OneLineAvatarIconListItem: text: "One-line item with avatar" on_size: self.ids._right_container.width = container.width self.ids._right_container.x = container.width IconLeftWidget: icon: "settings" Container: id: container MDIconButton: icon: "minus" MDIconButton: icon: "plus" ''' class Container(IRightBodyTouch, MDBoxLayout): adaptive_width = True class MainApp(MDApp): def build(self): return Builder.load_string(KV) MainApp().run() .. 
image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/custom-list-right-container.png :align: center """ from kivy.lang import Builder from kivy.metrics import dp from kivy.properties import ( StringProperty, NumericProperty, ListProperty, OptionProperty, BooleanProperty, ) from kivy.uix.behaviors import ButtonBehavior from kivy.uix.floatlayout import FloatLayout from kivy.uix.image import Image import kivymd.material_resources as m_res from kivymd.uix.behaviors import RectangularRippleBehavior from kivymd.uix.button import MDIconButton from kivymd.theming import ThemableBehavior from kivymd.font_definitions import theme_font_styles from kivymd.uix.gridlayout import MDGridLayout from kivymd.uix.selectioncontrol import MDCheckbox Builder.load_string( """ #:import m_res kivymd.material_resources <MDList> cols: 1 adaptive_height: True padding: 0, self._list_vertical_padding <BaseListItem> size_hint_y: None canvas: Color: rgba: self.theme_cls.divider_color if root.divider is not None\ else (0, 0, 0, 0) Line: points: (root.x ,root.y, root.x+self.width, root.y)\ if root.divider == 'Full' else\ (root.x+root._txt_left_pad, root.y,\ root.x+self.width-root._txt_left_pad-root._txt_right_pad,\ root.y) Color: rgba: root.bg_color if root.bg_color else (0, 0, 0, 0) Rectangle: pos: self.pos size: self.size BoxLayout: id: _text_container orientation: 'vertical' pos: root.pos padding: root._txt_left_pad, root._txt_top_pad,\ root._txt_right_pad, root._txt_bot_pad MDLabel: id: _lbl_primary text: root.text font_style: root.font_style theme_text_color: root.theme_text_color text_color: root.text_color size_hint_y: None height: self.texture_size[1] markup: True shorten_from: 'right' shorten: True MDLabel: id: _lbl_secondary text: '' if root._num_lines == 1 else root.secondary_text font_style: root.secondary_font_style theme_text_color: root.secondary_theme_text_color text_color: root.secondary_text_color size_hint_y: None height: 0 if root._num_lines == 1 else 
self.texture_size[1] shorten: True shorten_from: 'right' markup: True MDLabel: id: _lbl_tertiary text: '' if root._num_lines == 1 else root.tertiary_text font_style: root.tertiary_font_style theme_text_color: root.tertiary_theme_text_color text_color: root.tertiary_text_color size_hint_y: None height: 0 if root._num_lines == 1 else self.texture_size[1] shorten: True shorten_from: 'right' markup: True <OneLineAvatarListItem> BoxLayout: id: _left_container size_hint: None, None x: root.x + dp(16) y: root.y + root.height/2 - self.height/2 size: dp(40), dp(40) <ThreeLineAvatarListItem> BoxLayout: id: _left_container size_hint: None, None x: root.x + dp(16) y: root.y + root.height - root._txt_top_pad - self.height - dp(5) size: dp(40), dp(40) <OneLineIconListItem> BoxLayout: id: _left_container size_hint: None, None x: root.x + dp(16) y: root.y + root.height/2 - self.height/2 size: dp(48), dp(48) <ThreeLineIconListItem> BoxLayout: id: _left_container size_hint: None, None x: root.x + dp(16) y: root.y + root.height - root._txt_top_pad - self.height - dp(5) size: dp(48), dp(48) <OneLineRightIconListItem> BoxLayout: id: _right_container size_hint: None, None x: root.x + root.width - m_res.HORIZ_MARGINS - self.width y: root.y + root.height/2 - self.height/2 size: dp(48), dp(48) <ThreeLineRightIconListItem> BoxLayout: id: _right_container size_hint: None, None x: root.x + root.width - m_res.HORIZ_MARGINS - self.width y: root.y + root.height/2 - self.height/2 size: dp(48), dp(48) <OneLineAvatarIconListItem> BoxLayout: id: _right_container size_hint: None, None x: root.x + root.width - m_res.HORIZ_MARGINS - self.width y: root.y + root.height/2 - self.height/2 size: dp(48), dp(48) <TwoLineAvatarIconListItem> BoxLayout: id: _right_container size_hint: None, None x: root.x + root.width - m_res.HORIZ_MARGINS - self.width y: root.y + root.height/2 - self.height/2 size: dp(48), dp(48) <ThreeLineAvatarIconListItem> BoxLayout: id: _right_container size_hint: None, None x: root.x + 
root.width - m_res.HORIZ_MARGINS - self.width y: root.y + root.height - root._txt_top_pad - self.height - dp(5) size: dp(48), dp(48) """ ) class MDList(MDGridLayout): """ListItem container. Best used in conjunction with a :class:`kivy.uix.ScrollView`. When adding (or removing) a widget, it will resize itself to fit its children, plus top and bottom paddings as described by the `MD` spec. """ _list_vertical_padding = NumericProperty("8dp") def add_widget(self, widget, index=0, canvas=None): super().add_widget(widget, index, canvas) self.height += widget.height def remove_widget(self, widget): super().remove_widget(widget) self.height -= widget.height class BaseListItem( ThemableBehavior, RectangularRippleBehavior, ButtonBehavior, FloatLayout ): """ Base class to all ListItems. Not supposed to be instantiated on its own. """ text = StringProperty() """ Text shown in the first line. :attr:`text` is a :class:`~kivy.properties.StringProperty` and defaults to `''`. """ text_color = ListProperty(None) """ Text color in ``rgba`` format used if :attr:`~theme_text_color` is set to `'Custom'`. :attr:`text_color` is a :class:`~kivy.properties.ListProperty` and defaults to `None`. """ font_style = OptionProperty("Subtitle1", options=theme_font_styles) """ Text font style. See ``kivymd.font_definitions.py``. :attr:`font_style` is a :class:`~kivy.properties.OptionProperty` and defaults to `'Subtitle1'`. """ theme_text_color = StringProperty("Primary", allownone=True) """ Theme text color in ``rgba`` format for primary text. :attr:`theme_text_color` is a :class:`~kivy.properties.StringProperty` and defaults to `'Primary'`. """ secondary_text = StringProperty() """ Text shown in the second line. :attr:`secondary_text` is a :class:`~kivy.properties.StringProperty` and defaults to `''`. """ tertiary_text = StringProperty() """ The text is displayed on the third line. :attr:`tertiary_text` is a :class:`~kivy.properties.StringProperty` and defaults to `''`. 
""" secondary_text_color = ListProperty(None) """ Text color in ``rgba`` format used for secondary text if :attr:`~secondary_theme_text_color` is set to `'Custom'`. :attr:`secondary_text_color` is a :class:`~kivy.properties.ListProperty` and defaults to `None`. """ tertiary_text_color = ListProperty(None) """ Text color in ``rgba`` format used for tertiary text if :attr:`~secondary_theme_text_color` is set to 'Custom'. :attr:`tertiary_text_color` is a :class:`~kivy.properties.ListProperty` and defaults to `None`. """ secondary_theme_text_color = StringProperty("Secondary", allownone=True) """ Theme text color for secondary text. :attr:`secondary_theme_text_color` is a :class:`~kivy.properties.StringProperty` and defaults to `'Secondary'`. """ tertiary_theme_text_color = StringProperty("Secondary", allownone=True) """ Theme text color for tertiary text. :attr:`tertiary_theme_text_color` is a :class:`~kivy.properties.StringProperty` and defaults to `'Secondary'`. """ secondary_font_style = OptionProperty("Body1", options=theme_font_styles) """ Font style for secondary line. See ``kivymd.font_definitions.py``. :attr:`secondary_font_style` is a :class:`~kivy.properties.OptionProperty` and defaults to `'Body1'`. """ tertiary_font_style = OptionProperty("Body1", options=theme_font_styles) """ Font style for tertiary line. See ``kivymd.font_definitions.py``. :attr:`tertiary_font_style` is a :class:`~kivy.properties.OptionProperty` and defaults to `'Body1'`. """ divider = OptionProperty( "Full", options=["Full", "Inset", None], allownone=True ) """ Divider mode. Available options are: `'Full'`, `'Inset'` and default to `'Full'`. :attr:`tertiary_font_style` is a :class:`~kivy.properties.OptionProperty` and defaults to `'Body1'`. """ bg_color = ListProperty() """ Background color for menu item. :attr:`bg_color` is a :class:`~kivy.properties.ListProperty` and defaults to `[]`. 
""" _txt_left_pad = NumericProperty("16dp") _txt_top_pad = NumericProperty() _txt_bot_pad = NumericProperty() _txt_right_pad = NumericProperty(m_res.HORIZ_MARGINS) _num_lines = 3 _no_ripple_effect = BooleanProperty(False) class ILeftBody: """ Pseudo-interface for widgets that go in the left container for ListItems that support it. Implements nothing and requires no implementation, for annotation only. """ pass class ILeftBodyTouch: """ Same as :class:`~ILeftBody`, but allows the widget to receive touch events instead of triggering the ListItem's ripple effect. """ pass class IRightBody: """ Pseudo-interface for widgets that go in the right container for ListItems that support it. Implements nothing and requires no implementation, for annotation only. """ pass class IRightBodyTouch: """ Same as :class:`~IRightBody`, but allows the widget to receive touch events instead of triggering the ``ListItem``'s ripple effect """ pass class ContainerSupport: """ Overrides ``add_widget`` in a ``ListItem`` to include support for ``I*Body`` widgets when the appropiate containers are present. 
""" _touchable_widgets = ListProperty() def add_widget(self, widget, index=0): if issubclass(widget.__class__, ILeftBody): self.ids._left_container.add_widget(widget) elif issubclass(widget.__class__, ILeftBodyTouch): self.ids._left_container.add_widget(widget) self._touchable_widgets.append(widget) elif issubclass(widget.__class__, IRightBody): self.ids._right_container.add_widget(widget) elif issubclass(widget.__class__, IRightBodyTouch): self.ids._right_container.add_widget(widget) self._touchable_widgets.append(widget) else: return super().add_widget(widget) def remove_widget(self, widget): super().remove_widget(widget) if widget in self._touchable_widgets: self._touchable_widgets.remove(widget) def on_touch_down(self, touch): if self.propagate_touch_to_touchable_widgets(touch, "down"): return super().on_touch_down(touch) def on_touch_move(self, touch, *args): if self.propagate_touch_to_touchable_widgets(touch, "move", *args): return super().on_touch_move(touch, *args) def on_touch_up(self, touch): if self.propagate_touch_to_touchable_widgets(touch, "up"): return super().on_touch_up(touch) def propagate_touch_to_touchable_widgets(self, touch, touch_event, *args): triggered = False for i in self._touchable_widgets: if i.collide_point(touch.x, touch.y): triggered = True if touch_event == "down": i.on_touch_down(touch) elif touch_event == "move": i.on_touch_move(touch, *args) elif touch_event == "up": i.on_touch_up(touch) return triggered class OneLineListItem(BaseListItem): """A one line list item.""" _txt_top_pad = NumericProperty("16dp") _txt_bot_pad = NumericProperty("15dp") # dp(20) - dp(5) _height = NumericProperty() _num_lines = 1 def __init__(self, **kwargs): super().__init__(**kwargs) self.height = dp(48) if not self._height else self._height class TwoLineListItem(BaseListItem): """A two line list item.""" _txt_top_pad = NumericProperty("20dp") _txt_bot_pad = NumericProperty("15dp") # dp(20) - dp(5) _height = NumericProperty() def __init__(self, 
**kwargs): super().__init__(**kwargs) self.height = dp(72) if not self._height else self._height class ThreeLineListItem(BaseListItem): """A three line list item.""" _txt_top_pad = NumericProperty("16dp") _txt_bot_pad = NumericProperty("15dp") # dp(20) - dp(5) _height = NumericProperty() _num_lines = 3 def __init__(self, **kwargs): super().__init__(**kwargs) self.height = dp(88) if not self._height else self._height class OneLineAvatarListItem(ContainerSupport, BaseListItem): _txt_left_pad = NumericProperty("72dp") _txt_top_pad = NumericProperty("20dp") _txt_bot_pad = NumericProperty("19dp") # dp(24) - dp(5) _height = NumericProperty() _num_lines = 1 def __init__(self, **kwargs): super().__init__(**kwargs) self.height = dp(56) if not self._height else self._height class TwoLineAvatarListItem(OneLineAvatarListItem): _txt_top_pad = NumericProperty("20dp") _txt_bot_pad = NumericProperty("15dp") # dp(20) - dp(5) _height = NumericProperty() _num_lines = 2 def __init__(self, **kwargs): super().__init__(**kwargs) self.height = dp(72) if not self._height else self._height class ThreeLineAvatarListItem(ContainerSupport, ThreeLineListItem): _txt_left_pad = NumericProperty("72dp") class OneLineIconListItem(ContainerSupport, OneLineListItem): _txt_left_pad = NumericProperty("72dp") class TwoLineIconListItem(OneLineIconListItem): _txt_top_pad = NumericProperty("20dp") _txt_bot_pad = NumericProperty("15dp") # dp(20) - dp(5) _height = NumericProperty() _num_lines = 2 def __init__(self, **kwargs): super().__init__(**kwargs) self.height = dp(72) if not self._height else self._height class ThreeLineIconListItem(ContainerSupport, ThreeLineListItem): _txt_left_pad = NumericProperty("72dp") class OneLineRightIconListItem(ContainerSupport, OneLineListItem): # dp(40) = dp(16) + dp(24): _txt_right_pad = NumericProperty("40dp") def __init__(self, **kwargs): super().__init__(**kwargs) self._txt_right_pad = dp(40) + m_res.HORIZ_MARGINS class 
TwoLineRightIconListItem(OneLineRightIconListItem): _txt_top_pad = NumericProperty("20dp") _txt_bot_pad = NumericProperty("15dp") # dp(20) - dp(5) _height = NumericProperty() _num_lines = 2 def __init__(self, **kwargs): super().__init__(**kwargs) self.height = dp(72) if not self._height else self._height class ThreeLineRightIconListItem(ContainerSupport, ThreeLineListItem): # dp(40) = dp(16) + dp(24): _txt_right_pad = NumericProperty("40dp") def __init__(self, **kwargs): super().__init__(**kwargs) self._txt_right_pad = dp(40) + m_res.HORIZ_MARGINS class OneLineAvatarIconListItem(OneLineAvatarListItem): # dp(40) = dp(16) + dp(24): _txt_right_pad = NumericProperty("40dp") def __init__(self, **kwargs): super().__init__(**kwargs) self._txt_right_pad = dp(40) + m_res.HORIZ_MARGINS class TwoLineAvatarIconListItem(TwoLineAvatarListItem): # dp(40) = dp(16) + dp(24): _txt_right_pad = NumericProperty("40dp") def __init__(self, **kwargs): super().__init__(**kwargs) self._txt_right_pad = dp(40) + m_res.HORIZ_MARGINS class ThreeLineAvatarIconListItem(ThreeLineAvatarListItem): # dp(40) = dp(16) + dp(24): _txt_right_pad = NumericProperty("40dp") def __init__(self, **kwargs): super().__init__(**kwargs) self._txt_right_pad = dp(40) + m_res.HORIZ_MARGINS class ImageLeftWidget(ILeftBody, Image): pass class ImageRightWidget(IRightBodyTouch, Image): pass class IconRightWidget(IRightBodyTouch, MDIconButton): pass class IconLeftWidget(ILeftBodyTouch, MDIconButton): pass class CheckboxLeftWidget(ILeftBodyTouch, MDCheckbox): pass
27.185297
113
0.655195
from kivy.lang import Builder from kivy.metrics import dp from kivy.properties import ( StringProperty, NumericProperty, ListProperty, OptionProperty, BooleanProperty, ) from kivy.uix.behaviors import ButtonBehavior from kivy.uix.floatlayout import FloatLayout from kivy.uix.image import Image import kivymd.material_resources as m_res from kivymd.uix.behaviors import RectangularRippleBehavior from kivymd.uix.button import MDIconButton from kivymd.theming import ThemableBehavior from kivymd.font_definitions import theme_font_styles from kivymd.uix.gridlayout import MDGridLayout from kivymd.uix.selectioncontrol import MDCheckbox Builder.load_string( """ #:import m_res kivymd.material_resources <MDList> cols: 1 adaptive_height: True padding: 0, self._list_vertical_padding <BaseListItem> size_hint_y: None canvas: Color: rgba: self.theme_cls.divider_color if root.divider is not None\ else (0, 0, 0, 0) Line: points: (root.x ,root.y, root.x+self.width, root.y)\ if root.divider == 'Full' else\ (root.x+root._txt_left_pad, root.y,\ root.x+self.width-root._txt_left_pad-root._txt_right_pad,\ root.y) Color: rgba: root.bg_color if root.bg_color else (0, 0, 0, 0) Rectangle: pos: self.pos size: self.size BoxLayout: id: _text_container orientation: 'vertical' pos: root.pos padding: root._txt_left_pad, root._txt_top_pad,\ root._txt_right_pad, root._txt_bot_pad MDLabel: id: _lbl_primary text: root.text font_style: root.font_style theme_text_color: root.theme_text_color text_color: root.text_color size_hint_y: None height: self.texture_size[1] markup: True shorten_from: 'right' shorten: True MDLabel: id: _lbl_secondary text: '' if root._num_lines == 1 else root.secondary_text font_style: root.secondary_font_style theme_text_color: root.secondary_theme_text_color text_color: root.secondary_text_color size_hint_y: None height: 0 if root._num_lines == 1 else self.texture_size[1] shorten: True shorten_from: 'right' markup: True MDLabel: id: _lbl_tertiary text: '' if root._num_lines == 1 
else root.tertiary_text font_style: root.tertiary_font_style theme_text_color: root.tertiary_theme_text_color text_color: root.tertiary_text_color size_hint_y: None height: 0 if root._num_lines == 1 else self.texture_size[1] shorten: True shorten_from: 'right' markup: True <OneLineAvatarListItem> BoxLayout: id: _left_container size_hint: None, None x: root.x + dp(16) y: root.y + root.height/2 - self.height/2 size: dp(40), dp(40) <ThreeLineAvatarListItem> BoxLayout: id: _left_container size_hint: None, None x: root.x + dp(16) y: root.y + root.height - root._txt_top_pad - self.height - dp(5) size: dp(40), dp(40) <OneLineIconListItem> BoxLayout: id: _left_container size_hint: None, None x: root.x + dp(16) y: root.y + root.height/2 - self.height/2 size: dp(48), dp(48) <ThreeLineIconListItem> BoxLayout: id: _left_container size_hint: None, None x: root.x + dp(16) y: root.y + root.height - root._txt_top_pad - self.height - dp(5) size: dp(48), dp(48) <OneLineRightIconListItem> BoxLayout: id: _right_container size_hint: None, None x: root.x + root.width - m_res.HORIZ_MARGINS - self.width y: root.y + root.height/2 - self.height/2 size: dp(48), dp(48) <ThreeLineRightIconListItem> BoxLayout: id: _right_container size_hint: None, None x: root.x + root.width - m_res.HORIZ_MARGINS - self.width y: root.y + root.height/2 - self.height/2 size: dp(48), dp(48) <OneLineAvatarIconListItem> BoxLayout: id: _right_container size_hint: None, None x: root.x + root.width - m_res.HORIZ_MARGINS - self.width y: root.y + root.height/2 - self.height/2 size: dp(48), dp(48) <TwoLineAvatarIconListItem> BoxLayout: id: _right_container size_hint: None, None x: root.x + root.width - m_res.HORIZ_MARGINS - self.width y: root.y + root.height/2 - self.height/2 size: dp(48), dp(48) <ThreeLineAvatarIconListItem> BoxLayout: id: _right_container size_hint: None, None x: root.x + root.width - m_res.HORIZ_MARGINS - self.width y: root.y + root.height - root._txt_top_pad - self.height - dp(5) size: dp(48), dp(48) 
""" ) class MDList(MDGridLayout): _list_vertical_padding = NumericProperty("8dp") def add_widget(self, widget, index=0, canvas=None): super().add_widget(widget, index, canvas) self.height += widget.height def remove_widget(self, widget): super().remove_widget(widget) self.height -= widget.height class BaseListItem( ThemableBehavior, RectangularRippleBehavior, ButtonBehavior, FloatLayout ): text = StringProperty() text_color = ListProperty(None) font_style = OptionProperty("Subtitle1", options=theme_font_styles) theme_text_color = StringProperty("Primary", allownone=True) secondary_text = StringProperty() tertiary_text = StringProperty() secondary_text_color = ListProperty(None) tertiary_text_color = ListProperty(None) secondary_theme_text_color = StringProperty("Secondary", allownone=True) tertiary_theme_text_color = StringProperty("Secondary", allownone=True) secondary_font_style = OptionProperty("Body1", options=theme_font_styles) tertiary_font_style = OptionProperty("Body1", options=theme_font_styles) divider = OptionProperty( "Full", options=["Full", "Inset", None], allownone=True ) bg_color = ListProperty() _txt_left_pad = NumericProperty("16dp") _txt_top_pad = NumericProperty() _txt_bot_pad = NumericProperty() _txt_right_pad = NumericProperty(m_res.HORIZ_MARGINS) _num_lines = 3 _no_ripple_effect = BooleanProperty(False) class ILeftBody: pass class ILeftBodyTouch: pass class IRightBody: pass class IRightBodyTouch: pass class ContainerSupport: _touchable_widgets = ListProperty() def add_widget(self, widget, index=0): if issubclass(widget.__class__, ILeftBody): self.ids._left_container.add_widget(widget) elif issubclass(widget.__class__, ILeftBodyTouch): self.ids._left_container.add_widget(widget) self._touchable_widgets.append(widget) elif issubclass(widget.__class__, IRightBody): self.ids._right_container.add_widget(widget) elif issubclass(widget.__class__, IRightBodyTouch): self.ids._right_container.add_widget(widget) self._touchable_widgets.append(widget) 
else: return super().add_widget(widget) def remove_widget(self, widget): super().remove_widget(widget) if widget in self._touchable_widgets: self._touchable_widgets.remove(widget) def on_touch_down(self, touch): if self.propagate_touch_to_touchable_widgets(touch, "down"): return super().on_touch_down(touch) def on_touch_move(self, touch, *args): if self.propagate_touch_to_touchable_widgets(touch, "move", *args): return super().on_touch_move(touch, *args) def on_touch_up(self, touch): if self.propagate_touch_to_touchable_widgets(touch, "up"): return super().on_touch_up(touch) def propagate_touch_to_touchable_widgets(self, touch, touch_event, *args): triggered = False for i in self._touchable_widgets: if i.collide_point(touch.x, touch.y): triggered = True if touch_event == "down": i.on_touch_down(touch) elif touch_event == "move": i.on_touch_move(touch, *args) elif touch_event == "up": i.on_touch_up(touch) return triggered class OneLineListItem(BaseListItem): _txt_top_pad = NumericProperty("16dp") _txt_bot_pad = NumericProperty("15dp") _height = NumericProperty() _num_lines = 1 def __init__(self, **kwargs): super().__init__(**kwargs) self.height = dp(48) if not self._height else self._height class TwoLineListItem(BaseListItem): _txt_top_pad = NumericProperty("20dp") _txt_bot_pad = NumericProperty("15dp") _height = NumericProperty() def __init__(self, **kwargs): super().__init__(**kwargs) self.height = dp(72) if not self._height else self._height class ThreeLineListItem(BaseListItem): _txt_top_pad = NumericProperty("16dp") _txt_bot_pad = NumericProperty("15dp") _height = NumericProperty() _num_lines = 3 def __init__(self, **kwargs): super().__init__(**kwargs) self.height = dp(88) if not self._height else self._height class OneLineAvatarListItem(ContainerSupport, BaseListItem): _txt_left_pad = NumericProperty("72dp") _txt_top_pad = NumericProperty("20dp") _txt_bot_pad = NumericProperty("19dp") _height = NumericProperty() _num_lines = 1 def __init__(self, **kwargs): 
super().__init__(**kwargs) self.height = dp(56) if not self._height else self._height class TwoLineAvatarListItem(OneLineAvatarListItem): _txt_top_pad = NumericProperty("20dp") _txt_bot_pad = NumericProperty("15dp") _height = NumericProperty() _num_lines = 2 def __init__(self, **kwargs): super().__init__(**kwargs) self.height = dp(72) if not self._height else self._height class ThreeLineAvatarListItem(ContainerSupport, ThreeLineListItem): _txt_left_pad = NumericProperty("72dp") class OneLineIconListItem(ContainerSupport, OneLineListItem): _txt_left_pad = NumericProperty("72dp") class TwoLineIconListItem(OneLineIconListItem): _txt_top_pad = NumericProperty("20dp") _txt_bot_pad = NumericProperty("15dp") _height = NumericProperty() _num_lines = 2 def __init__(self, **kwargs): super().__init__(**kwargs) self.height = dp(72) if not self._height else self._height class ThreeLineIconListItem(ContainerSupport, ThreeLineListItem): _txt_left_pad = NumericProperty("72dp") class OneLineRightIconListItem(ContainerSupport, OneLineListItem): _txt_right_pad = NumericProperty("40dp") def __init__(self, **kwargs): super().__init__(**kwargs) self._txt_right_pad = dp(40) + m_res.HORIZ_MARGINS class TwoLineRightIconListItem(OneLineRightIconListItem): _txt_top_pad = NumericProperty("20dp") _txt_bot_pad = NumericProperty("15dp") _height = NumericProperty() _num_lines = 2 def __init__(self, **kwargs): super().__init__(**kwargs) self.height = dp(72) if not self._height else self._height class ThreeLineRightIconListItem(ContainerSupport, ThreeLineListItem): _txt_right_pad = NumericProperty("40dp") def __init__(self, **kwargs): super().__init__(**kwargs) self._txt_right_pad = dp(40) + m_res.HORIZ_MARGINS class OneLineAvatarIconListItem(OneLineAvatarListItem): _txt_right_pad = NumericProperty("40dp") def __init__(self, **kwargs): super().__init__(**kwargs) self._txt_right_pad = dp(40) + m_res.HORIZ_MARGINS class TwoLineAvatarIconListItem(TwoLineAvatarListItem): _txt_right_pad = 
NumericProperty("40dp") def __init__(self, **kwargs): super().__init__(**kwargs) self._txt_right_pad = dp(40) + m_res.HORIZ_MARGINS class ThreeLineAvatarIconListItem(ThreeLineAvatarListItem): _txt_right_pad = NumericProperty("40dp") def __init__(self, **kwargs): super().__init__(**kwargs) self._txt_right_pad = dp(40) + m_res.HORIZ_MARGINS class ImageLeftWidget(ILeftBody, Image): pass class ImageRightWidget(IRightBodyTouch, Image): pass class IconRightWidget(IRightBodyTouch, MDIconButton): pass class IconLeftWidget(ILeftBodyTouch, MDIconButton): pass class CheckboxLeftWidget(ILeftBodyTouch, MDCheckbox): pass
true
true
f72c928677b51e691762e5e54a0552edfcb7fb7d
4,361
py
Python
lab4/predict_income_romain_claret_and_sylvain_robert-nicoud_lab4.py
RomainClaret/msc.ml.labs
4e6b8e1c1ab841ab8ebbaee13f6ae43e9a1c44a5
[ "MIT" ]
null
null
null
lab4/predict_income_romain_claret_and_sylvain_robert-nicoud_lab4.py
RomainClaret/msc.ml.labs
4e6b8e1c1ab841ab8ebbaee13f6ae43e9a1c44a5
[ "MIT" ]
null
null
null
lab4/predict_income_romain_claret_and_sylvain_robert-nicoud_lab4.py
RomainClaret/msc.ml.labs
4e6b8e1c1ab841ab8ebbaee13f6ae43e9a1c44a5
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # 12.04.21 # Assignment lab 04 # Master Class: Machine Learning (5MI2018) # Faculty of Economic Science # University of Neuchatel (Switzerland) # Lab 4, see ML21_Exercise_4.pdf for more information # https://github.com/RomainClaret/msc.ml.labs # Authors: # - Romain Claret @RomainClaret # - Sylvain Robert-Nicoud @Nic0uds import warnings import pickle import pandas as pd from sklearn.preprocessing import LabelEncoder from sklearn.metrics import accuracy_score warnings.filterwarnings("ignore") # SPLITING ADULT.TEST FILE IN SUBFILES #spliting the adult.test file into several files to simulate weeks filename = 'adult.test' file_handler = open(filename, 'r').readlines()[1:] prefix_file = "adult_2021_cw_" week_number = 1 split_into = 10 line_count = 0 file_length = len(file_handler) for i in range(0,file_length): if i % ((file_length)//split_into) == 0 and i+((file_length//split_into)//2) < file_length: open(str(prefix_file)+str(week_number) + ".csv", "w+").writelines(file_handler[i:i+(file_length//split_into)]) week_number += 1 # RUN PIPELINE MODEL FROM OTHER FILE #input file, and save the predictions into a different file. #Example: #Let's say you have the input data weekly in the file adult_2021_cw_12.csv. #This second script should read the input from this file and use the classifier to make predictions and write those predictions in the file adult_2021_cw_12_pred.csv . 
# load pipeline model pipeline_model = pickle.load( open("grid_search_model.pickle", "rb" )) weeks_count = 10 filename = 'adult.test' prefix_file = "adult_2021_cw_" # get the features names and the values of the categories from adult.names (build a dictionary) data_dict = {} with open('adult.names') as f: for l in f: if l[0] == '|' or ':' not in l: continue c = l.split(':') if c[1].startswith(' continuous'): data_dict[c[0]] = "" else: data_dict[c[0]] = c[1].replace("\n","").replace(".","").replace(" ","").split(",") header = list(data_dict.keys())+['income'] # for each week based on a count and a naming convention for i in range (weeks_count): filename = str(prefix_file)+str(i+1)+".csv" df_weekly = pd.read_table(filename, sep=r',\s', na_values='?', skiprows=[0], header=None, names=header).dropna() drop_list = ["education", "occupation", "relationship"] df_weekly = df_weekly.drop(columns=drop_list) dict_replace = { 'marital-status' : { 'Never-married': 'Not-Married', 'Married-civ-spouse': 'Married', 'Divorced': 'Not-Married', 'Married-spouse-absent': 'Married', 'Separated': 'Married', 'Married-AF-spouse': 'Married', 'Widowed': 'Not-Married' }, 'workclass': { 'State-gov': 'Government', 'Self-emp-not-inc': 'Self-Employment', 'Federal-gov': 'Government', 'Local-gov': 'Government', 'Self-emp-inc': 'Self-Employment' } } df_weekly.replace(dict_replace, inplace=True) df_weekly["income"].replace({"<=50K.": "<=50K", ">50K.": ">50K"}, inplace=True) for l in ["marital-status", "sex", "income"]: l_enc = LabelEncoder() encoder_weekly = l_enc.fit(df_weekly[l]) df_weekly["encoded_"+l] = encoder_weekly.transform(df_weekly[l]) y_hat_dtree_weekly = pipeline_model.predict(df_weekly) pref_filename = str(prefix_file)+str(i+1)+"_pred.csv" print(pref_filename, "accuracy_score:",accuracy_score(df_weekly["encoded_income"],y_hat_dtree_weekly),"\n") # save the prediction into file pd.DataFrame(y_hat_dtree_weekly).to_csv(str(pref_filename),header=["pred_income"], index=None) # lab 03 results: 
# adult_2021_cw_1.csv accuracy_score: 0.8293736501079914 # adult_2021_cw_2.csv accuracy_score: 0.8503253796095445 # adult_2021_cw_3.csv accuracy_score: 0.8427807486631016 # adult_2021_cw_4.csv accuracy_score: 0.8307860262008734 # adult_2021_cw_5.csv accuracy_score: 0.8507462686567164 # adult_2021_cw_6.csv accuracy_score: 0.854978354978355 # adult_2021_cw_7.csv accuracy_score: 0.8545454545454545 # adult_2021_cw_8.csv accuracy_score: 0.8514531754574811 # adult_2021_cw_9.csv accuracy_score: 0.8296943231441049 # adult_2021_cw_10.csv accuracy_score: 0.8574537540805223
36.341667
167
0.687686
import warnings import pickle import pandas as pd from sklearn.preprocessing import LabelEncoder from sklearn.metrics import accuracy_score warnings.filterwarnings("ignore") filename = 'adult.test' file_handler = open(filename, 'r').readlines()[1:] prefix_file = "adult_2021_cw_" week_number = 1 split_into = 10 line_count = 0 file_length = len(file_handler) for i in range(0,file_length): if i % ((file_length)//split_into) == 0 and i+((file_length//split_into)//2) < file_length: open(str(prefix_file)+str(week_number) + ".csv", "w+").writelines(file_handler[i:i+(file_length//split_into)]) week_number += 1 #This second script should read the input from this file and use the classifier to make predictions and write those predictions in the file adult_2021_cw_12_pred.csv . # load pipeline model pipeline_model = pickle.load( open("grid_search_model.pickle", "rb" )) weeks_count = 10 filename = 'adult.test' prefix_file = "adult_2021_cw_" # get the features names and the values of the categories from adult.names (build a dictionary) data_dict = {} with open('adult.names') as f: for l in f: if l[0] == '|' or ':' not in l: continue c = l.split(':') if c[1].startswith(' continuous'): data_dict[c[0]] = "" else: data_dict[c[0]] = c[1].replace("\n","").replace(".","").replace(" ","").split(",") header = list(data_dict.keys())+['income'] # for each week based on a count and a naming convention for i in range (weeks_count): filename = str(prefix_file)+str(i+1)+".csv" df_weekly = pd.read_table(filename, sep=r',\s', na_values='?', skiprows=[0], header=None, names=header).dropna() drop_list = ["education", "occupation", "relationship"] df_weekly = df_weekly.drop(columns=drop_list) dict_replace = { 'marital-status' : { 'Never-married': 'Not-Married', 'Married-civ-spouse': 'Married', 'Divorced': 'Not-Married', 'Married-spouse-absent': 'Married', 'Separated': 'Married', 'Married-AF-spouse': 'Married', 'Widowed': 'Not-Married' }, 'workclass': { 'State-gov': 'Government', 
'Self-emp-not-inc': 'Self-Employment', 'Federal-gov': 'Government', 'Local-gov': 'Government', 'Self-emp-inc': 'Self-Employment' } } df_weekly.replace(dict_replace, inplace=True) df_weekly["income"].replace({"<=50K.": "<=50K", ">50K.": ">50K"}, inplace=True) for l in ["marital-status", "sex", "income"]: l_enc = LabelEncoder() encoder_weekly = l_enc.fit(df_weekly[l]) df_weekly["encoded_"+l] = encoder_weekly.transform(df_weekly[l]) y_hat_dtree_weekly = pipeline_model.predict(df_weekly) pref_filename = str(prefix_file)+str(i+1)+"_pred.csv" print(pref_filename, "accuracy_score:",accuracy_score(df_weekly["encoded_income"],y_hat_dtree_weekly),"\n") # save the prediction into file pd.DataFrame(y_hat_dtree_weekly).to_csv(str(pref_filename),header=["pred_income"], index=None) # lab 03 results: # adult_2021_cw_1.csv accuracy_score: 0.8293736501079914 # adult_2021_cw_2.csv accuracy_score: 0.8503253796095445 # adult_2021_cw_3.csv accuracy_score: 0.8427807486631016 # adult_2021_cw_4.csv accuracy_score: 0.8307860262008734 # adult_2021_cw_5.csv accuracy_score: 0.8507462686567164 # adult_2021_cw_6.csv accuracy_score: 0.854978354978355 # adult_2021_cw_7.csv accuracy_score: 0.8545454545454545 # adult_2021_cw_8.csv accuracy_score: 0.8514531754574811 # adult_2021_cw_9.csv accuracy_score: 0.8296943231441049 # adult_2021_cw_10.csv accuracy_score: 0.8574537540805223
true
true
f72c93dc9d0c650ab8f3bacc646cd04dbfed3888
92
py
Python
app/admin/__init__.py
baz1nga/Work-Shift
77df03120c4bc512703f02a653a6bbc982b14857
[ "MIT" ]
null
null
null
app/admin/__init__.py
baz1nga/Work-Shift
77df03120c4bc512703f02a653a6bbc982b14857
[ "MIT" ]
null
null
null
app/admin/__init__.py
baz1nga/Work-Shift
77df03120c4bc512703f02a653a6bbc982b14857
[ "MIT" ]
null
null
null
# Package initializer for the admin blueprint.
from flask import Blueprint

# Create the blueprint object first: the views module imported below needs
# `bp` to exist so it can attach its route handlers to it.
bp = Blueprint('admin', __name__)

# Imported at the bottom (not the top) on purpose — app.admin.views imports
# `bp` from this module, so importing it any earlier would be a circular import.
from app.admin import views
15.333333
33
0.771739
from flask import Blueprint bp = Blueprint('admin', __name__) from app.admin import views
true
true
f72c94f9f4bcc67b00da8e6ffb7d26d5bc04f527
1,108
py
Python
puzzle14/14a.py
muellerd/advent_of_code20
4d9619de165b584f406ef8a1b136d79355dfe3e1
[ "MIT" ]
null
null
null
puzzle14/14a.py
muellerd/advent_of_code20
4d9619de165b584f406ef8a1b136d79355dfe3e1
[ "MIT" ]
null
null
null
puzzle14/14a.py
muellerd/advent_of_code20
4d9619de165b584f406ef8a1b136d79355dfe3e1
[ "MIT" ]
null
null
null
def apply_mask(mask, value):
    """Apply an AoC-2020 day-14 bitmask to *value*.

    The mask is a 36-character string where '0' and '1' force the
    corresponding bit and 'X' leaves the value's bit unchanged.
    Returns the masked value as an int.
    """
    bits = format(value, '036b')  # value as a zero-padded 36-bit string
    return int(''.join(b if m == 'X' else m for m, b in zip(mask, bits)), 2)


def run(path):
    """Execute the initialization program in *path*; return the sum of memory.

    Each input line is either ``mask = <36 chars>`` or ``mem[<addr>] = <int>``.
    Writes pass through the current mask; later writes to the same address
    overwrite earlier ones.
    """
    memory = {}
    mask = ''
    with open(path) as f:
        for line in f:
            target, _, raw = line.strip().partition(' = ')
            if target == 'mask':
                mask = raw
            else:
                # target looks like "mem[<addr>]"; slice out the address
                # between the brackets (original used split[0][4:-1] too).
                memory[target[4:-1]] = apply_mask(mask, int(raw))
    return sum(memory.values())


if __name__ == "__main__":
    total = run("C:\\Privat\\advent_of_code20\\puzzle14\\input1.txt")
    print("Sum of all values in memory: " + str(total))
21.307692
69
0.525271
rows = [] with open("C:\\Privat\\advent_of_code20\\puzzle14\\input1.txt") as f: for line in f: rows.append(line.strip()) memory = {} currentMask = "" for line in rows: split = line.split(' = ') if 'mask' in split[0]: currentMask = split[1].strip() else: bit = format(int(split[1]), '036b') maskl = len(currentMask) bitl = len(bit) result = '' for i in range(0, len(bit)): maskBit = currentMask[i] bitBit = bit[i] if maskBit != 'X': result += maskBit else: result += bitBit toWrite = int(result, 2) memoryPosition = split[0][4:-1] if not memoryPosition in memory: memory[memoryPosition] = 0 memory[memoryPosition] = toWrite sum = 0 for key in memory: sum += memory[key] print("Sum of all values in memory: " + str(sum))
true
true
f72c9587c2b7459c937e13b276ff7e0feb632297
3,314
py
Python
detect_image.py
YunYang1994/CodeFun
36fcdbfb4ed55fbb8f8dbc6f900842cc7bb9f068
[ "MIT" ]
150
2019-06-19T03:54:40.000Z
2019-10-21T07:09:02.000Z
detect_image.py
YunYang1994/cv-notebooks
36fcdbfb4ed55fbb8f8dbc6f900842cc7bb9f068
[ "MIT" ]
7
2019-11-26T07:27:42.000Z
2020-04-02T03:35:29.000Z
detect_image.py
YunYang1994/cv-notebooks
36fcdbfb4ed55fbb8f8dbc6f900842cc7bb9f068
[ "MIT" ]
25
2019-11-27T11:07:56.000Z
2020-03-19T15:44:20.000Z
#! /usr/bin/env python # coding=utf-8 #================================================================ # Copyright (C) 2020 * Ltd. All rights reserved. # # Editor : VIM # File name : detect_image.py # Author : YunYang1994 # Created date: 2020-03-19 14:05:53 # Description : # #================================================================ import os import cv2 import time import numpy as np import tensorflow as tf from PIL import Image, ImageFont, ImageDraw from mtcnn import pnet, rnet, onet from models import IResnet from utils import detect_face, align_face, recognize_face model = IResnet(tflite_model="IResnet.tflite") font = ImageFont.truetype('weghts/HuaWenXinWei-1.ttf', 30) image = cv2.imread("/Users/yangyun/多人照片/5.jpg") image_h, image_w, _ = image.shape org_image = image.copy() image = cv2.cvtColor(image ,cv2.COLOR_BGR2RGB) total_boxes, points = detect_face(image, 20, pnet, rnet, onet, [0.6, 0.7, 0.9], 0.709) for idx, (bounding_box, keypoints) in enumerate(zip(total_boxes, points.T)): bounding_boxes = { 'box': [int(bounding_box[0]), int(bounding_box[1]), int(bounding_box[2]-bounding_box[0]), int(bounding_box[3]-bounding_box[1])], 'confidence': bounding_box[-1], 'keypoints': { 'left_eye': (int(keypoints[0]), int(keypoints[5])), 'right_eye': (int(keypoints[1]), int(keypoints[6])), 'nose': (int(keypoints[2]), int(keypoints[7])), 'mouth_left': (int(keypoints[3]), int(keypoints[8])), 'mouth_right': (int(keypoints[4]), int(keypoints[9])), } } bounding_box = bounding_boxes['box'] keypoints = bounding_boxes['keypoints'] cv2.circle(org_image,(keypoints['left_eye']), 2, (255,0,0), 3) cv2.circle(org_image,(keypoints['right_eye']), 2, (255,0,0), 3) cv2.circle(org_image,(keypoints['nose']), 2, (255,0,0), 3) cv2.circle(org_image,(keypoints['mouth_left']), 2, (255,0,0), 3) cv2.circle(org_image,(keypoints['mouth_right']),2, (255,0,0), 3) cv2.rectangle(org_image, (bounding_box[0], bounding_box[1]), (bounding_box[0]+bounding_box[2], bounding_box[1] + bounding_box[3]), 
(0,255,0), 2) # align face and extract it out align_image = align_face(image, keypoints) marigin = 16 xmin = max(bounding_box[0] - marigin, 0) ymin = max(bounding_box[1] - marigin, 0) xmax = min(bounding_box[0] + bounding_box[2] + marigin, image_w) ymax = min(bounding_box[1] + bounding_box[3] + marigin, image_h) crop_image = align_image[ymin:ymax, xmin:xmax, :] if crop_image is not None: t1 = time.time() embedding = model(crop_image) person = recognize_face(embedding) org_image_pil = Image.fromarray(org_image) draw = ImageDraw.Draw(org_image_pil) text_size = draw.textsize(person, font) draw.text((bounding_box[0], bounding_box[1]-16), person, fill=(0, 0, 255), font=font) org_image = np.array(org_image_pil) t2 = time.time() print("time: %.2fms" %((t2-t1)*1000)) org_image = cv2.cvtColor(org_image, cv2.COLOR_BGR2RGB) image = Image.fromarray(org_image) image.show() # image.save("test.png")
36.822222
96
0.601992
import os import cv2 import time import numpy as np import tensorflow as tf from PIL import Image, ImageFont, ImageDraw from mtcnn import pnet, rnet, onet from models import IResnet from utils import detect_face, align_face, recognize_face model = IResnet(tflite_model="IResnet.tflite") font = ImageFont.truetype('weghts/HuaWenXinWei-1.ttf', 30) image = cv2.imread("/Users/yangyun/多人照片/5.jpg") image_h, image_w, _ = image.shape org_image = image.copy() image = cv2.cvtColor(image ,cv2.COLOR_BGR2RGB) total_boxes, points = detect_face(image, 20, pnet, rnet, onet, [0.6, 0.7, 0.9], 0.709) for idx, (bounding_box, keypoints) in enumerate(zip(total_boxes, points.T)): bounding_boxes = { 'box': [int(bounding_box[0]), int(bounding_box[1]), int(bounding_box[2]-bounding_box[0]), int(bounding_box[3]-bounding_box[1])], 'confidence': bounding_box[-1], 'keypoints': { 'left_eye': (int(keypoints[0]), int(keypoints[5])), 'right_eye': (int(keypoints[1]), int(keypoints[6])), 'nose': (int(keypoints[2]), int(keypoints[7])), 'mouth_left': (int(keypoints[3]), int(keypoints[8])), 'mouth_right': (int(keypoints[4]), int(keypoints[9])), } } bounding_box = bounding_boxes['box'] keypoints = bounding_boxes['keypoints'] cv2.circle(org_image,(keypoints['left_eye']), 2, (255,0,0), 3) cv2.circle(org_image,(keypoints['right_eye']), 2, (255,0,0), 3) cv2.circle(org_image,(keypoints['nose']), 2, (255,0,0), 3) cv2.circle(org_image,(keypoints['mouth_left']), 2, (255,0,0), 3) cv2.circle(org_image,(keypoints['mouth_right']),2, (255,0,0), 3) cv2.rectangle(org_image, (bounding_box[0], bounding_box[1]), (bounding_box[0]+bounding_box[2], bounding_box[1] + bounding_box[3]), (0,255,0), 2) align_image = align_face(image, keypoints) marigin = 16 xmin = max(bounding_box[0] - marigin, 0) ymin = max(bounding_box[1] - marigin, 0) xmax = min(bounding_box[0] + bounding_box[2] + marigin, image_w) ymax = min(bounding_box[1] + bounding_box[3] + marigin, image_h) crop_image = align_image[ymin:ymax, xmin:xmax, :] if crop_image is 
not None: t1 = time.time() embedding = model(crop_image) person = recognize_face(embedding) org_image_pil = Image.fromarray(org_image) draw = ImageDraw.Draw(org_image_pil) text_size = draw.textsize(person, font) draw.text((bounding_box[0], bounding_box[1]-16), person, fill=(0, 0, 255), font=font) org_image = np.array(org_image_pil) t2 = time.time() print("time: %.2fms" %((t2-t1)*1000)) org_image = cv2.cvtColor(org_image, cv2.COLOR_BGR2RGB) image = Image.fromarray(org_image) image.show()
true
true
f72c966881d67f6b446e37599487a4a5d041df9b
60,197
py
Python
heat/engine/resources/openstack/nova/server.py
maestro-hybrid-cloud/heat
91a4bb3170bd81b1c67a896706851e55709c9b5a
[ "Apache-2.0" ]
null
null
null
heat/engine/resources/openstack/nova/server.py
maestro-hybrid-cloud/heat
91a4bb3170bd81b1c67a896706851e55709c9b5a
[ "Apache-2.0" ]
null
null
null
heat/engine/resources/openstack/nova/server.py
maestro-hybrid-cloud/heat
91a4bb3170bd81b1c67a896706851e55709c9b5a
[ "Apache-2.0" ]
null
null
null
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import uuidutils import six from heat.common import exception from heat.common.i18n import _ from heat.engine import attributes from heat.engine.clients import progress from heat.engine import constraints from heat.engine import function from heat.engine import properties from heat.engine.resources.openstack.neutron import port as neutron_port from heat.engine.resources.openstack.neutron import subnet from heat.engine.resources.openstack.nova import server_network_mixin from heat.engine.resources import scheduler_hints as sh from heat.engine.resources import stack_user from heat.engine import support from heat.rpc import api as rpc_api cfg.CONF.import_opt('default_software_config_transport', 'heat.common.config') LOG = logging.getLogger(__name__) class Server(stack_user.StackUser, sh.SchedulerHintsMixin, server_network_mixin.ServerNetworkMixin): PROPERTIES = ( NAME, IMAGE, BLOCK_DEVICE_MAPPING, BLOCK_DEVICE_MAPPING_V2, FLAVOR, FLAVOR_UPDATE_POLICY, IMAGE_UPDATE_POLICY, KEY_NAME, ADMIN_USER, AVAILABILITY_ZONE, SECURITY_GROUPS, NETWORKS, SCHEDULER_HINTS, METADATA, USER_DATA_FORMAT, USER_DATA, RESERVATION_ID, CONFIG_DRIVE, DISK_CONFIG, PERSONALITY, ADMIN_PASS, SOFTWARE_CONFIG_TRANSPORT ) = ( 'name', 'image', 'block_device_mapping', 'block_device_mapping_v2', 'flavor', 'flavor_update_policy', 
'image_update_policy', 'key_name', 'admin_user', 'availability_zone', 'security_groups', 'networks', 'scheduler_hints', 'metadata', 'user_data_format', 'user_data', 'reservation_id', 'config_drive', 'diskConfig', 'personality', 'admin_pass', 'software_config_transport' ) _BLOCK_DEVICE_MAPPING_KEYS = ( BLOCK_DEVICE_MAPPING_DEVICE_NAME, BLOCK_DEVICE_MAPPING_VOLUME_ID, BLOCK_DEVICE_MAPPING_SNAPSHOT_ID, BLOCK_DEVICE_MAPPING_VOLUME_SIZE, BLOCK_DEVICE_MAPPING_DELETE_ON_TERM, ) = ( 'device_name', 'volume_id', 'snapshot_id', 'volume_size', 'delete_on_termination', ) _BLOCK_DEVICE_MAPPING_V2_KEYS = ( BLOCK_DEVICE_MAPPING_DEVICE_NAME, BLOCK_DEVICE_MAPPING_VOLUME_ID, BLOCK_DEVICE_MAPPING_IMAGE_ID, BLOCK_DEVICE_MAPPING_SNAPSHOT_ID, BLOCK_DEVICE_MAPPING_SWAP_SIZE, BLOCK_DEVICE_MAPPING_DEVICE_TYPE, BLOCK_DEVICE_MAPPING_DISK_BUS, BLOCK_DEVICE_MAPPING_BOOT_INDEX, BLOCK_DEVICE_MAPPING_VOLUME_SIZE, BLOCK_DEVICE_MAPPING_DELETE_ON_TERM, ) = ( 'device_name', 'volume_id', 'image_id', 'snapshot_id', 'swap_size', 'device_type', 'disk_bus', 'boot_index', 'volume_size', 'delete_on_termination', ) _NETWORK_KEYS = ( NETWORK_UUID, NETWORK_ID, NETWORK_FIXED_IP, NETWORK_PORT, NETWORK_SUBNET, NETWORK_PORT_EXTRA ) = ( 'uuid', 'network', 'fixed_ip', 'port', 'subnet', 'port_extra_properties' ) _SOFTWARE_CONFIG_FORMATS = ( HEAT_CFNTOOLS, RAW, SOFTWARE_CONFIG ) = ( 'HEAT_CFNTOOLS', 'RAW', 'SOFTWARE_CONFIG' ) _SOFTWARE_CONFIG_TRANSPORTS = ( POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE ) = ( 'POLL_SERVER_CFN', 'POLL_SERVER_HEAT', 'POLL_TEMP_URL', 'ZAQAR_MESSAGE' ) ATTRIBUTES = ( NAME_ATTR, ADDRESSES, NETWORKS_ATTR, FIRST_ADDRESS, INSTANCE_NAME, ACCESSIPV4, ACCESSIPV6, CONSOLE_URLS, ) = ( 'name', 'addresses', 'networks', 'first_address', 'instance_name', 'accessIPv4', 'accessIPv6', 'console_urls', ) properties_schema = { NAME: properties.Schema( properties.Schema.STRING, _('Server name.'), update_allowed=True ), IMAGE: properties.Schema( properties.Schema.STRING, _('The ID or name of 
the image to boot with.'), constraints=[ constraints.CustomConstraint('glance.image') ], update_allowed=True ), BLOCK_DEVICE_MAPPING: properties.Schema( properties.Schema.LIST, _('Block device mappings for this server.'), schema=properties.Schema( properties.Schema.MAP, schema={ BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema( properties.Schema.STRING, _('A device name where the volume will be ' 'attached in the system at /dev/device_name. ' 'This value is typically vda.'), required=True ), BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema( properties.Schema.STRING, _('The ID of the volume to boot from. Only one ' 'of volume_id or snapshot_id should be ' 'provided.'), constraints=[ constraints.CustomConstraint('cinder.volume') ] ), BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema( properties.Schema.STRING, _('The ID of the snapshot to create a volume ' 'from.'), constraints=[ constraints.CustomConstraint('cinder.snapshot') ] ), BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema( properties.Schema.INTEGER, _('The size of the volume, in GB. It is safe to ' 'leave this blank and have the Compute service ' 'infer the size.') ), BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema( properties.Schema.BOOLEAN, _('Indicate whether the volume should be deleted ' 'when the server is terminated.') ), }, ) ), BLOCK_DEVICE_MAPPING_V2: properties.Schema( properties.Schema.LIST, _('Block device mappings v2 for this server.'), schema=properties.Schema( properties.Schema.MAP, schema={ BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema( properties.Schema.STRING, _('A device name where the volume will be ' 'attached in the system at /dev/device_name. 
' 'This value is typically vda.'), ), BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema( properties.Schema.STRING, _('The volume_id can be boot or non-boot device ' 'to the server.'), constraints=[ constraints.CustomConstraint('cinder.volume') ] ), BLOCK_DEVICE_MAPPING_IMAGE_ID: properties.Schema( properties.Schema.STRING, _('The ID of the image to create a volume from.'), constraints=[ constraints.CustomConstraint('glance.image') ], ), BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema( properties.Schema.STRING, _('The ID of the snapshot to create a volume ' 'from.'), constraints=[ constraints.CustomConstraint('cinder.snapshot') ] ), BLOCK_DEVICE_MAPPING_SWAP_SIZE: properties.Schema( properties.Schema.INTEGER, _('The size of the swap, in MB.') ), BLOCK_DEVICE_MAPPING_DEVICE_TYPE: properties.Schema( properties.Schema.STRING, _('Device type: at the moment we can make distinction' ' only between disk and cdrom.'), constraints=[ constraints.AllowedValues(['cdrom', 'disk']), ], ), BLOCK_DEVICE_MAPPING_DISK_BUS: properties.Schema( properties.Schema.STRING, _('Bus of the device: hypervisor driver chooses a ' 'suitable default if omitted.'), constraints=[ constraints.AllowedValues(['ide', 'lame_bus', 'scsi', 'usb', 'virtio']), ], ), BLOCK_DEVICE_MAPPING_BOOT_INDEX: properties.Schema( properties.Schema.INTEGER, _('Integer used for ordering the boot disks.'), ), BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema( properties.Schema.INTEGER, _('Size of the block device in GB. 
If it is omitted, ' 'hypervisor driver calculates size.'), ), BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema( properties.Schema.BOOLEAN, _('Indicate whether the volume should be deleted ' 'when the server is terminated.') ), }, ), support_status=support.SupportStatus(version='2015.1') ), FLAVOR: properties.Schema( properties.Schema.STRING, _('The ID or name of the flavor to boot onto.'), required=True, update_allowed=True, constraints=[ constraints.CustomConstraint('nova.flavor') ] ), FLAVOR_UPDATE_POLICY: properties.Schema( properties.Schema.STRING, _('Policy on how to apply a flavor update; either by requesting ' 'a server resize or by replacing the entire server.'), default='RESIZE', constraints=[ constraints.AllowedValues(['RESIZE', 'REPLACE']), ], update_allowed=True ), IMAGE_UPDATE_POLICY: properties.Schema( properties.Schema.STRING, _('Policy on how to apply an image-id update; either by ' 'requesting a server rebuild or by replacing the entire server'), default='REBUILD', constraints=[ constraints.AllowedValues(['REBUILD', 'REPLACE', 'REBUILD_PRESERVE_EPHEMERAL']), ], update_allowed=True ), KEY_NAME: properties.Schema( properties.Schema.STRING, _('Name of keypair to inject into the server.'), constraints=[ constraints.CustomConstraint('nova.keypair') ] ), ADMIN_USER: properties.Schema( properties.Schema.STRING, _('Name of the administrative user to use on the server.'), support_status=support.SupportStatus( status=support.HIDDEN, version='5.0.0', message=_('The default cloud-init user set up for each image ' '(e.g. 
"ubuntu" for Ubuntu 12.04+, "fedora" for ' 'Fedora 19+ and "cloud-user" for CentOS/RHEL 6.5).'), previous_status=support.SupportStatus( status=support.DEPRECATED, version='2014.1', previous_status=support.SupportStatus(version='2013.2') ) ) ), AVAILABILITY_ZONE: properties.Schema( properties.Schema.STRING, _('Name of the availability zone for server placement.') ), SECURITY_GROUPS: properties.Schema( properties.Schema.LIST, _('List of security group names or IDs. Cannot be used if ' 'neutron ports are associated with this server; assign ' 'security groups to the ports instead.'), default=[] ), NETWORKS: properties.Schema( properties.Schema.LIST, _('An ordered list of nics to be added to this server, with ' 'information about connected networks, fixed ips, port etc.'), schema=properties.Schema( properties.Schema.MAP, schema={ NETWORK_UUID: properties.Schema( properties.Schema.STRING, _('ID of network to create a port on.'), support_status=support.SupportStatus( status=support.HIDDEN, version='5.0.0', previous_status=support.SupportStatus( status=support.DEPRECATED, message=_('Use property %s.') % NETWORK_ID, version='2014.1' ) ), constraints=[ constraints.CustomConstraint('neutron.network') ] ), NETWORK_ID: properties.Schema( properties.Schema.STRING, _('Name or ID of network to create a port on.'), constraints=[ constraints.CustomConstraint('neutron.network') ] ), NETWORK_FIXED_IP: properties.Schema( properties.Schema.STRING, _('Fixed IP address to specify for the port ' 'created on the requested network.'), constraints=[ constraints.CustomConstraint('ip_addr') ] ), NETWORK_PORT: properties.Schema( properties.Schema.STRING, _('ID of an existing port to associate with this ' 'server.'), constraints=[ constraints.CustomConstraint('neutron.port') ] ), NETWORK_PORT_EXTRA: properties.Schema( properties.Schema.MAP, _('Dict, which has expand properties for port. 
' 'Used only if port property is not specified ' 'for creating port.'), schema=neutron_port.Port.extra_properties_schema, support_status=support.SupportStatus(version='6.0.0') ), NETWORK_SUBNET: properties.Schema( properties.Schema.STRING, _('Subnet in which to allocate the IP address for ' 'port. Used for creating port, based on derived ' 'properties. If subnet is specified, network ' 'property becomes optional.'), support_status=support.SupportStatus(version='5.0.0') ) }, ), update_allowed=True ), SCHEDULER_HINTS: properties.Schema( properties.Schema.MAP, _('Arbitrary key-value pairs specified by the client to help ' 'boot a server.') ), METADATA: properties.Schema( properties.Schema.MAP, _('Arbitrary key/value metadata to store for this server. Both ' 'keys and values must be 255 characters or less. Non-string ' 'values will be serialized to JSON (and the serialized ' 'string must be 255 characters or less).'), update_allowed=True ), USER_DATA_FORMAT: properties.Schema( properties.Schema.STRING, _('How the user_data should be formatted for the server. For ' 'HEAT_CFNTOOLS, the user_data is bundled as part of the ' 'heat-cfntools cloud-init boot configuration data. For RAW ' 'the user_data is passed to Nova unmodified. ' 'For SOFTWARE_CONFIG user_data is bundled as part of the ' 'software config data, and metadata is derived from any ' 'associated SoftwareDeployment resources.'), default=HEAT_CFNTOOLS, constraints=[ constraints.AllowedValues(_SOFTWARE_CONFIG_FORMATS), ] ), SOFTWARE_CONFIG_TRANSPORT: properties.Schema( properties.Schema.STRING, _('How the server should receive the metadata required for ' 'software configuration. POLL_SERVER_CFN will allow calls to ' 'the cfn API action DescribeStackResource authenticated with ' 'the provided keypair. POLL_SERVER_HEAT will allow calls to ' 'the Heat API resource-show using the provided keystone ' 'credentials. POLL_TEMP_URL will create and populate a ' 'Swift TempURL with metadata for polling. 
ZAQAR_MESSAGE will ' 'create a dedicated zaqar queue and post the metadata ' 'for polling.'), default=cfg.CONF.default_software_config_transport, constraints=[ constraints.AllowedValues(_SOFTWARE_CONFIG_TRANSPORTS), ] ), USER_DATA: properties.Schema( properties.Schema.STRING, _('User data script to be executed by cloud-init.'), default='' ), RESERVATION_ID: properties.Schema( properties.Schema.STRING, _('A UUID for the set of servers being requested.') ), CONFIG_DRIVE: properties.Schema( properties.Schema.BOOLEAN, _('If True, enable config drive on the server.') ), DISK_CONFIG: properties.Schema( properties.Schema.STRING, _('Control how the disk is partitioned when the server is ' 'created.'), constraints=[ constraints.AllowedValues(['AUTO', 'MANUAL']), ] ), PERSONALITY: properties.Schema( properties.Schema.MAP, _('A map of files to create/overwrite on the server upon boot. ' 'Keys are file names and values are the file contents.'), default={} ), ADMIN_PASS: properties.Schema( properties.Schema.STRING, _('The administrator password for the server.'), update_allowed=True ), } attributes_schema = { NAME_ATTR: attributes.Schema( _('Name of the server.'), type=attributes.Schema.STRING ), ADDRESSES: attributes.Schema( _('A dict of all network addresses with corresponding port_id. ' 'Each network will have two keys in dict, they are network ' 'name and network id. ' 'The port ID may be obtained through the following expression: ' '"{get_attr: [<server>, addresses, <network name_or_id>, 0, ' 'port]}".'), type=attributes.Schema.MAP ), NETWORKS_ATTR: attributes.Schema( _('A dict of assigned network addresses of the form: ' '{"public": [ip1, ip2...], "private": [ip3, ip4], ' '"public_uuid": [ip1, ip2...], "private_uuid": [ip3, ip4]}. ' 'Each network will have two keys in dict, they are network ' 'name and network id. 
'), type=attributes.Schema.MAP ), FIRST_ADDRESS: attributes.Schema( _('Convenience attribute to fetch the first assigned network ' 'address, or an empty string if nothing has been assigned at ' 'this time. Result may not be predictable if the server has ' 'addresses from more than one network.'), support_status=support.SupportStatus( status=support.HIDDEN, version='5.0.0', message=_('Use the networks attribute instead of ' 'first_address. For example: "{get_attr: ' '[<server name>, networks, <network name>, 0]}"'), previous_status=support.SupportStatus( status=support.DEPRECATED, version='2014.2', previous_status=support.SupportStatus(version='2013.2') ) ) ), INSTANCE_NAME: attributes.Schema( _('AWS compatible instance name.'), type=attributes.Schema.STRING ), ACCESSIPV4: attributes.Schema( _('The manually assigned alternative public IPv4 address ' 'of the server.'), type=attributes.Schema.STRING ), ACCESSIPV6: attributes.Schema( _('The manually assigned alternative public IPv6 address ' 'of the server.'), type=attributes.Schema.STRING ), CONSOLE_URLS: attributes.Schema( _("URLs of server's consoles. " "To get a specific console type, the requested type " "can be specified as parameter to the get_attr function, " "e.g. get_attr: [ <server>, console_urls, novnc ]. 
" "Currently supported types are " "novnc, xvpvnc, spice-html5, rdp-html5, serial."), support_status=support.SupportStatus(version='2015.1'), type=attributes.Schema.MAP ), } # Server host name limit to 53 characters by due to typical default # linux HOST_NAME_MAX of 64, minus the .novalocal appended to the name physical_resource_name_limit = 53 default_client_name = 'nova' entity = 'servers' def translation_rules(self): return [properties.TranslationRule( self.properties, properties.TranslationRule.REPLACE, source_path=[self.NETWORKS, self.NETWORK_ID], value_name=self.NETWORK_UUID)] def __init__(self, name, json_snippet, stack): super(Server, self).__init__(name, json_snippet, stack) if self.user_data_software_config(): self._register_access_key() def _server_name(self): name = self.properties[self.NAME] if name: return name return self.physical_resource_name() def _config_drive(self): # This method is overridden by the derived CloudServer resource return self.properties[self.CONFIG_DRIVE] def _populate_deployments_metadata(self, meta): meta['deployments'] = meta.get('deployments', []) meta['os-collect-config'] = meta.get('os-collect-config', {}) if self.transport_poll_server_heat(): meta['os-collect-config'].update({'heat': { 'user_id': self._get_user_id(), 'password': self.password, 'auth_url': self.context.auth_url, 'project_id': self.stack.stack_user_project_id, 'stack_id': self.stack.identifier().stack_path(), 'resource_name': self.name}}) if self.transport_zaqar_message(): queue_id = self.physical_resource_name() self.data_set('metadata_queue_id', queue_id) zaqar_plugin = self.client_plugin('zaqar') zaqar = zaqar_plugin.create_for_tenant( self.stack.stack_user_project_id) queue = zaqar.queue(queue_id) queue.post({'body': meta, 'ttl': zaqar_plugin.DEFAULT_TTL}) meta['os-collect-config'].update({'zaqar': { 'user_id': self._get_user_id(), 'password': self.password, 'auth_url': self.context.auth_url, 'project_id': self.stack.stack_user_project_id, 'queue_id': 
queue_id}}) elif self.transport_poll_server_cfn(): meta['os-collect-config'].update({'cfn': { 'metadata_url': '%s/v1/' % cfg.CONF.heat_metadata_server_url, 'access_key_id': self.access_key, 'secret_access_key': self.secret_key, 'stack_name': self.stack.name, 'path': '%s.Metadata' % self.name}}) elif self.transport_poll_temp_url(): container = self.physical_resource_name() object_name = str(uuid.uuid4()) self.client('swift').put_container(container) url = self.client_plugin('swift').get_temp_url( container, object_name, method='GET') put_url = self.client_plugin('swift').get_temp_url( container, object_name) self.data_set('metadata_put_url', put_url) self.data_set('metadata_object_name', object_name) meta['os-collect-config'].update({'request': { 'metadata_url': url}}) self.client('swift').put_object( container, object_name, jsonutils.dumps(meta)) self.metadata_set(meta) def _register_access_key(self): """Access is limited to this resource, which created the keypair.""" def access_allowed(resource_name): return resource_name == self.name if self.transport_poll_server_cfn(): self.stack.register_access_allowed_handler( self.access_key, access_allowed) elif self.transport_poll_server_heat(): self.stack.register_access_allowed_handler( self._get_user_id(), access_allowed) def _create_transport_credentials(self): if self.transport_poll_server_cfn(): self._create_user() self._create_keypair() elif (self.transport_poll_server_heat() or self.transport_zaqar_message()): self.password = uuid.uuid4().hex self._create_user() self._register_access_key() @property def access_key(self): return self.data().get('access_key') @property def secret_key(self): return self.data().get('secret_key') @property def password(self): return self.data().get('password') @password.setter def password(self, password): if password is None: self.data_delete('password') else: self.data_set('password', password, True) def user_data_raw(self): return self.properties[self.USER_DATA_FORMAT] == self.RAW 
def user_data_software_config(self): return self.properties[ self.USER_DATA_FORMAT] == self.SOFTWARE_CONFIG def transport_poll_server_cfn(self): return self.properties[ self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_SERVER_CFN def transport_poll_server_heat(self): return self.properties[ self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_SERVER_HEAT def transport_poll_temp_url(self): return self.properties[ self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_TEMP_URL def transport_zaqar_message(self): return self.properties.get( self.SOFTWARE_CONFIG_TRANSPORT) == self.ZAQAR_MESSAGE def get_software_config(self, ud_content): try: sc = self.rpc_client().show_software_config( self.context, ud_content) return sc[rpc_api.SOFTWARE_CONFIG_CONFIG] except Exception as ex: self.rpc_client().ignore_error_named(ex, 'NotFound') return ud_content def handle_create(self): security_groups = self.properties[self.SECURITY_GROUPS] user_data_format = self.properties[self.USER_DATA_FORMAT] ud_content = self.properties[self.USER_DATA] if self.user_data_software_config() or self.user_data_raw(): if uuidutils.is_uuid_like(ud_content): # attempt to load the userdata from software config ud_content = self.get_software_config(ud_content) metadata = self.metadata_get(True) or {} if self.user_data_software_config(): self._create_transport_credentials() self._populate_deployments_metadata(metadata) userdata = self.client_plugin().build_userdata( metadata, ud_content, instance_user=None, user_data_format=user_data_format) flavor = self.properties[self.FLAVOR] availability_zone = self.properties[self.AVAILABILITY_ZONE] image = self.properties[self.IMAGE] if image: image = self.client_plugin('glance').get_image_id(image) flavor_id = self.client_plugin().get_flavor_id(flavor) instance_meta = self.properties[self.METADATA] if instance_meta is not None: instance_meta = self.client_plugin().meta_serialize( instance_meta) scheduler_hints = self._scheduler_hints( self.properties[self.SCHEDULER_HINTS]) nics = 
self._build_nics(self.properties[self.NETWORKS]) block_device_mapping = self._build_block_device_mapping( self.properties[self.BLOCK_DEVICE_MAPPING]) block_device_mapping_v2 = self._build_block_device_mapping_v2( self.properties[self.BLOCK_DEVICE_MAPPING_V2]) reservation_id = self.properties[self.RESERVATION_ID] disk_config = self.properties[self.DISK_CONFIG] admin_pass = self.properties[self.ADMIN_PASS] or None personality_files = self.properties[self.PERSONALITY] key_name = self.properties[self.KEY_NAME] server = None try: server = self.client().servers.create( name=self._server_name(), image=image, flavor=flavor_id, key_name=key_name, security_groups=security_groups, userdata=userdata, meta=instance_meta, scheduler_hints=scheduler_hints, nics=nics, availability_zone=availability_zone, block_device_mapping=block_device_mapping, block_device_mapping_v2=block_device_mapping_v2, reservation_id=reservation_id, config_drive=self._config_drive(), disk_config=disk_config, files=personality_files, admin_pass=admin_pass) finally: # Avoid a race condition where the thread could be canceled # before the ID is stored if server is not None: self.resource_id_set(server.id) return server.id def check_create_complete(self, server_id): check = self.client_plugin()._check_active(server_id) if check: self.store_external_ports() return check def handle_check(self): server = self.client().servers.get(self.resource_id) status = self.client_plugin().get_status(server) checks = [{'attr': 'status', 'expected': 'ACTIVE', 'current': status}] self._verify_check_conditions(checks) @classmethod def _build_block_device_mapping(cls, bdm): if not bdm: return None bdm_dict = {} for mapping in bdm: mapping_parts = [] snapshot_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID) if snapshot_id: mapping_parts.append(snapshot_id) mapping_parts.append('snap') else: volume_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID) mapping_parts.append(volume_id) mapping_parts.append('') volume_size = 
mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE) delete = mapping.get(cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM) if volume_size: mapping_parts.append(str(volume_size)) else: mapping_parts.append('') if delete: mapping_parts.append(str(delete)) device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME) bdm_dict[device_name] = ':'.join(mapping_parts) return bdm_dict @classmethod def _build_block_device_mapping_v2(cls, bdm_v2): if not bdm_v2: return None bdm_v2_list = [] for mapping in bdm_v2: bmd_dict = None if mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID): bmd_dict = { 'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID), 'source_type': 'volume', 'destination_type': 'volume', 'boot_index': 0, 'delete_on_termination': False, } elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID): bmd_dict = { 'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID), 'source_type': 'snapshot', 'destination_type': 'volume', 'boot_index': 0, 'delete_on_termination': False, } elif mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE_ID): bmd_dict = { 'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE_ID), 'source_type': 'image', 'destination_type': 'volume', 'boot_index': 0, 'delete_on_termination': False, } elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE): bmd_dict = { 'source_type': 'blank', 'destination_type': 'local', 'boot_index': -1, 'delete_on_termination': True, 'guest_format': 'swap', 'volume_size': mapping.get( cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE), } # NOTE(prazumovsky): In case of server doesn't take empty value of # device name, need to escape from such situation. 
device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME) if device_name: bmd_dict[cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME] = device_name update_props = (cls.BLOCK_DEVICE_MAPPING_DEVICE_TYPE, cls.BLOCK_DEVICE_MAPPING_DISK_BUS, cls.BLOCK_DEVICE_MAPPING_BOOT_INDEX, cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE, cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM) for update_prop in update_props: if mapping.get(update_prop) is not None: bmd_dict[update_prop] = mapping.get(update_prop) if bmd_dict: bdm_v2_list.append(bmd_dict) return bdm_v2_list def _add_port_for_address(self, server): """Method adds port id to list of addresses. This method is used only for resolving attributes. """ nets = copy.deepcopy(server.addresses) ifaces = server.interface_list() ip_mac_mapping_on_port_id = dict(((iface.fixed_ips[0]['ip_address'], iface.mac_addr), iface.port_id) for iface in ifaces) for net_name in nets: for addr in nets[net_name]: addr['port'] = ip_mac_mapping_on_port_id.get( (addr['addr'], addr['OS-EXT-IPS-MAC:mac_addr'])) return self._extend_networks(nets) def _extend_networks(self, networks): """Method adds same networks with replaced name on network id. This method is used only for resolving attributes. 
""" nets = copy.deepcopy(networks) for key in list(nets.keys()): try: net_id = self.client_plugin().get_net_id_by_label(key) except (exception.NovaNetworkNotFound, exception.PhysicalResourceNameAmbiguity): net_id = None if net_id: nets[net_id] = nets[key] return nets def _resolve_attribute(self, name): if name == self.FIRST_ADDRESS: return self.client_plugin().server_to_ipaddress( self.resource_id) or '' if name == self.NAME_ATTR: return self._server_name() try: server = self.client().servers.get(self.resource_id) except Exception as e: self.client_plugin().ignore_not_found(e) return '' if name == self.ADDRESSES: return self._add_port_for_address(server) if name == self.NETWORKS_ATTR: return self._extend_networks(server.networks) if name == self.INSTANCE_NAME: return getattr(server, 'OS-EXT-SRV-ATTR:instance_name', None) if name == self.ACCESSIPV4: return server.accessIPv4 if name == self.ACCESSIPV6: return server.accessIPv6 if name == self.CONSOLE_URLS: return self.client_plugin('nova').get_console_urls(server) def add_dependencies(self, deps): super(Server, self).add_dependencies(deps) # Depend on any Subnet in this template with the same # network_id as the networks attached to this server. # It is not known which subnet a server might be assigned # to so all subnets in a network should be created before # the servers in that network. nets = self.properties[self.NETWORKS] if not nets: return for res in six.itervalues(self.stack): if res.has_interface('OS::Neutron::Subnet'): subnet_net = (res.properties.get(subnet.Subnet.NETWORK_ID) or res.properties.get(subnet.Subnet.NETWORK)) for net in nets: # worry about network_id because that could be the match # assigned to the subnet as well and could have been # created by this stack. Regardless, the server should # still wait on the subnet. 
net_id = (net.get(self.NETWORK_ID) or net.get(self.NETWORK_UUID)) if net_id and net_id == subnet_net: deps += (self, res) break def _update_flavor(self, prop_diff): flavor = prop_diff[self.FLAVOR] flavor_id = self.client_plugin().get_flavor_id(flavor) handler_args = {'args': (flavor_id,)} checker_args = {'args': (flavor_id, flavor)} prg_resize = progress.ServerUpdateProgress(self.resource_id, 'resize', handler_extra=handler_args, checker_extra=checker_args) prg_verify = progress.ServerUpdateProgress(self.resource_id, 'verify_resize') return prg_resize, prg_verify def _update_image(self, prop_diff): image_update_policy = ( prop_diff.get(self.IMAGE_UPDATE_POLICY) or self.properties[self.IMAGE_UPDATE_POLICY]) image = prop_diff[self.IMAGE] image_id = self.client_plugin('glance').get_image_id(image) preserve_ephemeral = ( image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL') password = (prop_diff.get(self.ADMIN_PASS) or self.properties[self.ADMIN_PASS]) kwargs = {'password': password, 'preserve_ephemeral': preserve_ephemeral} prg = progress.ServerUpdateProgress(self.resource_id, 'rebuild', handler_extra={'args': (image_id,), 'kwargs': kwargs}) return prg def _update_networks(self, server, prop_diff): updaters = [] new_networks = prop_diff.get(self.NETWORKS) old_networks = self.properties[self.NETWORKS] if not server: server = self.client().servers.get(self.resource_id) interfaces = server.interface_list() remove_ports, add_nets = self.calculate_networks( old_networks, new_networks, interfaces) for port in remove_ports: updaters.append( progress.ServerUpdateProgress( self.resource_id, 'interface_detach', complete=True, handler_extra={'args': (port,)}) ) for args in add_nets: updaters.append( progress.ServerUpdateProgress( self.resource_id, 'interface_attach', complete=True, handler_extra={'kwargs': args}) ) return updaters def _needs_update(self, after, before, after_props, before_props, prev_resource, check_init_complete=True): result = super(Server, self)._needs_update( 
after, before, after_props, before_props, prev_resource, check_init_complete=check_init_complete) prop_diff = self.update_template_diff_properties(after_props, before_props) if self.FLAVOR in prop_diff: flavor_update_policy = ( prop_diff.get(self.FLAVOR_UPDATE_POLICY) or self.properties[self.FLAVOR_UPDATE_POLICY]) if flavor_update_policy == 'REPLACE': raise exception.UpdateReplace(self.name) if self.IMAGE in prop_diff: image_update_policy = ( prop_diff.get(self.IMAGE_UPDATE_POLICY) or self.properties[self.IMAGE_UPDATE_POLICY]) if image_update_policy == 'REPLACE': raise exception.UpdateReplace(self.name) return result def handle_update(self, json_snippet, tmpl_diff, prop_diff): if 'Metadata' in tmpl_diff: # If SOFTWARE_CONFIG user_data_format is enabled we require # the "deployments" and "os-collect-config" keys for Deployment # polling. We can attempt to merge the occ data, but any # metadata update containing deployments will be discarded. if self.user_data_software_config(): metadata = self.metadata_get(True) or {} new_occ_md = tmpl_diff['Metadata'].get('os-collect-config', {}) occ_md = metadata.get('os-collect-config', {}) occ_md.update(new_occ_md) tmpl_diff['Metadata']['os-collect-config'] = occ_md deployment_md = metadata.get('deployments', []) tmpl_diff['Metadata']['deployments'] = deployment_md self.metadata_set(tmpl_diff['Metadata']) updaters = [] server = None if self.METADATA in prop_diff: server = self.client().servers.get(self.resource_id) self.client_plugin().meta_update(server, prop_diff[self.METADATA]) if self.FLAVOR in prop_diff: updaters.extend(self._update_flavor(prop_diff)) if self.IMAGE in prop_diff: updaters.append(self._update_image(prop_diff)) elif self.ADMIN_PASS in prop_diff: if not server: server = self.client().servers.get(self.resource_id) server.change_password(prop_diff[self.ADMIN_PASS]) if self.NAME in prop_diff: if not server: server = self.client().servers.get(self.resource_id) self.client_plugin().rename(server, 
prop_diff[self.NAME]) if self.NETWORKS in prop_diff: updaters.extend(self._update_networks(server, prop_diff)) # NOTE(pas-ha) optimization is possible (starting first task # right away), but we'd rather not, as this method already might # have called several APIs return updaters def check_update_complete(self, updaters): """Push all updaters to completion in list order.""" for prg in updaters: if not prg.called: handler = getattr(self.client_plugin(), prg.handler) prg.called = handler(*prg.handler_args, **prg.handler_kwargs) return False if not prg.complete: check_complete = getattr(self.client_plugin(), prg.checker) prg.complete = check_complete(*prg.checker_args, **prg.checker_kwargs) break status = all(prg.complete for prg in updaters) if status: self.store_external_ports() return status def metadata_update(self, new_metadata=None): """Refresh the metadata if new_metadata is None.""" if new_metadata is None: # Re-resolve the template metadata and merge it with the # current resource metadata. This is necessary because the # attributes referenced in the template metadata may change # and the resource itself adds keys to the metadata which # are not specified in the template (e.g the deployments data) meta = self.metadata_get(refresh=True) or {} tmpl_meta = self.t.metadata() meta.update(tmpl_meta) self.metadata_set(meta) @staticmethod def _check_maximum(count, maximum, msg): """Check a count against a maximum. Unless maximum is -1 which indicates that there is no limit. """ if maximum != -1 and count > maximum: raise exception.StackValidationFailed(message=msg) def _validate_block_device_mapping(self): # either volume_id or snapshot_id needs to be specified, but not both # for block device mapping. 
bdm = self.properties[self.BLOCK_DEVICE_MAPPING] or [] bootable_vol = False for mapping in bdm: device_name = mapping[self.BLOCK_DEVICE_MAPPING_DEVICE_NAME] if device_name == 'vda': bootable_vol = True volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID) snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID) if volume_id is not None and snapshot_id is not None: raise exception.ResourcePropertyConflict( self.BLOCK_DEVICE_MAPPING_VOLUME_ID, self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID) if volume_id is None and snapshot_id is None: msg = _('Either volume_id or snapshot_id must be specified for' ' device mapping %s') % device_name raise exception.StackValidationFailed(message=msg) bdm_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] or [] if bdm and bdm_v2: raise exception.ResourcePropertyConflict( self.BLOCK_DEVICE_MAPPING, self.BLOCK_DEVICE_MAPPING_V2) for mapping in bdm_v2: volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID) snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID) image_id = mapping.get(self.BLOCK_DEVICE_MAPPING_IMAGE_ID) swap_size = mapping.get(self.BLOCK_DEVICE_MAPPING_SWAP_SIZE) property_tuple = (volume_id, snapshot_id, image_id, swap_size) if property_tuple.count(None) < 3: raise exception.ResourcePropertyConflict( self.BLOCK_DEVICE_MAPPING_VOLUME_ID, self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID, self.BLOCK_DEVICE_MAPPING_IMAGE_ID, self.BLOCK_DEVICE_MAPPING_SWAP_SIZE) if property_tuple.count(None) == 4: msg = _('Either volume_id, snapshot_id, image_id or ' 'swap_size must be specified.') raise exception.StackValidationFailed(message=msg) if any((volume_id, snapshot_id, image_id)): bootable_vol = True return bootable_vol def validate(self): """Validate any of the provided params.""" super(Server, self).validate() if self.user_data_software_config(): if 'deployments' in self.t.metadata(): msg = _('deployments key not allowed in resource metadata ' 'with user_data_format of SOFTWARE_CONFIG') raise 
exception.StackValidationFailed(message=msg) bootable_vol = self._validate_block_device_mapping() # make sure the image exists if specified. image = self.properties[self.IMAGE] if not image and not bootable_vol: msg = _('Neither image nor bootable volume is specified for' ' instance %s') % self.name raise exception.StackValidationFailed(message=msg) # network properties 'uuid' and 'network' shouldn't be used # both at once for all networks networks = self.properties[self.NETWORKS] or [] # record if any networks include explicit ports networks_with_port = False for network in networks: networks_with_port = (networks_with_port or network.get(self.NETWORK_PORT)) self._validate_network(network) # retrieve provider's absolute limits if it will be needed metadata = self.properties[self.METADATA] personality = self.properties[self.PERSONALITY] if metadata is not None or personality: limits = self.client_plugin().absolute_limits() # if 'security_groups' present for the server and explict 'port' # in one or more entries in 'networks', raise validation error if networks_with_port and self.properties[self.SECURITY_GROUPS]: raise exception.ResourcePropertyConflict( self.SECURITY_GROUPS, "/".join([self.NETWORKS, self.NETWORK_PORT])) # verify that the number of metadata entries is not greater # than the maximum number allowed in the provider's absolute # limits if metadata is not None: msg = _('Instance metadata must not contain greater than %s ' 'entries. 
This is the maximum number allowed by your ' 'service provider') % limits['maxServerMeta'] self._check_maximum(len(metadata), limits['maxServerMeta'], msg) # verify the number of personality files and the size of each # personality file against the provider's absolute limits if personality: msg = _("The personality property may not contain " "greater than %s entries.") % limits['maxPersonality'] self._check_maximum(len(personality), limits['maxPersonality'], msg) for path, contents in personality.items(): msg = (_("The contents of personality file \"%(path)s\" " "is larger than the maximum allowed personality " "file size (%(max_size)s bytes).") % {'path': path, 'max_size': limits['maxPersonalitySize']}) self._check_maximum(len(bytes(contents.encode('utf-8'))), limits['maxPersonalitySize'], msg) def _delete_temp_url(self): object_name = self.data().get('metadata_object_name') if not object_name: return try: container = self.physical_resource_name() swift = self.client('swift') swift.delete_object(container, object_name) headers = swift.head_container(container) if int(headers['x-container-object-count']) == 0: swift.delete_container(container) except Exception as ex: self.client_plugin('swift').ignore_not_found(ex) def _delete_queue(self): queue_id = self.data().get('metadata_queue_id') if not queue_id: return client_plugin = self.client_plugin('zaqar') zaqar = client_plugin.create_for_tenant( self.stack.stack_user_project_id) try: zaqar.queue(queue_id).delete() except Exception as ex: client_plugin.ignore_not_found(ex) self.data_delete('metadata_queue_id') def handle_snapshot_delete(self, state): if state[0] != self.FAILED: image_id = self.client().servers.create_image( self.resource_id, self.physical_resource_name()) return progress.ServerDeleteProgress( self.resource_id, image_id, False) return self.handle_delete() def handle_delete(self): if self.resource_id is None: return if self.user_data_software_config(): self._delete_user() self._delete_temp_url() 
self._delete_queue() # remove internal and external ports self._delete_internal_ports() self.data_delete('external_ports') try: self.client().servers.delete(self.resource_id) except Exception as e: self.client_plugin().ignore_not_found(e) return return progress.ServerDeleteProgress(self.resource_id) def check_delete_complete(self, prg): if not prg: return True if not prg.image_complete: image = self.client().images.get(prg.image_id) if image.status in ('DELETED', 'ERROR'): raise exception.Error(image.status) elif image.status == 'ACTIVE': prg.image_complete = True if not self.handle_delete(): return True return False return self.client_plugin().check_delete_server_complete( prg.server_id) def handle_suspend(self): """Suspend a server. Note we do not wait for the SUSPENDED state, this is polled for by check_suspend_complete in a similar way to the create logic so we can take advantage of coroutines. """ if self.resource_id is None: raise exception.Error(_('Cannot suspend %s, resource_id not set') % self.name) try: server = self.client().servers.get(self.resource_id) except Exception as e: if self.client_plugin().is_not_found(e): raise exception.NotFound(_('Failed to find server %s') % self.resource_id) else: raise else: # if the server has been suspended successful, # no need to suspend again if self.client_plugin().get_status(server) != 'SUSPENDED': LOG.debug('suspending server %s' % self.resource_id) server.suspend() return server.id def check_suspend_complete(self, server_id): cp = self.client_plugin() server = cp.fetch_server(server_id) if not server: return False status = cp.get_status(server) LOG.debug('%(name)s check_suspend_complete status = %(status)s' % {'name': self.name, 'status': status}) if status in list(cp.deferred_server_statuses + ['ACTIVE']): return status == 'SUSPENDED' else: exc = exception.ResourceUnknownStatus( result=_('Suspend of server %s failed') % server.name, resource_status=status) raise exc def handle_resume(self): """Resume a server. 
Note we do not wait for the ACTIVE state, this is polled for by check_resume_complete in a similar way to the create logic so we can take advantage of coroutines. """ if self.resource_id is None: raise exception.Error(_('Cannot resume %s, resource_id not set') % self.name) try: server = self.client().servers.get(self.resource_id) except Exception as e: if self.client_plugin().is_not_found(e): raise exception.NotFound(_('Failed to find server %s') % self.resource_id) else: raise else: # if the server has been resumed successful, # no need to resume again if self.client_plugin().get_status(server) != 'ACTIVE': LOG.debug('resuming server %s' % self.resource_id) server.resume() return server.id def check_resume_complete(self, server_id): return self.client_plugin()._check_active(server_id) def handle_snapshot(self): image_id = self.client().servers.create_image( self.resource_id, self.physical_resource_name()) self.data_set('snapshot_image_id', image_id) return image_id def check_snapshot_complete(self, image_id): image = self.client().images.get(image_id) if image.status == 'ACTIVE': return True elif image.status == 'ERROR' or image.status == 'DELETED': raise exception.Error(image.status) return False def handle_delete_snapshot(self, snapshot): image_id = snapshot['resource_data'].get('snapshot_image_id') try: self.client().images.delete(image_id) except Exception as e: self.client_plugin().ignore_not_found(e) def handle_restore(self, defn, restore_data): image_id = restore_data['resource_data']['snapshot_image_id'] props = function.resolve(self.properties.data) props[self.IMAGE] = image_id return defn.freeze(properties=props) def prepare_for_replace(self): self.prepare_ports_for_replace() def restore_prev_rsrc(self, convergence=False): self.restore_ports_after_rollback(convergence=convergence) def resource_mapping(): return { 'OS::Nova::Server': Server, }
42.037011
79
0.570842
import copy import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import uuidutils import six from heat.common import exception from heat.common.i18n import _ from heat.engine import attributes from heat.engine.clients import progress from heat.engine import constraints from heat.engine import function from heat.engine import properties from heat.engine.resources.openstack.neutron import port as neutron_port from heat.engine.resources.openstack.neutron import subnet from heat.engine.resources.openstack.nova import server_network_mixin from heat.engine.resources import scheduler_hints as sh from heat.engine.resources import stack_user from heat.engine import support from heat.rpc import api as rpc_api cfg.CONF.import_opt('default_software_config_transport', 'heat.common.config') LOG = logging.getLogger(__name__) class Server(stack_user.StackUser, sh.SchedulerHintsMixin, server_network_mixin.ServerNetworkMixin): PROPERTIES = ( NAME, IMAGE, BLOCK_DEVICE_MAPPING, BLOCK_DEVICE_MAPPING_V2, FLAVOR, FLAVOR_UPDATE_POLICY, IMAGE_UPDATE_POLICY, KEY_NAME, ADMIN_USER, AVAILABILITY_ZONE, SECURITY_GROUPS, NETWORKS, SCHEDULER_HINTS, METADATA, USER_DATA_FORMAT, USER_DATA, RESERVATION_ID, CONFIG_DRIVE, DISK_CONFIG, PERSONALITY, ADMIN_PASS, SOFTWARE_CONFIG_TRANSPORT ) = ( 'name', 'image', 'block_device_mapping', 'block_device_mapping_v2', 'flavor', 'flavor_update_policy', 'image_update_policy', 'key_name', 'admin_user', 'availability_zone', 'security_groups', 'networks', 'scheduler_hints', 'metadata', 'user_data_format', 'user_data', 'reservation_id', 'config_drive', 'diskConfig', 'personality', 'admin_pass', 'software_config_transport' ) _BLOCK_DEVICE_MAPPING_KEYS = ( BLOCK_DEVICE_MAPPING_DEVICE_NAME, BLOCK_DEVICE_MAPPING_VOLUME_ID, BLOCK_DEVICE_MAPPING_SNAPSHOT_ID, BLOCK_DEVICE_MAPPING_VOLUME_SIZE, BLOCK_DEVICE_MAPPING_DELETE_ON_TERM, ) = ( 'device_name', 'volume_id', 'snapshot_id', 'volume_size', 
'delete_on_termination', ) _BLOCK_DEVICE_MAPPING_V2_KEYS = ( BLOCK_DEVICE_MAPPING_DEVICE_NAME, BLOCK_DEVICE_MAPPING_VOLUME_ID, BLOCK_DEVICE_MAPPING_IMAGE_ID, BLOCK_DEVICE_MAPPING_SNAPSHOT_ID, BLOCK_DEVICE_MAPPING_SWAP_SIZE, BLOCK_DEVICE_MAPPING_DEVICE_TYPE, BLOCK_DEVICE_MAPPING_DISK_BUS, BLOCK_DEVICE_MAPPING_BOOT_INDEX, BLOCK_DEVICE_MAPPING_VOLUME_SIZE, BLOCK_DEVICE_MAPPING_DELETE_ON_TERM, ) = ( 'device_name', 'volume_id', 'image_id', 'snapshot_id', 'swap_size', 'device_type', 'disk_bus', 'boot_index', 'volume_size', 'delete_on_termination', ) _NETWORK_KEYS = ( NETWORK_UUID, NETWORK_ID, NETWORK_FIXED_IP, NETWORK_PORT, NETWORK_SUBNET, NETWORK_PORT_EXTRA ) = ( 'uuid', 'network', 'fixed_ip', 'port', 'subnet', 'port_extra_properties' ) _SOFTWARE_CONFIG_FORMATS = ( HEAT_CFNTOOLS, RAW, SOFTWARE_CONFIG ) = ( 'HEAT_CFNTOOLS', 'RAW', 'SOFTWARE_CONFIG' ) _SOFTWARE_CONFIG_TRANSPORTS = ( POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE ) = ( 'POLL_SERVER_CFN', 'POLL_SERVER_HEAT', 'POLL_TEMP_URL', 'ZAQAR_MESSAGE' ) ATTRIBUTES = ( NAME_ATTR, ADDRESSES, NETWORKS_ATTR, FIRST_ADDRESS, INSTANCE_NAME, ACCESSIPV4, ACCESSIPV6, CONSOLE_URLS, ) = ( 'name', 'addresses', 'networks', 'first_address', 'instance_name', 'accessIPv4', 'accessIPv6', 'console_urls', ) properties_schema = { NAME: properties.Schema( properties.Schema.STRING, _('Server name.'), update_allowed=True ), IMAGE: properties.Schema( properties.Schema.STRING, _('The ID or name of the image to boot with.'), constraints=[ constraints.CustomConstraint('glance.image') ], update_allowed=True ), BLOCK_DEVICE_MAPPING: properties.Schema( properties.Schema.LIST, _('Block device mappings for this server.'), schema=properties.Schema( properties.Schema.MAP, schema={ BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema( properties.Schema.STRING, _('A device name where the volume will be ' 'attached in the system at /dev/device_name. 
' 'This value is typically vda.'), required=True ), BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema( properties.Schema.STRING, _('The ID of the volume to boot from. Only one ' 'of volume_id or snapshot_id should be ' 'provided.'), constraints=[ constraints.CustomConstraint('cinder.volume') ] ), BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema( properties.Schema.STRING, _('The ID of the snapshot to create a volume ' 'from.'), constraints=[ constraints.CustomConstraint('cinder.snapshot') ] ), BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema( properties.Schema.INTEGER, _('The size of the volume, in GB. It is safe to ' 'leave this blank and have the Compute service ' 'infer the size.') ), BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema( properties.Schema.BOOLEAN, _('Indicate whether the volume should be deleted ' 'when the server is terminated.') ), }, ) ), BLOCK_DEVICE_MAPPING_V2: properties.Schema( properties.Schema.LIST, _('Block device mappings v2 for this server.'), schema=properties.Schema( properties.Schema.MAP, schema={ BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema( properties.Schema.STRING, _('A device name where the volume will be ' 'attached in the system at /dev/device_name. 
' 'This value is typically vda.'), ), BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema( properties.Schema.STRING, _('The volume_id can be boot or non-boot device ' 'to the server.'), constraints=[ constraints.CustomConstraint('cinder.volume') ] ), BLOCK_DEVICE_MAPPING_IMAGE_ID: properties.Schema( properties.Schema.STRING, _('The ID of the image to create a volume from.'), constraints=[ constraints.CustomConstraint('glance.image') ], ), BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema( properties.Schema.STRING, _('The ID of the snapshot to create a volume ' 'from.'), constraints=[ constraints.CustomConstraint('cinder.snapshot') ] ), BLOCK_DEVICE_MAPPING_SWAP_SIZE: properties.Schema( properties.Schema.INTEGER, _('The size of the swap, in MB.') ), BLOCK_DEVICE_MAPPING_DEVICE_TYPE: properties.Schema( properties.Schema.STRING, _('Device type: at the moment we can make distinction' ' only between disk and cdrom.'), constraints=[ constraints.AllowedValues(['cdrom', 'disk']), ], ), BLOCK_DEVICE_MAPPING_DISK_BUS: properties.Schema( properties.Schema.STRING, _('Bus of the device: hypervisor driver chooses a ' 'suitable default if omitted.'), constraints=[ constraints.AllowedValues(['ide', 'lame_bus', 'scsi', 'usb', 'virtio']), ], ), BLOCK_DEVICE_MAPPING_BOOT_INDEX: properties.Schema( properties.Schema.INTEGER, _('Integer used for ordering the boot disks.'), ), BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema( properties.Schema.INTEGER, _('Size of the block device in GB. 
If it is omitted, ' 'hypervisor driver calculates size.'), ), BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema( properties.Schema.BOOLEAN, _('Indicate whether the volume should be deleted ' 'when the server is terminated.') ), }, ), support_status=support.SupportStatus(version='2015.1') ), FLAVOR: properties.Schema( properties.Schema.STRING, _('The ID or name of the flavor to boot onto.'), required=True, update_allowed=True, constraints=[ constraints.CustomConstraint('nova.flavor') ] ), FLAVOR_UPDATE_POLICY: properties.Schema( properties.Schema.STRING, _('Policy on how to apply a flavor update; either by requesting ' 'a server resize or by replacing the entire server.'), default='RESIZE', constraints=[ constraints.AllowedValues(['RESIZE', 'REPLACE']), ], update_allowed=True ), IMAGE_UPDATE_POLICY: properties.Schema( properties.Schema.STRING, _('Policy on how to apply an image-id update; either by ' 'requesting a server rebuild or by replacing the entire server'), default='REBUILD', constraints=[ constraints.AllowedValues(['REBUILD', 'REPLACE', 'REBUILD_PRESERVE_EPHEMERAL']), ], update_allowed=True ), KEY_NAME: properties.Schema( properties.Schema.STRING, _('Name of keypair to inject into the server.'), constraints=[ constraints.CustomConstraint('nova.keypair') ] ), ADMIN_USER: properties.Schema( properties.Schema.STRING, _('Name of the administrative user to use on the server.'), support_status=support.SupportStatus( status=support.HIDDEN, version='5.0.0', message=_('The default cloud-init user set up for each image ' '(e.g. 
"ubuntu" for Ubuntu 12.04+, "fedora" for ' 'Fedora 19+ and "cloud-user" for CentOS/RHEL 6.5).'), previous_status=support.SupportStatus( status=support.DEPRECATED, version='2014.1', previous_status=support.SupportStatus(version='2013.2') ) ) ), AVAILABILITY_ZONE: properties.Schema( properties.Schema.STRING, _('Name of the availability zone for server placement.') ), SECURITY_GROUPS: properties.Schema( properties.Schema.LIST, _('List of security group names or IDs. Cannot be used if ' 'neutron ports are associated with this server; assign ' 'security groups to the ports instead.'), default=[] ), NETWORKS: properties.Schema( properties.Schema.LIST, _('An ordered list of nics to be added to this server, with ' 'information about connected networks, fixed ips, port etc.'), schema=properties.Schema( properties.Schema.MAP, schema={ NETWORK_UUID: properties.Schema( properties.Schema.STRING, _('ID of network to create a port on.'), support_status=support.SupportStatus( status=support.HIDDEN, version='5.0.0', previous_status=support.SupportStatus( status=support.DEPRECATED, message=_('Use property %s.') % NETWORK_ID, version='2014.1' ) ), constraints=[ constraints.CustomConstraint('neutron.network') ] ), NETWORK_ID: properties.Schema( properties.Schema.STRING, _('Name or ID of network to create a port on.'), constraints=[ constraints.CustomConstraint('neutron.network') ] ), NETWORK_FIXED_IP: properties.Schema( properties.Schema.STRING, _('Fixed IP address to specify for the port ' 'created on the requested network.'), constraints=[ constraints.CustomConstraint('ip_addr') ] ), NETWORK_PORT: properties.Schema( properties.Schema.STRING, _('ID of an existing port to associate with this ' 'server.'), constraints=[ constraints.CustomConstraint('neutron.port') ] ), NETWORK_PORT_EXTRA: properties.Schema( properties.Schema.MAP, _('Dict, which has expand properties for port. 
' 'Used only if port property is not specified ' 'for creating port.'), schema=neutron_port.Port.extra_properties_schema, support_status=support.SupportStatus(version='6.0.0') ), NETWORK_SUBNET: properties.Schema( properties.Schema.STRING, _('Subnet in which to allocate the IP address for ' 'port. Used for creating port, based on derived ' 'properties. If subnet is specified, network ' 'property becomes optional.'), support_status=support.SupportStatus(version='5.0.0') ) }, ), update_allowed=True ), SCHEDULER_HINTS: properties.Schema( properties.Schema.MAP, _('Arbitrary key-value pairs specified by the client to help ' 'boot a server.') ), METADATA: properties.Schema( properties.Schema.MAP, _('Arbitrary key/value metadata to store for this server. Both ' 'keys and values must be 255 characters or less. Non-string ' 'values will be serialized to JSON (and the serialized ' 'string must be 255 characters or less).'), update_allowed=True ), USER_DATA_FORMAT: properties.Schema( properties.Schema.STRING, _('How the user_data should be formatted for the server. For ' 'HEAT_CFNTOOLS, the user_data is bundled as part of the ' 'heat-cfntools cloud-init boot configuration data. For RAW ' 'the user_data is passed to Nova unmodified. ' 'For SOFTWARE_CONFIG user_data is bundled as part of the ' 'software config data, and metadata is derived from any ' 'associated SoftwareDeployment resources.'), default=HEAT_CFNTOOLS, constraints=[ constraints.AllowedValues(_SOFTWARE_CONFIG_FORMATS), ] ), SOFTWARE_CONFIG_TRANSPORT: properties.Schema( properties.Schema.STRING, _('How the server should receive the metadata required for ' 'software configuration. POLL_SERVER_CFN will allow calls to ' 'the cfn API action DescribeStackResource authenticated with ' 'the provided keypair. POLL_SERVER_HEAT will allow calls to ' 'the Heat API resource-show using the provided keystone ' 'credentials. POLL_TEMP_URL will create and populate a ' 'Swift TempURL with metadata for polling. 
ZAQAR_MESSAGE will ' 'create a dedicated zaqar queue and post the metadata ' 'for polling.'), default=cfg.CONF.default_software_config_transport, constraints=[ constraints.AllowedValues(_SOFTWARE_CONFIG_TRANSPORTS), ] ), USER_DATA: properties.Schema( properties.Schema.STRING, _('User data script to be executed by cloud-init.'), default='' ), RESERVATION_ID: properties.Schema( properties.Schema.STRING, _('A UUID for the set of servers being requested.') ), CONFIG_DRIVE: properties.Schema( properties.Schema.BOOLEAN, _('If True, enable config drive on the server.') ), DISK_CONFIG: properties.Schema( properties.Schema.STRING, _('Control how the disk is partitioned when the server is ' 'created.'), constraints=[ constraints.AllowedValues(['AUTO', 'MANUAL']), ] ), PERSONALITY: properties.Schema( properties.Schema.MAP, _('A map of files to create/overwrite on the server upon boot. ' 'Keys are file names and values are the file contents.'), default={} ), ADMIN_PASS: properties.Schema( properties.Schema.STRING, _('The administrator password for the server.'), update_allowed=True ), } attributes_schema = { NAME_ATTR: attributes.Schema( _('Name of the server.'), type=attributes.Schema.STRING ), ADDRESSES: attributes.Schema( _('A dict of all network addresses with corresponding port_id. ' 'Each network will have two keys in dict, they are network ' 'name and network id. ' 'The port ID may be obtained through the following expression: ' '"{get_attr: [<server>, addresses, <network name_or_id>, 0, ' 'port]}".'), type=attributes.Schema.MAP ), NETWORKS_ATTR: attributes.Schema( _('A dict of assigned network addresses of the form: ' '{"public": [ip1, ip2...], "private": [ip3, ip4], ' '"public_uuid": [ip1, ip2...], "private_uuid": [ip3, ip4]}. ' 'Each network will have two keys in dict, they are network ' 'name and network id. 
'), type=attributes.Schema.MAP ), FIRST_ADDRESS: attributes.Schema( _('Convenience attribute to fetch the first assigned network ' 'address, or an empty string if nothing has been assigned at ' 'this time. Result may not be predictable if the server has ' 'addresses from more than one network.'), support_status=support.SupportStatus( status=support.HIDDEN, version='5.0.0', message=_('Use the networks attribute instead of ' 'first_address. For example: "{get_attr: ' '[<server name>, networks, <network name>, 0]}"'), previous_status=support.SupportStatus( status=support.DEPRECATED, version='2014.2', previous_status=support.SupportStatus(version='2013.2') ) ) ), INSTANCE_NAME: attributes.Schema( _('AWS compatible instance name.'), type=attributes.Schema.STRING ), ACCESSIPV4: attributes.Schema( _('The manually assigned alternative public IPv4 address ' 'of the server.'), type=attributes.Schema.STRING ), ACCESSIPV6: attributes.Schema( _('The manually assigned alternative public IPv6 address ' 'of the server.'), type=attributes.Schema.STRING ), CONSOLE_URLS: attributes.Schema( _("URLs of server's consoles. " "To get a specific console type, the requested type " "can be specified as parameter to the get_attr function, " "e.g. get_attr: [ <server>, console_urls, novnc ]. 
" "Currently supported types are " "novnc, xvpvnc, spice-html5, rdp-html5, serial."), support_status=support.SupportStatus(version='2015.1'), type=attributes.Schema.MAP ), } # Server host name limit to 53 characters by due to typical default # linux HOST_NAME_MAX of 64, minus the .novalocal appended to the name physical_resource_name_limit = 53 default_client_name = 'nova' entity = 'servers' def translation_rules(self): return [properties.TranslationRule( self.properties, properties.TranslationRule.REPLACE, source_path=[self.NETWORKS, self.NETWORK_ID], value_name=self.NETWORK_UUID)] def __init__(self, name, json_snippet, stack): super(Server, self).__init__(name, json_snippet, stack) if self.user_data_software_config(): self._register_access_key() def _server_name(self): name = self.properties[self.NAME] if name: return name return self.physical_resource_name() def _config_drive(self): # This method is overridden by the derived CloudServer resource return self.properties[self.CONFIG_DRIVE] def _populate_deployments_metadata(self, meta): meta['deployments'] = meta.get('deployments', []) meta['os-collect-config'] = meta.get('os-collect-config', {}) if self.transport_poll_server_heat(): meta['os-collect-config'].update({'heat': { 'user_id': self._get_user_id(), 'password': self.password, 'auth_url': self.context.auth_url, 'project_id': self.stack.stack_user_project_id, 'stack_id': self.stack.identifier().stack_path(), 'resource_name': self.name}}) if self.transport_zaqar_message(): queue_id = self.physical_resource_name() self.data_set('metadata_queue_id', queue_id) zaqar_plugin = self.client_plugin('zaqar') zaqar = zaqar_plugin.create_for_tenant( self.stack.stack_user_project_id) queue = zaqar.queue(queue_id) queue.post({'body': meta, 'ttl': zaqar_plugin.DEFAULT_TTL}) meta['os-collect-config'].update({'zaqar': { 'user_id': self._get_user_id(), 'password': self.password, 'auth_url': self.context.auth_url, 'project_id': self.stack.stack_user_project_id, 'queue_id': 
queue_id}}) elif self.transport_poll_server_cfn(): meta['os-collect-config'].update({'cfn': { 'metadata_url': '%s/v1/' % cfg.CONF.heat_metadata_server_url, 'access_key_id': self.access_key, 'secret_access_key': self.secret_key, 'stack_name': self.stack.name, 'path': '%s.Metadata' % self.name}}) elif self.transport_poll_temp_url(): container = self.physical_resource_name() object_name = str(uuid.uuid4()) self.client('swift').put_container(container) url = self.client_plugin('swift').get_temp_url( container, object_name, method='GET') put_url = self.client_plugin('swift').get_temp_url( container, object_name) self.data_set('metadata_put_url', put_url) self.data_set('metadata_object_name', object_name) meta['os-collect-config'].update({'request': { 'metadata_url': url}}) self.client('swift').put_object( container, object_name, jsonutils.dumps(meta)) self.metadata_set(meta) def _register_access_key(self): def access_allowed(resource_name): return resource_name == self.name if self.transport_poll_server_cfn(): self.stack.register_access_allowed_handler( self.access_key, access_allowed) elif self.transport_poll_server_heat(): self.stack.register_access_allowed_handler( self._get_user_id(), access_allowed) def _create_transport_credentials(self): if self.transport_poll_server_cfn(): self._create_user() self._create_keypair() elif (self.transport_poll_server_heat() or self.transport_zaqar_message()): self.password = uuid.uuid4().hex self._create_user() self._register_access_key() @property def access_key(self): return self.data().get('access_key') @property def secret_key(self): return self.data().get('secret_key') @property def password(self): return self.data().get('password') @password.setter def password(self, password): if password is None: self.data_delete('password') else: self.data_set('password', password, True) def user_data_raw(self): return self.properties[self.USER_DATA_FORMAT] == self.RAW def user_data_software_config(self): return self.properties[ 
self.USER_DATA_FORMAT] == self.SOFTWARE_CONFIG def transport_poll_server_cfn(self): return self.properties[ self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_SERVER_CFN def transport_poll_server_heat(self): return self.properties[ self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_SERVER_HEAT def transport_poll_temp_url(self): return self.properties[ self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_TEMP_URL def transport_zaqar_message(self): return self.properties.get( self.SOFTWARE_CONFIG_TRANSPORT) == self.ZAQAR_MESSAGE def get_software_config(self, ud_content): try: sc = self.rpc_client().show_software_config( self.context, ud_content) return sc[rpc_api.SOFTWARE_CONFIG_CONFIG] except Exception as ex: self.rpc_client().ignore_error_named(ex, 'NotFound') return ud_content def handle_create(self): security_groups = self.properties[self.SECURITY_GROUPS] user_data_format = self.properties[self.USER_DATA_FORMAT] ud_content = self.properties[self.USER_DATA] if self.user_data_software_config() or self.user_data_raw(): if uuidutils.is_uuid_like(ud_content): # attempt to load the userdata from software config ud_content = self.get_software_config(ud_content) metadata = self.metadata_get(True) or {} if self.user_data_software_config(): self._create_transport_credentials() self._populate_deployments_metadata(metadata) userdata = self.client_plugin().build_userdata( metadata, ud_content, instance_user=None, user_data_format=user_data_format) flavor = self.properties[self.FLAVOR] availability_zone = self.properties[self.AVAILABILITY_ZONE] image = self.properties[self.IMAGE] if image: image = self.client_plugin('glance').get_image_id(image) flavor_id = self.client_plugin().get_flavor_id(flavor) instance_meta = self.properties[self.METADATA] if instance_meta is not None: instance_meta = self.client_plugin().meta_serialize( instance_meta) scheduler_hints = self._scheduler_hints( self.properties[self.SCHEDULER_HINTS]) nics = self._build_nics(self.properties[self.NETWORKS]) block_device_mapping 
= self._build_block_device_mapping( self.properties[self.BLOCK_DEVICE_MAPPING]) block_device_mapping_v2 = self._build_block_device_mapping_v2( self.properties[self.BLOCK_DEVICE_MAPPING_V2]) reservation_id = self.properties[self.RESERVATION_ID] disk_config = self.properties[self.DISK_CONFIG] admin_pass = self.properties[self.ADMIN_PASS] or None personality_files = self.properties[self.PERSONALITY] key_name = self.properties[self.KEY_NAME] server = None try: server = self.client().servers.create( name=self._server_name(), image=image, flavor=flavor_id, key_name=key_name, security_groups=security_groups, userdata=userdata, meta=instance_meta, scheduler_hints=scheduler_hints, nics=nics, availability_zone=availability_zone, block_device_mapping=block_device_mapping, block_device_mapping_v2=block_device_mapping_v2, reservation_id=reservation_id, config_drive=self._config_drive(), disk_config=disk_config, files=personality_files, admin_pass=admin_pass) finally: # Avoid a race condition where the thread could be canceled # before the ID is stored if server is not None: self.resource_id_set(server.id) return server.id def check_create_complete(self, server_id): check = self.client_plugin()._check_active(server_id) if check: self.store_external_ports() return check def handle_check(self): server = self.client().servers.get(self.resource_id) status = self.client_plugin().get_status(server) checks = [{'attr': 'status', 'expected': 'ACTIVE', 'current': status}] self._verify_check_conditions(checks) @classmethod def _build_block_device_mapping(cls, bdm): if not bdm: return None bdm_dict = {} for mapping in bdm: mapping_parts = [] snapshot_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID) if snapshot_id: mapping_parts.append(snapshot_id) mapping_parts.append('snap') else: volume_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID) mapping_parts.append(volume_id) mapping_parts.append('') volume_size = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE) delete = 
mapping.get(cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM) if volume_size: mapping_parts.append(str(volume_size)) else: mapping_parts.append('') if delete: mapping_parts.append(str(delete)) device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME) bdm_dict[device_name] = ':'.join(mapping_parts) return bdm_dict @classmethod def _build_block_device_mapping_v2(cls, bdm_v2): if not bdm_v2: return None bdm_v2_list = [] for mapping in bdm_v2: bmd_dict = None if mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID): bmd_dict = { 'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID), 'source_type': 'volume', 'destination_type': 'volume', 'boot_index': 0, 'delete_on_termination': False, } elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID): bmd_dict = { 'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID), 'source_type': 'snapshot', 'destination_type': 'volume', 'boot_index': 0, 'delete_on_termination': False, } elif mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE_ID): bmd_dict = { 'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE_ID), 'source_type': 'image', 'destination_type': 'volume', 'boot_index': 0, 'delete_on_termination': False, } elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE): bmd_dict = { 'source_type': 'blank', 'destination_type': 'local', 'boot_index': -1, 'delete_on_termination': True, 'guest_format': 'swap', 'volume_size': mapping.get( cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE), } # NOTE(prazumovsky): In case of server doesn't take empty value of device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME) if device_name: bmd_dict[cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME] = device_name update_props = (cls.BLOCK_DEVICE_MAPPING_DEVICE_TYPE, cls.BLOCK_DEVICE_MAPPING_DISK_BUS, cls.BLOCK_DEVICE_MAPPING_BOOT_INDEX, cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE, cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM) for update_prop in update_props: if mapping.get(update_prop) is not None: bmd_dict[update_prop] = mapping.get(update_prop) if bmd_dict: bdm_v2_list.append(bmd_dict) return 
bdm_v2_list def _add_port_for_address(self, server): nets = copy.deepcopy(server.addresses) ifaces = server.interface_list() ip_mac_mapping_on_port_id = dict(((iface.fixed_ips[0]['ip_address'], iface.mac_addr), iface.port_id) for iface in ifaces) for net_name in nets: for addr in nets[net_name]: addr['port'] = ip_mac_mapping_on_port_id.get( (addr['addr'], addr['OS-EXT-IPS-MAC:mac_addr'])) return self._extend_networks(nets) def _extend_networks(self, networks): nets = copy.deepcopy(networks) for key in list(nets.keys()): try: net_id = self.client_plugin().get_net_id_by_label(key) except (exception.NovaNetworkNotFound, exception.PhysicalResourceNameAmbiguity): net_id = None if net_id: nets[net_id] = nets[key] return nets def _resolve_attribute(self, name): if name == self.FIRST_ADDRESS: return self.client_plugin().server_to_ipaddress( self.resource_id) or '' if name == self.NAME_ATTR: return self._server_name() try: server = self.client().servers.get(self.resource_id) except Exception as e: self.client_plugin().ignore_not_found(e) return '' if name == self.ADDRESSES: return self._add_port_for_address(server) if name == self.NETWORKS_ATTR: return self._extend_networks(server.networks) if name == self.INSTANCE_NAME: return getattr(server, 'OS-EXT-SRV-ATTR:instance_name', None) if name == self.ACCESSIPV4: return server.accessIPv4 if name == self.ACCESSIPV6: return server.accessIPv6 if name == self.CONSOLE_URLS: return self.client_plugin('nova').get_console_urls(server) def add_dependencies(self, deps): super(Server, self).add_dependencies(deps) nets = self.properties[self.NETWORKS] if not nets: return for res in six.itervalues(self.stack): if res.has_interface('OS::Neutron::Subnet'): subnet_net = (res.properties.get(subnet.Subnet.NETWORK_ID) or res.properties.get(subnet.Subnet.NETWORK)) for net in nets: net_id = (net.get(self.NETWORK_ID) or net.get(self.NETWORK_UUID)) if net_id and net_id == subnet_net: deps += (self, res) break def _update_flavor(self, prop_diff): 
flavor = prop_diff[self.FLAVOR] flavor_id = self.client_plugin().get_flavor_id(flavor) handler_args = {'args': (flavor_id,)} checker_args = {'args': (flavor_id, flavor)} prg_resize = progress.ServerUpdateProgress(self.resource_id, 'resize', handler_extra=handler_args, checker_extra=checker_args) prg_verify = progress.ServerUpdateProgress(self.resource_id, 'verify_resize') return prg_resize, prg_verify def _update_image(self, prop_diff): image_update_policy = ( prop_diff.get(self.IMAGE_UPDATE_POLICY) or self.properties[self.IMAGE_UPDATE_POLICY]) image = prop_diff[self.IMAGE] image_id = self.client_plugin('glance').get_image_id(image) preserve_ephemeral = ( image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL') password = (prop_diff.get(self.ADMIN_PASS) or self.properties[self.ADMIN_PASS]) kwargs = {'password': password, 'preserve_ephemeral': preserve_ephemeral} prg = progress.ServerUpdateProgress(self.resource_id, 'rebuild', handler_extra={'args': (image_id,), 'kwargs': kwargs}) return prg def _update_networks(self, server, prop_diff): updaters = [] new_networks = prop_diff.get(self.NETWORKS) old_networks = self.properties[self.NETWORKS] if not server: server = self.client().servers.get(self.resource_id) interfaces = server.interface_list() remove_ports, add_nets = self.calculate_networks( old_networks, new_networks, interfaces) for port in remove_ports: updaters.append( progress.ServerUpdateProgress( self.resource_id, 'interface_detach', complete=True, handler_extra={'args': (port,)}) ) for args in add_nets: updaters.append( progress.ServerUpdateProgress( self.resource_id, 'interface_attach', complete=True, handler_extra={'kwargs': args}) ) return updaters def _needs_update(self, after, before, after_props, before_props, prev_resource, check_init_complete=True): result = super(Server, self)._needs_update( after, before, after_props, before_props, prev_resource, check_init_complete=check_init_complete) prop_diff = self.update_template_diff_properties(after_props, 
before_props) if self.FLAVOR in prop_diff: flavor_update_policy = ( prop_diff.get(self.FLAVOR_UPDATE_POLICY) or self.properties[self.FLAVOR_UPDATE_POLICY]) if flavor_update_policy == 'REPLACE': raise exception.UpdateReplace(self.name) if self.IMAGE in prop_diff: image_update_policy = ( prop_diff.get(self.IMAGE_UPDATE_POLICY) or self.properties[self.IMAGE_UPDATE_POLICY]) if image_update_policy == 'REPLACE': raise exception.UpdateReplace(self.name) return result def handle_update(self, json_snippet, tmpl_diff, prop_diff): if 'Metadata' in tmpl_diff: if self.user_data_software_config(): metadata = self.metadata_get(True) or {} new_occ_md = tmpl_diff['Metadata'].get('os-collect-config', {}) occ_md = metadata.get('os-collect-config', {}) occ_md.update(new_occ_md) tmpl_diff['Metadata']['os-collect-config'] = occ_md deployment_md = metadata.get('deployments', []) tmpl_diff['Metadata']['deployments'] = deployment_md self.metadata_set(tmpl_diff['Metadata']) updaters = [] server = None if self.METADATA in prop_diff: server = self.client().servers.get(self.resource_id) self.client_plugin().meta_update(server, prop_diff[self.METADATA]) if self.FLAVOR in prop_diff: updaters.extend(self._update_flavor(prop_diff)) if self.IMAGE in prop_diff: updaters.append(self._update_image(prop_diff)) elif self.ADMIN_PASS in prop_diff: if not server: server = self.client().servers.get(self.resource_id) server.change_password(prop_diff[self.ADMIN_PASS]) if self.NAME in prop_diff: if not server: server = self.client().servers.get(self.resource_id) self.client_plugin().rename(server, prop_diff[self.NAME]) if self.NETWORKS in prop_diff: updaters.extend(self._update_networks(server, prop_diff)) # have called several APIs return updaters def check_update_complete(self, updaters): for prg in updaters: if not prg.called: handler = getattr(self.client_plugin(), prg.handler) prg.called = handler(*prg.handler_args, **prg.handler_kwargs) return False if not prg.complete: check_complete = 
getattr(self.client_plugin(), prg.checker) prg.complete = check_complete(*prg.checker_args, **prg.checker_kwargs) break status = all(prg.complete for prg in updaters) if status: self.store_external_ports() return status def metadata_update(self, new_metadata=None): if new_metadata is None: # Re-resolve the template metadata and merge it with the # current resource metadata. This is necessary because the # attributes referenced in the template metadata may change # and the resource itself adds keys to the metadata which # are not specified in the template (e.g the deployments data) meta = self.metadata_get(refresh=True) or {} tmpl_meta = self.t.metadata() meta.update(tmpl_meta) self.metadata_set(meta) @staticmethod def _check_maximum(count, maximum, msg): if maximum != -1 and count > maximum: raise exception.StackValidationFailed(message=msg) def _validate_block_device_mapping(self): # either volume_id or snapshot_id needs to be specified, but not both # for block device mapping. bdm = self.properties[self.BLOCK_DEVICE_MAPPING] or [] bootable_vol = False for mapping in bdm: device_name = mapping[self.BLOCK_DEVICE_MAPPING_DEVICE_NAME] if device_name == 'vda': bootable_vol = True volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID) snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID) if volume_id is not None and snapshot_id is not None: raise exception.ResourcePropertyConflict( self.BLOCK_DEVICE_MAPPING_VOLUME_ID, self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID) if volume_id is None and snapshot_id is None: msg = _('Either volume_id or snapshot_id must be specified for' ' device mapping %s') % device_name raise exception.StackValidationFailed(message=msg) bdm_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] or [] if bdm and bdm_v2: raise exception.ResourcePropertyConflict( self.BLOCK_DEVICE_MAPPING, self.BLOCK_DEVICE_MAPPING_V2) for mapping in bdm_v2: volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID) snapshot_id = 
mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID) image_id = mapping.get(self.BLOCK_DEVICE_MAPPING_IMAGE_ID) swap_size = mapping.get(self.BLOCK_DEVICE_MAPPING_SWAP_SIZE) property_tuple = (volume_id, snapshot_id, image_id, swap_size) if property_tuple.count(None) < 3: raise exception.ResourcePropertyConflict( self.BLOCK_DEVICE_MAPPING_VOLUME_ID, self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID, self.BLOCK_DEVICE_MAPPING_IMAGE_ID, self.BLOCK_DEVICE_MAPPING_SWAP_SIZE) if property_tuple.count(None) == 4: msg = _('Either volume_id, snapshot_id, image_id or ' 'swap_size must be specified.') raise exception.StackValidationFailed(message=msg) if any((volume_id, snapshot_id, image_id)): bootable_vol = True return bootable_vol def validate(self): super(Server, self).validate() if self.user_data_software_config(): if 'deployments' in self.t.metadata(): msg = _('deployments key not allowed in resource metadata ' 'with user_data_format of SOFTWARE_CONFIG') raise exception.StackValidationFailed(message=msg) bootable_vol = self._validate_block_device_mapping() # make sure the image exists if specified. 
image = self.properties[self.IMAGE] if not image and not bootable_vol: msg = _('Neither image nor bootable volume is specified for' ' instance %s') % self.name raise exception.StackValidationFailed(message=msg) # network properties 'uuid' and 'network' shouldn't be used networks = self.properties[self.NETWORKS] or [] networks_with_port = False for network in networks: networks_with_port = (networks_with_port or network.get(self.NETWORK_PORT)) self._validate_network(network) metadata = self.properties[self.METADATA] personality = self.properties[self.PERSONALITY] if metadata is not None or personality: limits = self.client_plugin().absolute_limits() # if 'security_groups' present for the server and explict 'port' # in one or more entries in 'networks', raise validation error if networks_with_port and self.properties[self.SECURITY_GROUPS]: raise exception.ResourcePropertyConflict( self.SECURITY_GROUPS, "/".join([self.NETWORKS, self.NETWORK_PORT])) # verify that the number of metadata entries is not greater # than the maximum number allowed in the provider's absolute if metadata is not None: msg = _('Instance metadata must not contain greater than %s ' 'entries. 
This is the maximum number allowed by your ' 'service provider') % limits['maxServerMeta'] self._check_maximum(len(metadata), limits['maxServerMeta'], msg) if personality: msg = _("The personality property may not contain " "greater than %s entries.") % limits['maxPersonality'] self._check_maximum(len(personality), limits['maxPersonality'], msg) for path, contents in personality.items(): msg = (_("The contents of personality file \"%(path)s\" " "is larger than the maximum allowed personality " "file size (%(max_size)s bytes).") % {'path': path, 'max_size': limits['maxPersonalitySize']}) self._check_maximum(len(bytes(contents.encode('utf-8'))), limits['maxPersonalitySize'], msg) def _delete_temp_url(self): object_name = self.data().get('metadata_object_name') if not object_name: return try: container = self.physical_resource_name() swift = self.client('swift') swift.delete_object(container, object_name) headers = swift.head_container(container) if int(headers['x-container-object-count']) == 0: swift.delete_container(container) except Exception as ex: self.client_plugin('swift').ignore_not_found(ex) def _delete_queue(self): queue_id = self.data().get('metadata_queue_id') if not queue_id: return client_plugin = self.client_plugin('zaqar') zaqar = client_plugin.create_for_tenant( self.stack.stack_user_project_id) try: zaqar.queue(queue_id).delete() except Exception as ex: client_plugin.ignore_not_found(ex) self.data_delete('metadata_queue_id') def handle_snapshot_delete(self, state): if state[0] != self.FAILED: image_id = self.client().servers.create_image( self.resource_id, self.physical_resource_name()) return progress.ServerDeleteProgress( self.resource_id, image_id, False) return self.handle_delete() def handle_delete(self): if self.resource_id is None: return if self.user_data_software_config(): self._delete_user() self._delete_temp_url() self._delete_queue() # remove internal and external ports self._delete_internal_ports() self.data_delete('external_ports') try: 
self.client().servers.delete(self.resource_id) except Exception as e: self.client_plugin().ignore_not_found(e) return return progress.ServerDeleteProgress(self.resource_id) def check_delete_complete(self, prg): if not prg: return True if not prg.image_complete: image = self.client().images.get(prg.image_id) if image.status in ('DELETED', 'ERROR'): raise exception.Error(image.status) elif image.status == 'ACTIVE': prg.image_complete = True if not self.handle_delete(): return True return False return self.client_plugin().check_delete_server_complete( prg.server_id) def handle_suspend(self): if self.resource_id is None: raise exception.Error(_('Cannot suspend %s, resource_id not set') % self.name) try: server = self.client().servers.get(self.resource_id) except Exception as e: if self.client_plugin().is_not_found(e): raise exception.NotFound(_('Failed to find server %s') % self.resource_id) else: raise else: # if the server has been suspended successful, # no need to suspend again if self.client_plugin().get_status(server) != 'SUSPENDED': LOG.debug('suspending server %s' % self.resource_id) server.suspend() return server.id def check_suspend_complete(self, server_id): cp = self.client_plugin() server = cp.fetch_server(server_id) if not server: return False status = cp.get_status(server) LOG.debug('%(name)s check_suspend_complete status = %(status)s' % {'name': self.name, 'status': status}) if status in list(cp.deferred_server_statuses + ['ACTIVE']): return status == 'SUSPENDED' else: exc = exception.ResourceUnknownStatus( result=_('Suspend of server %s failed') % server.name, resource_status=status) raise exc def handle_resume(self): if self.resource_id is None: raise exception.Error(_('Cannot resume %s, resource_id not set') % self.name) try: server = self.client().servers.get(self.resource_id) except Exception as e: if self.client_plugin().is_not_found(e): raise exception.NotFound(_('Failed to find server %s') % self.resource_id) else: raise else: # if the server 
has been resumed successful, # no need to resume again if self.client_plugin().get_status(server) != 'ACTIVE': LOG.debug('resuming server %s' % self.resource_id) server.resume() return server.id def check_resume_complete(self, server_id): return self.client_plugin()._check_active(server_id) def handle_snapshot(self): image_id = self.client().servers.create_image( self.resource_id, self.physical_resource_name()) self.data_set('snapshot_image_id', image_id) return image_id def check_snapshot_complete(self, image_id): image = self.client().images.get(image_id) if image.status == 'ACTIVE': return True elif image.status == 'ERROR' or image.status == 'DELETED': raise exception.Error(image.status) return False def handle_delete_snapshot(self, snapshot): image_id = snapshot['resource_data'].get('snapshot_image_id') try: self.client().images.delete(image_id) except Exception as e: self.client_plugin().ignore_not_found(e) def handle_restore(self, defn, restore_data): image_id = restore_data['resource_data']['snapshot_image_id'] props = function.resolve(self.properties.data) props[self.IMAGE] = image_id return defn.freeze(properties=props) def prepare_for_replace(self): self.prepare_ports_for_replace() def restore_prev_rsrc(self, convergence=False): self.restore_ports_after_rollback(convergence=convergence) def resource_mapping(): return { 'OS::Nova::Server': Server, }
true
true
f72c9842321b24921292819e0294421b24b2f549
3,305
py
Python
src/draw.py
lRomul/argus-bengali-ai
e64374230f5390a17305769126ff4bfc9a2a8644
[ "MIT" ]
2
2020-05-08T09:25:38.000Z
2020-10-04T16:15:29.000Z
src/draw.py
lRomul/argus-bengali-ai
e64374230f5390a17305769126ff4bfc9a2a8644
[ "MIT" ]
2
2022-01-13T03:19:24.000Z
2022-03-12T00:48:13.000Z
src/draw.py
lRomul/argus-bengali-ai
e64374230f5390a17305769126ff4bfc9a2a8644
[ "MIT" ]
null
null
null
import time import random import numpy as np from pathlib import Path from PIL import Image, ImageDraw, ImageFont, ImageFilter import torch from torch.utils.data import Dataset from src import config def draw_grapheme(grapheme, font_path, size=(137, 236)): height, width = size image = Image.new('RGB', (width, height)) draw = ImageDraw.Draw(image) font_size = np.random.randint(70, 110) font = ImageFont.truetype(str(font_path), font_size) w, h = draw.textsize(grapheme, font=font) width_ratio = np.random.uniform(1.5, 2.5) height_ratio = np.random.uniform(2.5, 3.5) fill = np.random.randint(200, 255) draw.text(((width - w) / width_ratio, (height - h) / height_ratio), grapheme, font=font, fill=fill) image = image.filter(ImageFilter.BLUR) return np.array(image)[:, :, 0] def get_draw_data(): graphemes = [] for grapheme_root_idx, grapheme_root in config.class_map['grapheme_root'].items(): for vowel_diacritic_idx, vowel_diacritic in config.class_map['vowel_diacritic'].items(): for consonant_diacritic_idx, consonant_diacritic in config.class_map['consonant_diacritic'].items(): consonant_diacritic, grapheme_root, vowel_diacritic = [c if c != '0' else '' for c in [consonant_diacritic, grapheme_root, vowel_diacritic]] grapheme = consonant_diacritic + grapheme_root + vowel_diacritic graphemes.append({ 'grapheme': grapheme, 'grapheme_root': grapheme_root_idx, 'vowel_diacritic': vowel_diacritic_idx, 'consonant_diacritic': consonant_diacritic_idx }) return graphemes class BengaliDrawDataset(Dataset): def __init__(self, fonts_dir, transform=None, mixer=None): self.fonts_dir = fonts_dir self.transform = transform self.mixer = mixer self.data = get_draw_data() self.font_paths = sorted(Path(fonts_dir).glob('*.ttf')) def __len__(self): return len(self.data) def get_sample(self, idx): sample = self.data[idx] font_path = np.random.choice(self.font_paths) image = draw_grapheme(sample['grapheme'], font_path, size=config.raw_image_shape) grapheme = torch.tensor(sample['grapheme_root'], 
dtype=torch.int64) vowel = torch.tensor(sample['vowel_diacritic'], dtype=torch.int64) consonant = torch.tensor(sample['consonant_diacritic'], dtype=torch.int64) target = grapheme, vowel, consonant return image, target def _set_random_seed(self, idx): seed = int(time.time() * 1000.0) + idx random.seed(seed) np.random.seed(seed % (2**32 - 1)) @torch.no_grad() def __getitem__(self, idx): self._set_random_seed(idx) image, target = self.get_sample(idx) if self.mixer is not None: image, target = self.mixer(self, image, target) if self.transform is not None: image = self.transform(image) return image, target
36.318681
112
0.607867
import time import random import numpy as np from pathlib import Path from PIL import Image, ImageDraw, ImageFont, ImageFilter import torch from torch.utils.data import Dataset from src import config def draw_grapheme(grapheme, font_path, size=(137, 236)): height, width = size image = Image.new('RGB', (width, height)) draw = ImageDraw.Draw(image) font_size = np.random.randint(70, 110) font = ImageFont.truetype(str(font_path), font_size) w, h = draw.textsize(grapheme, font=font) width_ratio = np.random.uniform(1.5, 2.5) height_ratio = np.random.uniform(2.5, 3.5) fill = np.random.randint(200, 255) draw.text(((width - w) / width_ratio, (height - h) / height_ratio), grapheme, font=font, fill=fill) image = image.filter(ImageFilter.BLUR) return np.array(image)[:, :, 0] def get_draw_data(): graphemes = [] for grapheme_root_idx, grapheme_root in config.class_map['grapheme_root'].items(): for vowel_diacritic_idx, vowel_diacritic in config.class_map['vowel_diacritic'].items(): for consonant_diacritic_idx, consonant_diacritic in config.class_map['consonant_diacritic'].items(): consonant_diacritic, grapheme_root, vowel_diacritic = [c if c != '0' else '' for c in [consonant_diacritic, grapheme_root, vowel_diacritic]] grapheme = consonant_diacritic + grapheme_root + vowel_diacritic graphemes.append({ 'grapheme': grapheme, 'grapheme_root': grapheme_root_idx, 'vowel_diacritic': vowel_diacritic_idx, 'consonant_diacritic': consonant_diacritic_idx }) return graphemes class BengaliDrawDataset(Dataset): def __init__(self, fonts_dir, transform=None, mixer=None): self.fonts_dir = fonts_dir self.transform = transform self.mixer = mixer self.data = get_draw_data() self.font_paths = sorted(Path(fonts_dir).glob('*.ttf')) def __len__(self): return len(self.data) def get_sample(self, idx): sample = self.data[idx] font_path = np.random.choice(self.font_paths) image = draw_grapheme(sample['grapheme'], font_path, size=config.raw_image_shape) grapheme = torch.tensor(sample['grapheme_root'], 
dtype=torch.int64) vowel = torch.tensor(sample['vowel_diacritic'], dtype=torch.int64) consonant = torch.tensor(sample['consonant_diacritic'], dtype=torch.int64) target = grapheme, vowel, consonant return image, target def _set_random_seed(self, idx): seed = int(time.time() * 1000.0) + idx random.seed(seed) np.random.seed(seed % (2**32 - 1)) @torch.no_grad() def __getitem__(self, idx): self._set_random_seed(idx) image, target = self.get_sample(idx) if self.mixer is not None: image, target = self.mixer(self, image, target) if self.transform is not None: image = self.transform(image) return image, target
true
true
f72c98f625fd6ff9df578e247df919138a312028
1,260
py
Python
selectionsort.py
maxProgrammer/Entendendo_Algoritmos
8bc6ef9b7869150ef624333490b68d94b197cb75
[ "MIT" ]
null
null
null
selectionsort.py
maxProgrammer/Entendendo_Algoritmos
8bc6ef9b7869150ef624333490b68d94b197cb75
[ "MIT" ]
null
null
null
selectionsort.py
maxProgrammer/Entendendo_Algoritmos
8bc6ef9b7869150ef624333490b68d94b197cb75
[ "MIT" ]
null
null
null
#algoritmo utilizado para ordenação de uma lista. #a cada execução ele percorre toda lista e coloca o menor na posição (n-1) def encontraMenor(lista): #armazena o valor do indice 0 a variavel menorValor = lista[0] #considera que index zero tem o menor valor menorIndex = 0 #percorre lista do indice 1 ao ultimo for i in range(1,len(lista) - 1): #compra se lista[i] é menor que menor valor e #se verdadeiro atualiza menorValor e menorIndex if lista[i] < menorValor: menorValor = lista[i] menorIndex = i #retorna o index do menor valor encontrado return menorIndex #funcao que utiliza a funcaencontra menor #para gerar outra lista ordenada def ordenaSelecao(lista): #lista que receberá itens ordenados ordLista = [] #percorre todos elementos da lista for x in range(len(lista)): # a cada iteracao encontra menor item e o insere # na nova lista. Funcao pop armazena o item na nova lista # e apaga na antiga ao mesmo tempo. menor = encontraMenor(lista) ordLista.append(lista.pop(menor)) #retorna nova lista ordenada return ordLista #teste programa lista = [3,1,13,5,0,100] print(ordenaSelecao(lista))
28
74
0.674603
def encontraMenor(lista): menorValor = lista[0] menorIndex = 0 for i in range(1,len(lista) - 1): if lista[i] < menorValor: menorValor = lista[i] menorIndex = i return menorIndex def ordenaSelecao(lista): ordLista = [] for x in range(len(lista)): menor = encontraMenor(lista) ordLista.append(lista.pop(menor)) return ordLista lista = [3,1,13,5,0,100] print(ordenaSelecao(lista))
true
true
f72c99bc69fba8eb8ab5186eeff081f18b9e24a7
3,124
py
Python
src/dataset_prepare.py
dd-dos/Emotion-detection
23eb94cbceb70890cf6b0f63e84d80eae7336c85
[ "MIT" ]
null
null
null
src/dataset_prepare.py
dd-dos/Emotion-detection
23eb94cbceb70890cf6b0f63e84d80eae7336c85
[ "MIT" ]
null
null
null
src/dataset_prepare.py
dd-dos/Emotion-detection
23eb94cbceb70890cf6b0f63e84d80eae7336c85
[ "MIT" ]
null
null
null
import numpy as np import pandas as pd from PIL import Image from tqdm import tqdm import os # convert string to integer def atoi(s): n = 0 for i in s: n = n*10 + ord(i) - ord("0") return n # making folders outer_names = ['test','train'] inner_names = ['angry', 'disgusted', 'fearful', 'happy', 'sad', 'surprised', 'neutral'] os.makedirs('data', exist_ok=True) for outer_name in outer_names: os.makedirs(os.path.join('data',outer_name), exist_ok=True) for inner_name in inner_names: os.makedirs(os.path.join('data',outer_name,inner_name), exist_ok=True) # to keep count of each category angry = 0 disgusted = 0 fearful = 0 happy = 0 sad = 0 surprised = 0 neutral = 0 angry_test = 0 disgusted_test = 0 fearful_test = 0 happy_test = 0 sad_test = 0 surprised_test = 0 neutral_test = 0 df = pd.read_csv('./fer2013.csv') mat = np.zeros((48,48),dtype=np.uint8) print("Saving images...") # read the csv file line by line for i in tqdm(range(len(df))): txt = df['pixels'][i] words = txt.split() # the image size is 48x48 for j in range(2304): xind = j // 48 yind = j % 48 mat[xind][yind] = atoi(words[j]) img = Image.fromarray(mat) # train if i < 28709: if df['emotion'][i] == 0: img.save('./data/train/angry/im'+str(angry)+'.png') angry += 1 elif df['emotion'][i] == 1: img.save('./data/train/disgusted/im'+str(disgusted)+'.png') disgusted += 1 elif df['emotion'][i] == 2: img.save('./data/train/fearful/im'+str(fearful)+'.png') fearful += 1 elif df['emotion'][i] == 3: img.save('./data/train/happy/im'+str(happy)+'.png') happy += 1 elif df['emotion'][i] == 4: img.save('./data/train/sad/im'+str(sad)+'.png') sad += 1 elif df['emotion'][i] == 5: img.save('./data/train/surprised/im'+str(surprised)+'.png') surprised += 1 elif df['emotion'][i] == 6: img.save('./data/train/neutral/im'+str(neutral)+'.png') neutral += 1 # test else: if df['emotion'][i] == 0: img.save('./data/test/angry/im'+str(angry_test)+'.png') angry_test += 1 elif df['emotion'][i] == 1: 
img.save('./data/test/disgusted/im'+str(disgusted_test)+'.png') disgusted_test += 1 elif df['emotion'][i] == 2: img.save('./data/test/fearful/im'+str(fearful_test)+'.png') fearful_test += 1 elif df['emotion'][i] == 3: img.save('./data/test/happy/im'+str(happy_test)+'.png') happy_test += 1 elif df['emotion'][i] == 4: img.save('./data/test/sad/im'+str(sad_test)+'.png') sad_test += 1 elif df['emotion'][i] == 5: img.save('./data/test/surprised/im'+str(surprised_test)+'.png') surprised_test += 1 elif df['emotion'][i] == 6: img.save('./data/test/neutral/im'+str(neutral_test)+'.png') neutral_test += 1 print("Done!")
30.330097
87
0.546735
import numpy as np import pandas as pd from PIL import Image from tqdm import tqdm import os def atoi(s): n = 0 for i in s: n = n*10 + ord(i) - ord("0") return n outer_names = ['test','train'] inner_names = ['angry', 'disgusted', 'fearful', 'happy', 'sad', 'surprised', 'neutral'] os.makedirs('data', exist_ok=True) for outer_name in outer_names: os.makedirs(os.path.join('data',outer_name), exist_ok=True) for inner_name in inner_names: os.makedirs(os.path.join('data',outer_name,inner_name), exist_ok=True) angry = 0 disgusted = 0 fearful = 0 happy = 0 sad = 0 surprised = 0 neutral = 0 angry_test = 0 disgusted_test = 0 fearful_test = 0 happy_test = 0 sad_test = 0 surprised_test = 0 neutral_test = 0 df = pd.read_csv('./fer2013.csv') mat = np.zeros((48,48),dtype=np.uint8) print("Saving images...") for i in tqdm(range(len(df))): txt = df['pixels'][i] words = txt.split() for j in range(2304): xind = j // 48 yind = j % 48 mat[xind][yind] = atoi(words[j]) img = Image.fromarray(mat) if i < 28709: if df['emotion'][i] == 0: img.save('./data/train/angry/im'+str(angry)+'.png') angry += 1 elif df['emotion'][i] == 1: img.save('./data/train/disgusted/im'+str(disgusted)+'.png') disgusted += 1 elif df['emotion'][i] == 2: img.save('./data/train/fearful/im'+str(fearful)+'.png') fearful += 1 elif df['emotion'][i] == 3: img.save('./data/train/happy/im'+str(happy)+'.png') happy += 1 elif df['emotion'][i] == 4: img.save('./data/train/sad/im'+str(sad)+'.png') sad += 1 elif df['emotion'][i] == 5: img.save('./data/train/surprised/im'+str(surprised)+'.png') surprised += 1 elif df['emotion'][i] == 6: img.save('./data/train/neutral/im'+str(neutral)+'.png') neutral += 1 else: if df['emotion'][i] == 0: img.save('./data/test/angry/im'+str(angry_test)+'.png') angry_test += 1 elif df['emotion'][i] == 1: img.save('./data/test/disgusted/im'+str(disgusted_test)+'.png') disgusted_test += 1 elif df['emotion'][i] == 2: img.save('./data/test/fearful/im'+str(fearful_test)+'.png') fearful_test += 1 elif 
df['emotion'][i] == 3: img.save('./data/test/happy/im'+str(happy_test)+'.png') happy_test += 1 elif df['emotion'][i] == 4: img.save('./data/test/sad/im'+str(sad_test)+'.png') sad_test += 1 elif df['emotion'][i] == 5: img.save('./data/test/surprised/im'+str(surprised_test)+'.png') surprised_test += 1 elif df['emotion'][i] == 6: img.save('./data/test/neutral/im'+str(neutral_test)+'.png') neutral_test += 1 print("Done!")
true
true
f72c9a79f61fe1255118ac76e5e76311780f9ee8
1,612
py
Python
demos/grouped_mr_heart/demo_predict.py
mathpluscode/DeepReg
80854094feafec998fa6237199066556c73f31f9
[ "Apache-2.0" ]
null
null
null
demos/grouped_mr_heart/demo_predict.py
mathpluscode/DeepReg
80854094feafec998fa6237199066556c73f31f9
[ "Apache-2.0" ]
null
null
null
demos/grouped_mr_heart/demo_predict.py
mathpluscode/DeepReg
80854094feafec998fa6237199066556c73f31f9
[ "Apache-2.0" ]
null
null
null
import argparse from datetime import datetime from deepreg.predict import predict name = "grouped_mr_heart" # parser is used to simplify testing, by default it is not used # please run the script with --no-test flag to ensure non-testing mode # for instance: # python script.py --no-test parser = argparse.ArgumentParser() parser.add_argument( "--test", help="Execute the script for test purpose", dest="test", action="store_true", ) parser.add_argument( "--no-test", help="Execute the script for non-test purpose", dest="test", action="store_false", ) parser.set_defaults(test=False) args = parser.parse_args() print( "\n\n\n\n\n" "=========================================================\n" "The prediction can also be launched using the following command.\n" "deepreg_predict --gpu '' " f"--config_path demos/{name}/{name}.yaml " f"--ckpt_path demos/{name}/dataset/pretrained/ckpt-4000 " f"--log_root demos/{name} " "--log_dir logs_predict " "--save_png --mode test\n" "=========================================================\n" "\n\n\n\n\n" ) log_root = f"demos/{name}" log_dir = "logs_predict/" + datetime.now().strftime("%Y%m%d-%H%M%S") ckpt_path = f"{log_root}/dataset/pretrained/ckpt-4000" config_path = [f"{log_root}/{name}.yaml"] if args.test: config_path.append("config/test/demo_unpaired_grouped.yaml") predict( gpu="0", gpu_allow_growth=True, ckpt_path=ckpt_path, mode="test", batch_size=1, log_root=log_root, log_dir=log_dir, sample_label="all", config_path=config_path, )
26.866667
72
0.628412
import argparse from datetime import datetime from deepreg.predict import predict name = "grouped_mr_heart" parser = argparse.ArgumentParser() parser.add_argument( "--test", help="Execute the script for test purpose", dest="test", action="store_true", ) parser.add_argument( "--no-test", help="Execute the script for non-test purpose", dest="test", action="store_false", ) parser.set_defaults(test=False) args = parser.parse_args() print( "\n\n\n\n\n" "=========================================================\n" "The prediction can also be launched using the following command.\n" "deepreg_predict --gpu '' " f"--config_path demos/{name}/{name}.yaml " f"--ckpt_path demos/{name}/dataset/pretrained/ckpt-4000 " f"--log_root demos/{name} " "--log_dir logs_predict " "--save_png --mode test\n" "=========================================================\n" "\n\n\n\n\n" ) log_root = f"demos/{name}" log_dir = "logs_predict/" + datetime.now().strftime("%Y%m%d-%H%M%S") ckpt_path = f"{log_root}/dataset/pretrained/ckpt-4000" config_path = [f"{log_root}/{name}.yaml"] if args.test: config_path.append("config/test/demo_unpaired_grouped.yaml") predict( gpu="0", gpu_allow_growth=True, ckpt_path=ckpt_path, mode="test", batch_size=1, log_root=log_root, log_dir=log_dir, sample_label="all", config_path=config_path, )
true
true
f72c9cd27adbff3953b5021bda4fe373f564264d
2,110
py
Python
core/helper/config.py
caostorm/smng
f1cff4010a0645ae8e1182cd3c961d97cecf4a6e
[ "MIT" ]
null
null
null
core/helper/config.py
caostorm/smng
f1cff4010a0645ae8e1182cd3c961d97cecf4a6e
[ "MIT" ]
null
null
null
core/helper/config.py
caostorm/smng
f1cff4010a0645ae8e1182cd3c961d97cecf4a6e
[ "MIT" ]
1
2019-06-26T13:05:45.000Z
2019-06-26T13:05:45.000Z
#coding=utf-8 import json from core.helper.crypt import pwd_crypt from core.helper.globalvar import global_const import sys class options_config: class ErrorTypeNotSupport(BaseException): def __init__(self): pass def __str__(self): return "This type didn't support" def __init__(self): self._config_file_path = global_const().get_value('BASEDIR') + "/etc/options.json" with open(self._config_file_path, "a+") as f: try: # 读取文件,从文件初始化_config对象 f.seek(0) self._config = json.loads(f.read()) except: self._config = {} def _sync_file(self): with open(self._config_file_path, "w+") as f: f.write(json.dumps(self._config)) pass def write(self, key, value): obj = {} if type(value) == type('1.2'): # string obj['type'] = 'string' elif type(value) == type(1.2): # float obj['type'] = 'float' elif type(value) == type(1): # int obj['type'] = 'int' elif type(value) == type(True): # bool obj['type'] = 'bool' else: raise self.ErrorTypeNotSupport encrypto = pwd_crypt() obj['value'] = encrypto.encrypt(str(value)) self._config[key] = obj self._sync_file() def read(self, key): obj = self._config[key] encrypto = pwd_crypt() if obj['type'] == 'string': return str(encrypto.decrypt(obj['value'])) elif obj['type'] == 'float': return float(encrypto.decrypt(obj['value'])) elif obj['type'] == 'int': return int(encrypto.decrypt(obj['value'])) elif obj['type'] == 'bool': real_value = encrypto.decrypt(obj['value']) if 'True' == real_value: return True elif 'False' == real_value: return False elif obj['type'] == 'long': return long(encrypto.decrypt(obj['value']))
30.142857
90
0.519431
import json from core.helper.crypt import pwd_crypt from core.helper.globalvar import global_const import sys class options_config: class ErrorTypeNotSupport(BaseException): def __init__(self): pass def __str__(self): return "This type didn't support" def __init__(self): self._config_file_path = global_const().get_value('BASEDIR') + "/etc/options.json" with open(self._config_file_path, "a+") as f: try: # 读取文件,从文件初始化_config对象 f.seek(0) self._config = json.loads(f.read()) except: self._config = {} def _sync_file(self): with open(self._config_file_path, "w+") as f: f.write(json.dumps(self._config)) pass def write(self, key, value): obj = {} if type(value) == type('1.2'): # string obj['type'] = 'string' elif type(value) == type(1.2): # float obj['type'] = 'float' elif type(value) == type(1): # int obj['type'] = 'int' elif type(value) == type(True): # bool obj['type'] = 'bool' else: raise self.ErrorTypeNotSupport encrypto = pwd_crypt() obj['value'] = encrypto.encrypt(str(value)) self._config[key] = obj self._sync_file() def read(self, key): obj = self._config[key] encrypto = pwd_crypt() if obj['type'] == 'string': return str(encrypto.decrypt(obj['value'])) elif obj['type'] == 'float': return float(encrypto.decrypt(obj['value'])) elif obj['type'] == 'int': return int(encrypto.decrypt(obj['value'])) elif obj['type'] == 'bool': real_value = encrypto.decrypt(obj['value']) if 'True' == real_value: return True elif 'False' == real_value: return False elif obj['type'] == 'long': return long(encrypto.decrypt(obj['value']))
true
true
f72c9d1c53416fbc1312ed7ced97e6c382733715
12,177
py
Python
tensorflow_addons/layers/normalizations.py
tzachar/addons
e352207da32e4670a36a295ea477c476118cb0d9
[ "Apache-2.0" ]
null
null
null
tensorflow_addons/layers/normalizations.py
tzachar/addons
e352207da32e4670a36a295ea477c476118cb0d9
[ "Apache-2.0" ]
null
null
null
tensorflow_addons/layers/normalizations.py
tzachar/addons
e352207da32e4670a36a295ea477c476118cb0d9
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Orginal implementation from keras_contrib/layer/normalization # ============================================================================= from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging import tensorflow as tf @tf.keras.utils.register_keras_serializable(package='Addons') class GroupNormalization(tf.keras.layers.Layer): """Group normalization layer. Group Normalization divides the channels into groups and computes within each group the mean and variance for normalization. Empirically, its accuracy is more stable than batch norm in a wide range of small batch sizes, if learning rate is adjusted linearly with batch sizes. Relation to Layer Normalization: If the number of groups is set to 1, then this operation becomes identical to Layer Normalization. Relation to Instance Normalization: If the number of groups is set to the input dimension (number of groups is equal to number of channels), then this operation becomes identical to Instance Normalization. Arguments groups: Integer, the number of groups for Group Normalization. Can be in the range [1, N] where N is the input dimension. The input dimension must be divisible by the number of groups. axis: Integer, the axis that should be normalized. epsilon: Small float added to variance to avoid dividing by zero. 
center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: If True, multiply by `gamma`. If False, `gamma` is not used. beta_initializer: Initializer for the beta weight. gamma_initializer: Initializer for the gamma weight. beta_regularizer: Optional regularizer for the beta weight. gamma_regularizer: Optional regularizer for the gamma weight. beta_constraint: Optional constraint for the beta weight. gamma_constraint: Optional constraint for the gamma weight. Input shape Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape Same shape as input. References - [Group Normalization](https://arxiv.org/abs/1803.08494) """ def __init__(self, groups=2, axis=-1, epsilon=1e-3, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, **kwargs): super(GroupNormalization, self).__init__(**kwargs) self.supports_masking = True self.groups = groups self.axis = axis self.epsilon = epsilon self.center = center self.scale = scale self.beta_initializer = tf.keras.initializers.get(beta_initializer) self.gamma_initializer = tf.keras.initializers.get(gamma_initializer) self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer) self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer) self.beta_constraint = tf.keras.constraints.get(beta_constraint) self.gamma_constraint = tf.keras.constraints.get(gamma_constraint) self._check_axis() def build(self, input_shape): self._check_if_input_shape_is_none(input_shape) self._set_number_of_groups_for_instance_norm(input_shape) self._check_size_of_dimensions(input_shape) self._create_input_spec(input_shape) self._add_gamma_weight(input_shape) self._add_beta_weight(input_shape) self.built = True super(GroupNormalization, self).build(input_shape) def call(self, inputs): 
input_shape = tf.keras.backend.int_shape(inputs) tensor_input_shape = tf.shape(inputs) reshaped_inputs, group_shape = self._reshape_into_groups( inputs, input_shape, tensor_input_shape) normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape) outputs = tf.reshape(normalized_inputs, tensor_input_shape) return outputs def get_config(self): config = { 'groups': self.groups, 'axis': self.axis, 'epsilon': self.epsilon, 'center': self.center, 'scale': self.scale, 'beta_initializer': tf.keras.initializers.serialize(self.beta_initializer), 'gamma_initializer': tf.keras.initializers.serialize(self.gamma_initializer), 'beta_regularizer': tf.keras.regularizers.serialize(self.beta_regularizer), 'gamma_regularizer': tf.keras.regularizers.serialize(self.gamma_regularizer), 'beta_constraint': tf.keras.constraints.serialize(self.beta_constraint), 'gamma_constraint': tf.keras.constraints.serialize(self.gamma_constraint) } base_config = super(GroupNormalization, self).get_config() return dict(list(base_config.items()) + list(config.items())) def compute_output_shape(self, input_shape): return input_shape def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape): group_shape = [tensor_input_shape[i] for i in range(len(input_shape))] group_shape[self.axis] = input_shape[self.axis] // self.groups group_shape.insert(1, self.groups) group_shape = tf.stack(group_shape) reshaped_inputs = tf.reshape(inputs, group_shape) return reshaped_inputs, group_shape def _apply_normalization(self, reshaped_inputs, input_shape): group_shape = tf.keras.backend.int_shape(reshaped_inputs) group_reduction_axes = list(range(len(group_shape))) # Remember the ordering of the tensor is [batch, group , steps]. 
Jump # the first 2 to calculate the variance and the mean mean, variance = tf.nn.moments( reshaped_inputs, group_reduction_axes[2:], keepdims=True) gamma, beta = self._get_reshaped_weights(input_shape) normalized_inputs = tf.nn.batch_normalization( reshaped_inputs, mean=mean, variance=variance, scale=gamma, offset=beta, variance_epsilon=self.epsilon) return normalized_inputs def _get_reshaped_weights(self, input_shape): broadcast_shape = self._create_broadcast_shape(input_shape) gamma = None beta = None if self.scale: gamma = tf.reshape(self.gamma, broadcast_shape) if self.center: beta = tf.reshape(self.beta, broadcast_shape) return gamma, beta def _check_if_input_shape_is_none(self, input_shape): dim = input_shape[self.axis] if dim is None: raise ValueError('Axis ' + str(self.axis) + ' of ' 'input tensor should have a defined dimension ' 'but the layer received an input with shape ' + str(input_shape) + '.') def _set_number_of_groups_for_instance_norm(self, input_shape): dim = input_shape[self.axis] if self.groups == -1: self.groups = dim def _check_size_of_dimensions(self, input_shape): dim = input_shape[self.axis] if dim < self.groups: raise ValueError( 'Number of groups (' + str(self.groups) + ') cannot be ' 'more than the number of channels (' + str(dim) + ').') if dim % self.groups != 0: raise ValueError( 'Number of groups (' + str(self.groups) + ') must be a ' 'multiple of the number of channels (' + str(dim) + ').') def _check_axis(self): if self.axis == 0: raise ValueError( "You are trying to normalize your batch axis. 
Do you want to " "use tf.layer.batch_normalization instead") def _create_input_spec(self, input_shape): dim = input_shape[self.axis] self.input_spec = tf.keras.layers.InputSpec( ndim=len(input_shape), axes={self.axis: dim}) def _add_gamma_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.scale: self.gamma = self.add_weight( shape=shape, name='gamma', initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint) else: self.gamma = None def _add_beta_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.center: self.beta = self.add_weight( shape=shape, name='beta', initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint) else: self.beta = None def _create_broadcast_shape(self, input_shape): broadcast_shape = [1] * len(input_shape) broadcast_shape[self.axis] = input_shape[self.axis] // self.groups broadcast_shape.insert(1, self.groups) return broadcast_shape @tf.keras.utils.register_keras_serializable(package='Addons') class InstanceNormalization(GroupNormalization): """Instance normalization layer. Instance Normalization is an specific case of ```GroupNormalization```since it normalizes all features of one channel. The Groupsize is equal to the channel size. Empirically, its accuracy is more stable than batch norm in a wide range of small batch sizes, if learning rate is adjusted linearly with batch sizes. Arguments axis: Integer, the axis that should be normalized. epsilon: Small float added to variance to avoid dividing by zero. center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: If True, multiply by `gamma`. If False, `gamma` is not used. beta_initializer: Initializer for the beta weight. gamma_initializer: Initializer for the gamma weight. beta_regularizer: Optional regularizer for the beta weight. gamma_regularizer: Optional regularizer for the gamma weight. 
beta_constraint: Optional constraint for the beta weight. gamma_constraint: Optional constraint for the gamma weight. Input shape Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape Same shape as input. References - [Instance Normalization: The Missing Ingredient for Fast Stylization] (https://arxiv.org/abs/1607.08022) """ def __init__(self, **kwargs): if "groups" in kwargs: logging.warning("The given value for groups will be overwritten.") kwargs["groups"] = -1 super(InstanceNormalization, self).__init__(**kwargs)
38.292453
79
0.642687
from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging import tensorflow as tf @tf.keras.utils.register_keras_serializable(package='Addons') class GroupNormalization(tf.keras.layers.Layer): def __init__(self, groups=2, axis=-1, epsilon=1e-3, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, **kwargs): super(GroupNormalization, self).__init__(**kwargs) self.supports_masking = True self.groups = groups self.axis = axis self.epsilon = epsilon self.center = center self.scale = scale self.beta_initializer = tf.keras.initializers.get(beta_initializer) self.gamma_initializer = tf.keras.initializers.get(gamma_initializer) self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer) self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer) self.beta_constraint = tf.keras.constraints.get(beta_constraint) self.gamma_constraint = tf.keras.constraints.get(gamma_constraint) self._check_axis() def build(self, input_shape): self._check_if_input_shape_is_none(input_shape) self._set_number_of_groups_for_instance_norm(input_shape) self._check_size_of_dimensions(input_shape) self._create_input_spec(input_shape) self._add_gamma_weight(input_shape) self._add_beta_weight(input_shape) self.built = True super(GroupNormalization, self).build(input_shape) def call(self, inputs): input_shape = tf.keras.backend.int_shape(inputs) tensor_input_shape = tf.shape(inputs) reshaped_inputs, group_shape = self._reshape_into_groups( inputs, input_shape, tensor_input_shape) normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape) outputs = tf.reshape(normalized_inputs, tensor_input_shape) return outputs def get_config(self): config = { 'groups': self.groups, 'axis': self.axis, 'epsilon': self.epsilon, 'center': self.center, 'scale': self.scale, 'beta_initializer': 
tf.keras.initializers.serialize(self.beta_initializer), 'gamma_initializer': tf.keras.initializers.serialize(self.gamma_initializer), 'beta_regularizer': tf.keras.regularizers.serialize(self.beta_regularizer), 'gamma_regularizer': tf.keras.regularizers.serialize(self.gamma_regularizer), 'beta_constraint': tf.keras.constraints.serialize(self.beta_constraint), 'gamma_constraint': tf.keras.constraints.serialize(self.gamma_constraint) } base_config = super(GroupNormalization, self).get_config() return dict(list(base_config.items()) + list(config.items())) def compute_output_shape(self, input_shape): return input_shape def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape): group_shape = [tensor_input_shape[i] for i in range(len(input_shape))] group_shape[self.axis] = input_shape[self.axis] // self.groups group_shape.insert(1, self.groups) group_shape = tf.stack(group_shape) reshaped_inputs = tf.reshape(inputs, group_shape) return reshaped_inputs, group_shape def _apply_normalization(self, reshaped_inputs, input_shape): group_shape = tf.keras.backend.int_shape(reshaped_inputs) group_reduction_axes = list(range(len(group_shape))) mean, variance = tf.nn.moments( reshaped_inputs, group_reduction_axes[2:], keepdims=True) gamma, beta = self._get_reshaped_weights(input_shape) normalized_inputs = tf.nn.batch_normalization( reshaped_inputs, mean=mean, variance=variance, scale=gamma, offset=beta, variance_epsilon=self.epsilon) return normalized_inputs def _get_reshaped_weights(self, input_shape): broadcast_shape = self._create_broadcast_shape(input_shape) gamma = None beta = None if self.scale: gamma = tf.reshape(self.gamma, broadcast_shape) if self.center: beta = tf.reshape(self.beta, broadcast_shape) return gamma, beta def _check_if_input_shape_is_none(self, input_shape): dim = input_shape[self.axis] if dim is None: raise ValueError('Axis ' + str(self.axis) + ' of ' 'input tensor should have a defined dimension ' 'but the layer received an input with shape ' + 
str(input_shape) + '.') def _set_number_of_groups_for_instance_norm(self, input_shape): dim = input_shape[self.axis] if self.groups == -1: self.groups = dim def _check_size_of_dimensions(self, input_shape): dim = input_shape[self.axis] if dim < self.groups: raise ValueError( 'Number of groups (' + str(self.groups) + ') cannot be ' 'more than the number of channels (' + str(dim) + ').') if dim % self.groups != 0: raise ValueError( 'Number of groups (' + str(self.groups) + ') must be a ' 'multiple of the number of channels (' + str(dim) + ').') def _check_axis(self): if self.axis == 0: raise ValueError( "You are trying to normalize your batch axis. Do you want to " "use tf.layer.batch_normalization instead") def _create_input_spec(self, input_shape): dim = input_shape[self.axis] self.input_spec = tf.keras.layers.InputSpec( ndim=len(input_shape), axes={self.axis: dim}) def _add_gamma_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.scale: self.gamma = self.add_weight( shape=shape, name='gamma', initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint) else: self.gamma = None def _add_beta_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.center: self.beta = self.add_weight( shape=shape, name='beta', initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint) else: self.beta = None def _create_broadcast_shape(self, input_shape): broadcast_shape = [1] * len(input_shape) broadcast_shape[self.axis] = input_shape[self.axis] // self.groups broadcast_shape.insert(1, self.groups) return broadcast_shape @tf.keras.utils.register_keras_serializable(package='Addons') class InstanceNormalization(GroupNormalization): def __init__(self, **kwargs): if "groups" in kwargs: logging.warning("The given value for groups will be overwritten.") kwargs["groups"] = -1 super(InstanceNormalization, self).__init__(**kwargs)
true
true
f72c9e1c750207443829a4d4625294cef174db04
4,966
py
Python
restaurants/views.py
sunilsm7/django_resto
b7698653093af7e6f26dd0d0c7b8d6046b402ea4
[ "MIT" ]
1
2017-08-03T01:40:12.000Z
2017-08-03T01:40:12.000Z
restaurants/views.py
sunilsm7/django_resto
b7698653093af7e6f26dd0d0c7b8d6046b402ea4
[ "MIT" ]
null
null
null
restaurants/views.py
sunilsm7/django_resto
b7698653093af7e6f26dd0d0c7b8d6046b402ea4
[ "MIT" ]
null
null
null
from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib import messages from django.contrib.contenttypes.models import ContentType from django.core.paginator import Paginator from django.core.urlresolvers import reverse, reverse_lazy from django.db.models import Q from django.http import HttpResponse, HttpResponseRedirect from django.shortcuts import render, get_object_or_404, redirect from django.views import View from django.views.generic import ( CreateView, DetailView, ListView, TemplateView, UpdateView ) from django.views.generic.edit import FormView, FormMixin from django.views.generic.detail import SingleObjectMixin from comments.forms import CommentForm from comments.models import Comment from .forms import ( RestaurantCreateForm, RestaurantLocationCreateForm, RestaurantSearchForm ) from .models import RestaurantLocations # Create your views here. class RestaurantListView(ListView): template_name = 'restaurants/restaurants_list_all.html' paginate_by = 10 form_class = RestaurantSearchForm def get_queryset(self): query = self.request.GET.get('q') queryset = RestaurantLocations.objects.search(query) return queryset class RestaurantDetailView(DetailView, FormView): #form_class = CommentForm template_name = 'restaurants/restaurantlocations_detail.html' queryset = RestaurantLocations.objects.all() # def get_queryset(self): # queryset = RestaurantLocations.objects.all() # return queryset def get_context_data(self, **kwargs): comments = Comment.objects.filter(object_id=objects.id) context = super(RestaurantDetailView, self).get_context_data(**kwargs) return context def render(self, request): objects = get_object_or_404(RestaurantLocations, slug=self.kwargs.get('slug')) comments = Comment.objects.filter(object_id=objects.id) return render(request, 'restaurants/restaurantlocations_detail.html', {'comment_form': self.form, 'comments':comments, 'object':objects}) def get(self, request, 
*args, **kwargs): self.object = self.get_object() # initial_data = { # "content_type": self.object.get_content_type, # "object_id": self.object.id # } self.form = CommentForm(initial={"content_type": self.object.get_content_type,"object_id": self.object.id}) return self.render(request) #return super(RestaurantDetailView, self).get(request, *args, **kwargs) def post(self, request, *args, **kwargs): if not request.user.is_authenticated: return HttpResponseForbidden() self.object = self.get_object() self.form = CommentForm(request.POST or None) form = self.form if form.is_valid() and request.user.is_authenticated: c_type = form.cleaned_data["content_type"] content_qs = ContentType.objects.filter(app_label ='restaurants') content_type = content_qs.get(model='restaurantlocations') obj_id = form.cleaned_data['object_id'] content_data = form.cleaned_data["content"] parent_obj = None try: parent_id = int(request.POST.get("parent_id")) except: parent_id = None if parent_id: parent_qs = Comment.objects.filter(id=parent_id) if parent_qs.exists() and parent_qs.count() == 1: parent_obj = parent_qs.first() new_comment, created = Comment.objects.get_or_create( user = request.user, content_type= content_type, object_id = obj_id, content = content_data, parent = parent_obj, ) return HttpResponseRedirect(new_comment.get_absolute_url()) else: return self.render(request) #return super(RetaurantComment, self).post(request, *args, **kwargs) class MyRestaurantListView(LoginRequiredMixin, ListView): template_name = 'restaurants/restaurants_list.html' paginate_by = 10 def get_queryset(self): return RestaurantLocations.objects.filter(owner=self.request.user) class RestaurantCreateView(LoginRequiredMixin, CreateView): form_class = RestaurantLocationCreateForm template_name = 'form.html' # success_url = '/restaurants/' login_url = '/login/' def form_valid(self, form): instance = form.save(commit=False) instance.owner = self.request.user return super(RestaurantCreateView, 
self).form_valid(form) def get_context_data(self, *args, **kwargs): context = super(RestaurantCreateView, self).get_context_data(*args, **kwargs) context['title'] = 'Add Restaurant' return context class RestaurantUpdateView(LoginRequiredMixin, UpdateView): form_class = RestaurantLocationCreateForm template_name = 'restaurants/detail-update.html' # success_url = '/restaurants/' login_url = '/login/' def get_context_data(self, *args, **kwargs): context = super(RestaurantUpdateView, self).get_context_data(*args, **kwargs) name = self.get_object().name context['title'] = '{} {}'.format('Update Restaurant: ', name) return context def get_queryset(self): return RestaurantLocations.objects.filter(owner=self.request.user)
31.833333
139
0.762384
from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib import messages from django.contrib.contenttypes.models import ContentType from django.core.paginator import Paginator from django.core.urlresolvers import reverse, reverse_lazy from django.db.models import Q from django.http import HttpResponse, HttpResponseRedirect from django.shortcuts import render, get_object_or_404, redirect from django.views import View from django.views.generic import ( CreateView, DetailView, ListView, TemplateView, UpdateView ) from django.views.generic.edit import FormView, FormMixin from django.views.generic.detail import SingleObjectMixin from comments.forms import CommentForm from comments.models import Comment from .forms import ( RestaurantCreateForm, RestaurantLocationCreateForm, RestaurantSearchForm ) from .models import RestaurantLocations class RestaurantListView(ListView): template_name = 'restaurants/restaurants_list_all.html' paginate_by = 10 form_class = RestaurantSearchForm def get_queryset(self): query = self.request.GET.get('q') queryset = RestaurantLocations.objects.search(query) return queryset class RestaurantDetailView(DetailView, FormView): template_name = 'restaurants/restaurantlocations_detail.html' queryset = RestaurantLocations.objects.all() def get_context_data(self, **kwargs): comments = Comment.objects.filter(object_id=objects.id) context = super(RestaurantDetailView, self).get_context_data(**kwargs) return context def render(self, request): objects = get_object_or_404(RestaurantLocations, slug=self.kwargs.get('slug')) comments = Comment.objects.filter(object_id=objects.id) return render(request, 'restaurants/restaurantlocations_detail.html', {'comment_form': self.form, 'comments':comments, 'object':objects}) def get(self, request, *args, **kwargs): self.object = self.get_object() self.form = CommentForm(initial={"content_type": self.object.get_content_type,"object_id": 
self.object.id}) return self.render(request) def post(self, request, *args, **kwargs): if not request.user.is_authenticated: return HttpResponseForbidden() self.object = self.get_object() self.form = CommentForm(request.POST or None) form = self.form if form.is_valid() and request.user.is_authenticated: c_type = form.cleaned_data["content_type"] content_qs = ContentType.objects.filter(app_label ='restaurants') content_type = content_qs.get(model='restaurantlocations') obj_id = form.cleaned_data['object_id'] content_data = form.cleaned_data["content"] parent_obj = None try: parent_id = int(request.POST.get("parent_id")) except: parent_id = None if parent_id: parent_qs = Comment.objects.filter(id=parent_id) if parent_qs.exists() and parent_qs.count() == 1: parent_obj = parent_qs.first() new_comment, created = Comment.objects.get_or_create( user = request.user, content_type= content_type, object_id = obj_id, content = content_data, parent = parent_obj, ) return HttpResponseRedirect(new_comment.get_absolute_url()) else: return self.render(request) class MyRestaurantListView(LoginRequiredMixin, ListView): template_name = 'restaurants/restaurants_list.html' paginate_by = 10 def get_queryset(self): return RestaurantLocations.objects.filter(owner=self.request.user) class RestaurantCreateView(LoginRequiredMixin, CreateView): form_class = RestaurantLocationCreateForm template_name = 'form.html' login_url = '/login/' def form_valid(self, form): instance = form.save(commit=False) instance.owner = self.request.user return super(RestaurantCreateView, self).form_valid(form) def get_context_data(self, *args, **kwargs): context = super(RestaurantCreateView, self).get_context_data(*args, **kwargs) context['title'] = 'Add Restaurant' return context class RestaurantUpdateView(LoginRequiredMixin, UpdateView): form_class = RestaurantLocationCreateForm template_name = 'restaurants/detail-update.html' login_url = '/login/' def get_context_data(self, *args, **kwargs): context = 
super(RestaurantUpdateView, self).get_context_data(*args, **kwargs) name = self.get_object().name context['title'] = '{} {}'.format('Update Restaurant: ', name) return context def get_queryset(self): return RestaurantLocations.objects.filter(owner=self.request.user)
true
true
f72c9ff03b849eba70778f598d05555ab5123a75
1,072
py
Python
core/tests/test_managers/test_project.py
erexer/polyaxon
be14dae1ed56d568983388736bcdaf27a7baa4a4
[ "Apache-2.0" ]
null
null
null
core/tests/test_managers/test_project.py
erexer/polyaxon
be14dae1ed56d568983388736bcdaf27a7baa4a4
[ "Apache-2.0" ]
null
null
null
core/tests/test_managers/test_project.py
erexer/polyaxon
be14dae1ed56d568983388736bcdaf27a7baa4a4
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # # Copyright 2018-2020 Polyaxon, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from polyaxon_sdk import V1Project from tests.utils import BaseTestCase from polyaxon.managers.project import ProjectManager @pytest.mark.managers_mark class TestProjectManager(BaseTestCase): def test_default_props(self): assert ProjectManager.is_all_visibility() is True assert ProjectManager.IS_POLYAXON_DIR is True assert ProjectManager.CONFIG_FILE_NAME == ".project" assert ProjectManager.CONFIG == V1Project
33.5
74
0.767724
import pytest from polyaxon_sdk import V1Project from tests.utils import BaseTestCase from polyaxon.managers.project import ProjectManager @pytest.mark.managers_mark class TestProjectManager(BaseTestCase): def test_default_props(self): assert ProjectManager.is_all_visibility() is True assert ProjectManager.IS_POLYAXON_DIR is True assert ProjectManager.CONFIG_FILE_NAME == ".project" assert ProjectManager.CONFIG == V1Project
true
true
f72ca02b98c9b0c00c8385d82a02c58fe350bf58
16,524
py
Python
src/ner_model/typer/data_translator.py
fracivilization/distant_ner_using_thesaurus
cebfb2bd950123ce3ef18e501314778cc41de71e
[ "Apache-2.0" ]
null
null
null
src/ner_model/typer/data_translator.py
fracivilization/distant_ner_using_thesaurus
cebfb2bd950123ce3ef18e501314778cc41de71e
[ "Apache-2.0" ]
null
null
null
src/ner_model/typer/data_translator.py
fracivilization/distant_ner_using_thesaurus
cebfb2bd950123ce3ef18e501314778cc41de71e
[ "Apache-2.0" ]
null
null
null
import dataclasses from enum import unique import click import datasets from datasets import features from datasets.arrow_dataset import Dataset from datasets.dataset_dict import DatasetDict from src.ner_model.chunker.abstract_model import Chunker from src.utils.utils import remove_BIE import dataclasses from seqeval.metrics.sequence_labeling import get_entities from collections import defaultdict from logging import getLogger from src.utils.params import span_length from hydra.utils import get_original_cwd from hashlib import md5 import prettytable from src.ner_model.chunker import ChunkerConfig from omegaconf import MISSING logger = getLogger(__name__) @dataclasses.dataclass class MSCConfig: ner_dataset: str = MISSING output_dir: str = MISSING with_o: bool = False chunker: ChunkerConfig = ChunkerConfig() o_sampling_ratio: float = 1.0 # hard_o_sampling: bool = False # o_outside_entity: bool = False # weight_of_hard_o_for_easy_o: float = 0.5 # from tqdm import tqdm from collections import Counter import random def remove_misguided_fns(starts, ends, labels): new_starts, new_ends, new_labels = [], [], [] misguided_tokens = set() for s, e, l in zip(starts, ends, labels): if l == "MISGUIDANCE": for i in range(s, e): misguided_tokens.add(i) for s, e, l in zip(starts, ends, labels): if l != "MISGUIDANCE": if l.startswith("nc"): span = set(range(s, e)) if span & misguided_tokens: continue new_starts.append(s) new_ends.append(e) new_labels.append(l) return new_starts, new_ends, new_labels def undersample_thesaurus_negatives(pre_span_classification_dataset): label_counter = Counter( [label for snt in pre_span_classification_dataset["labels"] for label in snt] ) pass positive_labels = [ label for label in label_counter.keys() if not label.startswith("nc-") ] max_positive_count = max(label_counter[label] for label in positive_labels) thesaurus_negative_class_sampling_ratio = { label: max_positive_count / count for label, count in label_counter.items() if label != "nc-O" and 
label.startswith("nc-") } new_pre_span_classification_dataset = defaultdict(list) pscd = pre_span_classification_dataset for tokens, starts, ends, labels in zip( pscd["tokens"], pscd["starts"], pscd["ends"], pscd["labels"] ): new_starts = [] new_ends = [] new_labels = [] for s, e, l in zip(starts, ends, labels): if ( l != "nc-O" and l.startswith("nc-") and random.random() > thesaurus_negative_class_sampling_ratio[l] ): continue new_starts.append(s) new_ends.append(e) new_labels.append(l) new_pre_span_classification_dataset["tokens"].append(tokens) new_pre_span_classification_dataset["starts"].append(new_starts) new_pre_span_classification_dataset["ends"].append(new_ends) new_pre_span_classification_dataset["labels"].append(new_labels) return new_pre_span_classification_dataset def ner_datasets_to_span_classification_datasets( ner_datasets: datasets.DatasetDict, data_args: MSCConfig, enumerator: Chunker, ) -> datasets.DatasetDict: pre_span_classification_datasets = dict() label_names = sorted( set( [ remove_BIE(tag) for tag in ner_datasets["test"].features["ner_tags"].feature.names if tag != "O" ] ) ) if data_args.with_o: if "nc-O" not in label_names: label_names = ["nc-O"] + label_names info = datasets.DatasetInfo( features=datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "starts": datasets.Sequence(datasets.Value("int32")), "ends": datasets.Sequence(datasets.Value("int32")), "labels": datasets.Sequence(datasets.ClassLabel(names=label_names)), } ) ) for key in ner_datasets: pre_span_classification_dataset = defaultdict(list) ner_tag_labels = ner_datasets[key].features["ner_tags"].feature.names for snt in tqdm(ner_datasets[key]): registered_chunks = set() ner_tags = [ner_tag_labels[tag] for tag in snt["ner_tags"]] starts = [] ends = [] labels = [] for label, s, e in get_entities(ner_tags): starts.append(s) ends.append(e + 1) labels.append(label) registered_chunks.add((s, e)) if data_args.with_o and key in {"train", "validation"}: for s, e 
in enumerator.predict(snt["tokens"]): if ( (s, e) not in registered_chunks and data_args.o_sampling_ratio > random.random() ): starts.append(s) ends.append(e) labels.append("nc-O") starts, ends, labels = remove_misguided_fns(starts, ends, labels) if labels: pre_span_classification_dataset["tokens"].append(snt["tokens"]) pre_span_classification_dataset["starts"].append(starts) pre_span_classification_dataset["ends"].append(ends) pre_span_classification_dataset["labels"].append(labels) # if key == "train": # pre_span_classification_dataset = undersample_thesaurus_negatives( # pre_span_classification_dataset # ) pre_span_classification_datasets[key] = datasets.Dataset.from_dict( pre_span_classification_dataset, info=info ) return datasets.DatasetDict(pre_span_classification_datasets) import numpy as np def label_balancing_span_classification_datasets( span_classification_datasets: datasets.DatasetDict, o_and_min_label_count_ratio=1 ): ret_datasets = dict() if "test" in span_classification_datasets: info = datasets.DatasetInfo( features=span_classification_datasets["test"].features ) else: info = datasets.DatasetInfo( features=span_classification_datasets["train"].features ) for split_key, dataset_split in span_classification_datasets.items(): if split_key != "test": if "labels" in dataset_split.features: # for multi span classification datasets span_classification_dataset = { "tokens": [], "starts": [], "ends": [], "labels": [], } label_count = Counter( [l for snt in dataset_split["labels"] for l in snt] ) min_label_count = min(label_count.values()) logger.info("min label count: %d" % min_label_count) undersampling_ratio = { label: min_label_count / count for label, count in label_count.items() } for snt in tqdm(dataset_split): starts = [] ends = [] labels = [] for s, e, l in zip(snt["starts"], snt["ends"], snt["labels"]): if random.random() < undersampling_ratio[l]: starts.append(s) ends.append(e) labels.append(l) if labels: 
span_classification_dataset["tokens"].append(snt["tokens"]) span_classification_dataset["starts"].append(starts) span_classification_dataset["ends"].append(ends) span_classification_dataset["labels"].append(labels) ret_datasets[split_key] = datasets.Dataset.from_dict( span_classification_dataset, info=info ) elif "label" in dataset_split.features: # for one span classification datasets span_classification_dataset = { "tokens": [], "start": [], "end": [], "label": [], } label_names = dataset_split.features["label"].names label_count = Counter(dataset_split["label"]) min_label_count = min(label_count.values()) logger.info("min label count: %d" % min_label_count) undersampling_ratio = dict() for label, count in label_count.items(): if label_names[label] == "O": undersampling_ratio[label] = ( min_label_count / count * o_and_min_label_count_ratio ) else: undersampling_ratio[label] = min_label_count / count for snt in tqdm(dataset_split): if random.random() < undersampling_ratio[snt["label"]]: for key, value in snt.items(): span_classification_dataset[key].append(value) ret_datasets[split_key] = datasets.Dataset.from_dict( span_classification_dataset, info=info ) else: raise NotImplementedError else: ret_datasets[split_key] = dataset_split return datasets.DatasetDict(ret_datasets) import os from pathlib import Path def print_label_statistics(span_classification_datasets: datasets.DatasetDict): for split_key, dataset_split in span_classification_datasets.items(): if "label" in dataset_split.features: label_names = dataset_split.features["label"].names label_count = Counter([label_names[l] for l in dataset_split["label"]]) else: pass label_names = dataset_split.features["labels"].feature.names label_count = Counter( [label_names[l] for snt in dataset_split["labels"] for l in snt] ) logger.info("label count of %s split: %s" % (split_key, label_count)) from copy import deepcopy from typing import Dict, List import random def load_o_label_spans(unlabelled_corpus: Dataset, 
span_num: int) -> List: # 各文から取得するスパン数を指定 # 各文に対してspan_length長のスパンをかき集めてくる # 各文に定められた個数になるまでサンプリング # 全体の断片から決められたスパン数になるまでサンプリング pass snt_num = len(unlabelled_corpus) span_num_per_snt = int(span_num / snt_num) + 100 o_label_spans = [] for snt in unlabelled_corpus["tokens"]: spans = [ (s, e) for s in range(len(snt)) for e in range(s + 1, len(snt) + 1) if e - s <= MSCConfig.span_length ] for s, e in random.sample(spans, min(span_num_per_snt, len(spans))): o_label_spans.append(snt[s:e]) return random.sample(o_label_spans, min(span_num, len(o_label_spans))) import spacy from itertools import islice from dataclasses import MISSING, dataclass @dataclass class Term2CatBasedDatasetArgs: label_balance: bool = False pass def load_term2cat_based_span_classification_dataset( term2cat: Dict, unlabelled_corpus: Dataset, args: Term2CatBasedDatasetArgs ): tokenizer = spacy.load("en_core_sci_sm") tokenizer.remove_pipe("ner") dataset = {"tokens": [], "start": [], "end": [], "label": []} label_names = ["O"] + sorted(set(term2cat.values())) dict_label_count = Counter(term2cat.values()) if args.label_balance: over_sampling_ratio = { l: dict_label_count.most_common()[0][1] / dict_label_count[l] for l in dict_label_count } else: over_sampling_ratio = {l: 1 for l in dict_label_count} for term, cat in tqdm(term2cat.items()): osr = over_sampling_ratio[cat] tokenized_terms = tokenizer(term) while True: if 0 < osr < 1: if osr > random.random(): break elif osr <= 0: break dataset["tokens"].append([w.text for w in tokenized_terms]) dataset["start"].append(0) dataset["end"].append(len(tokenized_terms)) dataset["label"].append(label_names.index(cat)) osr -= 1 if args.label_balance: span_num = dict_label_count.most_common()[0][1] else: span_num = sum(dict_label_count.values()) o_labeled_spans = load_o_label_spans(unlabelled_corpus, span_num) for span in o_labeled_spans: dataset["tokens"].append(span) dataset["start"].append(0) dataset["end"].append(len(span)) 
dataset["label"].append(label_names.index("O")) features = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "start": datasets.Value("int32"), "end": datasets.Value("int32"), "label": datasets.ClassLabel(names=label_names), } ) # new_dataset_dictに追加 return Dataset.from_dict(dataset, features=features) def split_span_classification_dataset(datasets: Dataset): features = datasets.features split_num = int(len(datasets) * 0.9) splitted_datasets = dict() from random import shuffle indexes = list(range(len(datasets))) shuffle(indexes) splitted_datasets["train"] = Dataset.from_dict( datasets.__getitem__(indexes[:split_num]), features=features ) splitted_datasets["validation"] = Dataset.from_dict( datasets.__getitem__(indexes[split_num:]), features=features ) return DatasetDict(splitted_datasets) def join_span_classification_datasets( main_datasets: DatasetDict, sub_datasets: DatasetDict ): pass new_dataset_dict = dict() for key, split in main_datasets.items(): if key in sub_datasets: sub_split = sub_datasets[key] new_dataset = {feature: split[feature] for feature in split.features} main_label_names = split.features["label"].names sub_label_names = sub_split.features["label"].names assert len(main_label_names) == len(sub_label_names) assert len(split.features) == len(sub_split.features) label_map = { i: sub_label_names.index(l) for i, l in enumerate(main_label_names) } for feature in sub_split.features: if feature == "label": new_dataset[feature] += [label_map[l] for l in sub_split[feature]] else: new_dataset[feature] += sub_split[feature] new_dataset_dict[key] = Dataset.from_dict(new_dataset, split.features) else: new_dataset_dict[key] = split return DatasetDict(new_dataset_dict) def log_label_ratio(msc_datasets: DatasetDict): table = prettytable.PrettyTable(["Label", "Count", "Ratio (%)"]) pass train_dataset = msc_datasets["train"] label_names = train_dataset.features["labels"].feature.names c = Counter([label for snt in train_dataset["labels"] 
for label in snt]) label_sum = sum(c.values()) for lid, count in c.most_common(): table.add_row([label_names[lid], count, "%.2f" % (100 * count / label_sum)]) logger.info(table.get_string()) def translate_into_msc_datasets( ner_datasets: DatasetDict, msc_args: MSCConfig, enumerator: Chunker, ): input_hash = {k: v._fingerprint for k, v in ner_datasets.items()} input_hash["msc_args"] = str(msc_args) input_hash["enumerator"] = str(enumerator.config) output_dir = Path(get_original_cwd()).joinpath( "data", "buffer", md5(str(input_hash).encode()).hexdigest() ) logger.info("output_dir of msc_datasets: " + str(output_dir)) if not output_dir.exists(): msc_datasets = ner_datasets_to_span_classification_datasets( ner_datasets, msc_args, enumerator ) msc_datasets.save_to_disk(output_dir) else: msc_datasets = DatasetDict.load_from_disk(output_dir) log_label_ratio(msc_datasets) return msc_datasets
37.216216
86
0.60633
import dataclasses from enum import unique import click import datasets from datasets import features from datasets.arrow_dataset import Dataset from datasets.dataset_dict import DatasetDict from src.ner_model.chunker.abstract_model import Chunker from src.utils.utils import remove_BIE import dataclasses from seqeval.metrics.sequence_labeling import get_entities from collections import defaultdict from logging import getLogger from src.utils.params import span_length from hydra.utils import get_original_cwd from hashlib import md5 import prettytable from src.ner_model.chunker import ChunkerConfig from omegaconf import MISSING logger = getLogger(__name__) @dataclasses.dataclass class MSCConfig: ner_dataset: str = MISSING output_dir: str = MISSING with_o: bool = False chunker: ChunkerConfig = ChunkerConfig() o_sampling_ratio: float = 1.0 from tqdm import tqdm from collections import Counter import random def remove_misguided_fns(starts, ends, labels): new_starts, new_ends, new_labels = [], [], [] misguided_tokens = set() for s, e, l in zip(starts, ends, labels): if l == "MISGUIDANCE": for i in range(s, e): misguided_tokens.add(i) for s, e, l in zip(starts, ends, labels): if l != "MISGUIDANCE": if l.startswith("nc"): span = set(range(s, e)) if span & misguided_tokens: continue new_starts.append(s) new_ends.append(e) new_labels.append(l) return new_starts, new_ends, new_labels def undersample_thesaurus_negatives(pre_span_classification_dataset): label_counter = Counter( [label for snt in pre_span_classification_dataset["labels"] for label in snt] ) pass positive_labels = [ label for label in label_counter.keys() if not label.startswith("nc-") ] max_positive_count = max(label_counter[label] for label in positive_labels) thesaurus_negative_class_sampling_ratio = { label: max_positive_count / count for label, count in label_counter.items() if label != "nc-O" and label.startswith("nc-") } new_pre_span_classification_dataset = defaultdict(list) pscd = 
pre_span_classification_dataset for tokens, starts, ends, labels in zip( pscd["tokens"], pscd["starts"], pscd["ends"], pscd["labels"] ): new_starts = [] new_ends = [] new_labels = [] for s, e, l in zip(starts, ends, labels): if ( l != "nc-O" and l.startswith("nc-") and random.random() > thesaurus_negative_class_sampling_ratio[l] ): continue new_starts.append(s) new_ends.append(e) new_labels.append(l) new_pre_span_classification_dataset["tokens"].append(tokens) new_pre_span_classification_dataset["starts"].append(new_starts) new_pre_span_classification_dataset["ends"].append(new_ends) new_pre_span_classification_dataset["labels"].append(new_labels) return new_pre_span_classification_dataset def ner_datasets_to_span_classification_datasets( ner_datasets: datasets.DatasetDict, data_args: MSCConfig, enumerator: Chunker, ) -> datasets.DatasetDict: pre_span_classification_datasets = dict() label_names = sorted( set( [ remove_BIE(tag) for tag in ner_datasets["test"].features["ner_tags"].feature.names if tag != "O" ] ) ) if data_args.with_o: if "nc-O" not in label_names: label_names = ["nc-O"] + label_names info = datasets.DatasetInfo( features=datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "starts": datasets.Sequence(datasets.Value("int32")), "ends": datasets.Sequence(datasets.Value("int32")), "labels": datasets.Sequence(datasets.ClassLabel(names=label_names)), } ) ) for key in ner_datasets: pre_span_classification_dataset = defaultdict(list) ner_tag_labels = ner_datasets[key].features["ner_tags"].feature.names for snt in tqdm(ner_datasets[key]): registered_chunks = set() ner_tags = [ner_tag_labels[tag] for tag in snt["ner_tags"]] starts = [] ends = [] labels = [] for label, s, e in get_entities(ner_tags): starts.append(s) ends.append(e + 1) labels.append(label) registered_chunks.add((s, e)) if data_args.with_o and key in {"train", "validation"}: for s, e in enumerator.predict(snt["tokens"]): if ( (s, e) not in registered_chunks and 
data_args.o_sampling_ratio > random.random() ): starts.append(s) ends.append(e) labels.append("nc-O") starts, ends, labels = remove_misguided_fns(starts, ends, labels) if labels: pre_span_classification_dataset["tokens"].append(snt["tokens"]) pre_span_classification_dataset["starts"].append(starts) pre_span_classification_dataset["ends"].append(ends) pre_span_classification_dataset["labels"].append(labels) pre_span_classification_datasets[key] = datasets.Dataset.from_dict( pre_span_classification_dataset, info=info ) return datasets.DatasetDict(pre_span_classification_datasets) import numpy as np def label_balancing_span_classification_datasets( span_classification_datasets: datasets.DatasetDict, o_and_min_label_count_ratio=1 ): ret_datasets = dict() if "test" in span_classification_datasets: info = datasets.DatasetInfo( features=span_classification_datasets["test"].features ) else: info = datasets.DatasetInfo( features=span_classification_datasets["train"].features ) for split_key, dataset_split in span_classification_datasets.items(): if split_key != "test": if "labels" in dataset_split.features: span_classification_dataset = { "tokens": [], "starts": [], "ends": [], "labels": [], } label_count = Counter( [l for snt in dataset_split["labels"] for l in snt] ) min_label_count = min(label_count.values()) logger.info("min label count: %d" % min_label_count) undersampling_ratio = { label: min_label_count / count for label, count in label_count.items() } for snt in tqdm(dataset_split): starts = [] ends = [] labels = [] for s, e, l in zip(snt["starts"], snt["ends"], snt["labels"]): if random.random() < undersampling_ratio[l]: starts.append(s) ends.append(e) labels.append(l) if labels: span_classification_dataset["tokens"].append(snt["tokens"]) span_classification_dataset["starts"].append(starts) span_classification_dataset["ends"].append(ends) span_classification_dataset["labels"].append(labels) ret_datasets[split_key] = datasets.Dataset.from_dict( 
span_classification_dataset, info=info ) elif "label" in dataset_split.features: span_classification_dataset = { "tokens": [], "start": [], "end": [], "label": [], } label_names = dataset_split.features["label"].names label_count = Counter(dataset_split["label"]) min_label_count = min(label_count.values()) logger.info("min label count: %d" % min_label_count) undersampling_ratio = dict() for label, count in label_count.items(): if label_names[label] == "O": undersampling_ratio[label] = ( min_label_count / count * o_and_min_label_count_ratio ) else: undersampling_ratio[label] = min_label_count / count for snt in tqdm(dataset_split): if random.random() < undersampling_ratio[snt["label"]]: for key, value in snt.items(): span_classification_dataset[key].append(value) ret_datasets[split_key] = datasets.Dataset.from_dict( span_classification_dataset, info=info ) else: raise NotImplementedError else: ret_datasets[split_key] = dataset_split return datasets.DatasetDict(ret_datasets) import os from pathlib import Path def print_label_statistics(span_classification_datasets: datasets.DatasetDict): for split_key, dataset_split in span_classification_datasets.items(): if "label" in dataset_split.features: label_names = dataset_split.features["label"].names label_count = Counter([label_names[l] for l in dataset_split["label"]]) else: pass label_names = dataset_split.features["labels"].feature.names label_count = Counter( [label_names[l] for snt in dataset_split["labels"] for l in snt] ) logger.info("label count of %s split: %s" % (split_key, label_count)) from copy import deepcopy from typing import Dict, List import random def load_o_label_spans(unlabelled_corpus: Dataset, span_num: int) -> List: pass snt_num = len(unlabelled_corpus) span_num_per_snt = int(span_num / snt_num) + 100 o_label_spans = [] for snt in unlabelled_corpus["tokens"]: spans = [ (s, e) for s in range(len(snt)) for e in range(s + 1, len(snt) + 1) if e - s <= MSCConfig.span_length ] for s, e in 
random.sample(spans, min(span_num_per_snt, len(spans))): o_label_spans.append(snt[s:e]) return random.sample(o_label_spans, min(span_num, len(o_label_spans))) import spacy from itertools import islice from dataclasses import MISSING, dataclass @dataclass class Term2CatBasedDatasetArgs: label_balance: bool = False pass def load_term2cat_based_span_classification_dataset( term2cat: Dict, unlabelled_corpus: Dataset, args: Term2CatBasedDatasetArgs ): tokenizer = spacy.load("en_core_sci_sm") tokenizer.remove_pipe("ner") dataset = {"tokens": [], "start": [], "end": [], "label": []} label_names = ["O"] + sorted(set(term2cat.values())) dict_label_count = Counter(term2cat.values()) if args.label_balance: over_sampling_ratio = { l: dict_label_count.most_common()[0][1] / dict_label_count[l] for l in dict_label_count } else: over_sampling_ratio = {l: 1 for l in dict_label_count} for term, cat in tqdm(term2cat.items()): osr = over_sampling_ratio[cat] tokenized_terms = tokenizer(term) while True: if 0 < osr < 1: if osr > random.random(): break elif osr <= 0: break dataset["tokens"].append([w.text for w in tokenized_terms]) dataset["start"].append(0) dataset["end"].append(len(tokenized_terms)) dataset["label"].append(label_names.index(cat)) osr -= 1 if args.label_balance: span_num = dict_label_count.most_common()[0][1] else: span_num = sum(dict_label_count.values()) o_labeled_spans = load_o_label_spans(unlabelled_corpus, span_num) for span in o_labeled_spans: dataset["tokens"].append(span) dataset["start"].append(0) dataset["end"].append(len(span)) dataset["label"].append(label_names.index("O")) features = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "start": datasets.Value("int32"), "end": datasets.Value("int32"), "label": datasets.ClassLabel(names=label_names), } ) return Dataset.from_dict(dataset, features=features) def split_span_classification_dataset(datasets: Dataset): features = datasets.features split_num = int(len(datasets) * 0.9) 
splitted_datasets = dict() from random import shuffle indexes = list(range(len(datasets))) shuffle(indexes) splitted_datasets["train"] = Dataset.from_dict( datasets.__getitem__(indexes[:split_num]), features=features ) splitted_datasets["validation"] = Dataset.from_dict( datasets.__getitem__(indexes[split_num:]), features=features ) return DatasetDict(splitted_datasets) def join_span_classification_datasets( main_datasets: DatasetDict, sub_datasets: DatasetDict ): pass new_dataset_dict = dict() for key, split in main_datasets.items(): if key in sub_datasets: sub_split = sub_datasets[key] new_dataset = {feature: split[feature] for feature in split.features} main_label_names = split.features["label"].names sub_label_names = sub_split.features["label"].names assert len(main_label_names) == len(sub_label_names) assert len(split.features) == len(sub_split.features) label_map = { i: sub_label_names.index(l) for i, l in enumerate(main_label_names) } for feature in sub_split.features: if feature == "label": new_dataset[feature] += [label_map[l] for l in sub_split[feature]] else: new_dataset[feature] += sub_split[feature] new_dataset_dict[key] = Dataset.from_dict(new_dataset, split.features) else: new_dataset_dict[key] = split return DatasetDict(new_dataset_dict) def log_label_ratio(msc_datasets: DatasetDict): table = prettytable.PrettyTable(["Label", "Count", "Ratio (%)"]) pass train_dataset = msc_datasets["train"] label_names = train_dataset.features["labels"].feature.names c = Counter([label for snt in train_dataset["labels"] for label in snt]) label_sum = sum(c.values()) for lid, count in c.most_common(): table.add_row([label_names[lid], count, "%.2f" % (100 * count / label_sum)]) logger.info(table.get_string()) def translate_into_msc_datasets( ner_datasets: DatasetDict, msc_args: MSCConfig, enumerator: Chunker, ): input_hash = {k: v._fingerprint for k, v in ner_datasets.items()} input_hash["msc_args"] = str(msc_args) input_hash["enumerator"] = str(enumerator.config) 
output_dir = Path(get_original_cwd()).joinpath( "data", "buffer", md5(str(input_hash).encode()).hexdigest() ) logger.info("output_dir of msc_datasets: " + str(output_dir)) if not output_dir.exists(): msc_datasets = ner_datasets_to_span_classification_datasets( ner_datasets, msc_args, enumerator ) msc_datasets.save_to_disk(output_dir) else: msc_datasets = DatasetDict.load_from_disk(output_dir) log_label_ratio(msc_datasets) return msc_datasets
true
true
f72ca25004c0c4905aca487d4e9c73657cbe9a5d
482
py
Python
app/http/middleware/HelloWorldMiddleware.py
llaski/masonite-tutorial
f89dc88ccf7924b477dfe971fdb981a82e63d5fe
[ "MIT" ]
null
null
null
app/http/middleware/HelloWorldMiddleware.py
llaski/masonite-tutorial
f89dc88ccf7924b477dfe971fdb981a82e63d5fe
[ "MIT" ]
1
2021-06-02T00:33:40.000Z
2021-06-02T00:33:40.000Z
app/http/middleware/HelloWorldMiddleware.py
llaski/masonite-tutorial
f89dc88ccf7924b477dfe971fdb981a82e63d5fe
[ "MIT" ]
null
null
null
"""HelloWorld Middleware.""" from masonite.request import Request class HelloWorldMiddleware: """HelloWorld Middleware.""" def __init__(self, request: Request): """Inject Any Dependencies From The Service Container. Arguments: Request {masonite.request.Request} -- The Masonite request object """ self.request = request def before(self): print('Hello World') def after(self): print('Goodbye World')
21.909091
77
0.636929
from masonite.request import Request class HelloWorldMiddleware: def __init__(self, request: Request): self.request = request def before(self): print('Hello World') def after(self): print('Goodbye World')
true
true
f72ca260e47ced61e897e70195c321f15e9d783d
3,962
py
Python
misc/learnpy/k-means/loadiris.py
mutazag/mdsi
efecc8f650ddf6866154389f98d4ce0a9803db18
[ "MIT" ]
null
null
null
misc/learnpy/k-means/loadiris.py
mutazag/mdsi
efecc8f650ddf6866154389f98d4ce0a9803db18
[ "MIT" ]
null
null
null
misc/learnpy/k-means/loadiris.py
mutazag/mdsi
efecc8f650ddf6866154389f98d4ce0a9803db18
[ "MIT" ]
null
null
null
import pandas as pd from sklearn import datasets # load iris data set iris = datasets.load_iris() print(iris) species = [iris.target_names[x] for x in iris.target] iris = pd.DataFrame(iris['data'], columns = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width']) iris['Species'] = species iris.head() iris.dtypes # quick count iris['count'] = 1 iris[['Species', 'count']].groupby('Species').count() iris.groupby('Species').count() # plot the data set # %matplotlib inline def plot_iris(iris, col1, col2): print("plot_iris") import seaborn as sns import matplotlib.pyplot as plt sns.lmplot(x = col1, y=col2, data = iris, hue = "Species", fit_reg=False) plt.xlabel(col1) plt.ylabel(col2) plt.title('Iris species show by color') plt.show() plot_iris(iris, 'Petal_Width', 'Sepal_Length') plot_iris(iris, 'Sepal_Width', 'Sepal_Length') # preparing numeric featurs by scaling from sklearn.preprocessing import scale import pandas as pd num_cols = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'] iris_scaled = scale(iris[num_cols]) iris_scaled = pd.DataFrame(iris_scaled, columns = num_cols) print(iris_scaled.describe().round(3)) # coding string col 'species' as numeric using a dictionary levels = {'setosa':0, 'versicolor':1, 'virginica':2} # add coded species to the new scaled iris data frame iris_scaled['Species'] = [levels[x] for x in iris['Species']] iris_scaled.head() plot_iris(iris_scaled, 'Sepal_Width', 'Sepal_Length') ## split the data into training and tes using Bernoulli sampling from sklearn.model_selection import train_test_split import numpy as np np.random.seed(3456) iris_split = train_test_split(np.asmatrix(iris_scaled), test_size = 75) iris_train_features = iris_split[0][:,:4] iris_train_labels = np.ravel(iris_split[0][:,4]) iris_test_features = iris_split[1][:,:4] iris_test_labels = np.ravel(iris_split[1][:,4]) print(iris_train_features.shape) print(iris_train_labels.shape) print(iris_test_features.shape) print(iris_test_labels.shape) # Train and 
Eval KNN model #fit model from sklearn.neighbors import KNeighborsClassifier KNN_mod = KNeighborsClassifier(n_neighbors=3) # this is K KNN_mod.fit(iris_train_features, iris_train_labels) #test model on test data set iris_test = pd.DataFrame(iris_test_features, columns = num_cols) iris_test['predicted'] = KNN_mod.predict(iris_test_features) iris_test['actuals'] = iris_test_labels iris_test['correct'] = [1 if x == z else 0 for x, z in zip(iris_test['predicted'], iris_test_labels)] # calculate some accuracy measure accuracy = 100 * float(sum(iris_test['correct'])) / float(iris_test.shape[0]) print(accuracy) iris_test[iris_test.correct != 1] iris_test.loc[iris_test["correct"] != 1] # plotting the predicted values and highliting incorrectly classified observations levels = {0:'setosa', 1:'versicolor', 2:'virginica'} iris_test['Species'] = [levels[x] for x in iris_test['predicted']] markers = {1:'^', 0:'o'} colors = {'setosa':'blue', 'versicolor':'green', 'virginica':'red'} def plot_shapes(df, col1,col2, markers, colors): import matplotlib.pyplot as plt import seaborn as sns ax = plt.figure(figsize=(6, 6)).gca() # define plot axis for m in markers: # iterate over marker dictioary keys for c in colors: # iterate over color dictionary keys df_temp = df[(df['correct'] == m) & (df['Species'] == c)] sns.regplot(x = col1, y = col2, data = df_temp, fit_reg = False, scatter_kws={'color': colors[c]}, marker = markers[m], ax = ax) plt.xlabel(col1) plt.ylabel(col2) plt.title('Iris species by color') return 'Done' plot_shapes(iris_test, 'Petal_Width', 'Sepal_Length', markers, colors) plot_shapes(iris_test, 'Sepal_Width', 'Sepal_Length', markers, colors)
29.132353
108
0.694346
import pandas as pd from sklearn import datasets iris = datasets.load_iris() print(iris) species = [iris.target_names[x] for x in iris.target] iris = pd.DataFrame(iris['data'], columns = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width']) iris['Species'] = species iris.head() iris.dtypes iris['count'] = 1 iris[['Species', 'count']].groupby('Species').count() iris.groupby('Species').count() def plot_iris(iris, col1, col2): print("plot_iris") import seaborn as sns import matplotlib.pyplot as plt sns.lmplot(x = col1, y=col2, data = iris, hue = "Species", fit_reg=False) plt.xlabel(col1) plt.ylabel(col2) plt.title('Iris species show by color') plt.show() plot_iris(iris, 'Petal_Width', 'Sepal_Length') plot_iris(iris, 'Sepal_Width', 'Sepal_Length') from sklearn.preprocessing import scale import pandas as pd num_cols = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'] iris_scaled = scale(iris[num_cols]) iris_scaled = pd.DataFrame(iris_scaled, columns = num_cols) print(iris_scaled.describe().round(3)) levels = {'setosa':0, 'versicolor':1, 'virginica':2} iris_scaled['Species'] = [levels[x] for x in iris['Species']] iris_scaled.head() plot_iris(iris_scaled, 'Sepal_Width', 'Sepal_Length') mpy as np np.random.seed(3456) iris_split = train_test_split(np.asmatrix(iris_scaled), test_size = 75) iris_train_features = iris_split[0][:,:4] iris_train_labels = np.ravel(iris_split[0][:,4]) iris_test_features = iris_split[1][:,:4] iris_test_labels = np.ravel(iris_split[1][:,4]) print(iris_train_features.shape) print(iris_train_labels.shape) print(iris_test_features.shape) print(iris_test_labels.shape) from sklearn.neighbors import KNeighborsClassifier KNN_mod = KNeighborsClassifier(n_neighbors=3) KNN_mod.fit(iris_train_features, iris_train_labels) iris_test = pd.DataFrame(iris_test_features, columns = num_cols) iris_test['predicted'] = KNN_mod.predict(iris_test_features) iris_test['actuals'] = iris_test_labels iris_test['correct'] = [1 if x == z else 0 for x, z in 
zip(iris_test['predicted'], iris_test_labels)] accuracy = 100 * float(sum(iris_test['correct'])) / float(iris_test.shape[0]) print(accuracy) iris_test[iris_test.correct != 1] iris_test.loc[iris_test["correct"] != 1] levels = {0:'setosa', 1:'versicolor', 2:'virginica'} iris_test['Species'] = [levels[x] for x in iris_test['predicted']] markers = {1:'^', 0:'o'} colors = {'setosa':'blue', 'versicolor':'green', 'virginica':'red'} def plot_shapes(df, col1,col2, markers, colors): import matplotlib.pyplot as plt import seaborn as sns ax = plt.figure(figsize=(6, 6)).gca() for m in markers: for c in colors: df_temp = df[(df['correct'] == m) & (df['Species'] == c)] sns.regplot(x = col1, y = col2, data = df_temp, fit_reg = False, scatter_kws={'color': colors[c]}, marker = markers[m], ax = ax) plt.xlabel(col1) plt.ylabel(col2) plt.title('Iris species by color') return 'Done' plot_shapes(iris_test, 'Petal_Width', 'Sepal_Length', markers, colors) plot_shapes(iris_test, 'Sepal_Width', 'Sepal_Length', markers, colors)
true
true
f72ca2f478e2f86936751094fd9d66c1fab0a9ee
1,734
py
Python
run-gat-2-8.py
urialon/bottleneck
481fbb95edc6ae711da40b6305b40c12ce6a6d29
[ "MIT" ]
null
null
null
run-gat-2-8.py
urialon/bottleneck
481fbb95edc6ae711da40b6305b40c12ce6a6d29
[ "MIT" ]
null
null
null
run-gat-2-8.py
urialon/bottleneck
481fbb95edc6ae711da40b6305b40c12ce6a6d29
[ "MIT" ]
null
null
null
import main from common import Task, STOP, GNN_TYPE from attrdict import AttrDict from experiment import Experiment import torch override_params = { 2: {'batch_size': 64, 'eval_every': 1000}, 3: {'batch_size': 64}, 4: {'batch_size': 1024}, 5: {'batch_size': 1024}, 6: {'batch_size': 1024}, 7: {'batch_size': 2048}, 8: {'batch_size': 1024, 'accum_grad': 2}, # effective batch size of 2048, with less GPU memory } class Results: def __init__(self, train_acc, test_acc, epoch): self.train_acc = train_acc self.test_acc = test_acc self.epoch = epoch if __name__ == '__main__': task = Task.DICTIONARY gnn_type = GNN_TYPE.GAT stopping_criterion = STOP.TRAIN min_depth = 2 max_depth = 8 results_all_depths = {} for depth in range(min_depth, max_depth + 1): num_layers = depth + 1 args = main.get_fake_args(task=task, depth=depth, num_layers=num_layers, loader_workers=7, type=gnn_type, stop=stopping_criterion, no_activation=True, no_residual=False) if depth in override_params: for key, value in AttrDict(override_params[depth]).items(): args[key] = value train_acc, test_acc, epoch = Experiment(args).run() torch.cuda.empty_cache() results_all_depths[depth] = Results(train_acc=train_acc, test_acc=test_acc, epoch=epoch) print() print(f'Task: {task}') print('depth, train_acc, test_acc, epoch, train_acc, test_acc, epoch,') for depth in range(min_depth, max_depth + 1): res = results_all_depths[depth] print(f'{depth}, {res.train_acc}, {res.test_acc}, {res.epoch}')
33.346154
99
0.632641
import main from common import Task, STOP, GNN_TYPE from attrdict import AttrDict from experiment import Experiment import torch override_params = { 2: {'batch_size': 64, 'eval_every': 1000}, 3: {'batch_size': 64}, 4: {'batch_size': 1024}, 5: {'batch_size': 1024}, 6: {'batch_size': 1024}, 7: {'batch_size': 2048}, 8: {'batch_size': 1024, 'accum_grad': 2}, } class Results: def __init__(self, train_acc, test_acc, epoch): self.train_acc = train_acc self.test_acc = test_acc self.epoch = epoch if __name__ == '__main__': task = Task.DICTIONARY gnn_type = GNN_TYPE.GAT stopping_criterion = STOP.TRAIN min_depth = 2 max_depth = 8 results_all_depths = {} for depth in range(min_depth, max_depth + 1): num_layers = depth + 1 args = main.get_fake_args(task=task, depth=depth, num_layers=num_layers, loader_workers=7, type=gnn_type, stop=stopping_criterion, no_activation=True, no_residual=False) if depth in override_params: for key, value in AttrDict(override_params[depth]).items(): args[key] = value train_acc, test_acc, epoch = Experiment(args).run() torch.cuda.empty_cache() results_all_depths[depth] = Results(train_acc=train_acc, test_acc=test_acc, epoch=epoch) print() print(f'Task: {task}') print('depth, train_acc, test_acc, epoch, train_acc, test_acc, epoch,') for depth in range(min_depth, max_depth + 1): res = results_all_depths[depth] print(f'{depth}, {res.train_acc}, {res.test_acc}, {res.epoch}')
true
true
f72ca4cbe79a2f6143b41e5d9b7ad5d70a93a0a8
884
py
Python
enrich/followthemoney_enrich/cache.py
achievement008/followthemoney
bda06d62c81c82e62cd0c53117d8804939b40f62
[ "MIT" ]
137
2017-10-20T09:36:32.000Z
2022-03-24T18:49:16.000Z
enrich/followthemoney_enrich/cache.py
achievement008/followthemoney
bda06d62c81c82e62cd0c53117d8804939b40f62
[ "MIT" ]
505
2017-10-24T13:14:06.000Z
2022-03-28T20:21:45.000Z
enrich/followthemoney_enrich/cache.py
achievement008/followthemoney
bda06d62c81c82e62cd0c53117d8804939b40f62
[ "MIT" ]
32
2017-12-19T15:22:07.000Z
2022-02-18T11:01:28.000Z
import os import json from redis import Redis from normality import stringify class Cache(object): def get(self, key): return None def has(self, key): return self.get(key) is not None def store(self, key, value): pass class RedisCache(Cache): EXPIRE = 84600 * 90 URL = os.environ.get("ENRICH_REDIS_URL") def __init__(self): self.redis = Redis.from_url(self.URL) def _prefix_key(self, key): return "ftm:enrich:%s" % stringify(key) def store(self, key, value): key = self._prefix_key(key) self.redis.set(key, json.dumps(value), ex=self.EXPIRE) def get(self, key): value = self.redis.get(self._prefix_key(key)) if value is not None: return json.loads(value) def has(self, key): key = self._prefix_key(key) return self.redis.exists(key)
22.1
62
0.61991
import os import json from redis import Redis from normality import stringify class Cache(object): def get(self, key): return None def has(self, key): return self.get(key) is not None def store(self, key, value): pass class RedisCache(Cache): EXPIRE = 84600 * 90 URL = os.environ.get("ENRICH_REDIS_URL") def __init__(self): self.redis = Redis.from_url(self.URL) def _prefix_key(self, key): return "ftm:enrich:%s" % stringify(key) def store(self, key, value): key = self._prefix_key(key) self.redis.set(key, json.dumps(value), ex=self.EXPIRE) def get(self, key): value = self.redis.get(self._prefix_key(key)) if value is not None: return json.loads(value) def has(self, key): key = self._prefix_key(key) return self.redis.exists(key)
true
true
f72ca4f157e0f5d299e44df76de3bb9ba9ff45ad
13,454
py
Python
env/lib/python3.7/encodings/mac_cyrillic.py
JacobMiske/nuclear-database-APIs
bc9fb6afb9aa0d98dde5d744d8f22b2791597e78
[ "MIT" ]
null
null
null
env/lib/python3.7/encodings/mac_cyrillic.py
JacobMiske/nuclear-database-APIs
bc9fb6afb9aa0d98dde5d744d8f22b2791597e78
[ "MIT" ]
null
null
null
env/lib/python3.7/encodings/mac_cyrillic.py
JacobMiske/nuclear-database-APIs
bc9fb6afb9aa0d98dde5d744d8f22b2791597e78
[ "MIT" ]
1
2020-05-01T20:23:35.000Z
2020-05-01T20:23:35.000Z
""" Python Character Mapping Codec mac_cyrillic generated from 'MAPPINGS/VENDORS/APPLE/CYRILLIC.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module src def getregentry(): return codecs.CodecInfo( name='mac-cyrillic', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> CONTROL CHARACTER '\x01' # 0x01 -> CONTROL CHARACTER '\x02' # 0x02 -> CONTROL CHARACTER '\x03' # 0x03 -> CONTROL CHARACTER '\x04' # 0x04 -> CONTROL CHARACTER '\x05' # 0x05 -> CONTROL CHARACTER '\x06' # 0x06 -> CONTROL CHARACTER '\x07' # 0x07 -> CONTROL CHARACTER '\x08' # 0x08 -> CONTROL CHARACTER '\t' # 0x09 -> CONTROL CHARACTER '\n' # 0x0A -> CONTROL CHARACTER '\x0b' # 0x0B -> CONTROL CHARACTER '\x0c' # 0x0C -> CONTROL CHARACTER '\r' # 0x0D -> CONTROL CHARACTER '\x0e' # 0x0E -> CONTROL CHARACTER '\x0f' # 0x0F -> CONTROL CHARACTER '\x10' # 0x10 -> CONTROL CHARACTER '\x11' # 0x11 -> CONTROL CHARACTER '\x12' # 0x12 -> CONTROL CHARACTER '\x13' # 0x13 -> CONTROL CHARACTER '\x14' # 0x14 -> CONTROL CHARACTER '\x15' # 0x15 -> CONTROL CHARACTER '\x16' # 0x16 -> CONTROL CHARACTER '\x17' # 0x17 -> CONTROL CHARACTER '\x18' # 0x18 -> CONTROL CHARACTER '\x19' # 0x19 -> CONTROL 
CHARACTER '\x1a' # 0x1A -> CONTROL CHARACTER '\x1b' # 0x1B -> CONTROL CHARACTER '\x1c' # 0x1C -> CONTROL CHARACTER '\x1d' # 0x1D -> CONTROL CHARACTER '\x1e' # 0x1E -> CONTROL CHARACTER '\x1f' # 0x1F -> CONTROL CHARACTER ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' 
# 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER 
W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> CONTROL CHARACTER '\u0410' # 0x80 -> CYRILLIC CAPITAL LETTER A '\u0411' # 0x81 -> CYRILLIC CAPITAL LETTER BE '\u0412' # 0x82 -> CYRILLIC CAPITAL LETTER VE '\u0413' # 0x83 -> CYRILLIC CAPITAL LETTER GHE '\u0414' # 0x84 -> CYRILLIC CAPITAL LETTER DE '\u0415' # 0x85 -> CYRILLIC CAPITAL LETTER IE '\u0416' # 0x86 -> CYRILLIC CAPITAL LETTER ZHE '\u0417' # 0x87 -> CYRILLIC CAPITAL LETTER ZE '\u0418' # 0x88 -> CYRILLIC CAPITAL LETTER I '\u0419' # 0x89 -> CYRILLIC CAPITAL LETTER SHORT I '\u041a' # 0x8A -> CYRILLIC CAPITAL LETTER KA '\u041b' # 0x8B -> CYRILLIC CAPITAL LETTER EL '\u041c' # 0x8C -> CYRILLIC CAPITAL LETTER EM '\u041d' # 0x8D -> CYRILLIC CAPITAL LETTER EN '\u041e' # 0x8E -> CYRILLIC CAPITAL LETTER O '\u041f' # 0x8F -> CYRILLIC CAPITAL LETTER PE '\u0420' # 0x90 -> CYRILLIC CAPITAL LETTER ER '\u0421' # 0x91 -> CYRILLIC CAPITAL LETTER ES '\u0422' # 0x92 -> CYRILLIC CAPITAL LETTER TE '\u0423' # 0x93 -> CYRILLIC CAPITAL LETTER U '\u0424' # 0x94 -> CYRILLIC CAPITAL LETTER EF '\u0425' # 0x95 -> CYRILLIC CAPITAL LETTER HA '\u0426' # 0x96 -> CYRILLIC CAPITAL LETTER TSE '\u0427' # 0x97 -> CYRILLIC CAPITAL LETTER CHE '\u0428' # 0x98 -> CYRILLIC CAPITAL LETTER SHA '\u0429' # 0x99 -> CYRILLIC CAPITAL LETTER SHCHA '\u042a' # 0x9A -> CYRILLIC CAPITAL LETTER HARD SIGN '\u042b' # 0x9B -> CYRILLIC CAPITAL LETTER YERU '\u042c' # 0x9C -> CYRILLIC CAPITAL LETTER SOFT SIGN '\u042d' # 0x9D -> CYRILLIC CAPITAL LETTER E '\u042e' # 0x9E -> CYRILLIC CAPITAL LETTER YU '\u042f' # 0x9F -> CYRILLIC CAPITAL LETTER YA '\u2020' # 0xA0 -> DAGGER '\xb0' # 0xA1 -> DEGREE SIGN '\u0490' # 0xA2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN '\xa3' # 0xA3 -> POUND SIGN '\xa7' # 0xA4 -> SECTION SIGN '\u2022' # 0xA5 -> BULLET '\xb6' # 0xA6 -> PILCROW SIGN '\u0406' # 0xA7 -> 
CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I '\xae' # 0xA8 -> REGISTERED SIGN '\xa9' # 0xA9 -> COPYRIGHT SIGN '\u2122' # 0xAA -> TRADE MARK SIGN '\u0402' # 0xAB -> CYRILLIC CAPITAL LETTER DJE '\u0452' # 0xAC -> CYRILLIC SMALL LETTER DJE '\u2260' # 0xAD -> NOT EQUAL TO '\u0403' # 0xAE -> CYRILLIC CAPITAL LETTER GJE '\u0453' # 0xAF -> CYRILLIC SMALL LETTER GJE '\u221e' # 0xB0 -> INFINITY '\xb1' # 0xB1 -> PLUS-MINUS SIGN '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO '\u0456' # 0xB4 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I '\xb5' # 0xB5 -> MICRO SIGN '\u0491' # 0xB6 -> CYRILLIC SMALL LETTER GHE WITH UPTURN '\u0408' # 0xB7 -> CYRILLIC CAPITAL LETTER JE '\u0404' # 0xB8 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE '\u0454' # 0xB9 -> CYRILLIC SMALL LETTER UKRAINIAN IE '\u0407' # 0xBA -> CYRILLIC CAPITAL LETTER YI '\u0457' # 0xBB -> CYRILLIC SMALL LETTER YI '\u0409' # 0xBC -> CYRILLIC CAPITAL LETTER LJE '\u0459' # 0xBD -> CYRILLIC SMALL LETTER LJE '\u040a' # 0xBE -> CYRILLIC CAPITAL LETTER NJE '\u045a' # 0xBF -> CYRILLIC SMALL LETTER NJE '\u0458' # 0xC0 -> CYRILLIC SMALL LETTER JE '\u0405' # 0xC1 -> CYRILLIC CAPITAL LETTER DZE '\xac' # 0xC2 -> NOT SIGN '\u221a' # 0xC3 -> SQUARE ROOT '\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK '\u2248' # 0xC5 -> ALMOST EQUAL TO '\u2206' # 0xC6 -> INCREMENT '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS '\xa0' # 0xCA -> NO-BREAK SPACE '\u040b' # 0xCB -> CYRILLIC CAPITAL LETTER TSHE '\u045b' # 0xCC -> CYRILLIC SMALL LETTER TSHE '\u040c' # 0xCD -> CYRILLIC CAPITAL LETTER KJE '\u045c' # 0xCE -> CYRILLIC SMALL LETTER KJE '\u0455' # 0xCF -> CYRILLIC SMALL LETTER DZE '\u2013' # 0xD0 -> EN DASH '\u2014' # 0xD1 -> EM DASH '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK '\u2019' # 0xD5 -> RIGHT SINGLE 
QUOTATION MARK '\xf7' # 0xD6 -> DIVISION SIGN '\u201e' # 0xD7 -> DOUBLE LOW-9 QUOTATION MARK '\u040e' # 0xD8 -> CYRILLIC CAPITAL LETTER SHORT U '\u045e' # 0xD9 -> CYRILLIC SMALL LETTER SHORT U '\u040f' # 0xDA -> CYRILLIC CAPITAL LETTER DZHE '\u045f' # 0xDB -> CYRILLIC SMALL LETTER DZHE '\u2116' # 0xDC -> NUMERO SIGN '\u0401' # 0xDD -> CYRILLIC CAPITAL LETTER IO '\u0451' # 0xDE -> CYRILLIC SMALL LETTER IO '\u044f' # 0xDF -> CYRILLIC SMALL LETTER YA '\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A '\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE '\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE '\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE '\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE '\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE '\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE '\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE '\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I '\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I '\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA '\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL '\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM '\u043d' # 0xED -> CYRILLIC SMALL LETTER EN '\u043e' # 0xEE -> CYRILLIC SMALL LETTER O '\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE '\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER '\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES '\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE '\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U '\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF '\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA '\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE '\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE '\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA '\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA '\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN '\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU '\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN '\u044d' # 0xFD -> CYRILLIC SMALL LETTER E '\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU '\u20ac' # 0xFF -> EURO SIGN ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
43.681818
118
0.549353
import codecs c): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass nfo( name='mac-cyrillic', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) '\x01' '\x02' '\x03' '\x04' '\x05' '\x06' '\x07' '\x08' '\t' '\n' '\x0b' '\x0c' '\r' '\x0e' '\x0f' '\x10' '\x11' '\x12' '\x13' '\x14' '\x15' '\x16' '\x17' '\x18' '\x19' '\x1a' '\x1b' '\x1c' '\x1d' '\x1e' '\x1f' ' ' '!' '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' 
# 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER 
W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> CONTROL CHARACTER '\u0410' # 0x80 -> CYRILLIC CAPITAL LETTER A '\u0411' # 0x81 -> CYRILLIC CAPITAL LETTER BE '\u0412' # 0x82 -> CYRILLIC CAPITAL LETTER VE '\u0413' # 0x83 -> CYRILLIC CAPITAL LETTER GHE '\u0414' # 0x84 -> CYRILLIC CAPITAL LETTER DE '\u0415' # 0x85 -> CYRILLIC CAPITAL LETTER IE '\u0416' # 0x86 -> CYRILLIC CAPITAL LETTER ZHE '\u0417' # 0x87 -> CYRILLIC CAPITAL LETTER ZE '\u0418' # 0x88 -> CYRILLIC CAPITAL LETTER I '\u0419' # 0x89 -> CYRILLIC CAPITAL LETTER SHORT I '\u041a' # 0x8A -> CYRILLIC CAPITAL LETTER KA '\u041b' # 0x8B -> CYRILLIC CAPITAL LETTER EL '\u041c' # 0x8C -> CYRILLIC CAPITAL LETTER EM '\u041d' # 0x8D -> CYRILLIC CAPITAL LETTER EN '\u041e' # 0x8E -> CYRILLIC CAPITAL LETTER O '\u041f' # 0x8F -> CYRILLIC CAPITAL LETTER PE '\u0420' # 0x90 -> CYRILLIC CAPITAL LETTER ER '\u0421' # 0x91 -> CYRILLIC CAPITAL LETTER ES '\u0422' # 0x92 -> CYRILLIC CAPITAL LETTER TE '\u0423' # 0x93 -> CYRILLIC CAPITAL LETTER U '\u0424' # 0x94 -> CYRILLIC CAPITAL LETTER EF '\u0425' # 0x95 -> CYRILLIC CAPITAL LETTER HA '\u0426' # 0x96 -> CYRILLIC CAPITAL LETTER TSE '\u0427' # 0x97 -> CYRILLIC CAPITAL LETTER CHE '\u0428' # 0x98 -> CYRILLIC CAPITAL LETTER SHA '\u0429' # 0x99 -> CYRILLIC CAPITAL LETTER SHCHA '\u042a' # 0x9A -> CYRILLIC CAPITAL LETTER HARD SIGN '\u042b' # 0x9B -> CYRILLIC CAPITAL LETTER YERU '\u042c' # 0x9C -> CYRILLIC CAPITAL LETTER SOFT SIGN '\u042d' # 0x9D -> CYRILLIC CAPITAL LETTER E '\u042e' # 0x9E -> CYRILLIC CAPITAL LETTER YU '\u042f' # 0x9F -> CYRILLIC CAPITAL LETTER YA '\u2020' # 0xA0 -> DAGGER '\xb0' # 0xA1 -> DEGREE SIGN '\u0490' # 0xA2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN '\xa3' # 0xA3 -> POUND SIGN '\xa7' # 0xA4 -> SECTION SIGN '\u2022' # 0xA5 -> BULLET '\xb6' # 0xA6 -> PILCROW SIGN '\u0406' # 0xA7 -> 
CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I '\xae' # 0xA8 -> REGISTERED SIGN '\xa9' # 0xA9 -> COPYRIGHT SIGN '\u2122' # 0xAA -> TRADE MARK SIGN '\u0402' # 0xAB -> CYRILLIC CAPITAL LETTER DJE '\u0452' # 0xAC -> CYRILLIC SMALL LETTER DJE '\u2260' # 0xAD -> NOT EQUAL TO '\u0403' # 0xAE -> CYRILLIC CAPITAL LETTER GJE '\u0453' # 0xAF -> CYRILLIC SMALL LETTER GJE '\u221e' # 0xB0 -> INFINITY '\xb1' # 0xB1 -> PLUS-MINUS SIGN '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO '\u0456' # 0xB4 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I '\xb5' # 0xB5 -> MICRO SIGN '\u0491' # 0xB6 -> CYRILLIC SMALL LETTER GHE WITH UPTURN '\u0408' # 0xB7 -> CYRILLIC CAPITAL LETTER JE '\u0404' # 0xB8 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE '\u0454' # 0xB9 -> CYRILLIC SMALL LETTER UKRAINIAN IE '\u0407' # 0xBA -> CYRILLIC CAPITAL LETTER YI '\u0457' # 0xBB -> CYRILLIC SMALL LETTER YI '\u0409' # 0xBC -> CYRILLIC CAPITAL LETTER LJE '\u0459' # 0xBD -> CYRILLIC SMALL LETTER LJE '\u040a' # 0xBE -> CYRILLIC CAPITAL LETTER NJE '\u045a' # 0xBF -> CYRILLIC SMALL LETTER NJE '\u0458' # 0xC0 -> CYRILLIC SMALL LETTER JE '\u0405' # 0xC1 -> CYRILLIC CAPITAL LETTER DZE '\xac' # 0xC2 -> NOT SIGN '\u221a' # 0xC3 -> SQUARE ROOT '\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK '\u2248' # 0xC5 -> ALMOST EQUAL TO '\u2206' # 0xC6 -> INCREMENT '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS '\xa0' # 0xCA -> NO-BREAK SPACE '\u040b' # 0xCB -> CYRILLIC CAPITAL LETTER TSHE '\u045b' # 0xCC -> CYRILLIC SMALL LETTER TSHE '\u040c' # 0xCD -> CYRILLIC CAPITAL LETTER KJE '\u045c' # 0xCE -> CYRILLIC SMALL LETTER KJE '\u0455' # 0xCF -> CYRILLIC SMALL LETTER DZE '\u2013' # 0xD0 -> EN DASH '\u2014' # 0xD1 -> EM DASH '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK '\u2019' # 0xD5 -> RIGHT SINGLE 
QUOTATION MARK '\xf7' # 0xD6 -> DIVISION SIGN '\u201e' # 0xD7 -> DOUBLE LOW-9 QUOTATION MARK '\u040e' # 0xD8 -> CYRILLIC CAPITAL LETTER SHORT U '\u045e' # 0xD9 -> CYRILLIC SMALL LETTER SHORT U '\u040f' # 0xDA -> CYRILLIC CAPITAL LETTER DZHE '\u045f' # 0xDB -> CYRILLIC SMALL LETTER DZHE '\u2116' # 0xDC -> NUMERO SIGN '\u0401' # 0xDD -> CYRILLIC CAPITAL LETTER IO '\u0451' # 0xDE -> CYRILLIC SMALL LETTER IO '\u044f' # 0xDF -> CYRILLIC SMALL LETTER YA '\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A '\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE '\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE '\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE '\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE '\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE '\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE '\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE '\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I '\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I '\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA '\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL '\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM '\u043d' # 0xED -> CYRILLIC SMALL LETTER EN '\u043e' # 0xEE -> CYRILLIC SMALL LETTER O '\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE '\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER '\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES '\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE '\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U '\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF '\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA '\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE '\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE '\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA '\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA '\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN '\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU '\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN '\u044d' # 0xFD -> CYRILLIC SMALL LETTER E '\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU '\u20ac' # 0xFF -> EURO SIGN ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
true
true
f72ca5261e26e28890b2ead99f9ab8ea92310208
9,487
py
Python
test/fb_cases_util.py
savinshynu/turbo_seti
7d756f130af5a323403affcdcb9f9bfa62325836
[ "MIT" ]
33
2017-05-09T03:31:38.000Z
2022-03-26T01:29:35.000Z
test/fb_cases_util.py
savinshynu/turbo_seti
7d756f130af5a323403affcdcb9f9bfa62325836
[ "MIT" ]
284
2018-03-13T13:57:09.000Z
2022-03-30T21:59:34.000Z
test/fb_cases_util.py
savinshynu/turbo_seti
7d756f130af5a323403affcdcb9f9bfa62325836
[ "MIT" ]
116
2017-08-08T17:27:30.000Z
2022-03-24T21:24:40.000Z
r''' Utility functions for test_fb_cases.py ''' from os import mkdir, remove from os.path import dirname from shutil import rmtree import logging import pandas as pd import numpy as np import setigen as stg from turbo_seti.find_doppler.find_doppler import FindDoppler from fb_cases_def import HERE, DEBUGGING, RTOL_DIFF, TestResultRecord, SetigenParms DF_REFERENCE = HERE + '/fb_dat_reference.txt' SEP = r'\s+' def initialize(arg_dir): r''' Recreate working directory, TESTDIR. Load result reference tables (2). ''' rmtree(arg_dir, ignore_errors=True) mkdir(arg_dir) df = pd.read_csv(DF_REFERENCE, sep=SEP, engine='python', comment='#') nrows = len(df) if nrows < 1: raise ValueError('initialize: Empty reference table') if nrows % 2 != 0: raise ValueError('initialize: Reference table row count ({}) is not divisible by 2' .format(nrows)) if DEBUGGING: print('initialize: Test case reference results: \n', df) ref_tophit_1 = [] ref_tophit_2 = [] jj = 0 while jj < nrows: record = TestResultRecord() record.fdir = int(df['fdir'][jj]) record.drsign = int(df['drsign'][jj]) record.tophit_id = int(df['tophit'][jj]) record.drate = float(df['drate'][jj]) record.snr = float(df['snr'][jj]) record.freq = float(df['freq'][jj]) record.index = int(df['index'][jj]) ref_tophit_1.append(record) if DEBUGGING: print('initialize: appended for hit_1:\n', record.to_string() ) jj += 1 del record record = TestResultRecord() record.fdir = int(df['fdir'][jj]) record.drsign = int(df['drsign'][jj]) record.tophit_id = int(df['tophit'][jj]) record.drate = float(df['drate'][jj]) record.snr = float(df['snr'][jj]) record.freq = float(df['freq'][jj]) record.index = int(df['index'][jj]) ref_tophit_2.append(record) if DEBUGGING: print('initialize: appended for hit_2:\n', record.to_string() ) jj += 1 if DEBUGGING: print('initialize: {} test cases loaded.'.format(len(ref_tophit_1))) return ref_tophit_1, ref_tophit_2 def generate_fil_file(outpath, flag_fascending, flag_sign_drift_rate): r''' Using setigen, generate a 
filterbank file. Parameters: outpath - full path of where to store the resultant filterbank file. flag_fascending - use an ascending (+1) or descending (-1) sequence of frequencies flag_sign_drift_rate - use a positive (+1) or negative (-1) drift rate ''' if DEBUGGING: print('generate_fil_file: flag_fascending={}, flag_sign_drift_rate={}' .format(flag_fascending, flag_sign_drift_rate)) # Set up setigne parameters stg_parms = SetigenParms() if flag_sign_drift_rate < 0: stg_parms.drift_rate_1 = -stg_parms.drift_rate_1 stg_parms.drift_rate_2 = -stg_parms.drift_rate_2 stg_parms.drift_rate_3 = -stg_parms.drift_rate_3 stg_parms.drift_rate_4 = -stg_parms.drift_rate_4 stg_parms.drift_rate_5 = -stg_parms.drift_rate_5 # Instantiate a setigen Frame object frame = stg.Frame(fchans=stg_parms.fchans, tchans=stg_parms.tchans, df=stg_parms.df, dt=stg_parms.dt, fch1=stg_parms.fch1, ascending=(flag_fascending > 0)) # Add noise to stg object. frame.add_noise(x_mean=0, x_std=stg_parms.noise_std, noise_type='gaussian') # Signal 1 will be detected. signal_1_intensity = frame.get_intensity(snr=stg_parms.snr_1) frame.add_constant_signal(f_start=frame.get_frequency(stg_parms.signal_start_1), drift_rate=stg_parms.drift_rate_1, level=signal_1_intensity, width=stg_parms.width_1, f_profile_type='gaussian') # Signal 2 will be detected. signal_2_intensity = frame.get_intensity(snr=stg_parms.snr_2) frame.add_constant_signal(f_start=frame.get_frequency(stg_parms.signal_start_2), drift_rate=stg_parms.drift_rate_2, level=signal_2_intensity, width=stg_parms.width_2, f_profile_type='gaussian') # Signal 3 is a symmetric signal with three Gaussians # that will fall below the SNR requirements. 
signal_3_intensity = frame.get_intensity(snr=stg_parms.snr_3) frame.add_signal(stg.constant_path(f_start=frame.get_frequency(stg_parms.signal_start_3), drift_rate=stg_parms.drift_rate_3), stg.constant_t_profile(level=1), stg.multiple_gaussian_f_profile(width=stg_parms.width_3), stg.constant_bp_profile(level=signal_3_intensity)) # Signal 4 is a symmetric signal with three Gaussians # that will be drifting too quickly. signal_4_intensity = frame.get_intensity(snr=stg_parms.snr_4) frame.add_signal(stg.constant_path(f_start=frame.get_frequency(stg_parms.signal_start_4), drift_rate=stg_parms.drift_rate_4), stg.constant_t_profile(level=1), stg.multiple_gaussian_f_profile(width=stg_parms.width_4), stg.constant_bp_profile(level=signal_4_intensity)) # Signal 5 is similar to signal 4 but drifting in the opposite direction. signal_5_intensity = frame.get_intensity(snr=stg_parms.snr_5) frame.add_signal(stg.constant_path(f_start=frame.get_frequency(stg_parms.signal_start_5), drift_rate=stg_parms.drift_rate_5), stg.constant_t_profile(level=1), stg.multiple_gaussian_f_profile(width=stg_parms.width_5), stg.constant_bp_profile(level=signal_5_intensity)) # Save the frame as a filterbank file. frame.save_fil(filename=outpath) print("generate_fil_file: generated {}".format(outpath)) del frame def make_one_dat_file(arg_path_fil, min_drift=0.0, max_drift=4.0, min_snr=25.0, remove_h5=True): r''' Make a single DAT file: * Instantiate the FindDoppler class object. * With the object, search the H5, creating the DAT file and a LOG file (not used). 
''' if max_drift is None: raise ValueError('make_one_dat_file: max_drift not set') woutdir = dirname(arg_path_fil) fdop = FindDoppler(datafile=arg_path_fil, min_drift=min_drift, max_drift=max_drift, snr=min_snr, log_level_int=logging.WARNING, out_dir=woutdir) fdop.search() path_h5_file = arg_path_fil.replace('.fil', '.h5') if remove_h5: remove(path_h5_file) def get_case_results(arg_path_dat): r'''From the DAT file, extract the data for all top hits.''' df = pd.read_csv(arg_path_dat, header=None, sep=SEP, engine='python', comment='#') nrows = len(df) if nrows != 2: raise ValueError('get_case_results: Expected 2 rows in DAT but observed {} rows' .format(nrows)) obs_tophit_1 = TestResultRecord() obs_tophit_1.tophit_id = int(df[0][0]) # 1st col, 1st row obs_tophit_1.drate = float(df[1][0]) obs_tophit_1.snr = float(df[2][0]) obs_tophit_1.freq = float(df[4][0]) obs_tophit_1.index = int(df[5][0]) obs_tophit_2 = TestResultRecord() obs_tophit_2.tophit_id = int(df[0][1]) # 1st col, 2nd row obs_tophit_2.drate = float(df[1][1]) obs_tophit_2.snr = float(df[2][1]) obs_tophit_2.freq = float(df[4][1]) obs_tophit_2.index = int(df[5][1]) return obs_tophit_1, obs_tophit_2 def case_comparison(obs_tophit, ref_tophit, max_drift): r'''Compare DAT file observations to the reference.''' if obs_tophit is None: if ref_tophit is None: return # success, both None # ref_tophit defined, obs_tophit is None raise ValueError('case_comparison: FAILED, max_drift={}\nobs_tophit is None\nref_tophit:::{}' .format(max_drift, ref_tophit.to_string())) if ref_tophit is None: # obs_tophit defined, ref_tophit is None raise ValueError('case_comparison: FAILED, max_drift={}\nref_tophit is None\nobs_tophit:::{}' .format(max_drift, obs_tophit.to_string())) if obs_tophit.tophit_id == ref_tophit.tophit_id \ and np.isclose(obs_tophit.drate, ref_tophit.drate, rtol=RTOL_DIFF) \ and np.isclose(obs_tophit.snr, ref_tophit.snr, rtol=RTOL_DIFF) \ and np.isclose(obs_tophit.freq, ref_tophit.freq, rtol=RTOL_DIFF) \ and 
obs_tophit.index == ref_tophit.index: return # success # Some field(s) did not compare correctly. raise ValueError('case_comparison: FAILED, max_drift={}\nobs_tophit:::{}\nref_tophit:::{}' .format(max_drift, obs_tophit.to_string(), ref_tophit.to_string())) if __name__ == '__main__': # __main__ is a developer unit test, not normally to be executed. from fb_cases_def import TESTDIR, PATH_FIL_FILE, MIN_SNR rmtree(TESTDIR, ignore_errors=True) mkdir(TESTDIR) generate_fil_file(PATH_FIL_FILE, -1, -1) make_one_dat_file(PATH_FIL_FILE, max_drift=5, min_snr=MIN_SNR)
41.792952
101
0.64288
from os import mkdir, remove from os.path import dirname from shutil import rmtree import logging import pandas as pd import numpy as np import setigen as stg from turbo_seti.find_doppler.find_doppler import FindDoppler from fb_cases_def import HERE, DEBUGGING, RTOL_DIFF, TestResultRecord, SetigenParms DF_REFERENCE = HERE + '/fb_dat_reference.txt' SEP = r'\s+' def initialize(arg_dir): rmtree(arg_dir, ignore_errors=True) mkdir(arg_dir) df = pd.read_csv(DF_REFERENCE, sep=SEP, engine='python', comment='#') nrows = len(df) if nrows < 1: raise ValueError('initialize: Empty reference table') if nrows % 2 != 0: raise ValueError('initialize: Reference table row count ({}) is not divisible by 2' .format(nrows)) if DEBUGGING: print('initialize: Test case reference results: \n', df) ref_tophit_1 = [] ref_tophit_2 = [] jj = 0 while jj < nrows: record = TestResultRecord() record.fdir = int(df['fdir'][jj]) record.drsign = int(df['drsign'][jj]) record.tophit_id = int(df['tophit'][jj]) record.drate = float(df['drate'][jj]) record.snr = float(df['snr'][jj]) record.freq = float(df['freq'][jj]) record.index = int(df['index'][jj]) ref_tophit_1.append(record) if DEBUGGING: print('initialize: appended for hit_1:\n', record.to_string() ) jj += 1 del record record = TestResultRecord() record.fdir = int(df['fdir'][jj]) record.drsign = int(df['drsign'][jj]) record.tophit_id = int(df['tophit'][jj]) record.drate = float(df['drate'][jj]) record.snr = float(df['snr'][jj]) record.freq = float(df['freq'][jj]) record.index = int(df['index'][jj]) ref_tophit_2.append(record) if DEBUGGING: print('initialize: appended for hit_2:\n', record.to_string() ) jj += 1 if DEBUGGING: print('initialize: {} test cases loaded.'.format(len(ref_tophit_1))) return ref_tophit_1, ref_tophit_2 def generate_fil_file(outpath, flag_fascending, flag_sign_drift_rate): if DEBUGGING: print('generate_fil_file: flag_fascending={}, flag_sign_drift_rate={}' .format(flag_fascending, flag_sign_drift_rate)) stg_parms = 
SetigenParms() if flag_sign_drift_rate < 0: stg_parms.drift_rate_1 = -stg_parms.drift_rate_1 stg_parms.drift_rate_2 = -stg_parms.drift_rate_2 stg_parms.drift_rate_3 = -stg_parms.drift_rate_3 stg_parms.drift_rate_4 = -stg_parms.drift_rate_4 stg_parms.drift_rate_5 = -stg_parms.drift_rate_5 frame = stg.Frame(fchans=stg_parms.fchans, tchans=stg_parms.tchans, df=stg_parms.df, dt=stg_parms.dt, fch1=stg_parms.fch1, ascending=(flag_fascending > 0)) frame.add_noise(x_mean=0, x_std=stg_parms.noise_std, noise_type='gaussian') signal_1_intensity = frame.get_intensity(snr=stg_parms.snr_1) frame.add_constant_signal(f_start=frame.get_frequency(stg_parms.signal_start_1), drift_rate=stg_parms.drift_rate_1, level=signal_1_intensity, width=stg_parms.width_1, f_profile_type='gaussian') signal_2_intensity = frame.get_intensity(snr=stg_parms.snr_2) frame.add_constant_signal(f_start=frame.get_frequency(stg_parms.signal_start_2), drift_rate=stg_parms.drift_rate_2, level=signal_2_intensity, width=stg_parms.width_2, f_profile_type='gaussian') signal_3_intensity = frame.get_intensity(snr=stg_parms.snr_3) frame.add_signal(stg.constant_path(f_start=frame.get_frequency(stg_parms.signal_start_3), drift_rate=stg_parms.drift_rate_3), stg.constant_t_profile(level=1), stg.multiple_gaussian_f_profile(width=stg_parms.width_3), stg.constant_bp_profile(level=signal_3_intensity)) signal_4_intensity = frame.get_intensity(snr=stg_parms.snr_4) frame.add_signal(stg.constant_path(f_start=frame.get_frequency(stg_parms.signal_start_4), drift_rate=stg_parms.drift_rate_4), stg.constant_t_profile(level=1), stg.multiple_gaussian_f_profile(width=stg_parms.width_4), stg.constant_bp_profile(level=signal_4_intensity)) signal_5_intensity = frame.get_intensity(snr=stg_parms.snr_5) frame.add_signal(stg.constant_path(f_start=frame.get_frequency(stg_parms.signal_start_5), drift_rate=stg_parms.drift_rate_5), stg.constant_t_profile(level=1), stg.multiple_gaussian_f_profile(width=stg_parms.width_5), 
stg.constant_bp_profile(level=signal_5_intensity)) frame.save_fil(filename=outpath) print("generate_fil_file: generated {}".format(outpath)) del frame def make_one_dat_file(arg_path_fil, min_drift=0.0, max_drift=4.0, min_snr=25.0, remove_h5=True): if max_drift is None: raise ValueError('make_one_dat_file: max_drift not set') woutdir = dirname(arg_path_fil) fdop = FindDoppler(datafile=arg_path_fil, min_drift=min_drift, max_drift=max_drift, snr=min_snr, log_level_int=logging.WARNING, out_dir=woutdir) fdop.search() path_h5_file = arg_path_fil.replace('.fil', '.h5') if remove_h5: remove(path_h5_file) def get_case_results(arg_path_dat): df = pd.read_csv(arg_path_dat, header=None, sep=SEP, engine='python', comment='#') nrows = len(df) if nrows != 2: raise ValueError('get_case_results: Expected 2 rows in DAT but observed {} rows' .format(nrows)) obs_tophit_1 = TestResultRecord() obs_tophit_1.tophit_id = int(df[0][0]) obs_tophit_1.drate = float(df[1][0]) obs_tophit_1.snr = float(df[2][0]) obs_tophit_1.freq = float(df[4][0]) obs_tophit_1.index = int(df[5][0]) obs_tophit_2 = TestResultRecord() obs_tophit_2.tophit_id = int(df[0][1]) obs_tophit_2.drate = float(df[1][1]) obs_tophit_2.snr = float(df[2][1]) obs_tophit_2.freq = float(df[4][1]) obs_tophit_2.index = int(df[5][1]) return obs_tophit_1, obs_tophit_2 def case_comparison(obs_tophit, ref_tophit, max_drift): if obs_tophit is None: if ref_tophit is None: return raise ValueError('case_comparison: FAILED, max_drift={}\nobs_tophit is None\nref_tophit:::{}' .format(max_drift, ref_tophit.to_string())) if ref_tophit is None: raise ValueError('case_comparison: FAILED, max_drift={}\nref_tophit is None\nobs_tophit:::{}' .format(max_drift, obs_tophit.to_string())) if obs_tophit.tophit_id == ref_tophit.tophit_id \ and np.isclose(obs_tophit.drate, ref_tophit.drate, rtol=RTOL_DIFF) \ and np.isclose(obs_tophit.snr, ref_tophit.snr, rtol=RTOL_DIFF) \ and np.isclose(obs_tophit.freq, ref_tophit.freq, rtol=RTOL_DIFF) \ and obs_tophit.index == 
ref_tophit.index: return raise ValueError('case_comparison: FAILED, max_drift={}\nobs_tophit:::{}\nref_tophit:::{}' .format(max_drift, obs_tophit.to_string(), ref_tophit.to_string())) if __name__ == '__main__': from fb_cases_def import TESTDIR, PATH_FIL_FILE, MIN_SNR rmtree(TESTDIR, ignore_errors=True) mkdir(TESTDIR) generate_fil_file(PATH_FIL_FILE, -1, -1) make_one_dat_file(PATH_FIL_FILE, max_drift=5, min_snr=MIN_SNR)
true
true
f72ca647ec6c0d280fd1a1ba4d668d4a17a782b2
5,517
py
Python
backend/course/migrations/0001_initial.py
crowdbotics-apps/utawala-main-altar-29305
f450b7e301bc63a8400e7a9b0e39f4b7f931e2fd
[ "FTL", "AML", "RSA-MD" ]
null
null
null
backend/course/migrations/0001_initial.py
crowdbotics-apps/utawala-main-altar-29305
f450b7e301bc63a8400e7a9b0e39f4b7f931e2fd
[ "FTL", "AML", "RSA-MD" ]
null
null
null
backend/course/migrations/0001_initial.py
crowdbotics-apps/utawala-main-altar-29305
f450b7e301bc63a8400e7a9b0e39f4b7f931e2fd
[ "FTL", "AML", "RSA-MD" ]
null
null
null
# Generated by Django 2.2.24 on 2021-07-31 08:35 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=256)), ], ), migrations.CreateModel( name='Course', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(blank=True, max_length=256, null=True)), ('description', models.TextField(blank=True, null=True)), ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='course_author', to=settings.AUTH_USER_MODEL)), ('categories', models.ManyToManyField(blank=True, related_name='course_categories', to='course.Category')), ], ), migrations.CreateModel( name='Event', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=256)), ('date', models.DateTimeField()), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='event_user', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=256)), ], ), migrations.CreateModel( name='SubscriptionType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=256)), ], ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('subscription_type', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscription_subscription_type', to='course.SubscriptionType')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscription_user', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Recording', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('media', models.URLField()), ('published', models.DateTimeField()), ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recording_event', to='course.Event')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recording_user', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='PaymentMethod', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('primary', models.BooleanField()), ('token', models.CharField(max_length=256)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paymentmethod_user', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Module', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=256)), ('description', models.TextField()), ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='module_course', to='course.Course')), ], ), migrations.CreateModel( name='Lesson', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=256)), ('description', models.TextField()), ('media', models.URLField()), ('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lesson_module', to='course.Module')), ], ), migrations.CreateModel( name='Enrollment', fields=[ ('id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')), ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='enrollment_course', to='course.Course')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='enrollment_user', to=settings.AUTH_USER_MODEL)), ], ), ]
49.258929
179
0.595251
from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=256)), ], ), migrations.CreateModel( name='Course', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(blank=True, max_length=256, null=True)), ('description', models.TextField(blank=True, null=True)), ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='course_author', to=settings.AUTH_USER_MODEL)), ('categories', models.ManyToManyField(blank=True, related_name='course_categories', to='course.Category')), ], ), migrations.CreateModel( name='Event', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=256)), ('date', models.DateTimeField()), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='event_user', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=256)), ], ), migrations.CreateModel( name='SubscriptionType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=256)), ], ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('subscription_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, 
related_name='subscription_subscription_type', to='course.SubscriptionType')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscription_user', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Recording', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('media', models.URLField()), ('published', models.DateTimeField()), ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recording_event', to='course.Event')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recording_user', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='PaymentMethod', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('primary', models.BooleanField()), ('token', models.CharField(max_length=256)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paymentmethod_user', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Module', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=256)), ('description', models.TextField()), ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='module_course', to='course.Course')), ], ), migrations.CreateModel( name='Lesson', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=256)), ('description', models.TextField()), ('media', models.URLField()), ('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lesson_module', to='course.Module')), ], ), migrations.CreateModel( name='Enrollment', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('course', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='enrollment_course', to='course.Course')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='enrollment_user', to=settings.AUTH_USER_MODEL)), ], ), ]
true
true
f72ca651974209e177cd6e0b852ffe740ce4bc1b
57,027
py
Python
tensorflow/python/ipu/utils.py
DebeshJha/tensorflow-1
2b5a225c49d25273532d11c424d37ce394d7579a
[ "Apache-2.0" ]
2
2021-03-08T23:32:06.000Z
2022-01-13T03:43:49.000Z
tensorflow/python/ipu/utils.py
DebeshJha/tensorflow-1
2b5a225c49d25273532d11c424d37ce394d7579a
[ "Apache-2.0" ]
null
null
null
tensorflow/python/ipu/utils.py
DebeshJha/tensorflow-1
2b5a225c49d25273532d11c424d37ce394d7579a
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """ General utilities ~~~~~~~~~~~~~~~~~ """ import collections from enum import Enum import os import time import numpy as np from tensorflow.compiler.plugin.poplar.driver.config_pb2 import IpuOptions from tensorflow.compiler.plugin.poplar.driver.trace_pb2 import IpuTraceEvent from tensorflow.compiler.plugin.poplar.driver import config_pb2 from tensorflow.compiler.plugin.poplar.ops import gen_ipu_ops # pylint: disable=unused-import # These imports are only here to make it easier for the Tensorflow Wheel users # to use these functions: # ``` # from tensorflow.python import ipu # ... # ipu.utils.export_variables_from_live_session(...) 
# ``` from tensorflow.compiler.plugin.poplar.tools.tensorflow_weights_extractor import ( export_variables_from_live_session, export_variables_from_live_model, import_data_in_live_session, import_data_in_live_model) # pylint: enable=unused-import from tensorflow.compat.v1 import executing_eagerly from tensorflow.core.framework import attr_value_pb2 from tensorflow.python.client import session as session_lib from tensorflow.python.distribute import values from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.util import deprecation from tensorflow.python.data.ops import dataset_ops from tensorflow.python.ipu import ipu_infeed_queue from tensorflow.python.ipu import dataset_extractor class SelectionOrder(Enum): """Depending on the communication pattern of the model, the order in which the IPUs are selected and mapped to shards can impact the performance. For example, given a model which executes on multiple IPUs: .. code-block:: python def sharded_graph(pa, pb, pc, pd): with ipu.scopes.ipu_shard(0): o1 = pa + pb with ipu.scopes.ipu_shard(1): o2 = o1 + pc with ipu.scopes.ipu_shard(2): o3 = o2 + pd return o3 and a typical machine with 8 Graphcore C2 cards: .. 
code-block:: none _______ _______ | | | | | 14 |=============| 15 | |_______| |_______| || || _______ _______ | | | | | 12 |=============| 13 | |_______| |_______| || || _______ _______ | | | | | 10 |=============| 11 | |_______| |_______| || || _______ _______ | | | | | 8 |=============| 9 | |_______| |_______| || || _______ _______ | | | | | 6 |=============| 7 | |_______| |_______| || || _______ _______ | | | | | 4 |=============| 5 | |_______| |_______| || || _______ _______ | | | | | 2 |=============| 3 | |_______| |_______| || || _______ _______ | | | | | 0 |=============| 1 | |_______| |_______| (where each numbered square represents an IPU with the given device ID and the == and || connections represent IPUs being directly connected via IPU-Links) we can see that the `ipu_shard(0)` directly communicates with `ipu_shard(1)` and that `ipu_shard(1)` directly communicates with `ipu_shard(2)`. If the shards 0, 1, 2 were mapped to IPUs 0, 1, 2 in that order, then the communication between shards 1 and 2 would not have a direct connection via an IPU-Link and would have to perform a "hop" via an IPU. If the shards 0, 1, 2 were mapped to IPUs 0, 1, 3 in that order, then the communication between shards 1 and 2 would have a direct connection via an IPU-Link which will reduce the communication cost. This Enum class is used to control the order in which the IPUs are selected. Currently, the following IPU selection orderings are supported: * `AUTO`: automatically try and select the best selection given the network. * `ZIGZAG`: follow the natural ordering of IPUs. In the above example, the IPUs would be selected in the following order: `0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15`. * `SNAKE`: select IPUs such that each consecutive shard is directly connected via IPU-Links to the shard before and after. In the above example, the IPUs would be selected in the following order: `0, 1, 3, 2, 4, 5, 7, 6, 8, 9, 11, 10, 12, 13, 15, 14`. 
* `HOOF`: select IPUs such that each consecutive shard is directly connected via IPU-Links to the shard before and after and the last and first shard are on the same C2 cards. In the above example, the IPUs would be selected in the following order: `0, 2, 4, 6, 8, 10, 12, 14, 15, 13, 11, 9, 7, 5, 3, 1`. The `SNAKE` and `HOOF` IPU selection orders are particularly beneficial for pipelined models. """ AUTO = config_pb2.IpuSelectionOrder.Value("AUTO") ZIGZAG = config_pb2.IpuSelectionOrder.Value("ZIGZAG") SNAKE = config_pb2.IpuSelectionOrder.Value("SNAKE") HOOF = config_pb2.IpuSelectionOrder.Value("HOOF") class ExecutionProfileType(Enum): """The execution profile type indicates the desired information in the execution profile. * `NO_PROFILE` indicates that there should be no execution profiling. * `DEVICE_PROFILE` indicates that the execution profile should contain only device wide events. * `IPU_PROFILE` indicates that the profile should contain IPU level execution events. * `TILE_PROFILE` indicates that the profile should contain Tile level execution events. """ NO_PROFILE = config_pb2.IpuExecutionProfileType.Value("NO_PROFILE") DEVICE_PROFILE = config_pb2.IpuExecutionProfileType.Value("DEVICE_PROFILE") IPU_PROFILE = config_pb2.IpuExecutionProfileType.Value("IPU_PROFILE") TILE_PROFILE = config_pb2.IpuExecutionProfileType.Value("TILE_PROFILE") class DeviceConnectionType(Enum): """Enumeration to describe the mechanism used to attach to the Poplar device. * `ALWAYS` indicates that the system will attach when configuring the device. * `ON_DEMAND` will defer connection to when the IPU is needed. * `NEVER` will never try to attach to a device. Used when compiling offline. """ ALWAYS = config_pb2.IpuDeviceConnectionType.Value("ALWAYS") ON_DEMAND = config_pb2.IpuDeviceConnectionType.Value("ON_DEMAND") NEVER = config_pb2.IpuDeviceConnectionType.Value("NEVER") def configure_ipu_system(config, device="cpu"): """Configure an IPU system. 
Passing an IpuOptions protobuf created by the ``create_ipu_config`` function. Args: config: An IpuOptions configuration protobuf device: The CPU device which is local to the IPU hardware Returns: None """ if not isinstance(config, config_pb2.IpuOptions): raise Exception("`config` must be an IpuOptions instance") g = ops.Graph() with g.as_default(): with ops.device(device): cfg_op = gen_ipu_ops.ipu_configure_hardware(config.SerializeToString()) with session_lib.Session(graph=g) as sess: sess.run(cfg_op) def get_ipu_config(session=None): """Get the configuration of an IPU system. Args: session: An optional session on which to execute. Returns: A list of IpuOption instances, one for each PoplarExecutor. """ configurations = None # Get the serialized output. if executing_eagerly(): assert not session, "No session is required for eager execution." configurations = gen_ipu_ops.ipu_get_configuration().numpy() else: s = session if session else session_lib.Session() configurations = s.run(gen_ipu_ops.ipu_get_configuration()) # Deserialize and determine if a valid config exists, # i.e. user has succesfully called ipu_configure_hardware. deserialized = [] valid = False for conf in configurations: # Deserialize. opt = IpuOptions() opt.ParseFromString(conf) deserialized.append(opt) valid |= len(opt.device_config) > 0 if not valid: raise RuntimeError("No IPU devices configured.") return deserialized def get_num_of_ipus_in_device(ipu_device, device="cpu"): """Get the number of physical IPUs Args: ipu_device: The IPU device for which to get the number of devices for. device: The CPU device which is local to the IPU hardware. Returns: A number of physical IPUs configured for a particular TF device. """ g = ops.Graph() with g.as_default(): with ops.device(device): cfg_op = gen_ipu_ops.ipu_get_num_devices(ipu_device) with session_lib.Session(graph=g) as sess: return sess.run(cfg_op) def running_on_ipu_model(): """ Check if XLA is configured to run on the ipu model. 
Returns: True if XLA is configured to run on the ipu model. False if XLA is configured to run on real hardware. """ return "--use_ipu_model" in os.environ.get("TF_POPLAR_FLAGS", "") @deprecation.deprecated_args(None, "Use set_optimization_options() instead.", "max_cross_replica_sum_buffer_size", "max_inter_ipu_copies_buffer_size") def create_ipu_config(profiling=False, enable_ipu_events=False, use_poplar_text_report=False, use_poplar_cbor_report=False, profile_execution=None, enable_poplar_serialized_graph=False, report_every_nth_execution=0, max_report_size=0x10000000, report_directory="", scheduler_selection="", always_rearrange_copies_on_the_host=False, merge_infeed_io_copies=False, disable_graph_convolution_caching=False, disable_graph_outlining=False, retain_control_dependencies=False, max_cross_replica_sum_buffer_size=0, max_inter_ipu_copies_buffer_size=0, max_scheduler_lookahead_depth=5, max_scheduler_search_space_size=64, prefetch_data_streams=True, selection_order=None, enable_experimental_remote_buffer_embedding=False): """Create an empty IPU session configuration structure. Args: profiling: Enable compilation reports, and IPU trace events. enable_ipu_events: Enable IPU trace events without poplar reports. use_poplar_text_report: Enable the Poplar textual report summary. use_poplar_cbor_report: Enable the Poplar CBOR reports. profile_execution: Include Poplar execution profiles in the execution events. Can only be enabled if `profiling` is also enabled. If set, can be `True`, 'False`, or a member of the `ExecutionProfileType` enumeration. A `True` value indicates `ExecutionProfileType.DEVICE_PROFILE`. enable_poplar_serialized_graph: Create the Poplar serialized graph and include in the IPU compilation trace events. report_every_nth_execution: Only produce an execution report on every Nth execution. 0 = One report only. max_report_size: The maximum size of Poplar profiles to include in the profile events. 
report_directory: When set, reports will be written to files in this directory, instead of being written into the events. The events will contain the full paths of the report files. scheduler_selection: When set, this forces the compiler to use a specific scheduler when ordering the instructions. See the documentation for a list of valid schedulers. always_rearrange_copies_on_the_host: *** Experimental Flag *** The data which is streamed to/from the device might be stored in different layouts on the device and on the host. If that is the case the rearrangment is performed on the device by default. By enabling this option the rearrangment will be performed on the host at the expense of latency. merge_infeed_io_copies: When true, this flag will merge the streamed host->device input copies into one larger copy. This may reduce the time to copy data from the host, at the expense of increasing the live tensor memory on the device. disable_graph_convolution_caching: By default, the convolution operation searches for an equivalent cached operation, and uses this instead of creating a new convolution. Setting this flag forces the creation of a new convolution. This can improve runtime at the expense of graph size. disable_graph_outlining: By default, some operations, such as matrix multiplications, which occur in the graph multiple times but with different input tensors might be optimised to reduce the total code size of the graph at the expense of the execution time. Setting this flag will disable these optimisations. This option is not valid for the convolution operation (also see disable_graph_convolution_caching) retain_control_dependencies: Deprecated. max_cross_replica_sum_buffer_size: The maximum number of bytes that can be waiting before a cross replica sum op is scheduled. max_inter_ipu_copies_buffer_size: The maximum number of bytes that can be waiting before a inter IPU copy between IPUs is scheduled. 
max_scheduler_lookahead_depth: The maximum distance to look into the future when considering valid schedules. max_scheduler_search_space_size: The maximum number of nodes to consider when building the tree of future schedules. prefetch_data_streams: When set to true, the prefetching of data for data streams on the host will be overlapped with execution on the IPU. selection_order: the order in which IPUs are selected and mapped to physical IPU devices when using a multi-IPU devices (see `SelectionOrder`). When not specified, then automatic selection order is used, otherwise an instance of `SelectionOrder`. enable_experimental_remote_buffer_embedding: When set to true, `HostEmbedding` will make use of poplar remote buffers. Returns: An IpuOptions configuration protobuf, suitable for passing to ``configure_ipu_system`` """ if profiling and enable_ipu_events: raise Exception( "`profiling` and `enable_ipu_events` are mutually exclusive") if retain_control_dependencies: raise Exception("`retain_control_dependencies` is deprecated") selection_order = selection_order if selection_order else SelectionOrder.AUTO profile_execution = profile_execution if profile_execution \ else ExecutionProfileType.NO_PROFILE if isinstance(profile_execution, (np.bool_, bool)): if profile_execution: profile_execution = ExecutionProfileType.DEVICE_PROFILE else: profile_execution = ExecutionProfileType.NO_PROFILE if (profile_execution != ExecutionProfileType.NO_PROFILE and not profiling): raise Exception("`profiling` is required when `profile_execution` is set") if not isinstance(profile_execution, ExecutionProfileType): raise Exception("`profile_execution` must be True, False, or an " "ExecutionProfileType instance") opts = config_pb2.IpuOptions() # Default initialize IpuOptions() attributes here. 
opts.creator_id = config_pb2.IpuOptionsCreator.IPU_UTILS opts.ipu_model_config.compile_ipu_code = True opts.enable_multi_slice_combiner = False opts.enable_matmul_combiner = False opts.enable_gather_simplifier = False opts.device_connection_type = DeviceConnectionType.ALWAYS.value opts.speed_size_config.allow_recompute = False # Configure IpuOptions according to the passed arguments. opts.profiling.enable_ipu_trace_events = profiling or enable_ipu_events opts.profiling.enable_compilation_trace = profiling opts.profiling.enable_io_trace = profiling opts.profiling.execution_trace_type = profile_execution.value opts.profiling.enable_poplar_reports_text = use_poplar_text_report opts.profiling.enable_poplar_reports_cbor = use_poplar_cbor_report opts.profiling.enable_poplar_graph = enable_poplar_serialized_graph opts.profiling.report_every_nth_execution = report_every_nth_execution opts.profiling.max_report_size = max_report_size opts.profiling.report_directory = report_directory opts.speed_size_config.always_rearrange_copies_on_the_host = \ always_rearrange_copies_on_the_host opts.speed_size_config.merge_infeed_io_copies = merge_infeed_io_copies opts.speed_size_config.disable_graph_convolution_caching = \ disable_graph_convolution_caching opts.speed_size_config.disable_graph_outlining = \ disable_graph_outlining opts.speed_size_config.scheduler_selection = scheduler_selection opts.max_cross_replica_sum_buffer_size = max_cross_replica_sum_buffer_size opts.max_inter_ipu_copies_buffer_size = max_inter_ipu_copies_buffer_size opts.max_scheduler_lookahead_depth = max_scheduler_lookahead_depth opts.max_scheduler_search_space_size = max_scheduler_search_space_size opts.prefetch_data_streams = prefetch_data_streams opts.selection_order = selection_order.value opts.verified_transfers.enabled = False opts = set_verification_options(opts, VerificationOptions()) opts.enable_experimental_remote_buffer_embedding = \ enable_experimental_remote_buffer_embedding return opts def 
set_serialization_options(opts, output_folder=""): """ Enable / disable the serialization to disk of the compiled executables. .. code-block:: python # Create a device that will save to disk all the compiled executables. opts = create_ipu_config() opts = set_serialization_options(opts, output_folder="/tmp/my_network") ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... Args: output_folder: Where to save the compiled executables. Set to "" to disable serialization. Returns: The IpuOptions configuration protobuf. """ opts.serialization_folder = output_folder return opts def set_optimization_options(opts, combine_embedding_lookups=False, combine_matmuls=False, max_cross_replica_sum_buffer_size=0, max_reduce_scatter_buffer_size=0, max_inter_ipu_copies_buffer_size=0, max_send_recv_cluster_size=0, gather_simplifier=False, triangular_solve_expander_block_size=0): """Set the IPU options related to performance / optimizations. .. code-block:: python # Create a device with fusion for multiSlices sharing the same input # enabled. opts = create_ipu_config() opts = set_optimization_options(opts, combine_embedding_lookups=True) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... Args: combine_embedding_lookups: Fuse embedding lookups on the same tensor. This might improve performance but increase memory usage. combine_matmuls: Fuse matmul operations if they share the same weights or the same input. max_cross_replica_sum_buffer_size: The maximum number of bytes that can be waiting before a cross replica sum op is scheduled. max_reduce_scatter_buffer_size: The maximum number of bytes that can be waiting before a reduce scatter op is scheduled. max_inter_ipu_copies_buffer_size: The maximum number of bytes that can be waiting before a inter IPU copy between IPUs is scheduled. max_send_recv_cluster_size: The maximum number of bytes that can be waiting before a cluster of send/recv instructions to/from the host is scheduled. 
These are lowered to stream copies that can be merged by Poplar. gather_simplifier: Will enable more aggressive optimisation for embedding lookups. triangular_solve_expander_block_size: Defines size for triangular solver expander blocks. 0 - implementation defined default. Returns: The IpuOptions configuration protobuf. """ # Internally embedding lookups are implemented using multiSlice operations. opts.enable_multi_slice_combiner = combine_embedding_lookups opts.enable_matmul_combiner = combine_matmuls opts.max_cross_replica_sum_buffer_size = max_cross_replica_sum_buffer_size opts.max_reduce_scatter_buffer_size = max_reduce_scatter_buffer_size opts.max_inter_ipu_copies_buffer_size = max_inter_ipu_copies_buffer_size opts.max_send_recv_cluster_size = max_send_recv_cluster_size opts.enable_gather_simplifier = gather_simplifier opts.triangular_solve_expander_block_size = \ triangular_solve_expander_block_size return opts def set_norm_options(opts, use_stable_statistics=False): """Set the IPU options related to norms. Args: use_stable_statistics: If True, computes the mean first and subtracts the activations by it before computing the variance. The implementation with this flag set to True is slower than when set to False. Returns: The IpuOptions configuration protobuf. """ opts.use_stable_norm_statistics = use_stable_statistics return opts def set_transfer_options(opts, use_verified_transfers=False): """Set the IPU options related to Poplar data transfers. Args: opts: An IpuOptions session control protobuf. use_verified_transfers: If True, use Poplar's verified transfers. Returns: The IpuOptions configuration protobuf. """ opts.verified_transfers.enabled = use_verified_transfers return opts class KeyId: def __init__(self, key=0, start_id=-1): self.key = key self.start_id = start_id class VerificationOptions: """Store pairs of key / id to use for each type of data used in the graph. 
Does nothing unless verified transfers have been enabled by calling `set_transfer_options(opts, use_verified_transfers=True)` and an instance of this class has been set by calling `set_verification_options`: .. code-block:: python o = VerificationOptions() o.inputs.key = 1 o.infeeds["infeed"].key = 3 set_verification_options(opts, o) """ def __init__(self): self.inputs = KeyId() self.input_parameters = KeyId() self.outputs = KeyId() self.output_parameters = KeyId() self.infeeds = collections.defaultdict(KeyId) self.outfeeds = collections.defaultdict(KeyId) self.checkpoint_in = KeyId(0, 0) self.checkpoint_out = KeyId(0, 0) def set_verification_options(opts, verification_options): """Set the pairs or key / id to use for each type of data used in the graph when verified transfers are enabled. .. code-block:: python # Create a device which will use verified transfers with different keys. opts = create_ipu_config() opts = set_transfer_options(opts, use_verified_transfers=True) o = VerificationOptions() o.input_parameters = KeyId(1) o.infeeds["training_feed"] = KeyId(2) opts = set_verification_options(opts, o) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... Args: opts: An IpuOptions session control protobuf. verification_options: a VerificationOptions object that contains the keys / ids to use. 
""" if not isinstance(verification_options, VerificationOptions): raise Exception( "`verification_options` must be of type VerificationOptions") def _cp_key_and_id(src, dst): dst.key = src.key dst.start_id = src.start_id for attr in [ "inputs", "input_parameters", "outputs", "output_parameters", "checkpoint_in", "checkpoint_out" ]: _cp_key_and_id(getattr(verification_options, attr), getattr(opts.verified_transfers, attr)) for name, options in verification_options.infeeds.items(): _cp_key_and_id(options, opts.verified_transfers.infeeds[name]) for name, options in verification_options.outfeeds.items(): _cp_key_and_id(options, opts.verified_transfers.outfeeds[name]) return opts def set_compilation_options(opts, compilation_options=None): """Set the IPU compilation options for the session. .. code-block:: python # Create a device with debug execution profile flag set to "compute_sets" opts = create_ipu_config() opts = set_compilation_options(opts, compilation_options={"debug.instrument": "true", "debug.allowOutOfMemory": "true"}) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... Args: opts: An IpuOptions session control protobuf. compilation_options: A dictionary of poplar compilation option flags to be sent to the executor. Returns: The IpuOptions configuration protobuf, with engine compilation options set. """ if compilation_options: if not isinstance(compilation_options, dict): raise Exception("`compilation_options` must be a dictionary") for (option_name, value) in compilation_options.items(): compilation_option = opts.compilation_options.add() compilation_option.option = option_name compilation_option.value = value return opts def set_convolution_options(opts, convolution_options=None): """Set the IPU convolution options for the session. .. 
code-block:: python # Set "availableMemoryProportion" flag to "0.1" opts = create_ipu_config() opts = set_convolution_options(opts, convolution_options={"availableMemoryProportion": "0.1"}) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... Args: opts: An IpuOptions session control protobuf. convolution_options: A dictionary of poplar option flags for convolutions. The "availableMemoryProportion" flag indicates the proportion of tile memory to be made available as temporary memory for convolutions (float between 0 and 1.0). Less temporary memory will generally result in a convolution that takes more cycles to complete. However, because always live memory (such as control code and vertex state) is not tracked when planning it, a convolution using less temporary memory may use more memory overall, due to an increase of always live memory. Returns: The IpuOptions configuration protobuf, with convolution options set. """ if convolution_options: if not isinstance(convolution_options, dict): raise Exception("`convolution_options` must be a dictionary") for (option_name, value) in convolution_options.items(): opt = opts.convolution_options.add() opt.option = option_name opt.value = value return opts def set_matmul_options(opts, matmul_options=None, clear_pass_type=False): """Set the IPU matrix multiplication options for the session. .. code-block:: python # Set "availableMemoryProportion" flag to "0.5" opts = create_ipu_config() opts = set_matmul_options(opts, matmul_options={"availableMemoryProportion": "0.5"}) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... Args: opts: An IpuOptions session control protobuf. matmul_options: A dictionary containing the poplar option flag "availableMemoryProportion" for the matrix multiplication operations. It indicates the proportion of tile memory to be made available as temporary memory for the matrix multiplications (float between 0 and 1.0). 
Less temporary memory will generally result in a multiplication that takes more cycles to complete. However, because always live memory (like code and vertex state) is not tracked when planning it, a multiplication using less temporary memory may use more memory overall, due to an increase of always live memory. clear_pass_type: When set to True, the Pass type will not be set in the options passed to the poplar operation. Returns: The IpuOptions configuration protobuf, with matmul options set. """ if matmul_options: if not isinstance(matmul_options, dict): raise Exception("`matmul_options` must be a dictionary") for (option_name, value) in matmul_options.items(): opt = opts.matmul_options.add() opt.option = option_name opt.value = value opts.clear_matmul_pass_type = clear_pass_type return opts def set_pooling_options(opts, pooling_options=None): """Set the IPU pooling compilation options for the session. .. code-block:: python # Set "poolUseIntrospectiveMapping" flag to "false" opts = create_ipu_config() opts = set_pooling_options(opts, pooling_options={"poolUseIntrospectiveMapping": "false"}) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... Args: opts: An IpuOptions session control protobuf. pooling_options: A dictionary of poplar option flags for the pooling operation. Returns: The IpuOptions configuration protobuf, with pooling options set. """ if pooling_options: if not isinstance(pooling_options, dict): raise Exception("`pooling_options` must be a dictionary") for (option_name, value) in pooling_options.items(): opt = opts.pooling_options.add() opt.option = option_name opt.value = value return opts @deprecation.deprecated_args( None, "report_options is deprecated, use graph_options and" " execution_options instead", "report_options") def set_report_options(opts, report_options=None, graph_options=None, execution_options=None): """Set the options used to influence Poplar graph and execution reports generation. .. 
code-block:: python opts = create_ipu_config() opts = set_report_options(opts, report_options={"reportOption1": "false"}, graph_options={"graphOptions": "false"}, execution_options={"executionOptions": "false"}) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... Args: opts: An IpuOptions session control protobuf. report_options: (Deprecated) A dictionary of poplar option flags for the report generation. graph_options: A dictionary of poplar option flags for the graph report generation. execution_options: A dictionary of poplar option flags for the execution report generation. Returns: The IpuOptions configuration protobuf, with convolution options set. """ def use_report_options(): if report_options: if not isinstance(report_options, dict): raise Exception("`report_options` must be a dictionary") return report_options if not graph_options: graph_options = use_report_options() if graph_options: if not isinstance(graph_options, dict): raise Exception("`graph_options` must be a dictionary") for (option_name, value) in graph_options.items(): opt = opts.profiling.graph_options.add() opt.option = option_name opt.value = value if not execution_options: execution_options = use_report_options() if execution_options: if not isinstance(execution_options, dict): raise Exception("`execution_options` must be a dictionary") for (option_name, value) in execution_options.items(): opt = opts.profiling.execution_options.add() opt.option = option_name opt.value = value return opts def set_ipu_model_options(opts, compile_ipu_code=True): """Set the IPU Model options. Args: compile_ipu_code: Whether or not to actually compile real IPU code for modelling. Returns: The IpuOptions configuration protobuf, with IPU model options set. 
""" opts.ipu_model_config.compile_ipu_code = compile_ipu_code return opts @deprecation.deprecated_args( None, "Pipelining recomputation will recompute all the non-stateful operations " "when recomputation is enabled.", "allow_stateful_recompute", ) def set_recomputation_options(opts, allow_recompute=True, allow_stateful_recompute=None): # pylint: disable=unused-argument """Set re-computation options. Args: allow_recompute: Whether or not to re-compute instructions during training. If this is enabled then we will attempt to pattern match instructions/pipeline stages in the forward pass and recompute them in the backward pass to avoid having to preserve activations which increase the maximum memory liveness. Enabling this option can reduce memory usage at the expense of extra computation. Any stateful operations cannot be recomputed. allow_stateful_recompute: Deprecated. Returns: The IpuOptions configuration protobuf. """ opts.speed_size_config.allow_recompute = allow_recompute return opts def set_floating_point_behaviour_options(opts, inv=True, div0=True, oflo=True, esr=True, nanoo=True): """Set the IPU floating point control behaviour bits See the Poplar API documentation for poplar::FloatingPointBehaviour. Args: inv: If true a floating point invalid operation (defined by IEEE 754) will cause an exception. div0: If true a floating point divide by zero operation will cause an exception. oflo: If true a floating point overflow will cause an exception. esr: Enable stochastic rounding. nanoo: Enable Not-a-Number on overflow mode. """ opts.floating_point_behaviour.flags_set = True opts.floating_point_behaviour.inv = inv opts.floating_point_behaviour.div0 = div0 opts.floating_point_behaviour.oflo = oflo opts.floating_point_behaviour.esr = esr opts.floating_point_behaviour.nanoo = nanoo return opts def set_gcl_options(opts, num_io_tiles=0, gcl_options=None): """Set the IPU options for the Graphcore Communication Library. 
Args: num_io_tiles: Number of tiles to reserve per IPU for the GCL collective operations. gcl_options: A dictionary with options for configuring the GCL collective operations. Returns: The IpuOptions configuration protobuf. """ opts.gcl_num_io_tiles = num_io_tiles if gcl_options: if not isinstance(gcl_options, dict): raise TypeError("`gcl_options` must be a dictionary") for (option_name, value) in gcl_options.items(): opt = opts.gcl_options.add() opt.option = option_name opt.value = value return opts def auto_select_ipus(opts, num_ipus): """Configure the IPUs to be used by the session. The configuration describes a system consisting of multiple Tensorflow devices, each with control of one of more IPUs. The devices will be labeled ``/device:IPU:0``, ``/device:IPU:1`` and so on. Each device can control a specific number of IPUs, given by the ``num_ipus`` parameter. The system will automatically select IPU configurations from the available IPUs, where they match the desired number of IPUs. Examples: .. code-block:: python # Create a single device, with one IPU opts = create_ipu_config() opts = auto_select_ipus(opts, num_ipus=1) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... .. code-block:: python # Create two devices, with 2 IPUs per device. opts = create_ipu_config() opts = auto_select_ipus(opts, num_ipus=[2,2]) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... .. code-block:: python # Create two devices, with 1 IPU in the first device and 2 IPUs # in the second device. opts = create_ipu_config() opts = auto_select_ipus(opts, num_ipus=[1,2]) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... Args: opts: An IpuOptions session control protobuf. num_ipus: List of IPUs per Tensorflow device Returns: The IpuOptions configuration protobuf, configured for auto-selecting a set of IPU devices. 
""" if opts.device_config: raise Exception("IPU devices have already been configured.") if not isinstance(num_ipus, (int, list, tuple)): raise Exception("`num_ipus` must be an integer, list or tuple.") if isinstance(num_ipus, int): dev = opts.device_config.add() dev.auto_count = num_ipus else: for n in num_ipus: dev = opts.device_config.add() dev.auto_count = n return opts def select_ipus(opts, indices): """Configure the IPUs to be used by the session. The configuration describes a system consisting of multiple Tensorflow devices, each with control of one of more IPUs. The Tensorflow devices will be labeled ``/device:IPU:0``, ``/device:IPU:1`` and so on. Each Tensorflow device uses a specific configuration consisting of one or more IPUs from the list of devices. These can be found by running the Graphcore utility ``gc-info -l``. For instance, the following listing shows the device configurations available on a system with 16 IPUs. .. code-block:: shell user@host:~$ gc-info -l Graphcore device listing: -+- Id: [0], type: [PCIe], PCI Domain: [0000:1a:00.0] -+- Id: [1], type: [PCIe], PCI Domain: [0000:1b:00.0] -+- Id: [2], type: [PCIe], PCI Domain: [0000:23:00.0] -+- Id: [3], type: [PCIe], PCI Domain: [0000:24:00.0] -+- Id: [4], type: [PCIe], PCI Domain: [0000:3d:00.0] -+- Id: [5], type: [PCIe], PCI Domain: [0000:3e:00.0] -+- Id: [6], type: [PCIe], PCI Domain: [0000:43:00.0] -+- Id: [7], type: [PCIe], PCI Domain: [0000:44:00.0] -+- Id: [8], type: [PCIe], PCI Domain: [0000:8b:00.0] -+- Id: [9], type: [PCIe], PCI Domain: [0000:8c:00.0] -+- Id: [10], type: [PCIe], PCI Domain: [0000:8e:00.0] -+- Id: [11], type: [PCIe], PCI Domain: [0000:8f:00.0] -+- Id: [12], type: [PCIe], PCI Domain: [0000:b8:00.0] -+- Id: [13], type: [PCIe], PCI Domain: [0000:b9:00.0] -+- Id: [14], type: [PCIe], PCI Domain: [0000:ba:00.0] -+- Id: [15], type: [PCIe], PCI Domain: [0000:bb:00.0] -+- Id: [16], type: [Multi IPU] |--- PCIe Id: [5], DNC Id: [0], PCI Domain: [0000:3e:00.0] |--- PCIe Id: [7], 
DNC Id: [1], PCI Domain: [0000:44:00.0] -+- Id: [17], type: [Multi IPU] |--- PCIe Id: [4], DNC Id: [0], PCI Domain: [0000:3d:00.0] |--- PCIe Id: [6], DNC Id: [1], PCI Domain: [0000:43:00.0] -+- Id: [18], type: [Multi IPU] |--- PCIe Id: [3], DNC Id: [0], PCI Domain: [0000:24:00.0] |--- PCIe Id: [1], DNC Id: [1], PCI Domain: [0000:1b:00.0] -+- Id: [19], type: [Multi IPU] |--- PCIe Id: [2], DNC Id: [0], PCI Domain: [0000:23:00.0] |--- PCIe Id: [0], DNC Id: [1], PCI Domain: [0000:1a:00.0] -+- Id: [20], type: [Multi IPU] |--- PCIe Id: [13], DNC Id: [0], PCI Domain: [0000:b9:00.0] |--- PCIe Id: [15], DNC Id: [1], PCI Domain: [0000:bb:00.0] -+- Id: [21], type: [Multi IPU] |--- PCIe Id: [12], DNC Id: [0], PCI Domain: [0000:b8:00.0] |--- PCIe Id: [14], DNC Id: [1], PCI Domain: [0000:ba:00.0] -+- Id: [22], type: [Multi IPU] |--- PCIe Id: [9], DNC Id: [0], PCI Domain: [0000:8c:00.0] |--- PCIe Id: [11], DNC Id: [1], PCI Domain: [0000:8f:00.0] -+- Id: [23], type: [Multi IPU] |--- PCIe Id: [10], DNC Id: [0], PCI Domain: [0000:8e:00.0] |--- PCIe Id: [8], DNC Id: [1], PCI Domain: [0000:8b:00.0] -+- Id: [24], type: [Multi IPU] |--- PCIe Id: [5], DNC Id: [0], PCI Domain: [0000:3e:00.0] |--- PCIe Id: [7], DNC Id: [1], PCI Domain: [0000:44:00.0] |--- PCIe Id: [4], DNC Id: [2], PCI Domain: [0000:3d:00.0] |--- PCIe Id: [6], DNC Id: [3], PCI Domain: [0000:43:00.0] -+- Id: [25], type: [Multi IPU] |--- PCIe Id: [3], DNC Id: [0], PCI Domain: [0000:24:00.0] |--- PCIe Id: [1], DNC Id: [1], PCI Domain: [0000:1b:00.0] |--- PCIe Id: [2], DNC Id: [2], PCI Domain: [0000:23:00.0] |--- PCIe Id: [0], DNC Id: [3], PCI Domain: [0000:1a:00.0] -+- Id: [26], type: [Multi IPU] |--- PCIe Id: [13], DNC Id: [0], PCI Domain: [0000:b9:00.0] |--- PCIe Id: [15], DNC Id: [1], PCI Domain: [0000:bb:00.0] |--- PCIe Id: [12], DNC Id: [2], PCI Domain: [0000:b8:00.0] |--- PCIe Id: [14], DNC Id: [3], PCI Domain: [0000:ba:00.0] -+- Id: [27], type: [Multi IPU] |--- PCIe Id: [9], DNC Id: [0], PCI Domain: [0000:8c:00.0] |--- 
PCIe Id: [11], DNC Id: [1], PCI Domain: [0000:8f:00.0] |--- PCIe Id: [10], DNC Id: [2], PCI Domain: [0000:8e:00.0] |--- PCIe Id: [8], DNC Id: [3], PCI Domain: [0000:8b:00.0] -+- Id: [28], type: [Multi IPU] |--- PCIe Id: [5], DNC Id: [0], PCI Domain: [0000:3e:00.0] |--- PCIe Id: [7], DNC Id: [1], PCI Domain: [0000:44:00.0] |--- PCIe Id: [4], DNC Id: [2], PCI Domain: [0000:3d:00.0] |--- PCIe Id: [6], DNC Id: [3], PCI Domain: [0000:43:00.0] |--- PCIe Id: [3], DNC Id: [4], PCI Domain: [0000:24:00.0] |--- PCIe Id: [1], DNC Id: [5], PCI Domain: [0000:1b:00.0] |--- PCIe Id: [2], DNC Id: [6], PCI Domain: [0000:23:00.0] |--- PCIe Id: [0], DNC Id: [7], PCI Domain: [0000:1a:00.0] -+- Id: [29], type: [Multi IPU] |--- PCIe Id: [13], DNC Id: [0], PCI Domain: [0000:b9:00.0] |--- PCIe Id: [15], DNC Id: [1], PCI Domain: [0000:bb:00.0] |--- PCIe Id: [12], DNC Id: [2], PCI Domain: [0000:b8:00.0] |--- PCIe Id: [14], DNC Id: [3], PCI Domain: [0000:ba:00.0] |--- PCIe Id: [9], DNC Id: [4], PCI Domain: [0000:8c:00.0] |--- PCIe Id: [11], DNC Id: [5], PCI Domain: [0000:8f:00.0] |--- PCIe Id: [10], DNC Id: [6], PCI Domain: [0000:8e:00.0] |--- PCIe Id: [8], DNC Id: [7], PCI Domain: [0000:8b:00.0] -+- Id: [30], type: [Multi IPU] |--- PCIe Id: [5], DNC Id: [0], PCI Domain: [0000:3e:00.0] |--- PCIe Id: [7], DNC Id: [1], PCI Domain: [0000:44:00.0] |--- PCIe Id: [4], DNC Id: [2], PCI Domain: [0000:3d:00.0] |--- PCIe Id: [6], DNC Id: [3], PCI Domain: [0000:43:00.0] |--- PCIe Id: [3], DNC Id: [4], PCI Domain: [0000:24:00.0] |--- PCIe Id: [1], DNC Id: [5], PCI Domain: [0000:1b:00.0] |--- PCIe Id: [2], DNC Id: [6], PCI Domain: [0000:23:00.0] |--- PCIe Id: [0], DNC Id: [7], PCI Domain: [0000:1a:00.0] |--- PCIe Id: [13], DNC Id: [8], PCI Domain: [0000:b9:00.0] |--- PCIe Id: [15], DNC Id: [9], PCI Domain: [0000:bb:00.0] |--- PCIe Id: [12], DNC Id: [10], PCI Domain: [0000:b8:00.0] |--- PCIe Id: [14], DNC Id: [11], PCI Domain: [0000:ba:00.0] |--- PCIe Id: [9], DNC Id: [12], PCI Domain: [0000:8c:00.0] |--- 
PCIe Id: [11], DNC Id: [13], PCI Domain: [0000:8f:00.0] |--- PCIe Id: [10], DNC Id: [14], PCI Domain: [0000:8e:00.0] |--- PCIe Id: [8], DNC Id: [15], PCI Domain: [0000:8b:00.0] Examples based on the listing above: .. code-block:: python # Create a single device with 1 IPU at PCI address 0000:1a:00.0 by using # IPU configuration index 0 opts = create_ipu_config() opts = select_ipus(opts, indices=[0]) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... .. code-block:: python # Create a single device with 1 IPU at PCI address 0000:8b:00.0 by using # IPU configuration index 8 opts = create_ipu_config() opts = select_ipus(opts, indices=[8]) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... .. code-block:: python # Create two TensorFlow devices, with one IPU each, being devices at # indices 0 and 1 opts = create_ipu_config() opts = select_ipus(opts, indices=[0, 1]) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... .. code-block:: python # Create two TensorFlow devices, with four IPUs each. The device # configurations at indices 24 (0000:3e:00.0, 0000:44:00.0, 0000:3d:00.0, # 000:43:00.0) and 25 (0000:24:00.0, 0000:1b:00.0, 0000:23:00.0, # 00:1a:00.0) opts = create_ipu_config() opts = select_ipus(opts, indices=[24, 25]) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... .. code-block:: python # Create four TensorFlow devices each with one IPU, at addresses # 0000:1a:00.0, 0000:1b:00.0, 0000:23:00.0, 0000:24:00.0. opts = create_ipu_config() opts = select_ipus(opts, indices=[0, 1, 2, 3]) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... Args: opts: An IpuOptions session control protobuf. indices: List of IPU configuration indices. Returns: The IpuOptions configuration protobuf, with a number of devices selected by IPU configuration index. 
""" if opts.device_config: raise Exception("IPU devices have already been configured.") if not isinstance(indices, (list, tuple)): raise Exception("`indices` must be a list or tuple.") if len(set(indices)) != len(indices): raise Exception("All device indeicies in `indices` must be unique.") for i in indices: dev = opts.device_config.add() dev.cfg_index = i return opts def set_ipu_connection_type(opts, connection_type=None, ipu_version=None): """ Configure when to attach to the device. .. code-block:: python # Compile without attaching to the device. opts = create_ipu_config() opts = set_ipu_connection_type(opts, DeviceConnectionType.ON_DEMAND)) ipu.utils.configure_ipu_system(opts) with tf.Session() as s: ... Args: opts: An IpuOptions session control protobuf. connection_type: One of `DeviceConnectionType`. Defaults to `DeviceConnectionType.ALWAYS` if None. ipu_version: Version of the IPU hardware used. Required if the `connection_type` provided is `DeviceConnectionType.NEVER`. Returns: The IpuOptions configuration protobuf. """ connection_type = connection_type if connection_type \ else DeviceConnectionType.ALWAYS if connection_type == DeviceConnectionType.NEVER and ipu_version is None: raise Exception("`ipu_version` must be set when `connection_type` is set " "to `DeviceConnectionType.NEVER`") opts.device_connection_type = connection_type.value if ipu_version is not None: opts.ipu_version = ipu_version opts.has_ipu_version = True return opts def reset_ipu_seed(seed, device="/device:IPU:0", cpu_device="cpu"): """Reset the seed used to generate stateful random numbers and perform stochastic rounding. Args: seed: The new random number generator seed. device: The device to which the seed will be applied. cpu_device: The CPU device which is on the same hardware to the IPU device. 
Returns: None """ g = ops.Graph() with g.as_default(): with ops.device(cpu_device): cfg_op = gen_ipu_ops.ipu_reset_seed(device, seed) with session_lib.Session(graph=g) as sess: sess.run(cfg_op) def extract_all_strings_from_event_trace(events): """Extract a concatenation of all data strings from an IPU event trace. Args: events: An array of IPU events as returned from the ``ipu_compile_summary`` operation. Returns: A string containing the concatenation of all of the data fields of the events. """ result = "" for e in events: evt = IpuTraceEvent.FromString(e) result = result + ("-" * 70) + "\n=> @ " + \ time.strftime('%F %T %z', time.localtime(evt.timestamp)) + ": " if evt.type == IpuTraceEvent.COMPILE_BEGIN: evt_str = "Compile begin: " + \ evt.compile_begin.module_name.decode('utf-8') + "\n" elif evt.type == IpuTraceEvent.COMPILE_END: evt_str = "Compile end: " + \ evt.compile_end.module_name.decode('utf-8') + "\n" + \ "Duration: " + str(evt.compile_end.duration) + " us\n" + \ evt.compile_end.compilation_report.decode('utf-8') elif evt.type == IpuTraceEvent.HOST_TO_DEVICE_TRANSFER: evt_str = "Host->Device\n" + \ evt.data_transfer.data_transfer.decode('utf-8') + "\n" elif evt.type == IpuTraceEvent.DEVICE_TO_HOST_TRANSFER: evt_str = "Device->Host\n" + \ evt.data_transfer.data_transfer.decode('utf-8') + "\n" elif evt.type == IpuTraceEvent.LOAD_ENGINE: evt_str = "Load engine: " + \ evt.load_engine.module_name.decode('utf-8') + "\n" elif evt.type == IpuTraceEvent.EXECUTE: evt_str = "Execute: " + \ evt.execute.module_name.decode('utf-8') + "\n" + \ evt.execute.execution_report.decode('utf-8') else: evt_str = "Unknown event" result = result + evt_str + '\n' return result def extract_all_types_from_event_trace(events): """Return a list of the types of each event in an event trace tensor Args: events: A tensor containing a list of IPU events as protobuf strings Returns: A list containing the type of each event """ result = [] for e in events: evt = IpuTraceEvent.FromString(e) 
result += [evt.type] return result def extract_all_events(events): """Extract a list containing each event as an event object Args: events: A tensor containing a list of IPU events as protobuf strings Returns: A list containing IpuTraceEvent objects """ result = [] for e in events: evt = IpuTraceEvent.FromString(e) result += [evt] return result def extract_compile_reports(events): """Get a list of all compiler reports in the event list. Args: events: A list of trace event serialized protobufs. Returns: A list of tuples containing the module name and report.""" result = [] for e in events: evt = IpuTraceEvent.FromString(e) if evt.type == IpuTraceEvent.COMPILE_END: try: module = evt.compile_end.module_name.decode('utf-8') rep = evt.compile_end.compilation_report.decode('utf-8') if rep: result += [(module, rep)] except UnicodeDecodeError: pass return result def extract_poplar_serialized_graphs(events): """Get a list of all poplar serialized graphs in the event list. Args: events: A list of trace event serialized protobufs. Returns: A list of tuples containing the module name and report.""" result = [] for e in events: evt = IpuTraceEvent.FromString(e) if evt.type == IpuTraceEvent.COMPILE_END: try: rep = evt.compile_end.poplar_graph.decode('utf-8') except UnicodeDecodeError: rep = evt.compile_end.poplar_graph module = evt.compile_end.module_name.decode('utf-8') if rep: result += [(module, rep)] return result def extract_execute_reports(events): """Get a list of all compiler reports in the event list. Args: events: A list of trace event serialized protobufs. 
Returns: A list of tuples containing the module name and report.""" result = [] for e in events: evt = IpuTraceEvent.FromString(e) if evt.type == IpuTraceEvent.EXECUTE: try: module = evt.execute.module_name.decode('utf-8') rep = evt.execute.execution_report.decode('utf-8') if rep: result += [(module, rep)] except UnicodeDecodeError: pass return result def move_variable_initialization_to_cpu(graph=None): """For all variables in the VARIABLES collection, move any initialization ops onto the CPU. Args: graph: Operations are moved around on this graph. The default graph will be used if not specified. Returns: None """ if not graph: graph = ops.get_default_graph() with ops.device("/device:CPU:0"): control_flow_ops.no_op(name="cpu") variables = [] for v in graph.get_collection('variables'): # We assume a distribution strategy knows better how to # initialize its own variables, so skip those. if not isinstance(v, values.DistributedVariable): variables.append(v) def _uses_resource(op): """ Helper to determine if an op uses a resource """ return any(input_tensor.dtype == 'resource' for input_tensor in op.inputs) init_ops = [] dep_ops = [v.initializer.inputs[1].op for v in variables] visited = set() # Depth-first search up the graph starting from all variables in VARIABLES # Place all touched ops on the CPU, but do not touch or search ops that use # resource tensors, otherwise device colocation could be violated. 
while dep_ops: op = dep_ops.pop() if op not in visited and not _uses_resource(op): visited.add(op) init_ops += [op] dep_ops += [x.op for x in op.inputs] # pylint: disable=protected-access for op in init_ops: op._set_device('/device:CPU:0') op._set_attr( '_class', attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue( s=[b'loc:@cpu']))) op._set_attr('_XlaCompile', attr_value_pb2.AttrValue(b=False)) op._set_attr('_XlaScope', attr_value_pb2.AttrValue(s=b'')) # pylint: enable=protected-access return def export_dataset_to_file(dataset_or_infeed, output_filename, num_elements, feed_name="", apply_options=True): """Export as binary `num_elements` from the given `infeed` to the specified `output_filename`. If the infeed elements are tuples then one file per tuple element will be created. For example, if `dataset` looks like .. code-block:: python [{ "a": A_0, "b": B_0}, { "a": A_1, "b": B_1}, ...] then `export_dataset_to_file(dataset, "my_dataset.bin", 100)` will generate: .. code-block:: python my_dataset.0.bin # Contains tensors [ A_0, A_1, ..., A_99] my_dataset.1.bin # Contains tensors [ B_0, B_1, ..., B_99] Args: dataset_or_infeed: An unary dataset with the same input and output structure or an `IPUInfeedQueue`. output_filename: Where to export the tensors to. num_elements: Number of elements to export from the dataset. feed_name: Specify the feed name. apply_options: Whether to apply optimization options which can improve the dataset performance. 
""" assert isinstance(dataset_or_infeed, (dataset_ops.Dataset, ipu_infeed_queue.IPUInfeedQueue)) if isinstance(dataset_or_infeed, ipu_infeed_queue.IPUInfeedQueue): dataset = dataset_or_infeed._dataset # pylint: disable=protected-access feed_name = feed_name or dataset_or_infeed._id # pylint: disable=protected-access else: dataset = dataset_or_infeed if apply_options: dataset = dataset._apply_options() # pylint: disable=protected-access extractor = dataset_extractor.dataset_extractor(dataset, num_elements, output_filename, feed_name) with ops.device("cpu"), session_lib.Session() as sess: sess.run(extractor) def export_inputs_to_file(inputs, output_filename, feed_dict): """Export as binary the list of `inputs` provided to the specified `output_filename`. Args: inputs: List of graph inputs to export. output_filename: Where to export the tensors to. feed_dict: Feed dictionary containing the inputs' values. """ with ops.device("cpu"), session_lib.Session() as sess: sess.run(dataset_extractor.export_variables(inputs, output_filename), feed_dict)
37.296926
96
0.668806
import collections from enum import Enum import os import time import numpy as np from tensorflow.compiler.plugin.poplar.driver.config_pb2 import IpuOptions from tensorflow.compiler.plugin.poplar.driver.trace_pb2 import IpuTraceEvent from tensorflow.compiler.plugin.poplar.driver import config_pb2 from tensorflow.compiler.plugin.poplar.ops import gen_ipu_ops from tensorflow.compiler.plugin.poplar.tools.tensorflow_weights_extractor import ( export_variables_from_live_session, export_variables_from_live_model, import_data_in_live_session, import_data_in_live_model) from tensorflow.compat.v1 import executing_eagerly from tensorflow.core.framework import attr_value_pb2 from tensorflow.python.client import session as session_lib from tensorflow.python.distribute import values from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.util import deprecation from tensorflow.python.data.ops import dataset_ops from tensorflow.python.ipu import ipu_infeed_queue from tensorflow.python.ipu import dataset_extractor class SelectionOrder(Enum): AUTO = config_pb2.IpuSelectionOrder.Value("AUTO") ZIGZAG = config_pb2.IpuSelectionOrder.Value("ZIGZAG") SNAKE = config_pb2.IpuSelectionOrder.Value("SNAKE") HOOF = config_pb2.IpuSelectionOrder.Value("HOOF") class ExecutionProfileType(Enum): NO_PROFILE = config_pb2.IpuExecutionProfileType.Value("NO_PROFILE") DEVICE_PROFILE = config_pb2.IpuExecutionProfileType.Value("DEVICE_PROFILE") IPU_PROFILE = config_pb2.IpuExecutionProfileType.Value("IPU_PROFILE") TILE_PROFILE = config_pb2.IpuExecutionProfileType.Value("TILE_PROFILE") class DeviceConnectionType(Enum): ALWAYS = config_pb2.IpuDeviceConnectionType.Value("ALWAYS") ON_DEMAND = config_pb2.IpuDeviceConnectionType.Value("ON_DEMAND") NEVER = config_pb2.IpuDeviceConnectionType.Value("NEVER") def configure_ipu_system(config, device="cpu"): if not isinstance(config, config_pb2.IpuOptions): raise Exception("`config` must be an IpuOptions 
instance") g = ops.Graph() with g.as_default(): with ops.device(device): cfg_op = gen_ipu_ops.ipu_configure_hardware(config.SerializeToString()) with session_lib.Session(graph=g) as sess: sess.run(cfg_op) def get_ipu_config(session=None): configurations = None if executing_eagerly(): assert not session, "No session is required for eager execution." configurations = gen_ipu_ops.ipu_get_configuration().numpy() else: s = session if session else session_lib.Session() configurations = s.run(gen_ipu_ops.ipu_get_configuration()) deserialized = [] valid = False for conf in configurations: opt = IpuOptions() opt.ParseFromString(conf) deserialized.append(opt) valid |= len(opt.device_config) > 0 if not valid: raise RuntimeError("No IPU devices configured.") return deserialized def get_num_of_ipus_in_device(ipu_device, device="cpu"): g = ops.Graph() with g.as_default(): with ops.device(device): cfg_op = gen_ipu_ops.ipu_get_num_devices(ipu_device) with session_lib.Session(graph=g) as sess: return sess.run(cfg_op) def running_on_ipu_model(): return "--use_ipu_model" in os.environ.get("TF_POPLAR_FLAGS", "") @deprecation.deprecated_args(None, "Use set_optimization_options() instead.", "max_cross_replica_sum_buffer_size", "max_inter_ipu_copies_buffer_size") def create_ipu_config(profiling=False, enable_ipu_events=False, use_poplar_text_report=False, use_poplar_cbor_report=False, profile_execution=None, enable_poplar_serialized_graph=False, report_every_nth_execution=0, max_report_size=0x10000000, report_directory="", scheduler_selection="", always_rearrange_copies_on_the_host=False, merge_infeed_io_copies=False, disable_graph_convolution_caching=False, disable_graph_outlining=False, retain_control_dependencies=False, max_cross_replica_sum_buffer_size=0, max_inter_ipu_copies_buffer_size=0, max_scheduler_lookahead_depth=5, max_scheduler_search_space_size=64, prefetch_data_streams=True, selection_order=None, enable_experimental_remote_buffer_embedding=False): if profiling and 
enable_ipu_events: raise Exception( "`profiling` and `enable_ipu_events` are mutually exclusive") if retain_control_dependencies: raise Exception("`retain_control_dependencies` is deprecated") selection_order = selection_order if selection_order else SelectionOrder.AUTO profile_execution = profile_execution if profile_execution \ else ExecutionProfileType.NO_PROFILE if isinstance(profile_execution, (np.bool_, bool)): if profile_execution: profile_execution = ExecutionProfileType.DEVICE_PROFILE else: profile_execution = ExecutionProfileType.NO_PROFILE if (profile_execution != ExecutionProfileType.NO_PROFILE and not profiling): raise Exception("`profiling` is required when `profile_execution` is set") if not isinstance(profile_execution, ExecutionProfileType): raise Exception("`profile_execution` must be True, False, or an " "ExecutionProfileType instance") opts = config_pb2.IpuOptions() opts.creator_id = config_pb2.IpuOptionsCreator.IPU_UTILS opts.ipu_model_config.compile_ipu_code = True opts.enable_multi_slice_combiner = False opts.enable_matmul_combiner = False opts.enable_gather_simplifier = False opts.device_connection_type = DeviceConnectionType.ALWAYS.value opts.speed_size_config.allow_recompute = False opts.profiling.enable_ipu_trace_events = profiling or enable_ipu_events opts.profiling.enable_compilation_trace = profiling opts.profiling.enable_io_trace = profiling opts.profiling.execution_trace_type = profile_execution.value opts.profiling.enable_poplar_reports_text = use_poplar_text_report opts.profiling.enable_poplar_reports_cbor = use_poplar_cbor_report opts.profiling.enable_poplar_graph = enable_poplar_serialized_graph opts.profiling.report_every_nth_execution = report_every_nth_execution opts.profiling.max_report_size = max_report_size opts.profiling.report_directory = report_directory opts.speed_size_config.always_rearrange_copies_on_the_host = \ always_rearrange_copies_on_the_host opts.speed_size_config.merge_infeed_io_copies = merge_infeed_io_copies 
opts.speed_size_config.disable_graph_convolution_caching = \ disable_graph_convolution_caching opts.speed_size_config.disable_graph_outlining = \ disable_graph_outlining opts.speed_size_config.scheduler_selection = scheduler_selection opts.max_cross_replica_sum_buffer_size = max_cross_replica_sum_buffer_size opts.max_inter_ipu_copies_buffer_size = max_inter_ipu_copies_buffer_size opts.max_scheduler_lookahead_depth = max_scheduler_lookahead_depth opts.max_scheduler_search_space_size = max_scheduler_search_space_size opts.prefetch_data_streams = prefetch_data_streams opts.selection_order = selection_order.value opts.verified_transfers.enabled = False opts = set_verification_options(opts, VerificationOptions()) opts.enable_experimental_remote_buffer_embedding = \ enable_experimental_remote_buffer_embedding return opts def set_serialization_options(opts, output_folder=""): opts.serialization_folder = output_folder return opts def set_optimization_options(opts, combine_embedding_lookups=False, combine_matmuls=False, max_cross_replica_sum_buffer_size=0, max_reduce_scatter_buffer_size=0, max_inter_ipu_copies_buffer_size=0, max_send_recv_cluster_size=0, gather_simplifier=False, triangular_solve_expander_block_size=0): opts.enable_multi_slice_combiner = combine_embedding_lookups opts.enable_matmul_combiner = combine_matmuls opts.max_cross_replica_sum_buffer_size = max_cross_replica_sum_buffer_size opts.max_reduce_scatter_buffer_size = max_reduce_scatter_buffer_size opts.max_inter_ipu_copies_buffer_size = max_inter_ipu_copies_buffer_size opts.max_send_recv_cluster_size = max_send_recv_cluster_size opts.enable_gather_simplifier = gather_simplifier opts.triangular_solve_expander_block_size = \ triangular_solve_expander_block_size return opts def set_norm_options(opts, use_stable_statistics=False): opts.use_stable_norm_statistics = use_stable_statistics return opts def set_transfer_options(opts, use_verified_transfers=False): opts.verified_transfers.enabled = 
use_verified_transfers return opts class KeyId: def __init__(self, key=0, start_id=-1): self.key = key self.start_id = start_id class VerificationOptions: def __init__(self): self.inputs = KeyId() self.input_parameters = KeyId() self.outputs = KeyId() self.output_parameters = KeyId() self.infeeds = collections.defaultdict(KeyId) self.outfeeds = collections.defaultdict(KeyId) self.checkpoint_in = KeyId(0, 0) self.checkpoint_out = KeyId(0, 0) def set_verification_options(opts, verification_options): if not isinstance(verification_options, VerificationOptions): raise Exception( "`verification_options` must be of type VerificationOptions") def _cp_key_and_id(src, dst): dst.key = src.key dst.start_id = src.start_id for attr in [ "inputs", "input_parameters", "outputs", "output_parameters", "checkpoint_in", "checkpoint_out" ]: _cp_key_and_id(getattr(verification_options, attr), getattr(opts.verified_transfers, attr)) for name, options in verification_options.infeeds.items(): _cp_key_and_id(options, opts.verified_transfers.infeeds[name]) for name, options in verification_options.outfeeds.items(): _cp_key_and_id(options, opts.verified_transfers.outfeeds[name]) return opts def set_compilation_options(opts, compilation_options=None): if compilation_options: if not isinstance(compilation_options, dict): raise Exception("`compilation_options` must be a dictionary") for (option_name, value) in compilation_options.items(): compilation_option = opts.compilation_options.add() compilation_option.option = option_name compilation_option.value = value return opts def set_convolution_options(opts, convolution_options=None): if convolution_options: if not isinstance(convolution_options, dict): raise Exception("`convolution_options` must be a dictionary") for (option_name, value) in convolution_options.items(): opt = opts.convolution_options.add() opt.option = option_name opt.value = value return opts def set_matmul_options(opts, matmul_options=None, clear_pass_type=False): if 
matmul_options: if not isinstance(matmul_options, dict): raise Exception("`matmul_options` must be a dictionary") for (option_name, value) in matmul_options.items(): opt = opts.matmul_options.add() opt.option = option_name opt.value = value opts.clear_matmul_pass_type = clear_pass_type return opts def set_pooling_options(opts, pooling_options=None): if pooling_options: if not isinstance(pooling_options, dict): raise Exception("`pooling_options` must be a dictionary") for (option_name, value) in pooling_options.items(): opt = opts.pooling_options.add() opt.option = option_name opt.value = value return opts @deprecation.deprecated_args( None, "report_options is deprecated, use graph_options and" " execution_options instead", "report_options") def set_report_options(opts, report_options=None, graph_options=None, execution_options=None): def use_report_options(): if report_options: if not isinstance(report_options, dict): raise Exception("`report_options` must be a dictionary") return report_options if not graph_options: graph_options = use_report_options() if graph_options: if not isinstance(graph_options, dict): raise Exception("`graph_options` must be a dictionary") for (option_name, value) in graph_options.items(): opt = opts.profiling.graph_options.add() opt.option = option_name opt.value = value if not execution_options: execution_options = use_report_options() if execution_options: if not isinstance(execution_options, dict): raise Exception("`execution_options` must be a dictionary") for (option_name, value) in execution_options.items(): opt = opts.profiling.execution_options.add() opt.option = option_name opt.value = value return opts def set_ipu_model_options(opts, compile_ipu_code=True): opts.ipu_model_config.compile_ipu_code = compile_ipu_code return opts @deprecation.deprecated_args( None, "Pipelining recomputation will recompute all the non-stateful operations " "when recomputation is enabled.", "allow_stateful_recompute", ) def 
set_recomputation_options(opts, allow_recompute=True, allow_stateful_recompute=None): opts.speed_size_config.allow_recompute = allow_recompute return opts def set_floating_point_behaviour_options(opts, inv=True, div0=True, oflo=True, esr=True, nanoo=True): opts.floating_point_behaviour.flags_set = True opts.floating_point_behaviour.inv = inv opts.floating_point_behaviour.div0 = div0 opts.floating_point_behaviour.oflo = oflo opts.floating_point_behaviour.esr = esr opts.floating_point_behaviour.nanoo = nanoo return opts def set_gcl_options(opts, num_io_tiles=0, gcl_options=None): opts.gcl_num_io_tiles = num_io_tiles if gcl_options: if not isinstance(gcl_options, dict): raise TypeError("`gcl_options` must be a dictionary") for (option_name, value) in gcl_options.items(): opt = opts.gcl_options.add() opt.option = option_name opt.value = value return opts def auto_select_ipus(opts, num_ipus): if opts.device_config: raise Exception("IPU devices have already been configured.") if not isinstance(num_ipus, (int, list, tuple)): raise Exception("`num_ipus` must be an integer, list or tuple.") if isinstance(num_ipus, int): dev = opts.device_config.add() dev.auto_count = num_ipus else: for n in num_ipus: dev = opts.device_config.add() dev.auto_count = n return opts def select_ipus(opts, indices): if opts.device_config: raise Exception("IPU devices have already been configured.") if not isinstance(indices, (list, tuple)): raise Exception("`indices` must be a list or tuple.") if len(set(indices)) != len(indices): raise Exception("All device indeicies in `indices` must be unique.") for i in indices: dev = opts.device_config.add() dev.cfg_index = i return opts def set_ipu_connection_type(opts, connection_type=None, ipu_version=None): connection_type = connection_type if connection_type \ else DeviceConnectionType.ALWAYS if connection_type == DeviceConnectionType.NEVER and ipu_version is None: raise Exception("`ipu_version` must be set when `connection_type` is set " "to 
`DeviceConnectionType.NEVER`") opts.device_connection_type = connection_type.value if ipu_version is not None: opts.ipu_version = ipu_version opts.has_ipu_version = True return opts def reset_ipu_seed(seed, device="/device:IPU:0", cpu_device="cpu"): g = ops.Graph() with g.as_default(): with ops.device(cpu_device): cfg_op = gen_ipu_ops.ipu_reset_seed(device, seed) with session_lib.Session(graph=g) as sess: sess.run(cfg_op) def extract_all_strings_from_event_trace(events): result = "" for e in events: evt = IpuTraceEvent.FromString(e) result = result + ("-" * 70) + "\n=> @ " + \ time.strftime('%F %T %z', time.localtime(evt.timestamp)) + ": " if evt.type == IpuTraceEvent.COMPILE_BEGIN: evt_str = "Compile begin: " + \ evt.compile_begin.module_name.decode('utf-8') + "\n" elif evt.type == IpuTraceEvent.COMPILE_END: evt_str = "Compile end: " + \ evt.compile_end.module_name.decode('utf-8') + "\n" + \ "Duration: " + str(evt.compile_end.duration) + " us\n" + \ evt.compile_end.compilation_report.decode('utf-8') elif evt.type == IpuTraceEvent.HOST_TO_DEVICE_TRANSFER: evt_str = "Host->Device\n" + \ evt.data_transfer.data_transfer.decode('utf-8') + "\n" elif evt.type == IpuTraceEvent.DEVICE_TO_HOST_TRANSFER: evt_str = "Device->Host\n" + \ evt.data_transfer.data_transfer.decode('utf-8') + "\n" elif evt.type == IpuTraceEvent.LOAD_ENGINE: evt_str = "Load engine: " + \ evt.load_engine.module_name.decode('utf-8') + "\n" elif evt.type == IpuTraceEvent.EXECUTE: evt_str = "Execute: " + \ evt.execute.module_name.decode('utf-8') + "\n" + \ evt.execute.execution_report.decode('utf-8') else: evt_str = "Unknown event" result = result + evt_str + '\n' return result def extract_all_types_from_event_trace(events): result = [] for e in events: evt = IpuTraceEvent.FromString(e) result += [evt.type] return result def extract_all_events(events): result = [] for e in events: evt = IpuTraceEvent.FromString(e) result += [evt] return result def extract_compile_reports(events): result = [] for e in 
events: evt = IpuTraceEvent.FromString(e) if evt.type == IpuTraceEvent.COMPILE_END: try: module = evt.compile_end.module_name.decode('utf-8') rep = evt.compile_end.compilation_report.decode('utf-8') if rep: result += [(module, rep)] except UnicodeDecodeError: pass return result def extract_poplar_serialized_graphs(events): result = [] for e in events: evt = IpuTraceEvent.FromString(e) if evt.type == IpuTraceEvent.COMPILE_END: try: rep = evt.compile_end.poplar_graph.decode('utf-8') except UnicodeDecodeError: rep = evt.compile_end.poplar_graph module = evt.compile_end.module_name.decode('utf-8') if rep: result += [(module, rep)] return result def extract_execute_reports(events): result = [] for e in events: evt = IpuTraceEvent.FromString(e) if evt.type == IpuTraceEvent.EXECUTE: try: module = evt.execute.module_name.decode('utf-8') rep = evt.execute.execution_report.decode('utf-8') if rep: result += [(module, rep)] except UnicodeDecodeError: pass return result def move_variable_initialization_to_cpu(graph=None): if not graph: graph = ops.get_default_graph() with ops.device("/device:CPU:0"): control_flow_ops.no_op(name="cpu") variables = [] for v in graph.get_collection('variables'): if not isinstance(v, values.DistributedVariable): variables.append(v) def _uses_resource(op): return any(input_tensor.dtype == 'resource' for input_tensor in op.inputs) init_ops = [] dep_ops = [v.initializer.inputs[1].op for v in variables] visited = set() while dep_ops: op = dep_ops.pop() if op not in visited and not _uses_resource(op): visited.add(op) init_ops += [op] dep_ops += [x.op for x in op.inputs] for op in init_ops: op._set_device('/device:CPU:0') op._set_attr( '_class', attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue( s=[b'loc:@cpu']))) op._set_attr('_XlaCompile', attr_value_pb2.AttrValue(b=False)) op._set_attr('_XlaScope', attr_value_pb2.AttrValue(s=b'')) return def export_dataset_to_file(dataset_or_infeed, output_filename, num_elements, feed_name="", 
apply_options=True): assert isinstance(dataset_or_infeed, (dataset_ops.Dataset, ipu_infeed_queue.IPUInfeedQueue)) if isinstance(dataset_or_infeed, ipu_infeed_queue.IPUInfeedQueue): dataset = dataset_or_infeed._dataset feed_name = feed_name or dataset_or_infeed._id else: dataset = dataset_or_infeed if apply_options: dataset = dataset._apply_options() extractor = dataset_extractor.dataset_extractor(dataset, num_elements, output_filename, feed_name) with ops.device("cpu"), session_lib.Session() as sess: sess.run(extractor) def export_inputs_to_file(inputs, output_filename, feed_dict): with ops.device("cpu"), session_lib.Session() as sess: sess.run(dataset_extractor.export_variables(inputs, output_filename), feed_dict)
true
true
f72ca776ebc6d065e702f3c8cb4da790bde5d2ce
3,892
py
Python
tests/data/residues/GLN.py
uw-ipd/privileged_residues
78078c22ba537651a1b6bd1404c05246ab73a3e3
[ "Apache-2.0" ]
null
null
null
tests/data/residues/GLN.py
uw-ipd/privileged_residues
78078c22ba537651a1b6bd1404c05246ab73a3e3
[ "Apache-2.0" ]
20
2018-08-13T22:50:46.000Z
2018-11-03T22:29:03.000Z
tests/data/residues/GLN.py
uw-ipd/privileged_residues
78078c22ba537651a1b6bd1404c05246ab73a3e3
[ "Apache-2.0" ]
1
2018-08-25T06:03:43.000Z
2018-08-25T06:03:43.000Z
from tests.util import pick_ray from pyrosetta import Pose from pyrosetta.rosetta.core.import_pose import pose_from_pdbstring name = "GLN" contents = """ ATOM 1 N ALA A 1 0.000 0.000 0.000 1.00 0.00 N ATOM 2 CA ALA A 1 1.458 0.000 0.000 1.00 0.00 C ATOM 3 C ALA A 1 2.009 1.420 0.000 1.00 0.00 C ATOM 4 O ALA A 1 1.251 2.390 0.000 1.00 0.00 O ATOM 5 CB ALA A 1 1.988 -0.773 -1.199 1.00 0.00 C ATOM 6 1H ALA A 1 -0.334 -0.943 -0.000 1.00 0.00 H ATOM 7 2H ALA A 1 -0.334 0.471 0.816 1.00 0.00 H ATOM 8 3H ALA A 1 -0.334 0.471 -0.816 1.00 0.00 H ATOM 9 HA ALA A 1 1.797 -0.490 0.913 1.00 0.00 H ATOM 10 1HB ALA A 1 3.078 -0.764 -1.185 1.00 0.00 H ATOM 11 2HB ALA A 1 1.633 -1.802 -1.154 1.00 0.00 H ATOM 12 3HB ALA A 1 1.633 -0.307 -2.117 1.00 0.00 H ATOM 13 N GLN A 2 3.332 1.536 0.000 1.00 0.00 N ATOM 14 CA GLN A 2 3.988 2.839 0.000 1.00 0.00 C ATOM 15 C GLN A 2 5.504 2.693 0.000 1.00 0.00 C ATOM 16 O GLN A 2 6.030 1.580 0.000 1.00 0.00 O ATOM 17 CB GLN A 2 3.542 3.663 1.211 1.00 0.00 C ATOM 18 CG GLN A 2 2.545 2.955 2.113 1.00 0.00 C ATOM 19 CD GLN A 2 2.200 1.564 1.615 1.00 0.00 C ATOM 20 OE1 GLN A 2 2.707 1.116 0.583 1.00 0.00 O ATOM 21 NE2 GLN A 2 1.333 0.873 2.346 1.00 0.00 N ATOM 22 H GLN A 2 3.899 0.700 0.000 1.00 0.00 H ATOM 23 HA GLN A 2 3.702 3.361 -0.913 1.00 0.00 H ATOM 24 1HB GLN A 2 4.412 3.926 1.812 1.00 0.00 H ATOM 25 2HB GLN A 2 3.086 4.592 0.870 1.00 0.00 H ATOM 26 1HG GLN A 2 2.975 2.864 3.111 1.00 0.00 H ATOM 27 2HG GLN A 2 1.627 3.541 2.153 1.00 0.00 H ATOM 28 1HE2 GLN A 2 1.066 -0.050 2.067 1.00 0.00 H ATOM 29 2HE2 GLN A 2 0.945 1.275 3.176 1.00 0.00 H ATOM 30 N ALA A 3 6.202 3.823 0.000 1.00 0.00 N ATOM 31 CA ALA A 3 7.660 3.823 0.000 1.00 0.00 C ATOM 32 C ALA A 3 8.211 5.243 0.000 1.00 0.00 C ATOM 33 O ALA A 3 8.260 5.868 1.023 1.00 0.00 O ATOM 34 OXT ALA A 3 8.596 5.737 -1.023 1.00 0.00 O ATOM 35 CB ALA A 3 8.190 3.050 -1.199 1.00 0.00 C ATOM 36 H ALA A 3 5.710 4.705 -0.000 1.00 0.00 H ATOM 37 HA ALA A 3 7.999 3.333 0.913 1.00 0.00 H ATOM 38 1HB ALA A 
3 9.280 3.059 -1.185 1.00 0.00 H ATOM 39 2HB ALA A 3 7.835 2.021 -1.154 1.00 0.00 H ATOM 40 3HB ALA A 3 7.835 3.516 -2.117 1.00 0.00 H TER """ pose = Pose() pose_from_pdbstring(pose, contents) n_rays = { 1: pick_ray(pose.residue(1), "1H", "N"), 2: pick_ray(pose.residue(2), "H", "N"), 3: pick_ray(pose.residue(3), "H", "N") } c_rays = { 1: pick_ray(pose.residue(1), "O", "C"), 2: pick_ray(pose.residue(2), "O", "C"), 3: pick_ray(pose.residue(3), "O", "C") } sc_donor = { 2: [ pick_ray(pose.residue(2), "1HE2", "NE2"), pick_ray(pose.residue(2), "2HE2", "NE2") ] } sc_acceptor = { 2: [ pick_ray(pose.residue(2), "OE1", "CD") ] } cat_pi = [ ]
48.049383
78
0.43705
from tests.util import pick_ray from pyrosetta import Pose from pyrosetta.rosetta.core.import_pose import pose_from_pdbstring name = "GLN" contents = """ ATOM 1 N ALA A 1 0.000 0.000 0.000 1.00 0.00 N ATOM 2 CA ALA A 1 1.458 0.000 0.000 1.00 0.00 C ATOM 3 C ALA A 1 2.009 1.420 0.000 1.00 0.00 C ATOM 4 O ALA A 1 1.251 2.390 0.000 1.00 0.00 O ATOM 5 CB ALA A 1 1.988 -0.773 -1.199 1.00 0.00 C ATOM 6 1H ALA A 1 -0.334 -0.943 -0.000 1.00 0.00 H ATOM 7 2H ALA A 1 -0.334 0.471 0.816 1.00 0.00 H ATOM 8 3H ALA A 1 -0.334 0.471 -0.816 1.00 0.00 H ATOM 9 HA ALA A 1 1.797 -0.490 0.913 1.00 0.00 H ATOM 10 1HB ALA A 1 3.078 -0.764 -1.185 1.00 0.00 H ATOM 11 2HB ALA A 1 1.633 -1.802 -1.154 1.00 0.00 H ATOM 12 3HB ALA A 1 1.633 -0.307 -2.117 1.00 0.00 H ATOM 13 N GLN A 2 3.332 1.536 0.000 1.00 0.00 N ATOM 14 CA GLN A 2 3.988 2.839 0.000 1.00 0.00 C ATOM 15 C GLN A 2 5.504 2.693 0.000 1.00 0.00 C ATOM 16 O GLN A 2 6.030 1.580 0.000 1.00 0.00 O ATOM 17 CB GLN A 2 3.542 3.663 1.211 1.00 0.00 C ATOM 18 CG GLN A 2 2.545 2.955 2.113 1.00 0.00 C ATOM 19 CD GLN A 2 2.200 1.564 1.615 1.00 0.00 C ATOM 20 OE1 GLN A 2 2.707 1.116 0.583 1.00 0.00 O ATOM 21 NE2 GLN A 2 1.333 0.873 2.346 1.00 0.00 N ATOM 22 H GLN A 2 3.899 0.700 0.000 1.00 0.00 H ATOM 23 HA GLN A 2 3.702 3.361 -0.913 1.00 0.00 H ATOM 24 1HB GLN A 2 4.412 3.926 1.812 1.00 0.00 H ATOM 25 2HB GLN A 2 3.086 4.592 0.870 1.00 0.00 H ATOM 26 1HG GLN A 2 2.975 2.864 3.111 1.00 0.00 H ATOM 27 2HG GLN A 2 1.627 3.541 2.153 1.00 0.00 H ATOM 28 1HE2 GLN A 2 1.066 -0.050 2.067 1.00 0.00 H ATOM 29 2HE2 GLN A 2 0.945 1.275 3.176 1.00 0.00 H ATOM 30 N ALA A 3 6.202 3.823 0.000 1.00 0.00 N ATOM 31 CA ALA A 3 7.660 3.823 0.000 1.00 0.00 C ATOM 32 C ALA A 3 8.211 5.243 0.000 1.00 0.00 C ATOM 33 O ALA A 3 8.260 5.868 1.023 1.00 0.00 O ATOM 34 OXT ALA A 3 8.596 5.737 -1.023 1.00 0.00 O ATOM 35 CB ALA A 3 8.190 3.050 -1.199 1.00 0.00 C ATOM 36 H ALA A 3 5.710 4.705 -0.000 1.00 0.00 H ATOM 37 HA ALA A 3 7.999 3.333 0.913 1.00 0.00 H ATOM 38 1HB ALA A 
3 9.280 3.059 -1.185 1.00 0.00 H ATOM 39 2HB ALA A 3 7.835 2.021 -1.154 1.00 0.00 H ATOM 40 3HB ALA A 3 7.835 3.516 -2.117 1.00 0.00 H TER """ pose = Pose() pose_from_pdbstring(pose, contents) n_rays = { 1: pick_ray(pose.residue(1), "1H", "N"), 2: pick_ray(pose.residue(2), "H", "N"), 3: pick_ray(pose.residue(3), "H", "N") } c_rays = { 1: pick_ray(pose.residue(1), "O", "C"), 2: pick_ray(pose.residue(2), "O", "C"), 3: pick_ray(pose.residue(3), "O", "C") } sc_donor = { 2: [ pick_ray(pose.residue(2), "1HE2", "NE2"), pick_ray(pose.residue(2), "2HE2", "NE2") ] } sc_acceptor = { 2: [ pick_ray(pose.residue(2), "OE1", "CD") ] } cat_pi = [ ]
true
true
f72ca7d3d97e12ab7b405dcff314bdb6c0a78755
3,337
py
Python
examples/pointer_generator/preprocess.py
fairseq-FT/fairseq
18725499144c1bba7c151b796ba774e59d36eaa9
[ "MIT" ]
16,259
2018-05-02T02:31:30.000Z
2022-03-31T21:50:23.000Z
examples/pointer_generator/preprocess.py
fairseq-FT/fairseq
18725499144c1bba7c151b796ba774e59d36eaa9
[ "MIT" ]
3,863
2018-05-02T13:42:39.000Z
2022-03-31T19:03:32.000Z
examples/pointer_generator/preprocess.py
fairseq-FT/fairseq
18725499144c1bba7c151b796ba774e59d36eaa9
[ "MIT" ]
4,796
2018-05-02T07:55:51.000Z
2022-03-31T14:46:45.000Z
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse from itertools import zip_longest def replace_oovs(source_in, target_in, vocabulary, source_out, target_out): """Replaces out-of-vocabulary words in source and target text with <unk-N>, where N in is the position of the word in the source sequence. """ def format_unk(pos): return "<unk-{}>".format(pos) if target_in is None: target_in = [] for seq_num, (source_seq, target_seq) in enumerate( zip_longest(source_in, target_in) ): source_seq_out = [] target_seq_out = [] word_to_pos = dict() for position, token in enumerate(source_seq.strip().split()): if token in vocabulary: token_out = token else: if token in word_to_pos: oov_pos = word_to_pos[token] else: word_to_pos[token] = position oov_pos = position token_out = format_unk(oov_pos) source_seq_out.append(token_out) source_out.write(" ".join(source_seq_out) + "\n") if target_seq is not None: for token in target_seq.strip().split(): if token in word_to_pos: token_out = format_unk(word_to_pos[token]) else: token_out = token target_seq_out.append(token_out) if target_out is not None: target_out.write(" ".join(target_seq_out) + "\n") def main(): parser = argparse.ArgumentParser( description="Replaces out-of-vocabulary words in both source and target " "sequences with tokens that indicate the position of the word " "in the source sequence." 
) parser.add_argument( "--source", type=str, help="text file with source sequences", required=True ) parser.add_argument( "--target", type=str, help="text file with target sequences", default=None ) parser.add_argument("--vocab", type=str, help="vocabulary file", required=True) parser.add_argument( "--source-out", type=str, help="where to write source sequences with <unk-N> entries", required=True, ) parser.add_argument( "--target-out", type=str, help="where to write target sequences with <unk-N> entries", default=None, ) args = parser.parse_args() with open(args.vocab, encoding="utf-8") as vocab: vocabulary = vocab.read().splitlines() target_in = ( open(args.target, "r", encoding="utf-8") if args.target is not None else None ) target_out = ( open(args.target_out, "w", encoding="utf-8") if args.target_out is not None else None ) with open(args.source, "r", encoding="utf-8") as source_in, open( args.source_out, "w", encoding="utf-8" ) as source_out: replace_oovs(source_in, target_in, vocabulary, source_out, target_out) if target_in is not None: target_in.close() if target_out is not None: target_out.close() if __name__ == "__main__": main()
32.398058
85
0.605034
import argparse from itertools import zip_longest def replace_oovs(source_in, target_in, vocabulary, source_out, target_out): def format_unk(pos): return "<unk-{}>".format(pos) if target_in is None: target_in = [] for seq_num, (source_seq, target_seq) in enumerate( zip_longest(source_in, target_in) ): source_seq_out = [] target_seq_out = [] word_to_pos = dict() for position, token in enumerate(source_seq.strip().split()): if token in vocabulary: token_out = token else: if token in word_to_pos: oov_pos = word_to_pos[token] else: word_to_pos[token] = position oov_pos = position token_out = format_unk(oov_pos) source_seq_out.append(token_out) source_out.write(" ".join(source_seq_out) + "\n") if target_seq is not None: for token in target_seq.strip().split(): if token in word_to_pos: token_out = format_unk(word_to_pos[token]) else: token_out = token target_seq_out.append(token_out) if target_out is not None: target_out.write(" ".join(target_seq_out) + "\n") def main(): parser = argparse.ArgumentParser( description="Replaces out-of-vocabulary words in both source and target " "sequences with tokens that indicate the position of the word " "in the source sequence." 
) parser.add_argument( "--source", type=str, help="text file with source sequences", required=True ) parser.add_argument( "--target", type=str, help="text file with target sequences", default=None ) parser.add_argument("--vocab", type=str, help="vocabulary file", required=True) parser.add_argument( "--source-out", type=str, help="where to write source sequences with <unk-N> entries", required=True, ) parser.add_argument( "--target-out", type=str, help="where to write target sequences with <unk-N> entries", default=None, ) args = parser.parse_args() with open(args.vocab, encoding="utf-8") as vocab: vocabulary = vocab.read().splitlines() target_in = ( open(args.target, "r", encoding="utf-8") if args.target is not None else None ) target_out = ( open(args.target_out, "w", encoding="utf-8") if args.target_out is not None else None ) with open(args.source, "r", encoding="utf-8") as source_in, open( args.source_out, "w", encoding="utf-8" ) as source_out: replace_oovs(source_in, target_in, vocabulary, source_out, target_out) if target_in is not None: target_in.close() if target_out is not None: target_out.close() if __name__ == "__main__": main()
true
true
f72ca9ee9ae4957b92084a00b5624be329e8478f
349
py
Python
Capitulo_02/exercise2_4.py
thiagosouzalink/my_codes-exercices-book-curso_intensivo_de_python
841aa855a7450ad3d0ba65393ba0b6debcd6a770
[ "MIT" ]
null
null
null
Capitulo_02/exercise2_4.py
thiagosouzalink/my_codes-exercices-book-curso_intensivo_de_python
841aa855a7450ad3d0ba65393ba0b6debcd6a770
[ "MIT" ]
null
null
null
Capitulo_02/exercise2_4.py
thiagosouzalink/my_codes-exercices-book-curso_intensivo_de_python
841aa855a7450ad3d0ba65393ba0b6debcd6a770
[ "MIT" ]
null
null
null
""" 2.4 – Letras maiúsculas e minúsculas em nomes: Armazene o nome de uma pessoa em uma variável e então apresente o nome dessa pessoa em letras minúsculas, em letras maiúsculas e somente com a primeira letra maiúscula. """ nome = "José" # Minúsculas print(nome.lower()) # Maiúsculas print(nome.upper()) # Somente a primeira letra print(nome[0])
24.928571
215
0.747851
nome = "José" print(nome.lower()) print(nome.upper()) print(nome[0])
true
true
f72caa1cc50710b6f6793c4a96821b65b2e32acb
2,025
py
Python
src/routes/users.py
tombrereton/flask-api-starter-kit
2e244bfc4f5659e91fd7cd27388c37bf32baeaec
[ "MIT" ]
null
null
null
src/routes/users.py
tombrereton/flask-api-starter-kit
2e244bfc4f5659e91fd7cd27388c37bf32baeaec
[ "MIT" ]
null
null
null
src/routes/users.py
tombrereton/flask-api-starter-kit
2e244bfc4f5659e91fd7cd27388c37bf32baeaec
[ "MIT" ]
null
null
null
from http import HTTPStatus from typing import List from apifairy import body, other_responses, response from flask import Blueprint, jsonify from flask import request from src.config import DefaultConfig from src.dtos.user import UserDto from src.requests.user import CreateUserRequestSchema, CreateUserRequest, CreateManyUsersRequestSchema, \ CreateManyUsersRequest from src.responses.user import UserResponseSchema from src.services import queue_client from src.services.pascal_to_snake_serializer import JSONSerializer as ToSnakeJson from src.services.snake_to_pascal_serializer import JSONSerializer as ToPascalJson users_api = Blueprint('users', __name__) @users_api.route('users', methods=['POST']) @other_responses({ 200: 'User Created', 400: 'Request Body is Invalid' }) @body(CreateUserRequestSchema()) def post(user_request: CreateUserRequest): """Create a User.""" if request.method == 'POST': user_snake_case = ToSnakeJson.deserialize(UserDto, ToSnakeJson.serialize(user_request)) add_msg = queue_client.add_create_user_job(user_snake_case) return jsonify(add_msg), 200 @users_api.route('users/many', methods=['POST']) @other_responses({ 200: 'Users Created', 400: 'Request Body is Invalid' }) @body(CreateManyUsersRequestSchema()) def post_many(user_request: CreateManyUsersRequest): """Create a User.""" if request.method == 'POST': users_snake_case = ToSnakeJson.deserialize(List[UserDto], ToSnakeJson.serialize(user_request.Users)) users_added = [] for user in users_snake_case: add_msg = queue_client.add_create_user_job(user) users_added.append(add_msg) return jsonify(users_added), 200 @users_api.route('users/<int:id>', methods=['GET']) @response(UserResponseSchema, HTTPStatus.OK.value, "Get Users") def get_all_users(id: int): if request.method == 'GET': user = UserDto(user_name=DefaultConfig.DEFAULT_USERNAME) return ToPascalJson.serialize(user), 200
33.196721
108
0.74963
from http import HTTPStatus from typing import List from apifairy import body, other_responses, response from flask import Blueprint, jsonify from flask import request from src.config import DefaultConfig from src.dtos.user import UserDto from src.requests.user import CreateUserRequestSchema, CreateUserRequest, CreateManyUsersRequestSchema, \ CreateManyUsersRequest from src.responses.user import UserResponseSchema from src.services import queue_client from src.services.pascal_to_snake_serializer import JSONSerializer as ToSnakeJson from src.services.snake_to_pascal_serializer import JSONSerializer as ToPascalJson users_api = Blueprint('users', __name__) @users_api.route('users', methods=['POST']) @other_responses({ 200: 'User Created', 400: 'Request Body is Invalid' }) @body(CreateUserRequestSchema()) def post(user_request: CreateUserRequest): if request.method == 'POST': user_snake_case = ToSnakeJson.deserialize(UserDto, ToSnakeJson.serialize(user_request)) add_msg = queue_client.add_create_user_job(user_snake_case) return jsonify(add_msg), 200 @users_api.route('users/many', methods=['POST']) @other_responses({ 200: 'Users Created', 400: 'Request Body is Invalid' }) @body(CreateManyUsersRequestSchema()) def post_many(user_request: CreateManyUsersRequest): if request.method == 'POST': users_snake_case = ToSnakeJson.deserialize(List[UserDto], ToSnakeJson.serialize(user_request.Users)) users_added = [] for user in users_snake_case: add_msg = queue_client.add_create_user_job(user) users_added.append(add_msg) return jsonify(users_added), 200 @users_api.route('users/<int:id>', methods=['GET']) @response(UserResponseSchema, HTTPStatus.OK.value, "Get Users") def get_all_users(id: int): if request.method == 'GET': user = UserDto(user_name=DefaultConfig.DEFAULT_USERNAME) return ToPascalJson.serialize(user), 200
true
true
f72caa4b74837bd62d61442cc130cfd18f4a2cb9
602
py
Python
src/command_modules/azure-cli-find/azure/cli/command_modules/find/_help.py
v-Ajnava/azure-cli
febec631d79bfca151e84267b5b409594bad598e
[ "MIT" ]
null
null
null
src/command_modules/azure-cli-find/azure/cli/command_modules/find/_help.py
v-Ajnava/azure-cli
febec631d79bfca151e84267b5b409594bad598e
[ "MIT" ]
3
2021-03-26T00:48:20.000Z
2022-03-29T22:05:39.000Z
src/command_modules/azure-cli-find/azure/cli/command_modules/find/_help.py
v-Ajnava/azure-cli
febec631d79bfca151e84267b5b409594bad598e
[ "MIT" ]
1
2017-12-28T04:51:44.000Z
2017-12-28T04:51:44.000Z
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from azure.cli.core.help_files import helps helps['find'] = """ type: command short-summary: Find Azure CLI commands. examples: - name: Search for commands containing 'vm' or 'secret' text: > az find -q vm secret """
37.625
94
0.465116
from azure.cli.core.help_files import helps helps['find'] = """ type: command short-summary: Find Azure CLI commands. examples: - name: Search for commands containing 'vm' or 'secret' text: > az find -q vm secret """
true
true
f72caa944d2ed0ef2d12c5b7459dddcc53fc9b34
12,347
py
Python
EPro-PnP-Det/epropnp_det/core/bbox_3d/misc.py
Lakonik/EPro-PnP
931df847190ce10eddd1dc3e3168ce1a2f295ffa
[ "Apache-2.0" ]
19
2022-03-21T10:22:24.000Z
2022-03-30T15:43:46.000Z
EPro-PnP-Det/epropnp_det/core/bbox_3d/misc.py
Lakonik/EPro-PnP
931df847190ce10eddd1dc3e3168ce1a2f295ffa
[ "Apache-2.0" ]
null
null
null
EPro-PnP-Det/epropnp_det/core/bbox_3d/misc.py
Lakonik/EPro-PnP
931df847190ce10eddd1dc3e3168ce1a2f295ffa
[ "Apache-2.0" ]
3
2022-03-26T08:08:24.000Z
2022-03-30T11:17:11.000Z
""" Copyright (C) 2010-2022 Alibaba Group Holding Limited. This file is modified from https://github.com/tjiiv-cprg/MonoRUn """ import math import numpy as np import torch from pytorch3d.structures.meshes import Meshes from epropnp_det.ops.iou3d.iou3d_utils import nms_gpu def gen_unit_noc(num_pts, device=None): indices = torch.arange(0, num_pts, dtype=torch.float32, device=device) + 0.5 phi = torch.arccos(1 - 2 * indices / num_pts) theta = math.pi * (1 + 5**0.5) * indices xyz = torch.stack( (torch.cos(theta) * torch.sin(phi), torch.sin(theta) * torch.sin(phi), torch.cos(phi)), dim=-1) return xyz def project_to_image_r_mat( x3d, r_mat, t_vec, cam_intrinsic, img_shapes, z_min=0.5, allowed_border=200, return_z=False, return_clip_mask=False): """ Args: x3d (torch.Tensor): shape (*, num_points, 3) r_mat (torch.Tensor): shape (*, 3, 3) t_vec (torch.Tensor): shape (*, 3) in format [x, y, z] cam_intrinsic (torch.Tensor): shape (*, 3, 3) img_shapes (torch.Tensor): shape (*, 2) Returns: Tensor: x2d_proj, shape (*, num_points, 2) """ proj_r_mats = cam_intrinsic @ r_mat # (*, 3, 3) proj_t_vecs = cam_intrinsic @ t_vec.unsqueeze(-1) # (*, 3, 1) # (*, num_points, 3) = ((*, 3, 3) @ (*, 3, num_points) + (*, 3, 1)).T xyz_proj = (proj_r_mats @ x3d.transpose(-1, -2) + proj_t_vecs).transpose(-1, -2) z_proj = xyz_proj[..., 2:] # (*, num_points, 1) if return_clip_mask: z_clip_mask = z_proj < z_min z_proj = z_proj.clamp(min=z_min) x2d_proj = xyz_proj[..., :2] / z_proj # (*, num_points, 2) # clip to border x2d_min = -allowed_border - 0.5 # Number x2d_max = img_shapes[..., None, [1, 0]] + (allowed_border - 0.5) # (*, 1, 2) if return_clip_mask: x2d_clip_mask = (x2d_proj < x2d_min) | (x2d_proj > x2d_max) clip_mask = z_clip_mask.squeeze(-1) | x2d_clip_mask.any(-1) # (*, num_points) x2d_proj = torch.min(x2d_proj.clamp(min=x2d_min), x2d_max) if not return_z: if not return_clip_mask: return x2d_proj else: return x2d_proj, clip_mask else: if not return_clip_mask: return x2d_proj, z_proj else: 
return x2d_proj, z_proj, clip_mask def project_to_image( x3d, pose, cam_intrinsic, img_shapes, z_min=0.5, allowed_border=200, return_z=False, return_clip_mask=False): """ Args: x3d (torch.Tensor): shape (*, num_points, 3) pose (torch.Tensor): shape (*, 4) in format [x, y, z, yaw] cam_intrinsic (torch.Tensor): shape (*, 3, 3) img_shapes (torch.Tensor): shape (*, 2) Returns: Tensor: x2d_proj, shape (*, num_points, 2) """ r_mat = yaw_to_rot_mat(pose[..., 3]) t_vec = pose[..., :3] return project_to_image_r_mat(x3d, r_mat, t_vec, cam_intrinsic, img_shapes, z_min, allowed_border, return_z, return_clip_mask) def yaw_to_rot_mat(yaw): """ Args: yaw: (*) Returns: rot_mats: (*, 3, 3) """ if isinstance(yaw, torch.Tensor): pkg = torch device_kwarg = dict(device=yaw.device) else: pkg = np device_kwarg = dict() sin_yaw = pkg.sin(yaw) cos_yaw = pkg.cos(yaw) # [[ cos_yaw, 0, sin_yaw], # [ 0, 1, 0], # [-sin_yaw, 0, cos_yaw]] rot_mats = pkg.zeros(yaw.shape + (3, 3), dtype=pkg.float32, **device_kwarg) rot_mats[..., 0, 0] = cos_yaw rot_mats[..., 2, 2] = cos_yaw rot_mats[..., 0, 2] = sin_yaw rot_mats[..., 2, 0] = -sin_yaw rot_mats[..., 1, 1] = 1 return rot_mats def rot_mat_to_yaw(rot_mat): """ Args: rot_mat: (*, 3, 3) Returns: yaw: (*) """ if isinstance(rot_mat, torch.Tensor): atan2 = torch.atan2 else: atan2 = np.arctan2 yaw = atan2(rot_mat[..., 0, 2] - rot_mat[..., 2, 0], rot_mat[..., 0, 0] + rot_mat[..., 2, 2]) return yaw def box_mesh(): return Meshes( verts=[torch.tensor([[-1, -1, 1], [ 1, -1, 1], [-1, 1, 1], [ 1, 1, 1], [-1, -1, -1], [ 1, -1, -1], [-1, 1, -1], [ 1, 1, -1]], dtype=torch.float32)], faces=[torch.tensor([[0, 1, 2], [1, 3, 2], [2, 3, 7], [2, 7, 6], [1, 7, 3], [1, 5, 7], [6, 7, 4], [7, 5, 4], [0, 4, 1], [1, 4, 5], [2, 6, 4], [0, 2, 4]], dtype=torch.int)]) def compute_box_3d(bbox_3d): """ Args: bbox_3d: (*, 7) Returns: corners: (*, 8, 3) edge_corner_idx: (12, 2) """ bs = bbox_3d.shape[:-1] rotation_matrix = yaw_to_rot_mat(bbox_3d[..., 6]) # (*bs, 3, 3) edge_corner_idx = 
np.array([[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4], [0, 4], [1, 5], [2, 6], [3, 7]]) corners = np.array([[ 0.5, 0.5, 0.5], [ 0.5, 0.5, -0.5], [-0.5, 0.5, -0.5], [-0.5, 0.5, 0.5], [ 0.5, -0.5, 0.5], [ 0.5, -0.5, -0.5], [-0.5, -0.5, -0.5], [-0.5, -0.5, 0.5]], dtype=np.float32) if isinstance(bbox_3d, torch.Tensor): edge_corner_idx = torch.from_numpy(edge_corner_idx).to(device=bbox_3d.device) corners = torch.from_numpy(corners).to(device=bbox_3d.device) corners = corners * bbox_3d[..., None, :3] # (*bs, 8, 3) corners = (rotation_matrix[..., None, :, :] @ corners[..., None]).reshape(*bs, 8, 3) \ + bbox_3d[..., None, 3:6] return corners, edge_corner_idx def edge_intersection(corners, edge_corner_idx, clip_axis, clip_val, op, edge_valid_mask=None): """ Args: corners: (bs, 8, 3/2) edge_corner_idx: (12, 2) clip_val: (bs, ) edge_valid_mask: (bs, 12) """ if op == 'greater': op = torch.greater elif op == 'less': op = torch.less if edge_valid_mask is None: edge_valid_mask = corners.new_ones( (corners.size(0), edge_corner_idx.size(0)), dtype=torch.bool) corners_inside = op(corners[..., clip_axis], clip_val[:, None]) # (bs, 8) # compute z intersection edges_0_inside = corners_inside[:, edge_corner_idx[:, 0]] # (bs, 12) edges_1_inside = corners_inside[:, edge_corner_idx[:, 1]] # (bs, 12) edges_clipped = (edges_0_inside ^ edges_1_inside) & edge_valid_mask # (bs, 12) edges_clipped_idx = edges_clipped.nonzero() # (num_nonzero, 2) in [bs_ind, edge_ind] if edges_clipped_idx.shape[0] > 0: edge_corner_idx_to_clip = edge_corner_idx[edges_clipped_idx[:, 1], :] # (num_nonzero, 2) edges_0 = corners[edges_clipped_idx[:, 0], edge_corner_idx_to_clip[:, 0], :] # (num_nonzero, 3) edges_1 = corners[edges_clipped_idx[:, 0], edge_corner_idx_to_clip[:, 1], :] # (num_nonzero, 3) axval0 = edges_0[:, clip_axis] # (num_nonzero, ) axval1 = edges_1[:, clip_axis] clip_val_ = clip_val[edges_clipped_idx[:, 0]] weight_0 = axval1 - clip_val_ # (num_nonzero, ) weight_1 = clip_val_ - axval0 
intersection = (edges_0 * weight_0[:, None] + edges_1 * weight_1[:, None] ) * (1 / (axval1 - axval0)).clamp(min=-1e6, max=1e6)[:, None] # (num_nonzero, 3) clip_idx = torch.where(op(axval0, clip_val_), edge_corner_idx_to_clip[:, 1], edge_corner_idx_to_clip[:, 0]) # (num_nonzero, ) corners[edges_clipped_idx[:, 0], clip_idx, :] = intersection # replace clipped corners with intersection corners_inside[edges_clipped_idx[:, 0], clip_idx] = True edge_valid_mask &= corners_inside[:, edge_corner_idx[:, 0]] & corners_inside[:, edge_corner_idx[:, 1]] else: edge_valid_mask &= edges_0_inside & edges_1_inside return corners, corners_inside, edge_valid_mask def bboxes_3d_to_2d(bbox_3d, cam_intrinsic, imsize, z_clip=0.1, min_size=4.0, clip=False): """ Args: bbox_3d: (bs, 7) cam_intrinsic: (bs, 3, 3) imsize: (bs, 2) in [h, w] """ assert bbox_3d.dim() == 2 bs = bbox_3d.size(0) if bs > 0: # (bs, 8, 3), (12, 2) corners, edge_corner_idx = compute_box_3d(bbox_3d) corners, in_front, edge_valid_mask = edge_intersection( corners, edge_corner_idx, 2, corners.new_tensor([z_clip]).expand(bs), 'greater') pts_2d = corners @ cam_intrinsic.transpose(-1, -2) pts_2d = pts_2d[..., :2] / pts_2d[..., 2:].clamp(min=z_clip) + 0.5 # (bs, 8, 2) in_canvas = in_front if clip: pts_2d, in_canvas_x0, edge_valid_mask = edge_intersection( pts_2d, edge_corner_idx, 0, corners.new_tensor([0]).expand(bs), 'greater', edge_valid_mask) pts_2d, in_canvas_y0, edge_valid_mask = edge_intersection( pts_2d, edge_corner_idx, 1, corners.new_tensor([0]).expand(bs), 'greater', edge_valid_mask) pts_2d, in_canvas_x1, edge_valid_mask = edge_intersection( pts_2d, edge_corner_idx, 0, imsize[:, 1], 'less', edge_valid_mask) pts_2d, in_canvas_y1, edge_valid_mask = edge_intersection( pts_2d, edge_corner_idx, 1, imsize[:, 0], 'less', edge_valid_mask) in_canvas = in_canvas & in_canvas_x0 & in_canvas_x1 & in_canvas_y0 & in_canvas_y1 # (bs, 8) not_in_canvas = ~in_canvas pts_2d[not_in_canvas] = imsize[:, None, [1, 0]].expand(-1, 8, 
-1)[not_in_canvas] x0y0 = pts_2d.min(dim=1)[0].clamp(min=0) # (bs, 2) pts_2d[not_in_canvas] = 0 x1y1 = torch.minimum(pts_2d.max(dim=1)[0], imsize[:, [1, 0]]) bbox = torch.cat((x0y0, x1y1), dim=1) # (bs, 4) bbox_valid_mask = (x1y1 - x0y0).min(dim=1)[0] >= min_size # (bs, ) else: bbox = bbox_3d.new_empty((0, 4)) bbox_valid_mask = bbox_3d.new_empty((0, ), dtype=torch.bool) return bbox, bbox_valid_mask def xywhr2xyxyr(boxes_xywhr): """Convert a rotated boxes in XYWHR format to XYXYR format. Args: boxes_xywhr (torch.Tensor): Rotated boxes in XYWHR format. Returns: torch.Tensor: Converted boxes in XYXYR format. """ boxes = torch.zeros_like(boxes_xywhr) half_w = boxes_xywhr[:, 2] / 2 # l in bbox_3d half_h = boxes_xywhr[:, 3] / 2 # w in bbox_3d # x in cam coord boxes[:, 0] = boxes_xywhr[:, 0] - half_w # z in cam coord, mirrored_direction boxes[:, 1] = boxes_xywhr[:, 1] - half_h boxes[:, 2] = boxes_xywhr[:, 0] + half_w boxes[:, 3] = boxes_xywhr[:, 1] + half_h boxes[:, 4] = boxes_xywhr[:, 4] return boxes def batched_bev_nms(bbox_3d, batch_inds, nms_thr=0.25): """ Args: bbox_3d (Tensor): tensor shape (N, 8+), in format [l, h, w, x, y, z, ry, score, ind, *] batch_inds (Tensor): tensor shape (N, ) nms_thr (float) Returns: Tuple: bbox_3d_out (Tensor) keep_inds (Tensor) """ n = bbox_3d.size(0) if n > 1: boxes_for_nms = xywhr2xyxyr( bbox_3d[:, [3, 5, 0, 2, 6]]) offset_unit = (boxes_for_nms[:, :4].max() - boxes_for_nms[:, :4].min()) * 2 boxes_for_nms[:, :4] = boxes_for_nms[:, :4] + (offset_unit * batch_inds)[:, None] keep_inds = nms_gpu( boxes_for_nms, bbox_3d[:, 7], nms_thr) else: keep_inds = bbox_3d.new_zeros(0, dtype=torch.int64) bbox_3d_out = bbox_3d[keep_inds] return bbox_3d_out, keep_inds
37.990769
113
0.53268
import math import numpy as np import torch from pytorch3d.structures.meshes import Meshes from epropnp_det.ops.iou3d.iou3d_utils import nms_gpu def gen_unit_noc(num_pts, device=None): indices = torch.arange(0, num_pts, dtype=torch.float32, device=device) + 0.5 phi = torch.arccos(1 - 2 * indices / num_pts) theta = math.pi * (1 + 5**0.5) * indices xyz = torch.stack( (torch.cos(theta) * torch.sin(phi), torch.sin(theta) * torch.sin(phi), torch.cos(phi)), dim=-1) return xyz def project_to_image_r_mat( x3d, r_mat, t_vec, cam_intrinsic, img_shapes, z_min=0.5, allowed_border=200, return_z=False, return_clip_mask=False): proj_r_mats = cam_intrinsic @ r_mat proj_t_vecs = cam_intrinsic @ t_vec.unsqueeze(-1) xyz_proj = (proj_r_mats @ x3d.transpose(-1, -2) + proj_t_vecs).transpose(-1, -2) z_proj = xyz_proj[..., 2:] if return_clip_mask: z_clip_mask = z_proj < z_min z_proj = z_proj.clamp(min=z_min) x2d_proj = xyz_proj[..., :2] / z_proj x2d_min = -allowed_border - 0.5 x2d_max = img_shapes[..., None, [1, 0]] + (allowed_border - 0.5) if return_clip_mask: x2d_clip_mask = (x2d_proj < x2d_min) | (x2d_proj > x2d_max) clip_mask = z_clip_mask.squeeze(-1) | x2d_clip_mask.any(-1) x2d_proj = torch.min(x2d_proj.clamp(min=x2d_min), x2d_max) if not return_z: if not return_clip_mask: return x2d_proj else: return x2d_proj, clip_mask else: if not return_clip_mask: return x2d_proj, z_proj else: return x2d_proj, z_proj, clip_mask def project_to_image( x3d, pose, cam_intrinsic, img_shapes, z_min=0.5, allowed_border=200, return_z=False, return_clip_mask=False): r_mat = yaw_to_rot_mat(pose[..., 3]) t_vec = pose[..., :3] return project_to_image_r_mat(x3d, r_mat, t_vec, cam_intrinsic, img_shapes, z_min, allowed_border, return_z, return_clip_mask) def yaw_to_rot_mat(yaw): if isinstance(yaw, torch.Tensor): pkg = torch device_kwarg = dict(device=yaw.device) else: pkg = np device_kwarg = dict() sin_yaw = pkg.sin(yaw) cos_yaw = pkg.cos(yaw) rot_mats = pkg.zeros(yaw.shape + (3, 3), dtype=pkg.float32, 
**device_kwarg) rot_mats[..., 0, 0] = cos_yaw rot_mats[..., 2, 2] = cos_yaw rot_mats[..., 0, 2] = sin_yaw rot_mats[..., 2, 0] = -sin_yaw rot_mats[..., 1, 1] = 1 return rot_mats def rot_mat_to_yaw(rot_mat): if isinstance(rot_mat, torch.Tensor): atan2 = torch.atan2 else: atan2 = np.arctan2 yaw = atan2(rot_mat[..., 0, 2] - rot_mat[..., 2, 0], rot_mat[..., 0, 0] + rot_mat[..., 2, 2]) return yaw def box_mesh(): return Meshes( verts=[torch.tensor([[-1, -1, 1], [ 1, -1, 1], [-1, 1, 1], [ 1, 1, 1], [-1, -1, -1], [ 1, -1, -1], [-1, 1, -1], [ 1, 1, -1]], dtype=torch.float32)], faces=[torch.tensor([[0, 1, 2], [1, 3, 2], [2, 3, 7], [2, 7, 6], [1, 7, 3], [1, 5, 7], [6, 7, 4], [7, 5, 4], [0, 4, 1], [1, 4, 5], [2, 6, 4], [0, 2, 4]], dtype=torch.int)]) def compute_box_3d(bbox_3d): bs = bbox_3d.shape[:-1] rotation_matrix = yaw_to_rot_mat(bbox_3d[..., 6]) edge_corner_idx = np.array([[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4], [0, 4], [1, 5], [2, 6], [3, 7]]) corners = np.array([[ 0.5, 0.5, 0.5], [ 0.5, 0.5, -0.5], [-0.5, 0.5, -0.5], [-0.5, 0.5, 0.5], [ 0.5, -0.5, 0.5], [ 0.5, -0.5, -0.5], [-0.5, -0.5, -0.5], [-0.5, -0.5, 0.5]], dtype=np.float32) if isinstance(bbox_3d, torch.Tensor): edge_corner_idx = torch.from_numpy(edge_corner_idx).to(device=bbox_3d.device) corners = torch.from_numpy(corners).to(device=bbox_3d.device) corners = corners * bbox_3d[..., None, :3] corners = (rotation_matrix[..., None, :, :] @ corners[..., None]).reshape(*bs, 8, 3) \ + bbox_3d[..., None, 3:6] return corners, edge_corner_idx def edge_intersection(corners, edge_corner_idx, clip_axis, clip_val, op, edge_valid_mask=None): if op == 'greater': op = torch.greater elif op == 'less': op = torch.less if edge_valid_mask is None: edge_valid_mask = corners.new_ones( (corners.size(0), edge_corner_idx.size(0)), dtype=torch.bool) corners_inside = op(corners[..., clip_axis], clip_val[:, None]) edges_0_inside = corners_inside[:, edge_corner_idx[:, 0]] edges_1_inside = corners_inside[:, 
edge_corner_idx[:, 1]] edges_clipped = (edges_0_inside ^ edges_1_inside) & edge_valid_mask edges_clipped_idx = edges_clipped.nonzero() if edges_clipped_idx.shape[0] > 0: edge_corner_idx_to_clip = edge_corner_idx[edges_clipped_idx[:, 1], :] edges_0 = corners[edges_clipped_idx[:, 0], edge_corner_idx_to_clip[:, 0], :] edges_1 = corners[edges_clipped_idx[:, 0], edge_corner_idx_to_clip[:, 1], :] axval0 = edges_0[:, clip_axis] axval1 = edges_1[:, clip_axis] clip_val_ = clip_val[edges_clipped_idx[:, 0]] weight_0 = axval1 - clip_val_ weight_1 = clip_val_ - axval0 intersection = (edges_0 * weight_0[:, None] + edges_1 * weight_1[:, None] ) * (1 / (axval1 - axval0)).clamp(min=-1e6, max=1e6)[:, None] clip_idx = torch.where(op(axval0, clip_val_), edge_corner_idx_to_clip[:, 1], edge_corner_idx_to_clip[:, 0]) corners[edges_clipped_idx[:, 0], clip_idx, :] = intersection corners_inside[edges_clipped_idx[:, 0], clip_idx] = True edge_valid_mask &= corners_inside[:, edge_corner_idx[:, 0]] & corners_inside[:, edge_corner_idx[:, 1]] else: edge_valid_mask &= edges_0_inside & edges_1_inside return corners, corners_inside, edge_valid_mask def bboxes_3d_to_2d(bbox_3d, cam_intrinsic, imsize, z_clip=0.1, min_size=4.0, clip=False): assert bbox_3d.dim() == 2 bs = bbox_3d.size(0) if bs > 0: corners, edge_corner_idx = compute_box_3d(bbox_3d) corners, in_front, edge_valid_mask = edge_intersection( corners, edge_corner_idx, 2, corners.new_tensor([z_clip]).expand(bs), 'greater') pts_2d = corners @ cam_intrinsic.transpose(-1, -2) pts_2d = pts_2d[..., :2] / pts_2d[..., 2:].clamp(min=z_clip) + 0.5 in_canvas = in_front if clip: pts_2d, in_canvas_x0, edge_valid_mask = edge_intersection( pts_2d, edge_corner_idx, 0, corners.new_tensor([0]).expand(bs), 'greater', edge_valid_mask) pts_2d, in_canvas_y0, edge_valid_mask = edge_intersection( pts_2d, edge_corner_idx, 1, corners.new_tensor([0]).expand(bs), 'greater', edge_valid_mask) pts_2d, in_canvas_x1, edge_valid_mask = edge_intersection( pts_2d, 
edge_corner_idx, 0, imsize[:, 1], 'less', edge_valid_mask) pts_2d, in_canvas_y1, edge_valid_mask = edge_intersection( pts_2d, edge_corner_idx, 1, imsize[:, 0], 'less', edge_valid_mask) in_canvas = in_canvas & in_canvas_x0 & in_canvas_x1 & in_canvas_y0 & in_canvas_y1 not_in_canvas = ~in_canvas pts_2d[not_in_canvas] = imsize[:, None, [1, 0]].expand(-1, 8, -1)[not_in_canvas] x0y0 = pts_2d.min(dim=1)[0].clamp(min=0) pts_2d[not_in_canvas] = 0 x1y1 = torch.minimum(pts_2d.max(dim=1)[0], imsize[:, [1, 0]]) bbox = torch.cat((x0y0, x1y1), dim=1) bbox_valid_mask = (x1y1 - x0y0).min(dim=1)[0] >= min_size else: bbox = bbox_3d.new_empty((0, 4)) bbox_valid_mask = bbox_3d.new_empty((0, ), dtype=torch.bool) return bbox, bbox_valid_mask def xywhr2xyxyr(boxes_xywhr): boxes = torch.zeros_like(boxes_xywhr) half_w = boxes_xywhr[:, 2] / 2 half_h = boxes_xywhr[:, 3] / 2 boxes[:, 0] = boxes_xywhr[:, 0] - half_w boxes[:, 1] = boxes_xywhr[:, 1] - half_h boxes[:, 2] = boxes_xywhr[:, 0] + half_w boxes[:, 3] = boxes_xywhr[:, 1] + half_h boxes[:, 4] = boxes_xywhr[:, 4] return boxes def batched_bev_nms(bbox_3d, batch_inds, nms_thr=0.25): n = bbox_3d.size(0) if n > 1: boxes_for_nms = xywhr2xyxyr( bbox_3d[:, [3, 5, 0, 2, 6]]) offset_unit = (boxes_for_nms[:, :4].max() - boxes_for_nms[:, :4].min()) * 2 boxes_for_nms[:, :4] = boxes_for_nms[:, :4] + (offset_unit * batch_inds)[:, None] keep_inds = nms_gpu( boxes_for_nms, bbox_3d[:, 7], nms_thr) else: keep_inds = bbox_3d.new_zeros(0, dtype=torch.int64) bbox_3d_out = bbox_3d[keep_inds] return bbox_3d_out, keep_inds
true
true
f72cab0568521a363e71061115573b79f5eea8ff
22,874
py
Python
sdk/python/pulumi_azure_nextgen/compute/v20191201/virtual_machine.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
31
2020-09-21T09:41:01.000Z
2021-02-26T13:21:59.000Z
sdk/python/pulumi_azure_nextgen/compute/v20191201/virtual_machine.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
231
2020-09-21T09:38:45.000Z
2021-03-01T11:16:03.000Z
sdk/python/pulumi_azure_nextgen/compute/v20191201/virtual_machine.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
4
2020-09-29T14:14:59.000Z
2021-02-10T20:38:16.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs from ._enums import * from ._inputs import * __all__ = ['VirtualMachine'] class VirtualMachine(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, additional_capabilities: Optional[pulumi.Input[pulumi.InputType['AdditionalCapabilitiesArgs']]] = None, availability_set: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, billing_profile: Optional[pulumi.Input[pulumi.InputType['BillingProfileArgs']]] = None, diagnostics_profile: Optional[pulumi.Input[pulumi.InputType['DiagnosticsProfileArgs']]] = None, eviction_policy: Optional[pulumi.Input[Union[str, 'VirtualMachineEvictionPolicyTypes']]] = None, hardware_profile: Optional[pulumi.Input[pulumi.InputType['HardwareProfileArgs']]] = None, host: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, identity: Optional[pulumi.Input[pulumi.InputType['VirtualMachineIdentityArgs']]] = None, license_type: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, network_profile: Optional[pulumi.Input[pulumi.InputType['NetworkProfileArgs']]] = None, os_profile: Optional[pulumi.Input[pulumi.InputType['OSProfileArgs']]] = None, plan: Optional[pulumi.Input[pulumi.InputType['PlanArgs']]] = None, priority: Optional[pulumi.Input[Union[str, 'VirtualMachinePriorityTypes']]] = None, proximity_placement_group: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, storage_profile: Optional[pulumi.Input[pulumi.InputType['StorageProfileArgs']]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, 
virtual_machine_scale_set: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, vm_name: Optional[pulumi.Input[str]] = None, zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, __props__=None, __name__=None, __opts__=None): """ Describes a Virtual Machine. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['AdditionalCapabilitiesArgs']] additional_capabilities: Specifies additional capabilities enabled or disabled on the virtual machine. :param pulumi.Input[pulumi.InputType['SubResourceArgs']] availability_set: Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Currently, a VM can only be added to availability set at creation time. The availability set to which the VM is being added should be under the same resource group as the availability set resource. An existing VM cannot be added to an availability set. <br><br>This property cannot exist along with a non-null properties.virtualMachineScaleSet reference. :param pulumi.Input[pulumi.InputType['BillingProfileArgs']] billing_profile: Specifies the billing related details of a Azure Spot virtual machine. <br><br>Minimum api-version: 2019-03-01. 
:param pulumi.Input[pulumi.InputType['DiagnosticsProfileArgs']] diagnostics_profile: Specifies the boot diagnostic settings state. <br><br>Minimum api-version: 2015-06-15. :param pulumi.Input[Union[str, 'VirtualMachineEvictionPolicyTypes']] eviction_policy: Specifies the eviction policy for the Azure Spot virtual machine and Azure Spot scale set. <br><br>For Azure Spot virtual machines, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01. <br><br>For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2017-10-30-preview. :param pulumi.Input[pulumi.InputType['HardwareProfileArgs']] hardware_profile: Specifies the hardware settings for the virtual machine. :param pulumi.Input[pulumi.InputType['SubResourceArgs']] host: Specifies information about the dedicated host that the virtual machine resides in. <br><br>Minimum api-version: 2018-10-01. :param pulumi.Input[pulumi.InputType['VirtualMachineIdentityArgs']] identity: The identity of the virtual machine, if configured. :param pulumi.Input[str] license_type: Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system. <br><br> Possible values are: <br><br> Windows_Client <br><br> Windows_Server <br><br> If this element is included in a request for an update, the value must match the initial value. This value cannot be updated. <br><br> For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Minimum api-version: 2015-06-15 :param pulumi.Input[str] location: Resource location :param pulumi.Input[pulumi.InputType['NetworkProfileArgs']] network_profile: Specifies the network interfaces of the virtual machine. 
:param pulumi.Input[pulumi.InputType['OSProfileArgs']] os_profile: Specifies the operating system settings used while creating the virtual machine. Some of the settings cannot be changed once VM is provisioned. :param pulumi.Input[pulumi.InputType['PlanArgs']] plan: Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. :param pulumi.Input[Union[str, 'VirtualMachinePriorityTypes']] priority: Specifies the priority for the virtual machine. <br><br>Minimum api-version: 2019-03-01 :param pulumi.Input[pulumi.InputType['SubResourceArgs']] proximity_placement_group: Specifies information about the proximity placement group that the virtual machine should be assigned to. <br><br>Minimum api-version: 2018-04-01. :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[pulumi.InputType['StorageProfileArgs']] storage_profile: Specifies the storage settings for the virtual machine disks. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_machine_scale_set: Specifies information about the virtual machine scale set that the virtual machine should be assigned to. Virtual machines specified in the same virtual machine scale set are allocated to different nodes to maximize availability. Currently, a VM can only be added to virtual machine scale set at creation time. An existing VM cannot be added to a virtual machine scale set. <br><br>This property cannot exist along with a non-null properties.availabilitySet reference. 
<br><br>Minimum api‐version: 2019‐03‐01 :param pulumi.Input[str] vm_name: The name of the virtual machine. :param pulumi.Input[Sequence[pulumi.Input[str]]] zones: The virtual machine zones. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['additional_capabilities'] = additional_capabilities __props__['availability_set'] = availability_set __props__['billing_profile'] = billing_profile __props__['diagnostics_profile'] = diagnostics_profile __props__['eviction_policy'] = eviction_policy __props__['hardware_profile'] = hardware_profile __props__['host'] = host __props__['identity'] = identity __props__['license_type'] = license_type __props__['location'] = location __props__['network_profile'] = network_profile __props__['os_profile'] = os_profile __props__['plan'] = plan __props__['priority'] = priority __props__['proximity_placement_group'] = proximity_placement_group if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['storage_profile'] = storage_profile __props__['tags'] = tags __props__['virtual_machine_scale_set'] = virtual_machine_scale_set __props__['vm_name'] = vm_name __props__['zones'] = zones __props__['instance_view'] = None __props__['name'] = None __props__['provisioning_state'] = None 
__props__['resources'] = None __props__['type'] = None __props__['vm_id'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/latest:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20150615:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20160330:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20160430preview:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20170330:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20171201:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20180401:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20180601:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20181001:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20200601:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20201201:VirtualMachine")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(VirtualMachine, __self__).__init__( 'azure-nextgen:compute/v20191201:VirtualMachine', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualMachine': """ Get an existing VirtualMachine resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return VirtualMachine(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="additionalCapabilities") def additional_capabilities(self) -> pulumi.Output[Optional['outputs.AdditionalCapabilitiesResponse']]: """ Specifies additional capabilities enabled or disabled on the virtual machine. """ return pulumi.get(self, "additional_capabilities") @property @pulumi.getter(name="availabilitySet") def availability_set(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: """ Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Currently, a VM can only be added to availability set at creation time. The availability set to which the VM is being added should be under the same resource group as the availability set resource. An existing VM cannot be added to an availability set. <br><br>This property cannot exist along with a non-null properties.virtualMachineScaleSet reference. """ return pulumi.get(self, "availability_set") @property @pulumi.getter(name="billingProfile") def billing_profile(self) -> pulumi.Output[Optional['outputs.BillingProfileResponse']]: """ Specifies the billing related details of a Azure Spot virtual machine. <br><br>Minimum api-version: 2019-03-01. 
""" return pulumi.get(self, "billing_profile") @property @pulumi.getter(name="diagnosticsProfile") def diagnostics_profile(self) -> pulumi.Output[Optional['outputs.DiagnosticsProfileResponse']]: """ Specifies the boot diagnostic settings state. <br><br>Minimum api-version: 2015-06-15. """ return pulumi.get(self, "diagnostics_profile") @property @pulumi.getter(name="evictionPolicy") def eviction_policy(self) -> pulumi.Output[Optional[str]]: """ Specifies the eviction policy for the Azure Spot virtual machine and Azure Spot scale set. <br><br>For Azure Spot virtual machines, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01. <br><br>For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2017-10-30-preview. """ return pulumi.get(self, "eviction_policy") @property @pulumi.getter(name="hardwareProfile") def hardware_profile(self) -> pulumi.Output[Optional['outputs.HardwareProfileResponse']]: """ Specifies the hardware settings for the virtual machine. """ return pulumi.get(self, "hardware_profile") @property @pulumi.getter def host(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: """ Specifies information about the dedicated host that the virtual machine resides in. <br><br>Minimum api-version: 2018-10-01. """ return pulumi.get(self, "host") @property @pulumi.getter def identity(self) -> pulumi.Output[Optional['outputs.VirtualMachineIdentityResponse']]: """ The identity of the virtual machine, if configured. """ return pulumi.get(self, "identity") @property @pulumi.getter(name="instanceView") def instance_view(self) -> pulumi.Output['outputs.VirtualMachineInstanceViewResponse']: """ The virtual machine instance view. """ return pulumi.get(self, "instance_view") @property @pulumi.getter(name="licenseType") def license_type(self) -> pulumi.Output[Optional[str]]: """ Specifies that the image or disk that is being used was licensed on-premises. 
This element is only used for images that contain the Windows Server operating system. <br><br> Possible values are: <br><br> Windows_Client <br><br> Windows_Server <br><br> If this element is included in a request for an update, the value must match the initial value. This value cannot be updated. <br><br> For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Minimum api-version: 2015-06-15 """ return pulumi.get(self, "license_type") @property @pulumi.getter def location(self) -> pulumi.Output[str]: """ Resource location """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name """ return pulumi.get(self, "name") @property @pulumi.getter(name="networkProfile") def network_profile(self) -> pulumi.Output[Optional['outputs.NetworkProfileResponse']]: """ Specifies the network interfaces of the virtual machine. """ return pulumi.get(self, "network_profile") @property @pulumi.getter(name="osProfile") def os_profile(self) -> pulumi.Output[Optional['outputs.OSProfileResponse']]: """ Specifies the operating system settings used while creating the virtual machine. Some of the settings cannot be changed once VM is provisioned. """ return pulumi.get(self, "os_profile") @property @pulumi.getter def plan(self) -> pulumi.Output[Optional['outputs.PlanResponse']]: """ Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. 
""" return pulumi.get(self, "plan") @property @pulumi.getter def priority(self) -> pulumi.Output[Optional[str]]: """ Specifies the priority for the virtual machine. <br><br>Minimum api-version: 2019-03-01 """ return pulumi.get(self, "priority") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: """ The provisioning state, which only appears in the response. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="proximityPlacementGroup") def proximity_placement_group(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: """ Specifies information about the proximity placement group that the virtual machine should be assigned to. <br><br>Minimum api-version: 2018-04-01. """ return pulumi.get(self, "proximity_placement_group") @property @pulumi.getter def resources(self) -> pulumi.Output[Sequence['outputs.VirtualMachineExtensionResponse']]: """ The virtual machine child extension resources. """ return pulumi.get(self, "resources") @property @pulumi.getter(name="storageProfile") def storage_profile(self) -> pulumi.Output[Optional['outputs.StorageProfileResponse']]: """ Specifies the storage settings for the virtual machine disks. """ return pulumi.get(self, "storage_profile") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type """ return pulumi.get(self, "type") @property @pulumi.getter(name="virtualMachineScaleSet") def virtual_machine_scale_set(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: """ Specifies information about the virtual machine scale set that the virtual machine should be assigned to. Virtual machines specified in the same virtual machine scale set are allocated to different nodes to maximize availability. 
Currently, a VM can only be added to virtual machine scale set at creation time. An existing VM cannot be added to a virtual machine scale set. <br><br>This property cannot exist along with a non-null properties.availabilitySet reference. <br><br>Minimum api‐version: 2019‐03‐01 """ return pulumi.get(self, "virtual_machine_scale_set") @property @pulumi.getter(name="vmId") def vm_id(self) -> pulumi.Output[str]: """ Specifies the VM unique ID which is a 128-bits identifier that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read using platform BIOS commands. """ return pulumi.get(self, "vm_id") @property @pulumi.getter def zones(self) -> pulumi.Output[Optional[Sequence[str]]]: """ The virtual machine zones. """ return pulumi.get(self, "zones") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
65.354286
1,169
0.707135
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs from ._enums import * from ._inputs import * __all__ = ['VirtualMachine'] class VirtualMachine(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, additional_capabilities: Optional[pulumi.Input[pulumi.InputType['AdditionalCapabilitiesArgs']]] = None, availability_set: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, billing_profile: Optional[pulumi.Input[pulumi.InputType['BillingProfileArgs']]] = None, diagnostics_profile: Optional[pulumi.Input[pulumi.InputType['DiagnosticsProfileArgs']]] = None, eviction_policy: Optional[pulumi.Input[Union[str, 'VirtualMachineEvictionPolicyTypes']]] = None, hardware_profile: Optional[pulumi.Input[pulumi.InputType['HardwareProfileArgs']]] = None, host: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, identity: Optional[pulumi.Input[pulumi.InputType['VirtualMachineIdentityArgs']]] = None, license_type: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, network_profile: Optional[pulumi.Input[pulumi.InputType['NetworkProfileArgs']]] = None, os_profile: Optional[pulumi.Input[pulumi.InputType['OSProfileArgs']]] = None, plan: Optional[pulumi.Input[pulumi.InputType['PlanArgs']]] = None, priority: Optional[pulumi.Input[Union[str, 'VirtualMachinePriorityTypes']]] = None, proximity_placement_group: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, storage_profile: Optional[pulumi.Input[pulumi.InputType['StorageProfileArgs']]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_machine_scale_set: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, vm_name: Optional[pulumi.Input[str]] = None, zones: 
Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, __props__=None, __name__=None, __opts__=None): if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['additional_capabilities'] = additional_capabilities __props__['availability_set'] = availability_set __props__['billing_profile'] = billing_profile __props__['diagnostics_profile'] = diagnostics_profile __props__['eviction_policy'] = eviction_policy __props__['hardware_profile'] = hardware_profile __props__['host'] = host __props__['identity'] = identity __props__['license_type'] = license_type __props__['location'] = location __props__['network_profile'] = network_profile __props__['os_profile'] = os_profile __props__['plan'] = plan __props__['priority'] = priority __props__['proximity_placement_group'] = proximity_placement_group if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['storage_profile'] = storage_profile __props__['tags'] = tags __props__['virtual_machine_scale_set'] = virtual_machine_scale_set __props__['vm_name'] = vm_name __props__['zones'] = zones __props__['instance_view'] = None __props__['name'] = None __props__['provisioning_state'] = None __props__['resources'] = None __props__['type'] = None __props__['vm_id'] = None 
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/latest:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20150615:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20160330:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20160430preview:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20170330:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20171201:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20180401:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20180601:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20181001:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20200601:VirtualMachine"), pulumi.Alias(type_="azure-nextgen:compute/v20201201:VirtualMachine")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(VirtualMachine, __self__).__init__( 'azure-nextgen:compute/v20191201:VirtualMachine', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualMachine': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return VirtualMachine(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="additionalCapabilities") def additional_capabilities(self) -> pulumi.Output[Optional['outputs.AdditionalCapabilitiesResponse']]: return pulumi.get(self, "additional_capabilities") @property @pulumi.getter(name="availabilitySet") def availability_set(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: return pulumi.get(self, "availability_set") @property @pulumi.getter(name="billingProfile") def billing_profile(self) -> 
pulumi.Output[Optional['outputs.BillingProfileResponse']]: return pulumi.get(self, "billing_profile") @property @pulumi.getter(name="diagnosticsProfile") def diagnostics_profile(self) -> pulumi.Output[Optional['outputs.DiagnosticsProfileResponse']]: return pulumi.get(self, "diagnostics_profile") @property @pulumi.getter(name="evictionPolicy") def eviction_policy(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "eviction_policy") @property @pulumi.getter(name="hardwareProfile") def hardware_profile(self) -> pulumi.Output[Optional['outputs.HardwareProfileResponse']]: return pulumi.get(self, "hardware_profile") @property @pulumi.getter def host(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: return pulumi.get(self, "host") @property @pulumi.getter def identity(self) -> pulumi.Output[Optional['outputs.VirtualMachineIdentityResponse']]: return pulumi.get(self, "identity") @property @pulumi.getter(name="instanceView") def instance_view(self) -> pulumi.Output['outputs.VirtualMachineInstanceViewResponse']: return pulumi.get(self, "instance_view") @property @pulumi.getter(name="licenseType") def license_type(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "license_type") @property @pulumi.getter def location(self) -> pulumi.Output[str]: return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="networkProfile") def network_profile(self) -> pulumi.Output[Optional['outputs.NetworkProfileResponse']]: return pulumi.get(self, "network_profile") @property @pulumi.getter(name="osProfile") def os_profile(self) -> pulumi.Output[Optional['outputs.OSProfileResponse']]: return pulumi.get(self, "os_profile") @property @pulumi.getter def plan(self) -> pulumi.Output[Optional['outputs.PlanResponse']]: return pulumi.get(self, "plan") @property @pulumi.getter def priority(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "priority") 
@property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="proximityPlacementGroup") def proximity_placement_group(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: return pulumi.get(self, "proximity_placement_group") @property @pulumi.getter def resources(self) -> pulumi.Output[Sequence['outputs.VirtualMachineExtensionResponse']]: return pulumi.get(self, "resources") @property @pulumi.getter(name="storageProfile") def storage_profile(self) -> pulumi.Output[Optional['outputs.StorageProfileResponse']]: return pulumi.get(self, "storage_profile") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: return pulumi.get(self, "type") @property @pulumi.getter(name="virtualMachineScaleSet") def virtual_machine_scale_set(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: return pulumi.get(self, "virtual_machine_scale_set") @property @pulumi.getter(name="vmId") def vm_id(self) -> pulumi.Output[str]: return pulumi.get(self, "vm_id") @property @pulumi.getter def zones(self) -> pulumi.Output[Optional[Sequence[str]]]: return pulumi.get(self, "zones") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
true
true
f72cab35e960fe08f1ad7e2c27dda165a8cea5a9
352
py
Python
qualysapi/__init__.py
trolldbois/qualysapi
33de3cda1e1073e5c960740e38864d1f551bfd3d
[ "Apache-2.0" ]
4
2019-03-20T14:49:01.000Z
2020-06-19T19:03:54.000Z
qualysapi/__init__.py
trolldbois/qualysapi
33de3cda1e1073e5c960740e38864d1f551bfd3d
[ "Apache-2.0" ]
2
2019-02-05T16:20:44.000Z
2019-02-06T09:50:27.000Z
qualysapi/__init__.py
trolldbois/qualysapi
33de3cda1e1073e5c960740e38864d1f551bfd3d
[ "Apache-2.0" ]
1
2020-06-01T18:57:41.000Z
2020-06-01T18:57:41.000Z
# -*- coding: future_fstrings -*- # This is the version string assigned to the entire egg post # setup.py install # Ownership and Copyright Information. from __future__ import absolute_import __author__ = "Parag Baxi <parag.baxi@gmail.com>" __copyright__ = "Copyright 2011-2013, Parag Baxi" __license__ = "BSD-new" from qualysapi.util import connect
29.333333
60
0.772727
from __future__ import absolute_import __author__ = "Parag Baxi <parag.baxi@gmail.com>" __copyright__ = "Copyright 2011-2013, Parag Baxi" __license__ = "BSD-new" from qualysapi.util import connect
true
true
f72cab98f8c6d40b4cfe232aace0f320986b5a88
607
py
Python
biosimulators_utils/sedml/exceptions.py
biosimulators/Biosimulators_utils
c1363467263120bf1166da2b75e38fc7f56dc94f
[ "MIT" ]
2
2021-06-02T13:26:34.000Z
2021-12-27T23:12:47.000Z
biosimulators_utils/sedml/exceptions.py
biosimulators/Biosimulators_utils
c1363467263120bf1166da2b75e38fc7f56dc94f
[ "MIT" ]
102
2020-12-06T19:47:43.000Z
2022-03-31T12:56:17.000Z
biosimulators_utils/sedml/exceptions.py
biosimulators/Biosimulators_utils
c1363467263120bf1166da2b75e38fc7f56dc94f
[ "MIT" ]
4
2021-01-27T19:56:34.000Z
2022-02-03T21:08:20.000Z
""" Exceptions for SED-ML :Author: Jonathan Karr <karr@mssm.edu> :Date: 2021-01-12 :Copyright: 2021, Center for Reproducible Biomedical Modeling :License: MIT """ from ..exceptions import BioSimulatorsException __all__ = [ 'SedmlExecutionError', 'UnsupportedModelLanguageError', ] class SedmlExecutionError(BioSimulatorsException): """ Error that a SED document could not be executed """ pass # pragma: no cover class UnsupportedModelLanguageError(BioSimulatorsException, NotImplementedError): """ Error that a SED document could not be executed """ pass # pragma: no cover
24.28
81
0.742998
from ..exceptions import BioSimulatorsException __all__ = [ 'SedmlExecutionError', 'UnsupportedModelLanguageError', ] class SedmlExecutionError(BioSimulatorsException): pass class UnsupportedModelLanguageError(BioSimulatorsException, NotImplementedError): pass
true
true
f72cabca4a5200b7b635654d553d20ae2f30155f
3,356
py
Python
tests/bindings/test_python.py
mfkiwl/hgdb
6279b2d671b09094b7e69c592fa8f2eca3f6bacd
[ "BSD-2-Clause" ]
34
2021-01-19T21:14:06.000Z
2022-03-31T18:42:58.000Z
tests/bindings/test_python.py
mfkiwl/hgdb
6279b2d671b09094b7e69c592fa8f2eca3f6bacd
[ "BSD-2-Clause" ]
33
2021-01-12T18:50:16.000Z
2022-03-23T04:49:20.000Z
tests/bindings/test_python.py
mfkiwl/hgdb
6279b2d671b09094b7e69c592fa8f2eca3f6bacd
[ "BSD-2-Clause" ]
2
2021-03-28T06:58:46.000Z
2022-03-31T02:55:53.000Z
import sqlite3 import tempfile import hgdb import os import pytest def get_conn_cursor(db_name): conn = sqlite3.connect(db_name) c = conn.cursor() return conn, c def test_store_instance(): with tempfile.TemporaryDirectory() as temp: db_name = os.path.join(temp, "debug.db") db = hgdb.DebugSymbolTable(db_name) db.store_instance(42, "test") conn, c = get_conn_cursor(db_name) c.execute("SELECT COUNT(*) FROM instance WHERE id=?", (42,)) r = c.fetchone()[0] assert r == 1 conn.close() def test_store_breakpoint(): with tempfile.TemporaryDirectory() as temp: db_name = os.path.join(temp, "debug.db") db = hgdb.DebugSymbolTable(db_name) # no instance matching yet with pytest.raises(hgdb.db.DebugSymbolTableException) as ex: db.store_breakpoint(1, 42, "/tmp/test.py", 1) assert ex.value.args[0] db.store_instance(42, "test") db.store_breakpoint(1, 42, "/tmp/test.py", 1) conn, c = get_conn_cursor(db_name) c.execute("SELECT COUNT(*) FROM breakpoint WHERE filename=? AND line_num=?", ("/tmp/test.py", 1)) r = c.fetchone()[0] assert r == 1 conn.close() def test_store_context_variable(): with tempfile.TemporaryDirectory() as temp: db_name = os.path.join(temp, "debug.db") db = hgdb.DebugSymbolTable(db_name) # no variable matching yet with pytest.raises(hgdb.db.DebugSymbolTableException) as ex: db.store_context_variable("a", 1, 43) assert ex.value.args[0] db.store_instance(42, "test") db.store_breakpoint(1, 42, "/tmp/test.py", 1) db.store_variable(43, "value") db.store_context_variable("a", 1, 43) conn, c = get_conn_cursor(db_name) c.execute("SELECT COUNT(*) FROM context_variable WHERE breakpoint_id=?", (1, )) r = c.fetchone()[0] assert r == 1 conn.close() def test_store_generator_variable(): with tempfile.TemporaryDirectory() as temp: db_name = os.path.join(temp, "debug.db") db = hgdb.DebugSymbolTable(db_name) # no instance matching yet with pytest.raises(hgdb.db.DebugSymbolTableException) as ex: db.store_generator_variable("a", 42, 43) assert ex.value.args[0] db.store_instance(42, 
"test") db.store_breakpoint(1, 42, "/tmp/test.py", 1) db.store_variable(43, "value") db.store_generator_variable("a", 42, 43) conn, c = get_conn_cursor(db_name) c.execute("SELECT COUNT(*) FROM generator_variable WHERE instance_id=?", (42, )) r = c.fetchone()[0] assert r == 1 conn.close() def test_store_scope(): with tempfile.TemporaryDirectory() as temp: db_name = os.path.join(temp, "debug.db") db = hgdb.DebugSymbolTable(db_name) db.store_instance(42, "test") for i in range(4): db.store_breakpoint(i, 42, "/tmp/test.py", i + 1) db.store_scope(0, *[0, 1, 2, 3]) conn, c = get_conn_cursor(db_name) c.execute("SELECT breakpoints FROM scope WHERE scope=?", (0, )) r = c.fetchone()[0] assert r == " ".join([str(i) for i in range(4)]) conn.close() if __name__ == "__main__": test_store_scope()
31.660377
105
0.61025
import sqlite3 import tempfile import hgdb import os import pytest def get_conn_cursor(db_name): conn = sqlite3.connect(db_name) c = conn.cursor() return conn, c def test_store_instance(): with tempfile.TemporaryDirectory() as temp: db_name = os.path.join(temp, "debug.db") db = hgdb.DebugSymbolTable(db_name) db.store_instance(42, "test") conn, c = get_conn_cursor(db_name) c.execute("SELECT COUNT(*) FROM instance WHERE id=?", (42,)) r = c.fetchone()[0] assert r == 1 conn.close() def test_store_breakpoint(): with tempfile.TemporaryDirectory() as temp: db_name = os.path.join(temp, "debug.db") db = hgdb.DebugSymbolTable(db_name) with pytest.raises(hgdb.db.DebugSymbolTableException) as ex: db.store_breakpoint(1, 42, "/tmp/test.py", 1) assert ex.value.args[0] db.store_instance(42, "test") db.store_breakpoint(1, 42, "/tmp/test.py", 1) conn, c = get_conn_cursor(db_name) c.execute("SELECT COUNT(*) FROM breakpoint WHERE filename=? AND line_num=?", ("/tmp/test.py", 1)) r = c.fetchone()[0] assert r == 1 conn.close() def test_store_context_variable(): with tempfile.TemporaryDirectory() as temp: db_name = os.path.join(temp, "debug.db") db = hgdb.DebugSymbolTable(db_name) with pytest.raises(hgdb.db.DebugSymbolTableException) as ex: db.store_context_variable("a", 1, 43) assert ex.value.args[0] db.store_instance(42, "test") db.store_breakpoint(1, 42, "/tmp/test.py", 1) db.store_variable(43, "value") db.store_context_variable("a", 1, 43) conn, c = get_conn_cursor(db_name) c.execute("SELECT COUNT(*) FROM context_variable WHERE breakpoint_id=?", (1, )) r = c.fetchone()[0] assert r == 1 conn.close() def test_store_generator_variable(): with tempfile.TemporaryDirectory() as temp: db_name = os.path.join(temp, "debug.db") db = hgdb.DebugSymbolTable(db_name) with pytest.raises(hgdb.db.DebugSymbolTableException) as ex: db.store_generator_variable("a", 42, 43) assert ex.value.args[0] db.store_instance(42, "test") db.store_breakpoint(1, 42, "/tmp/test.py", 1) db.store_variable(43, "value") 
db.store_generator_variable("a", 42, 43) conn, c = get_conn_cursor(db_name) c.execute("SELECT COUNT(*) FROM generator_variable WHERE instance_id=?", (42, )) r = c.fetchone()[0] assert r == 1 conn.close() def test_store_scope(): with tempfile.TemporaryDirectory() as temp: db_name = os.path.join(temp, "debug.db") db = hgdb.DebugSymbolTable(db_name) db.store_instance(42, "test") for i in range(4): db.store_breakpoint(i, 42, "/tmp/test.py", i + 1) db.store_scope(0, *[0, 1, 2, 3]) conn, c = get_conn_cursor(db_name) c.execute("SELECT breakpoints FROM scope WHERE scope=?", (0, )) r = c.fetchone()[0] assert r == " ".join([str(i) for i in range(4)]) conn.close() if __name__ == "__main__": test_store_scope()
true
true
f72cac3af394de7e0476052b87a340105bd5386f
2,482
py
Python
bot.py
StarkGang/TagChecker
390191a03afc17c9003a046954586532947d10d4
[ "MIT" ]
1
2021-07-18T01:12:55.000Z
2021-07-18T01:12:55.000Z
bot.py
StarkGang/TagChecker
390191a03afc17c9003a046954586532947d10d4
[ "MIT" ]
null
null
null
bot.py
StarkGang/TagChecker
390191a03afc17c9003a046954586532947d10d4
[ "MIT" ]
null
null
null
from pyrogram import filters, Client import logging import os from pyrogram.types import ( ChatPermissions, InlineKeyboardButton, InlineKeyboardMarkup ) logging.basicConfig(level=logging.INFO) API_ID = int(os.environ.get("API_ID", 6)) API_HASH = os.environ.get("API_HASH", "eb06d4abfb49dc3eeb1aeb98ae0f581e") TOKEN = os.environ.get("TOKEN", None) TAG = os.environ.get("TAG", None) OWNER_ID = int(os.environ.get("OWNER_ID", 1704673514)) tagcheck = Client( "tagcheck", bot_token=TOKEN, api_id=API_ID, api_hash=API_HASH ) user_s = {} async def is_admin(message): user = await tagcheck.get_chat_member(message.chat.id, message.from_user.id) if user.status in ("administrator", "creator"): return True return False @tagcheck.on_message(filters.command("start") & filters.user(OWNER_ID)) async def start(_, message): await message.reply("I am Alive.") @tagcheck.on_message(filters.group) async def tag_check(_, message): if await is_admin(message): return user = message.from_user.id if TAG not in message.from_user.first_name: try: await tagcheck.restrict_chat_member( message.chat.id, user, ChatPermissions(), ) except BaseException as be: await message.reply(f"**Error:**\n`{be}`") return text = f""" **Heya {message.from_user.mention}** Please add our tag in your name to chat again in the group. 
**Tag:** `{TAG}` **Note:** __Click The Below Button For Unmuting YourSelf!__ """ await message.reply( text, reply_markup=InlineKeyboardMarkup([ [InlineKeyboardButton("Unmute Me", callback_data="unmute")] ] ) ) user_s.update({"user_id": user}) @tagcheck.on_callback_query(filters.regex("unmute")) async def unmute(client, cb): try: user = user_s["user_id"] except KeyError: await cb.answer( "Oops!\nIts looks like i lost your id from my server\nContact Admins For Unmiting", show_alert=True ) return if cb.from_user.id != user: await cb.answer("This Button is not for you!", show_alert=True) return if TAG in cb.from_user.first_name: await tagcheck.unban_chat_member(cb.message.chat.id, user) await cb.answer("Succesfully Unmuted!") await message.delete() return await cb.answer("Please add tag in your name!", show_alert=True) tagcheck.run()
26.404255
92
0.657937
from pyrogram import filters, Client import logging import os from pyrogram.types import ( ChatPermissions, InlineKeyboardButton, InlineKeyboardMarkup ) logging.basicConfig(level=logging.INFO) API_ID = int(os.environ.get("API_ID", 6)) API_HASH = os.environ.get("API_HASH", "eb06d4abfb49dc3eeb1aeb98ae0f581e") TOKEN = os.environ.get("TOKEN", None) TAG = os.environ.get("TAG", None) OWNER_ID = int(os.environ.get("OWNER_ID", 1704673514)) tagcheck = Client( "tagcheck", bot_token=TOKEN, api_id=API_ID, api_hash=API_HASH ) user_s = {} async def is_admin(message): user = await tagcheck.get_chat_member(message.chat.id, message.from_user.id) if user.status in ("administrator", "creator"): return True return False @tagcheck.on_message(filters.command("start") & filters.user(OWNER_ID)) async def start(_, message): await message.reply("I am Alive.") @tagcheck.on_message(filters.group) async def tag_check(_, message): if await is_admin(message): return user = message.from_user.id if TAG not in message.from_user.first_name: try: await tagcheck.restrict_chat_member( message.chat.id, user, ChatPermissions(), ) except BaseException as be: await message.reply(f"**Error:**\n`{be}`") return text = f""" **Heya {message.from_user.mention}** Please add our tag in your name to chat again in the group. 
**Tag:** `{TAG}` **Note:** __Click The Below Button For Unmuting YourSelf!__ """ await message.reply( text, reply_markup=InlineKeyboardMarkup([ [InlineKeyboardButton("Unmute Me", callback_data="unmute")] ] ) ) user_s.update({"user_id": user}) @tagcheck.on_callback_query(filters.regex("unmute")) async def unmute(client, cb): try: user = user_s["user_id"] except KeyError: await cb.answer( "Oops!\nIts looks like i lost your id from my server\nContact Admins For Unmiting", show_alert=True ) return if cb.from_user.id != user: await cb.answer("This Button is not for you!", show_alert=True) return if TAG in cb.from_user.first_name: await tagcheck.unban_chat_member(cb.message.chat.id, user) await cb.answer("Succesfully Unmuted!") await message.delete() return await cb.answer("Please add tag in your name!", show_alert=True) tagcheck.run()
true
true
f72cad1a00cbc3a4cfeedd1cef65f5d5f630641b
2,143
py
Python
oneflow/python/framework/watcher.py
666DZY666/oneflow
2062cb211dd1e0619d610659e6d41598d5f73e17
[ "Apache-2.0" ]
null
null
null
oneflow/python/framework/watcher.py
666DZY666/oneflow
2062cb211dd1e0619d610659e6d41598d5f73e17
[ "Apache-2.0" ]
null
null
null
oneflow/python/framework/watcher.py
666DZY666/oneflow
2062cb211dd1e0619d610659e6d41598d5f73e17
[ "Apache-2.0" ]
1
2021-11-10T07:57:01.000Z
2021-11-10T07:57:01.000Z
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import traceback import oneflow.core.record.record_pb2 as record_util import oneflow.python.framework.local_blob as local_blob_util import oneflow.python.framework.ofblob as ofblob import oneflow.python.framework.remote_blob as remote_blob_util import oneflow.python.framework.session_context as session_ctx import oneflow.python.framework.typing_util as oft_util import oneflow_api from google.protobuf import text_format def BindUuidAndHandler(uuid, blob_watched, handler): assert isinstance(blob_watched, oneflow_api.ConsistentBlob) session_ctx.GetDefaultSession().uuid2watch_handler[uuid] = (blob_watched, handler) class _Watcher(oneflow_api.ForeignWatcher): def __init__(self): oneflow_api.ForeignWatcher.__init__(self) def Call(self, handler_uuid, of_blob_ptr): try: _WatcherHandler(handler_uuid, of_blob_ptr) except Exception as e: print(traceback.format_exc()) raise e def _WatcherHandler(handler_uuid, of_blob_ptr): uuid2handler = session_ctx.GetDefaultSession().uuid2watch_handler assert handler_uuid in uuid2handler blob_watched, handler = uuid2handler[handler_uuid] assert callable(handler) ndarray_lists = ofblob.OfBlob(of_blob_ptr).CopyToNdarrayLists() local_blob = local_blob_util.MakeLocalBlob(ndarray_lists, blob_watched) handler(oft_util.TransformWatchedBlob(local_blob, handler)) # static lifetime _global_watcher = _Watcher() 
oneflow_api.RegisterWatcherOnlyOnce(_global_watcher)
35.716667
86
0.792814
from __future__ import absolute_import import traceback import oneflow.core.record.record_pb2 as record_util import oneflow.python.framework.local_blob as local_blob_util import oneflow.python.framework.ofblob as ofblob import oneflow.python.framework.remote_blob as remote_blob_util import oneflow.python.framework.session_context as session_ctx import oneflow.python.framework.typing_util as oft_util import oneflow_api from google.protobuf import text_format def BindUuidAndHandler(uuid, blob_watched, handler): assert isinstance(blob_watched, oneflow_api.ConsistentBlob) session_ctx.GetDefaultSession().uuid2watch_handler[uuid] = (blob_watched, handler) class _Watcher(oneflow_api.ForeignWatcher): def __init__(self): oneflow_api.ForeignWatcher.__init__(self) def Call(self, handler_uuid, of_blob_ptr): try: _WatcherHandler(handler_uuid, of_blob_ptr) except Exception as e: print(traceback.format_exc()) raise e def _WatcherHandler(handler_uuid, of_blob_ptr): uuid2handler = session_ctx.GetDefaultSession().uuid2watch_handler assert handler_uuid in uuid2handler blob_watched, handler = uuid2handler[handler_uuid] assert callable(handler) ndarray_lists = ofblob.OfBlob(of_blob_ptr).CopyToNdarrayLists() local_blob = local_blob_util.MakeLocalBlob(ndarray_lists, blob_watched) handler(oft_util.TransformWatchedBlob(local_blob, handler)) _global_watcher = _Watcher() oneflow_api.RegisterWatcherOnlyOnce(_global_watcher)
true
true
f72cad26cae4ebe6adabb39d7a5dfcd09cf17363
31,637
py
Python
sdk/python/pulumi_f5bigip/ltm/snat.py
pulumi/pulumi-f5bigip
4bce074f8bd7cb42f359ef4814ca5b437230fd1c
[ "ECL-2.0", "Apache-2.0" ]
4
2018-12-21T23:30:33.000Z
2021-10-12T16:38:27.000Z
sdk/python/pulumi_f5bigip/ltm/snat.py
pulumi/pulumi-f5bigip
4bce074f8bd7cb42f359ef4814ca5b437230fd1c
[ "ECL-2.0", "Apache-2.0" ]
61
2019-01-09T01:50:19.000Z
2022-03-31T15:27:17.000Z
sdk/python/pulumi_f5bigip/ltm/snat.py
pulumi/pulumi-f5bigip
4bce074f8bd7cb42f359ef4814ca5b437230fd1c
[ "ECL-2.0", "Apache-2.0" ]
1
2019-10-05T10:36:30.000Z
2019-10-05T10:36:30.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._inputs import * __all__ = ['SnatArgs', 'Snat'] @pulumi.input_type class SnatArgs: def __init__(__self__, *, name: pulumi.Input[str], origins: pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]], autolasthop: Optional[pulumi.Input[str]] = None, full_path: Optional[pulumi.Input[str]] = None, mirror: Optional[pulumi.Input[str]] = None, partition: Optional[pulumi.Input[str]] = None, snatpool: Optional[pulumi.Input[str]] = None, sourceport: Optional[pulumi.Input[str]] = None, translation: Optional[pulumi.Input[str]] = None, vlans: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, vlansdisabled: Optional[pulumi.Input[bool]] = None): """ The set of arguments for constructing a Snat resource. :param pulumi.Input[str] name: Name of the snat :param pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]] origins: IP or hostname of the snat :param pulumi.Input[str] autolasthop: -(Optional) Specifies whether to automatically map last hop for pools or not. The default is to use next level's default. :param pulumi.Input[str] full_path: Fullpath :param pulumi.Input[str] mirror: Enables or disables mirroring of SNAT connections. :param pulumi.Input[str] partition: Displays the administrative partition within which this profile resides :param pulumi.Input[str] snatpool: Specifies the name of a SNAT pool. You can only use this option when automap and translation are not used. :param pulumi.Input[str] sourceport: Specifies whether the system preserves the source port of the connection. The default is preserve. 
Use of the preserve-strict setting should be restricted to UDP only under very special circumstances such as nPath or transparent (that is, no translation of any other L3/L4 field), where there is a 1:1 relationship between virtual IP addresses and node addresses, or when clustered multi-processing (CMP) is disabled. The change setting is useful for obfuscating internal network addresses. :param pulumi.Input[str] translation: Specifies the name of a translated IP address. Note that translated addresses are outside the traffic management system. You can only use this option when automap and snatpool are not used. :param pulumi.Input[Sequence[pulumi.Input[str]]] vlans: Specifies the name of the VLAN to which you want to assign the SNAT. The default is vlans-enabled. :param pulumi.Input[bool] vlansdisabled: Disables the SNAT on all VLANs. """ pulumi.set(__self__, "name", name) pulumi.set(__self__, "origins", origins) if autolasthop is not None: pulumi.set(__self__, "autolasthop", autolasthop) if full_path is not None: pulumi.set(__self__, "full_path", full_path) if mirror is not None: pulumi.set(__self__, "mirror", mirror) if partition is not None: pulumi.set(__self__, "partition", partition) if snatpool is not None: pulumi.set(__self__, "snatpool", snatpool) if sourceport is not None: pulumi.set(__self__, "sourceport", sourceport) if translation is not None: pulumi.set(__self__, "translation", translation) if vlans is not None: pulumi.set(__self__, "vlans", vlans) if vlansdisabled is not None: pulumi.set(__self__, "vlansdisabled", vlansdisabled) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ Name of the snat """ return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter def origins(self) -> pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]]: """ IP or hostname of the snat """ return pulumi.get(self, "origins") @origins.setter def origins(self, value: 
pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]]): pulumi.set(self, "origins", value) @property @pulumi.getter def autolasthop(self) -> Optional[pulumi.Input[str]]: """ -(Optional) Specifies whether to automatically map last hop for pools or not. The default is to use next level's default. """ return pulumi.get(self, "autolasthop") @autolasthop.setter def autolasthop(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "autolasthop", value) @property @pulumi.getter(name="fullPath") def full_path(self) -> Optional[pulumi.Input[str]]: """ Fullpath """ return pulumi.get(self, "full_path") @full_path.setter def full_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "full_path", value) @property @pulumi.getter def mirror(self) -> Optional[pulumi.Input[str]]: """ Enables or disables mirroring of SNAT connections. """ return pulumi.get(self, "mirror") @mirror.setter def mirror(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "mirror", value) @property @pulumi.getter def partition(self) -> Optional[pulumi.Input[str]]: """ Displays the administrative partition within which this profile resides """ return pulumi.get(self, "partition") @partition.setter def partition(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "partition", value) @property @pulumi.getter def snatpool(self) -> Optional[pulumi.Input[str]]: """ Specifies the name of a SNAT pool. You can only use this option when automap and translation are not used. """ return pulumi.get(self, "snatpool") @snatpool.setter def snatpool(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "snatpool", value) @property @pulumi.getter def sourceport(self) -> Optional[pulumi.Input[str]]: """ Specifies whether the system preserves the source port of the connection. The default is preserve. 
Use of the preserve-strict setting should be restricted to UDP only under very special circumstances such as nPath or transparent (that is, no translation of any other L3/L4 field), where there is a 1:1 relationship between virtual IP addresses and node addresses, or when clustered multi-processing (CMP) is disabled. The change setting is useful for obfuscating internal network addresses. """ return pulumi.get(self, "sourceport") @sourceport.setter def sourceport(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sourceport", value) @property @pulumi.getter def translation(self) -> Optional[pulumi.Input[str]]: """ Specifies the name of a translated IP address. Note that translated addresses are outside the traffic management system. You can only use this option when automap and snatpool are not used. """ return pulumi.get(self, "translation") @translation.setter def translation(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "translation", value) @property @pulumi.getter def vlans(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the name of the VLAN to which you want to assign the SNAT. The default is vlans-enabled. """ return pulumi.get(self, "vlans") @vlans.setter def vlans(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "vlans", value) @property @pulumi.getter def vlansdisabled(self) -> Optional[pulumi.Input[bool]]: """ Disables the SNAT on all VLANs. 
""" return pulumi.get(self, "vlansdisabled") @vlansdisabled.setter def vlansdisabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "vlansdisabled", value) @pulumi.input_type class _SnatState: def __init__(__self__, *, autolasthop: Optional[pulumi.Input[str]] = None, full_path: Optional[pulumi.Input[str]] = None, mirror: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, origins: Optional[pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]]] = None, partition: Optional[pulumi.Input[str]] = None, snatpool: Optional[pulumi.Input[str]] = None, sourceport: Optional[pulumi.Input[str]] = None, translation: Optional[pulumi.Input[str]] = None, vlans: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, vlansdisabled: Optional[pulumi.Input[bool]] = None): """ Input properties used for looking up and filtering Snat resources. :param pulumi.Input[str] autolasthop: -(Optional) Specifies whether to automatically map last hop for pools or not. The default is to use next level's default. :param pulumi.Input[str] full_path: Fullpath :param pulumi.Input[str] mirror: Enables or disables mirroring of SNAT connections. :param pulumi.Input[str] name: Name of the snat :param pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]] origins: IP or hostname of the snat :param pulumi.Input[str] partition: Displays the administrative partition within which this profile resides :param pulumi.Input[str] snatpool: Specifies the name of a SNAT pool. You can only use this option when automap and translation are not used. :param pulumi.Input[str] sourceport: Specifies whether the system preserves the source port of the connection. The default is preserve. 
Use of the preserve-strict setting should be restricted to UDP only under very special circumstances such as nPath or transparent (that is, no translation of any other L3/L4 field), where there is a 1:1 relationship between virtual IP addresses and node addresses, or when clustered multi-processing (CMP) is disabled. The change setting is useful for obfuscating internal network addresses. :param pulumi.Input[str] translation: Specifies the name of a translated IP address. Note that translated addresses are outside the traffic management system. You can only use this option when automap and snatpool are not used. :param pulumi.Input[Sequence[pulumi.Input[str]]] vlans: Specifies the name of the VLAN to which you want to assign the SNAT. The default is vlans-enabled. :param pulumi.Input[bool] vlansdisabled: Disables the SNAT on all VLANs. """ if autolasthop is not None: pulumi.set(__self__, "autolasthop", autolasthop) if full_path is not None: pulumi.set(__self__, "full_path", full_path) if mirror is not None: pulumi.set(__self__, "mirror", mirror) if name is not None: pulumi.set(__self__, "name", name) if origins is not None: pulumi.set(__self__, "origins", origins) if partition is not None: pulumi.set(__self__, "partition", partition) if snatpool is not None: pulumi.set(__self__, "snatpool", snatpool) if sourceport is not None: pulumi.set(__self__, "sourceport", sourceport) if translation is not None: pulumi.set(__self__, "translation", translation) if vlans is not None: pulumi.set(__self__, "vlans", vlans) if vlansdisabled is not None: pulumi.set(__self__, "vlansdisabled", vlansdisabled) @property @pulumi.getter def autolasthop(self) -> Optional[pulumi.Input[str]]: """ -(Optional) Specifies whether to automatically map last hop for pools or not. The default is to use next level's default. 
""" return pulumi.get(self, "autolasthop") @autolasthop.setter def autolasthop(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "autolasthop", value) @property @pulumi.getter(name="fullPath") def full_path(self) -> Optional[pulumi.Input[str]]: """ Fullpath """ return pulumi.get(self, "full_path") @full_path.setter def full_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "full_path", value) @property @pulumi.getter def mirror(self) -> Optional[pulumi.Input[str]]: """ Enables or disables mirroring of SNAT connections. """ return pulumi.get(self, "mirror") @mirror.setter def mirror(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "mirror", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the snat """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]]]: """ IP or hostname of the snat """ return pulumi.get(self, "origins") @origins.setter def origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]]]): pulumi.set(self, "origins", value) @property @pulumi.getter def partition(self) -> Optional[pulumi.Input[str]]: """ Displays the administrative partition within which this profile resides """ return pulumi.get(self, "partition") @partition.setter def partition(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "partition", value) @property @pulumi.getter def snatpool(self) -> Optional[pulumi.Input[str]]: """ Specifies the name of a SNAT pool. You can only use this option when automap and translation are not used. 
""" return pulumi.get(self, "snatpool") @snatpool.setter def snatpool(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "snatpool", value) @property @pulumi.getter def sourceport(self) -> Optional[pulumi.Input[str]]: """ Specifies whether the system preserves the source port of the connection. The default is preserve. Use of the preserve-strict setting should be restricted to UDP only under very special circumstances such as nPath or transparent (that is, no translation of any other L3/L4 field), where there is a 1:1 relationship between virtual IP addresses and node addresses, or when clustered multi-processing (CMP) is disabled. The change setting is useful for obfuscating internal network addresses. """ return pulumi.get(self, "sourceport") @sourceport.setter def sourceport(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sourceport", value) @property @pulumi.getter def translation(self) -> Optional[pulumi.Input[str]]: """ Specifies the name of a translated IP address. Note that translated addresses are outside the traffic management system. You can only use this option when automap and snatpool are not used. """ return pulumi.get(self, "translation") @translation.setter def translation(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "translation", value) @property @pulumi.getter def vlans(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the name of the VLAN to which you want to assign the SNAT. The default is vlans-enabled. """ return pulumi.get(self, "vlans") @vlans.setter def vlans(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "vlans", value) @property @pulumi.getter def vlansdisabled(self) -> Optional[pulumi.Input[bool]]: """ Disables the SNAT on all VLANs. 
""" return pulumi.get(self, "vlansdisabled") @vlansdisabled.setter def vlansdisabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "vlansdisabled", value) class Snat(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, autolasthop: Optional[pulumi.Input[str]] = None, full_path: Optional[pulumi.Input[str]] = None, mirror: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SnatOriginArgs']]]]] = None, partition: Optional[pulumi.Input[str]] = None, snatpool: Optional[pulumi.Input[str]] = None, sourceport: Optional[pulumi.Input[str]] = None, translation: Optional[pulumi.Input[str]] = None, vlans: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, vlansdisabled: Optional[pulumi.Input[bool]] = None, __props__=None): """ `ltm.Snat` Manages a snat configuration For resources should be named with their "full path". The full path is the combination of the partition + name of the resource. For example /Common/my-pool. ## Example Usage ```python import pulumi import pulumi_f5bigip as f5bigip test_snat = f5bigip.ltm.Snat("test-snat", autolasthop="default", full_path="/Common/test-snat", mirror="disabled", name="TEST_SNAT_NAME", origins=[ f5bigip.ltm.SnatOriginArgs( name="2.2.2.2", ), f5bigip.ltm.SnatOriginArgs( name="3.3.3.3", ), ], partition="Common", translation="/Common/136.1.1.1", vlansdisabled=True) ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] autolasthop: -(Optional) Specifies whether to automatically map last hop for pools or not. The default is to use next level's default. :param pulumi.Input[str] full_path: Fullpath :param pulumi.Input[str] mirror: Enables or disables mirroring of SNAT connections. 
:param pulumi.Input[str] name: Name of the snat :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SnatOriginArgs']]]] origins: IP or hostname of the snat :param pulumi.Input[str] partition: Displays the administrative partition within which this profile resides :param pulumi.Input[str] snatpool: Specifies the name of a SNAT pool. You can only use this option when automap and translation are not used. :param pulumi.Input[str] sourceport: Specifies whether the system preserves the source port of the connection. The default is preserve. Use of the preserve-strict setting should be restricted to UDP only under very special circumstances such as nPath or transparent (that is, no translation of any other L3/L4 field), where there is a 1:1 relationship between virtual IP addresses and node addresses, or when clustered multi-processing (CMP) is disabled. The change setting is useful for obfuscating internal network addresses. :param pulumi.Input[str] translation: Specifies the name of a translated IP address. Note that translated addresses are outside the traffic management system. You can only use this option when automap and snatpool are not used. :param pulumi.Input[Sequence[pulumi.Input[str]]] vlans: Specifies the name of the VLAN to which you want to assign the SNAT. The default is vlans-enabled. :param pulumi.Input[bool] vlansdisabled: Disables the SNAT on all VLANs. """ ... @overload def __init__(__self__, resource_name: str, args: SnatArgs, opts: Optional[pulumi.ResourceOptions] = None): """ `ltm.Snat` Manages a snat configuration For resources should be named with their "full path". The full path is the combination of the partition + name of the resource. For example /Common/my-pool. 
## Example Usage ```python import pulumi import pulumi_f5bigip as f5bigip test_snat = f5bigip.ltm.Snat("test-snat", autolasthop="default", full_path="/Common/test-snat", mirror="disabled", name="TEST_SNAT_NAME", origins=[ f5bigip.ltm.SnatOriginArgs( name="2.2.2.2", ), f5bigip.ltm.SnatOriginArgs( name="3.3.3.3", ), ], partition="Common", translation="/Common/136.1.1.1", vlansdisabled=True) ``` :param str resource_name: The name of the resource. :param SnatArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(SnatArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, autolasthop: Optional[pulumi.Input[str]] = None, full_path: Optional[pulumi.Input[str]] = None, mirror: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SnatOriginArgs']]]]] = None, partition: Optional[pulumi.Input[str]] = None, snatpool: Optional[pulumi.Input[str]] = None, sourceport: Optional[pulumi.Input[str]] = None, translation: Optional[pulumi.Input[str]] = None, vlans: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, vlansdisabled: Optional[pulumi.Input[bool]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a 
valid opts.id to get an existing resource') __props__ = SnatArgs.__new__(SnatArgs) __props__.__dict__["autolasthop"] = autolasthop __props__.__dict__["full_path"] = full_path __props__.__dict__["mirror"] = mirror if name is None and not opts.urn: raise TypeError("Missing required property 'name'") __props__.__dict__["name"] = name if origins is None and not opts.urn: raise TypeError("Missing required property 'origins'") __props__.__dict__["origins"] = origins __props__.__dict__["partition"] = partition __props__.__dict__["snatpool"] = snatpool __props__.__dict__["sourceport"] = sourceport __props__.__dict__["translation"] = translation __props__.__dict__["vlans"] = vlans __props__.__dict__["vlansdisabled"] = vlansdisabled super(Snat, __self__).__init__( 'f5bigip:ltm/snat:Snat', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, autolasthop: Optional[pulumi.Input[str]] = None, full_path: Optional[pulumi.Input[str]] = None, mirror: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SnatOriginArgs']]]]] = None, partition: Optional[pulumi.Input[str]] = None, snatpool: Optional[pulumi.Input[str]] = None, sourceport: Optional[pulumi.Input[str]] = None, translation: Optional[pulumi.Input[str]] = None, vlans: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, vlansdisabled: Optional[pulumi.Input[bool]] = None) -> 'Snat': """ Get an existing Snat resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] autolasthop: -(Optional) Specifies whether to automatically map last hop for pools or not. 
The default is to use next level's default. :param pulumi.Input[str] full_path: Fullpath :param pulumi.Input[str] mirror: Enables or disables mirroring of SNAT connections. :param pulumi.Input[str] name: Name of the snat :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SnatOriginArgs']]]] origins: IP or hostname of the snat :param pulumi.Input[str] partition: Displays the administrative partition within which this profile resides :param pulumi.Input[str] snatpool: Specifies the name of a SNAT pool. You can only use this option when automap and translation are not used. :param pulumi.Input[str] sourceport: Specifies whether the system preserves the source port of the connection. The default is preserve. Use of the preserve-strict setting should be restricted to UDP only under very special circumstances such as nPath or transparent (that is, no translation of any other L3/L4 field), where there is a 1:1 relationship between virtual IP addresses and node addresses, or when clustered multi-processing (CMP) is disabled. The change setting is useful for obfuscating internal network addresses. :param pulumi.Input[str] translation: Specifies the name of a translated IP address. Note that translated addresses are outside the traffic management system. You can only use this option when automap and snatpool are not used. :param pulumi.Input[Sequence[pulumi.Input[str]]] vlans: Specifies the name of the VLAN to which you want to assign the SNAT. The default is vlans-enabled. :param pulumi.Input[bool] vlansdisabled: Disables the SNAT on all VLANs. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _SnatState.__new__(_SnatState) __props__.__dict__["autolasthop"] = autolasthop __props__.__dict__["full_path"] = full_path __props__.__dict__["mirror"] = mirror __props__.__dict__["name"] = name __props__.__dict__["origins"] = origins __props__.__dict__["partition"] = partition __props__.__dict__["snatpool"] = snatpool __props__.__dict__["sourceport"] = sourceport __props__.__dict__["translation"] = translation __props__.__dict__["vlans"] = vlans __props__.__dict__["vlansdisabled"] = vlansdisabled return Snat(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def autolasthop(self) -> pulumi.Output[Optional[str]]: """ -(Optional) Specifies whether to automatically map last hop for pools or not. The default is to use next level's default. """ return pulumi.get(self, "autolasthop") @property @pulumi.getter(name="fullPath") def full_path(self) -> pulumi.Output[Optional[str]]: """ Fullpath """ return pulumi.get(self, "full_path") @property @pulumi.getter def mirror(self) -> pulumi.Output[Optional[str]]: """ Enables or disables mirroring of SNAT connections. """ return pulumi.get(self, "mirror") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Name of the snat """ return pulumi.get(self, "name") @property @pulumi.getter def origins(self) -> pulumi.Output[Sequence['outputs.SnatOrigin']]: """ IP or hostname of the snat """ return pulumi.get(self, "origins") @property @pulumi.getter def partition(self) -> pulumi.Output[Optional[str]]: """ Displays the administrative partition within which this profile resides """ return pulumi.get(self, "partition") @property @pulumi.getter def snatpool(self) -> pulumi.Output[Optional[str]]: """ Specifies the name of a SNAT pool. You can only use this option when automap and translation are not used. 
""" return pulumi.get(self, "snatpool") @property @pulumi.getter def sourceport(self) -> pulumi.Output[Optional[str]]: """ Specifies whether the system preserves the source port of the connection. The default is preserve. Use of the preserve-strict setting should be restricted to UDP only under very special circumstances such as nPath or transparent (that is, no translation of any other L3/L4 field), where there is a 1:1 relationship between virtual IP addresses and node addresses, or when clustered multi-processing (CMP) is disabled. The change setting is useful for obfuscating internal network addresses. """ return pulumi.get(self, "sourceport") @property @pulumi.getter def translation(self) -> pulumi.Output[Optional[str]]: """ Specifies the name of a translated IP address. Note that translated addresses are outside the traffic management system. You can only use this option when automap and snatpool are not used. """ return pulumi.get(self, "translation") @property @pulumi.getter def vlans(self) -> pulumi.Output[Optional[Sequence[str]]]: """ Specifies the name of the VLAN to which you want to assign the SNAT. The default is vlans-enabled. """ return pulumi.get(self, "vlans") @property @pulumi.getter def vlansdisabled(self) -> pulumi.Output[Optional[bool]]: """ Disables the SNAT on all VLANs. """ return pulumi.get(self, "vlansdisabled")
46.939169
535
0.649746
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._inputs import * __all__ = ['SnatArgs', 'Snat'] @pulumi.input_type class SnatArgs: def __init__(__self__, *, name: pulumi.Input[str], origins: pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]], autolasthop: Optional[pulumi.Input[str]] = None, full_path: Optional[pulumi.Input[str]] = None, mirror: Optional[pulumi.Input[str]] = None, partition: Optional[pulumi.Input[str]] = None, snatpool: Optional[pulumi.Input[str]] = None, sourceport: Optional[pulumi.Input[str]] = None, translation: Optional[pulumi.Input[str]] = None, vlans: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, vlansdisabled: Optional[pulumi.Input[bool]] = None): pulumi.set(__self__, "name", name) pulumi.set(__self__, "origins", origins) if autolasthop is not None: pulumi.set(__self__, "autolasthop", autolasthop) if full_path is not None: pulumi.set(__self__, "full_path", full_path) if mirror is not None: pulumi.set(__self__, "mirror", mirror) if partition is not None: pulumi.set(__self__, "partition", partition) if snatpool is not None: pulumi.set(__self__, "snatpool", snatpool) if sourceport is not None: pulumi.set(__self__, "sourceport", sourceport) if translation is not None: pulumi.set(__self__, "translation", translation) if vlans is not None: pulumi.set(__self__, "vlans", vlans) if vlansdisabled is not None: pulumi.set(__self__, "vlansdisabled", vlansdisabled) @property @pulumi.getter def name(self) -> pulumi.Input[str]: return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter def origins(self) -> pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]]: return pulumi.get(self, "origins") @origins.setter def origins(self, value: pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]]): pulumi.set(self, "origins", value) 
@property @pulumi.getter def autolasthop(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "autolasthop") @autolasthop.setter def autolasthop(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "autolasthop", value) @property @pulumi.getter(name="fullPath") def full_path(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "full_path") @full_path.setter def full_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "full_path", value) @property @pulumi.getter def mirror(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "mirror") @mirror.setter def mirror(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "mirror", value) @property @pulumi.getter def partition(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "partition") @partition.setter def partition(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "partition", value) @property @pulumi.getter def snatpool(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "snatpool") @snatpool.setter def snatpool(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "snatpool", value) @property @pulumi.getter def sourceport(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "sourceport") @sourceport.setter def sourceport(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sourceport", value) @property @pulumi.getter def translation(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "translation") @translation.setter def translation(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "translation", value) @property @pulumi.getter def vlans(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "vlans") @vlans.setter def vlans(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "vlans", value) @property @pulumi.getter def vlansdisabled(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "vlansdisabled") 
@vlansdisabled.setter def vlansdisabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "vlansdisabled", value) @pulumi.input_type class _SnatState: def __init__(__self__, *, autolasthop: Optional[pulumi.Input[str]] = None, full_path: Optional[pulumi.Input[str]] = None, mirror: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, origins: Optional[pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]]] = None, partition: Optional[pulumi.Input[str]] = None, snatpool: Optional[pulumi.Input[str]] = None, sourceport: Optional[pulumi.Input[str]] = None, translation: Optional[pulumi.Input[str]] = None, vlans: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, vlansdisabled: Optional[pulumi.Input[bool]] = None): if autolasthop is not None: pulumi.set(__self__, "autolasthop", autolasthop) if full_path is not None: pulumi.set(__self__, "full_path", full_path) if mirror is not None: pulumi.set(__self__, "mirror", mirror) if name is not None: pulumi.set(__self__, "name", name) if origins is not None: pulumi.set(__self__, "origins", origins) if partition is not None: pulumi.set(__self__, "partition", partition) if snatpool is not None: pulumi.set(__self__, "snatpool", snatpool) if sourceport is not None: pulumi.set(__self__, "sourceport", sourceport) if translation is not None: pulumi.set(__self__, "translation", translation) if vlans is not None: pulumi.set(__self__, "vlans", vlans) if vlansdisabled is not None: pulumi.set(__self__, "vlansdisabled", vlansdisabled) @property @pulumi.getter def autolasthop(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "autolasthop") @autolasthop.setter def autolasthop(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "autolasthop", value) @property @pulumi.getter(name="fullPath") def full_path(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "full_path") @full_path.setter def full_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "full_path", 
value) @property @pulumi.getter def mirror(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "mirror") @mirror.setter def mirror(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "mirror", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]]]: return pulumi.get(self, "origins") @origins.setter def origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SnatOriginArgs']]]]): pulumi.set(self, "origins", value) @property @pulumi.getter def partition(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "partition") @partition.setter def partition(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "partition", value) @property @pulumi.getter def snatpool(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "snatpool") @snatpool.setter def snatpool(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "snatpool", value) @property @pulumi.getter def sourceport(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "sourceport") @sourceport.setter def sourceport(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sourceport", value) @property @pulumi.getter def translation(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "translation") @translation.setter def translation(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "translation", value) @property @pulumi.getter def vlans(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "vlans") @vlans.setter def vlans(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "vlans", value) @property @pulumi.getter def vlansdisabled(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "vlansdisabled") 
@vlansdisabled.setter def vlansdisabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "vlansdisabled", value) class Snat(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, autolasthop: Optional[pulumi.Input[str]] = None, full_path: Optional[pulumi.Input[str]] = None, mirror: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SnatOriginArgs']]]]] = None, partition: Optional[pulumi.Input[str]] = None, snatpool: Optional[pulumi.Input[str]] = None, sourceport: Optional[pulumi.Input[str]] = None, translation: Optional[pulumi.Input[str]] = None, vlans: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, vlansdisabled: Optional[pulumi.Input[bool]] = None, __props__=None): ... @overload def __init__(__self__, resource_name: str, args: SnatArgs, opts: Optional[pulumi.ResourceOptions] = None): ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(SnatArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, autolasthop: Optional[pulumi.Input[str]] = None, full_path: Optional[pulumi.Input[str]] = None, mirror: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SnatOriginArgs']]]]] = None, partition: Optional[pulumi.Input[str]] = None, snatpool: Optional[pulumi.Input[str]] = None, sourceport: Optional[pulumi.Input[str]] = None, translation: Optional[pulumi.Input[str]] = None, vlans: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, vlansdisabled: 
Optional[pulumi.Input[bool]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = SnatArgs.__new__(SnatArgs) __props__.__dict__["autolasthop"] = autolasthop __props__.__dict__["full_path"] = full_path __props__.__dict__["mirror"] = mirror if name is None and not opts.urn: raise TypeError("Missing required property 'name'") __props__.__dict__["name"] = name if origins is None and not opts.urn: raise TypeError("Missing required property 'origins'") __props__.__dict__["origins"] = origins __props__.__dict__["partition"] = partition __props__.__dict__["snatpool"] = snatpool __props__.__dict__["sourceport"] = sourceport __props__.__dict__["translation"] = translation __props__.__dict__["vlans"] = vlans __props__.__dict__["vlansdisabled"] = vlansdisabled super(Snat, __self__).__init__( 'f5bigip:ltm/snat:Snat', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, autolasthop: Optional[pulumi.Input[str]] = None, full_path: Optional[pulumi.Input[str]] = None, mirror: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SnatOriginArgs']]]]] = None, partition: Optional[pulumi.Input[str]] = None, snatpool: Optional[pulumi.Input[str]] = None, sourceport: Optional[pulumi.Input[str]] = None, translation: Optional[pulumi.Input[str]] = None, vlans: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, vlansdisabled: Optional[pulumi.Input[bool]] = None) -> 'Snat': opts = pulumi.ResourceOptions.merge(opts, 
pulumi.ResourceOptions(id=id)) __props__ = _SnatState.__new__(_SnatState) __props__.__dict__["autolasthop"] = autolasthop __props__.__dict__["full_path"] = full_path __props__.__dict__["mirror"] = mirror __props__.__dict__["name"] = name __props__.__dict__["origins"] = origins __props__.__dict__["partition"] = partition __props__.__dict__["snatpool"] = snatpool __props__.__dict__["sourceport"] = sourceport __props__.__dict__["translation"] = translation __props__.__dict__["vlans"] = vlans __props__.__dict__["vlansdisabled"] = vlansdisabled return Snat(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def autolasthop(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "autolasthop") @property @pulumi.getter(name="fullPath") def full_path(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "full_path") @property @pulumi.getter def mirror(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "mirror") @property @pulumi.getter def name(self) -> pulumi.Output[str]: return pulumi.get(self, "name") @property @pulumi.getter def origins(self) -> pulumi.Output[Sequence['outputs.SnatOrigin']]: return pulumi.get(self, "origins") @property @pulumi.getter def partition(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "partition") @property @pulumi.getter def snatpool(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "snatpool") @property @pulumi.getter def sourceport(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "sourceport") @property @pulumi.getter def translation(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "translation") @property @pulumi.getter def vlans(self) -> pulumi.Output[Optional[Sequence[str]]]: return pulumi.get(self, "vlans") @property @pulumi.getter def vlansdisabled(self) -> pulumi.Output[Optional[bool]]: return pulumi.get(self, "vlansdisabled")
true
true
f72cad63668c1a50f31829882512b8a9df77f041
12,170
py
Python
keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py
hashnfv/hashnfv-moon
daaba34fa2ed4426bc0fde359e54a5e1b872208c
[ "Apache-2.0" ]
1
2019-05-08T06:09:35.000Z
2019-05-08T06:09:35.000Z
keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py
hashnfv/hashnfv-moon
daaba34fa2ed4426bc0fde359e54a5e1b872208c
[ "Apache-2.0" ]
4
2018-08-22T14:51:02.000Z
2018-10-17T14:04:26.000Z
keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py
hashnfv/hashnfv-moon
daaba34fa2ed4426bc0fde359e54a5e1b872208c
[ "Apache-2.0" ]
5
2018-08-03T17:19:34.000Z
2019-01-11T15:54:42.000Z
# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from six.moves import range from testtools import matchers from keystone import exception from keystone.tests import unit class PolicyAssociationTests(object): def _assert_correct_policy(self, endpoint, policy): ref = ( self.endpoint_policy_api.get_policy_for_endpoint(endpoint['id'])) self.assertEqual(policy['id'], ref['id']) def _assert_correct_endpoints(self, policy, endpoint_list): endpoint_id_list = [ep['id'] for ep in endpoint_list] endpoints = ( self.endpoint_policy_api.list_endpoints_for_policy(policy['id'])) self.assertThat(endpoints, matchers.HasLength(len(endpoint_list))) for endpoint in endpoints: self.assertIn(endpoint['id'], endpoint_id_list) def load_sample_data(self): """Create sample data to test policy associations. 
The following data is created: - 3 regions, in a hierarchy, 0 -> 1 -> 2 (where 0 is top) - 3 services - 6 endpoints, 2 in each region, with a mixture of services: 0 - region 0, Service 0 1 - region 0, Service 1 2 - region 1, Service 1 3 - region 1, Service 2 4 - region 2, Service 2 5 - region 2, Service 0 """ def new_endpoint(region_id, service_id): endpoint = unit.new_endpoint_ref(interface='test', region_id=region_id, service_id=service_id, url='/url') self.endpoint.append(self.catalog_api.create_endpoint( endpoint['id'], endpoint)) self.policy = [] self.endpoint = [] self.service = [] self.region = [] parent_region_id = None for i in range(3): policy = unit.new_policy_ref() self.policy.append(self.policy_api.create_policy(policy['id'], policy)) service = unit.new_service_ref() self.service.append(self.catalog_api.create_service(service['id'], service)) region = unit.new_region_ref(parent_region_id=parent_region_id) # Link the regions together as a hierarchy, [0] at the top parent_region_id = region['id'] self.region.append(self.catalog_api.create_region(region)) new_endpoint(self.region[0]['id'], self.service[0]['id']) new_endpoint(self.region[0]['id'], self.service[1]['id']) new_endpoint(self.region[1]['id'], self.service[1]['id']) new_endpoint(self.region[1]['id'], self.service[2]['id']) new_endpoint(self.region[2]['id'], self.service[2]['id']) new_endpoint(self.region[2]['id'], self.service[0]['id']) def test_policy_to_endpoint_association_crud(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.check_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.delete_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) def 
test_overwriting_policy_to_endpoint_association(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[1]['id'], endpoint_id=self.endpoint[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.check_policy_association( self.policy[1]['id'], endpoint_id=self.endpoint[0]['id']) def test_invalid_policy_to_endpoint_association(self): self.assertRaises(exception.InvalidPolicyAssociation, self.endpoint_policy_api.create_policy_association, self.policy[0]['id']) self.assertRaises(exception.InvalidPolicyAssociation, self.endpoint_policy_api.create_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'], region_id=self.region[0]['id']) self.assertRaises(exception.InvalidPolicyAssociation, self.endpoint_policy_api.create_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'], service_id=self.service[0]['id']) self.assertRaises(exception.InvalidPolicyAssociation, self.endpoint_policy_api.create_policy_association, self.policy[0]['id'], region_id=self.region[0]['id']) def test_policy_to_explicit_endpoint_association(self): # Associate policy 0 with endpoint 0 self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self._assert_correct_policy(self.endpoint[0], self.policy[0]) self._assert_correct_endpoints(self.policy[0], [self.endpoint[0]]) self.assertRaises(exception.NotFound, self.endpoint_policy_api.get_policy_for_endpoint, uuid.uuid4().hex) def test_policy_to_service_association(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[1]['id'], service_id=self.service[1]['id']) # Endpoints 0 and 5 are part of 
service 0 self._assert_correct_policy(self.endpoint[0], self.policy[0]) self._assert_correct_policy(self.endpoint[5], self.policy[0]) self._assert_correct_endpoints( self.policy[0], [self.endpoint[0], self.endpoint[5]]) # Endpoints 1 and 2 are part of service 1 self._assert_correct_policy(self.endpoint[1], self.policy[1]) self._assert_correct_policy(self.endpoint[2], self.policy[1]) self._assert_correct_endpoints( self.policy[1], [self.endpoint[1], self.endpoint[2]]) def test_policy_to_region_and_service_association(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[1]['id'], service_id=self.service[1]['id'], region_id=self.region[1]['id']) self.endpoint_policy_api.create_policy_association( self.policy[2]['id'], service_id=self.service[2]['id'], region_id=self.region[2]['id']) # Endpoint 0 is in region 0 with service 0, so should get policy 0 self._assert_correct_policy(self.endpoint[0], self.policy[0]) # Endpoint 5 is in Region 2 with service 0, so should also get # policy 0 by searching up the tree to Region 0 self._assert_correct_policy(self.endpoint[5], self.policy[0]) # Looking the other way round, policy 2 should only be in use by # endpoint 4, since that's the only endpoint in region 2 with the # correct service self._assert_correct_endpoints( self.policy[2], [self.endpoint[4]]) # Policy 1 should only be in use by endpoint 2, since that's the only # endpoint in region 1 (and region 2 below it) with the correct service self._assert_correct_endpoints( self.policy[1], [self.endpoint[2]]) # Policy 0 should be in use by endpoint 0, as well as 5 (since 5 is # of the correct service and in region 2 below it) self._assert_correct_endpoints( self.policy[0], [self.endpoint[0], self.endpoint[5]]) def test_delete_association_by_entity(self): self.endpoint_policy_api.create_policy_association( 
self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.delete_association_by_endpoint( self.endpoint[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) # Make sure deleting it again is silent - since this method is used # in response to notifications by the controller. self.endpoint_policy_api.delete_association_by_endpoint( self.endpoint[0]['id']) # Now try with service - ensure both combined region & service # associations and explicit service ones are removed self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[1]['id'], service_id=self.service[0]['id'], region_id=self.region[1]['id']) self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id']) self.endpoint_policy_api.delete_association_by_service( self.service[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[1]['id'], service_id=self.service[0]['id'], region_id=self.region[1]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id']) # Finally, check delete by region self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.endpoint_policy_api.delete_association_by_region( self.region[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) 
self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id'])
48.68
79
0.607313
import uuid from six.moves import range from testtools import matchers from keystone import exception from keystone.tests import unit class PolicyAssociationTests(object): def _assert_correct_policy(self, endpoint, policy): ref = ( self.endpoint_policy_api.get_policy_for_endpoint(endpoint['id'])) self.assertEqual(policy['id'], ref['id']) def _assert_correct_endpoints(self, policy, endpoint_list): endpoint_id_list = [ep['id'] for ep in endpoint_list] endpoints = ( self.endpoint_policy_api.list_endpoints_for_policy(policy['id'])) self.assertThat(endpoints, matchers.HasLength(len(endpoint_list))) for endpoint in endpoints: self.assertIn(endpoint['id'], endpoint_id_list) def load_sample_data(self): def new_endpoint(region_id, service_id): endpoint = unit.new_endpoint_ref(interface='test', region_id=region_id, service_id=service_id, url='/url') self.endpoint.append(self.catalog_api.create_endpoint( endpoint['id'], endpoint)) self.policy = [] self.endpoint = [] self.service = [] self.region = [] parent_region_id = None for i in range(3): policy = unit.new_policy_ref() self.policy.append(self.policy_api.create_policy(policy['id'], policy)) service = unit.new_service_ref() self.service.append(self.catalog_api.create_service(service['id'], service)) region = unit.new_region_ref(parent_region_id=parent_region_id) parent_region_id = region['id'] self.region.append(self.catalog_api.create_region(region)) new_endpoint(self.region[0]['id'], self.service[0]['id']) new_endpoint(self.region[0]['id'], self.service[1]['id']) new_endpoint(self.region[1]['id'], self.service[1]['id']) new_endpoint(self.region[1]['id'], self.service[2]['id']) new_endpoint(self.region[2]['id'], self.service[2]['id']) new_endpoint(self.region[2]['id'], self.service[0]['id']) def test_policy_to_endpoint_association_crud(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.check_policy_association( self.policy[0]['id'], 
endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.delete_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) def test_overwriting_policy_to_endpoint_association(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[1]['id'], endpoint_id=self.endpoint[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.check_policy_association( self.policy[1]['id'], endpoint_id=self.endpoint[0]['id']) def test_invalid_policy_to_endpoint_association(self): self.assertRaises(exception.InvalidPolicyAssociation, self.endpoint_policy_api.create_policy_association, self.policy[0]['id']) self.assertRaises(exception.InvalidPolicyAssociation, self.endpoint_policy_api.create_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'], region_id=self.region[0]['id']) self.assertRaises(exception.InvalidPolicyAssociation, self.endpoint_policy_api.create_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'], service_id=self.service[0]['id']) self.assertRaises(exception.InvalidPolicyAssociation, self.endpoint_policy_api.create_policy_association, self.policy[0]['id'], region_id=self.region[0]['id']) def test_policy_to_explicit_endpoint_association(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self._assert_correct_policy(self.endpoint[0], self.policy[0]) self._assert_correct_endpoints(self.policy[0], [self.endpoint[0]]) self.assertRaises(exception.NotFound, self.endpoint_policy_api.get_policy_for_endpoint, uuid.uuid4().hex) def 
test_policy_to_service_association(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[1]['id'], service_id=self.service[1]['id']) self._assert_correct_policy(self.endpoint[0], self.policy[0]) self._assert_correct_policy(self.endpoint[5], self.policy[0]) self._assert_correct_endpoints( self.policy[0], [self.endpoint[0], self.endpoint[5]]) self._assert_correct_policy(self.endpoint[1], self.policy[1]) self._assert_correct_policy(self.endpoint[2], self.policy[1]) self._assert_correct_endpoints( self.policy[1], [self.endpoint[1], self.endpoint[2]]) def test_policy_to_region_and_service_association(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[1]['id'], service_id=self.service[1]['id'], region_id=self.region[1]['id']) self.endpoint_policy_api.create_policy_association( self.policy[2]['id'], service_id=self.service[2]['id'], region_id=self.region[2]['id']) self._assert_correct_policy(self.endpoint[0], self.policy[0]) self._assert_correct_policy(self.endpoint[5], self.policy[0]) # correct service self._assert_correct_endpoints( self.policy[2], [self.endpoint[4]]) # Policy 1 should only be in use by endpoint 2, since that's the only self._assert_correct_endpoints( self.policy[1], [self.endpoint[2]]) self._assert_correct_endpoints( self.policy[0], [self.endpoint[0], self.endpoint[5]]) def test_delete_association_by_entity(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.delete_association_by_endpoint( self.endpoint[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) 
self.endpoint_policy_api.delete_association_by_endpoint( self.endpoint[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[1]['id'], service_id=self.service[0]['id'], region_id=self.region[1]['id']) self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id']) self.endpoint_policy_api.delete_association_by_service( self.service[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[1]['id'], service_id=self.service[0]['id'], region_id=self.region[1]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.endpoint_policy_api.delete_association_by_region( self.region[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id'])
true
true
f72cae2c89049bcff133dee51ea839c617f5fd7f
372
py
Python
supervisely/src/mask_image.py
supervisely-ecosystem/ritm-interactive-segmentation
c86df3c7c95ce20ffd3c9cc5e3f07abe8c162f4c
[ "MIT" ]
1
2022-03-25T14:36:18.000Z
2022-03-25T14:36:18.000Z
supervisely/src/mask_image.py
supervisely-ecosystem/ritm-interactive-segmentation
c86df3c7c95ce20ffd3c9cc5e3f07abe8c162f4c
[ "MIT" ]
null
null
null
supervisely/src/mask_image.py
supervisely-ecosystem/ritm-interactive-segmentation
c86df3c7c95ce20ffd3c9cc5e3f07abe8c162f4c
[ "MIT" ]
1
2022-03-17T06:39:39.000Z
2022-03-17T06:39:39.000Z
import sly_globals as g def get_mask_from_clicks(image_np, clicks_list): g.CONTROLLER.set_image(image_np) for click in clicks_list: g.CONTROLLER.add_click(click.coords[1], click.coords[0], click.is_positive) try: res_mask = g.CONTROLLER.result_mask except Exception(f"Couldn't process image"): res_mask = None return res_mask
28.615385
83
0.712366
import sly_globals as g def get_mask_from_clicks(image_np, clicks_list): g.CONTROLLER.set_image(image_np) for click in clicks_list: g.CONTROLLER.add_click(click.coords[1], click.coords[0], click.is_positive) try: res_mask = g.CONTROLLER.result_mask except Exception(f"Couldn't process image"): res_mask = None return res_mask
true
true
f72caed168c08d84dcc3dd7cb27e247c5df1716d
348
py
Python
Algorithms/kadane_algorithm/python-kadane-algorithm-O(n).py
omega07/Yet_Another_Algorithms_Repository
7c967e115e96b3c07010a3bf94ca1cdb898a6e82
[ "MIT" ]
33
2019-10-14T19:19:43.000Z
2021-11-30T13:40:20.000Z
Algorithms/kadane_algorithm/python-kadane-algorithm-O(n).py
omega07/Yet_Another_Algorithms_Repository
7c967e115e96b3c07010a3bf94ca1cdb898a6e82
[ "MIT" ]
317
2019-10-14T18:35:22.000Z
2020-03-03T17:45:06.000Z
Algorithms/kadane_algorithm/python-kadane-algorithm-O(n).py
omega07/Yet_Another_Algorithms_Repository
7c967e115e96b3c07010a3bf94ca1cdb898a6e82
[ "MIT" ]
332
2019-10-14T18:39:08.000Z
2021-09-02T16:19:11.000Z
def maxSubArraySum(a,size): max_so_far =a[0] curr_max = a[0] for i in range(1,size): curr_max = max(a[i], curr_max + a[i]) max_so_far = max(max_so_far,curr_max) return max_so_far a = [-2, -3, 4, -1, -2, 1, 5, -3] print("Maximum contiguous sum is" , maxSubArraySum(a,len(a)))
23.2
61
0.531609
def maxSubArraySum(a,size): max_so_far =a[0] curr_max = a[0] for i in range(1,size): curr_max = max(a[i], curr_max + a[i]) max_so_far = max(max_so_far,curr_max) return max_so_far a = [-2, -3, 4, -1, -2, 1, 5, -3] print("Maximum contiguous sum is" , maxSubArraySum(a,len(a)))
true
true
f72caeec9c99f7dddcbe170095eba9f6591f69ab
910
py
Python
dwavebinarycsp/package_info.py
JoelPasvolsky/dwavebinarycsp
ef260bff6d606d8176b287bb6e27a05d6f72de9f
[ "Apache-2.0" ]
null
null
null
dwavebinarycsp/package_info.py
JoelPasvolsky/dwavebinarycsp
ef260bff6d606d8176b287bb6e27a05d6f72de9f
[ "Apache-2.0" ]
null
null
null
dwavebinarycsp/package_info.py
JoelPasvolsky/dwavebinarycsp
ef260bff6d606d8176b287bb6e27a05d6f72de9f
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 D-Wave Systems Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ================================================================================================ __version__ = '0.1.3' __author__ = 'D-Wave Systems Inc.' __authoremail__ = 'acondello@dwavesys.com' __description__ = 'Solves constraints satisfaction problems with binary quadratic model samplers'
43.333333
98
0.661538
__version__ = '0.1.3' __author__ = 'D-Wave Systems Inc.' __authoremail__ = 'acondello@dwavesys.com' __description__ = 'Solves constraints satisfaction problems with binary quadratic model samplers'
true
true
f72cb12504a8487b6ebb6f694946918cd78f6d7b
267
py
Python
output/models/nist_data/atomic/integer/schema_instance/nistschema_sv_iv_atomic_integer_total_digits_2_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
1
2021-08-14T17:59:21.000Z
2021-08-14T17:59:21.000Z
output/models/nist_data/atomic/integer/schema_instance/nistschema_sv_iv_atomic_integer_total_digits_2_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
4
2020-02-12T21:30:44.000Z
2020-04-15T20:06:46.000Z
output/models/nist_data/atomic/integer/schema_instance/nistschema_sv_iv_atomic_integer_total_digits_2_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
null
null
null
from output.models.nist_data.atomic.integer.schema_instance.nistschema_sv_iv_atomic_integer_total_digits_2_xsd.nistschema_sv_iv_atomic_integer_total_digits_2 import NistschemaSvIvAtomicIntegerTotalDigits2 __all__ = [ "NistschemaSvIvAtomicIntegerTotalDigits2", ]
44.5
204
0.898876
from output.models.nist_data.atomic.integer.schema_instance.nistschema_sv_iv_atomic_integer_total_digits_2_xsd.nistschema_sv_iv_atomic_integer_total_digits_2 import NistschemaSvIvAtomicIntegerTotalDigits2 __all__ = [ "NistschemaSvIvAtomicIntegerTotalDigits2", ]
true
true
f72cb1f015fcd1360def9463bd8e6047da25b737
11,947
py
Python
examples/ex_icub_trust_cognitive_architecture/endorsement.py
riccardobrue/SOM-example-1
8a977e73844f9206ee1704be577f8a7521d2b306
[ "MIT" ]
null
null
null
examples/ex_icub_trust_cognitive_architecture/endorsement.py
riccardobrue/SOM-example-1
8a977e73844f9206ee1704be577f8a7521d2b306
[ "MIT" ]
null
null
null
examples/ex_icub_trust_cognitive_architecture/endorsement.py
riccardobrue/SOM-example-1
8a977e73844f9206ee1704be577f8a7521d2b306
[ "MIT" ]
1
2021-03-16T16:02:16.000Z
2021-03-16T16:02:16.000Z
#!/usr/bin/python # The MIT License (MIT) # # Copyright (c) 2017 Massimiliano Patacchiola # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
#ATTENTION: to work it requires to lunch the iCub world: # yarpserver # ./iCub_SIM # ./iKinGazeCtrl --from configSim.ini # yarpdev --device opencv_grabber # yarp connect /grabber /icubSim/texture/screen # # For the cartesian controller of the left arm # ./simCartesianControl # ./iKinCartesianSolver --context simCartesianControl --part left_arm # PocketSphinx valid Commands are: # The prefix [iCub] or [hey] is optional # learn <object name> # this is a <object name> # forget <object name> # what is this # find the <object name> # stop detection # look at me from speech_recognition import SpeechRecognizer from icub import iCub import cv2 import random import time import os import sys def initialise(): # Initialise the speech recognition engine and the iCub controller my_speech = SpeechRecognizer( hmm_path="/home/massimiliano/pyERA/examples/ex_icub_trust_cognitive_architecture/sphinx/model/en-us/en-us", language_model_path="/home/massimiliano/pyERA/examples/ex_icub_trust_cognitive_architecture/sphinx/model/en-us/en-us.lm.bin", dictionary_path="/home/massimiliano/pyERA/examples/ex_icub_trust_cognitive_architecture/sphinx/data/icub.dic", grammar_path="/home/massimiliano/pyERA/examples/ex_icub_trust_cognitive_architecture/sphinx/data/icub.gram", rule_name='icub.basicCmd', fsg_name="icub") # iCub initialization my_icub = iCub(icub_root='/icubSim') # Load acapela configuration from file my_icub.set_acapela_credential("./acapela_config.csv") account_login, application_login, application_password, service_url = my_icub.get_acapela_credential() print("[ACAPELA]Acapela configuration parameters:") print("Account Login: " + str(account_login)) print("Application Login: " + str(application_login)) print("Account Password: " + str(application_password)) print("Service URL: " + str(service_url)) print("") # Return the objects return my_speech, my_icub def speech_to_action(speech_string): """ Take the sentence from the speech recognition and plan an action <action> = (learn new 
object | watch | inspect | find | search | look | what | start | stop); <target> = (ball | cup | book | dog | chair | table | at me | is this | movement detection); @param speech_string: @return: """ if speech_string.find('learn') > -1 or speech_string.find('this is a') > -1: response_list = ['I like to learn! This is a ', 'Ok, this is a ', 'I learned a new object, ', ''] object_name = speech_string.rsplit(None, 1)[-1] response_string = response_list[random.randint(0, len(response_list)-1)] + object_name state = 'learn' elif speech_string.find('what is this') > -1: response_string = "" state = 'what' elif speech_string.find('find the') > -1 or speech_string.find('search the') > -1: object_name = speech_string.rsplit(None, 1)[-1] object_path = "./objects/" + str(object_name) + ".png" if not os.path.isfile(object_path): print("[SPEECH-TO-ACTION][WARNING] " + "this file does not exist: " + str(object_path) + "\n") response_string = "Sorry I do not know this object!" state = 'key' else: response_list = ["Ok, now I'm looking for a ", 'Ok I will track the ', 'Ready to track the '] response_string = response_list[random.randint(0, len(response_list)-1)] + object_name state = 'movedetect on' elif speech_string.find('stop detection') > -1: response_list = ["Ok, no more movements", 'Ok I will stop it', "I'm gonna stop it!"] response_string = response_list[random.randint(0, len(response_list)-1)] state = 'movedetect off' elif speech_string.find('look at me') > -1: response_list = ["Ok!", 'Sure!'] response_string = response_list[random.randint(0, len(response_list)-1)] state = 'look' else: response_list = ["Sorry I did not understand.", 'Sorry, can you repeat?', 'Repeat again please.'] response_string = response_list[random.randint(0,len(response_list)-1)] state = 'key' return response_string, state def main(): inputfile = '' outputfile = '' informant_name = '' if len(sys.argv) == 1 or len(sys.argv) > 4: print("python familiarization.py <inputfile> <outputfilename> 
<informant_name>") elif len(sys.argv) == 4: inputfile = sys.argv[1] outputfile = sys.argv[2] informant_name = sys.argv[3] print("Input file: " + str(inputfile)) print("Output file: " + str(outputfile)) print("Informant Name: " + str(informant_name)) STATE = 'show' speech_string = "" fovea_offset = 40 # side of the fovea square my_speech, my_icub = initialise() is_connected = my_icub.check_connection() if is_connected: print("[STATE Init] intenet connection present.") else: print("[STATE Init][ERROR] internet connection not present!!!") my_icub.say_something(text="I'm ready!") cv2.namedWindow('main') while True: if STATE == 'record': #image = my_icub.return_left_camera_image(mode='BGR') my_speech.record_audio("/tmp/audio.wav", seconds=3, extension='wav', harddev='3,0') raw_file_path = my_speech.convert_to_raw(file_name="/tmp/audio.wav", file_name_raw="/tmp/audio.raw", extension='wav') speech_string = my_speech.return_text_from_audio("/tmp/audio.raw") print("[STATE " + str(STATE) + "] " + "Speech recognised: " + speech_string) STATE = 'understand' elif STATE == 'understand': response_string, local_state = speech_to_action(speech_string) print("[STATE " + str(STATE) + "] " + "Speech recognised: " + speech_string) print("[STATE " + str(STATE) + "] " + "Next state: " + local_state) my_icub.say_something(text=response_string) STATE = local_state elif STATE == 'show': left_image = my_icub.return_left_camera_image(mode='BGR') img_cx = int(left_image.shape[1] / 2) img_cy = int(left_image.shape[0] / 2) cv2.rectangle(left_image, (img_cx-fovea_offset, img_cy-fovea_offset), (img_cx+fovea_offset, img_cy+fovea_offset), (0, 255, 0), 1) cv2.imshow('main', left_image) STATE = 'key' elif STATE == 'movedetect on': object_name = response_string.rsplit(None, 1)[-1] print("[STATE " + str(STATE) + "] " + "start tracking of: " + str(object_name) + "\n") object_path = "./objects/" + str(object_name) + ".png" if my_icub.is_movement_detection(): my_icub.stop_movement_detection() 
time.sleep(0.5) my_icub.start_movement_detection(template_path=object_path, delay=1.0) else: my_icub.start_movement_detection(template_path=object_path, delay=1.0) STATE = 'key' elif STATE == 'movedetect off': print("[STATE " + str(STATE) + "] " + "stop movement tracking" + "\n") my_icub.stop_movement_detection() time.sleep(0.5) my_icub.reset_head_pose() STATE = 'key' elif STATE == 'look': print("[STATE " + str(STATE) + "] " + "gaze reset" + "\n") my_icub.reset_head_pose() STATE = 'key' elif STATE == 'learn': object_name = response_string.rsplit(None, 1)[-1] print("[STATE " + str(STATE) + "] " + "Learning new object: " + object_name + "\n") left_image = my_icub.return_left_camera_image(mode='BGR') #left_image = image img_cx = int(left_image.shape[1] / 2) img_cy = int(left_image.shape[0] / 2) left_image = left_image[img_cy-fovea_offset:img_cy+fovea_offset, img_cx-fovea_offset:img_cx+fovea_offset] my_icub.learn_object_from_histogram(left_image, object_name) print("[STATE " + str(STATE) + "] " + "Writing new template in ./objects/" + object_name + ".png" + "\n") cv2.imwrite('./objects/' + str(object_name) + '.png', left_image) STATE = 'key' elif STATE == 'what': print("[STATE " + str(STATE) + "] " + "Recalling object from memory..." + "\n") left_image = my_icub.return_left_camera_image(mode='BGR') #left_image = image img_cx = int(left_image.shape[1] / 2) img_cy = int(left_image.shape[0] / 2) left_image = left_image[img_cy-25:img_cy+25, img_cx-25:img_cx+25] object_name = my_icub.recall_object_from_histogram(left_image) if object_name is None: my_icub.say_something("My memory is empty. Teach me something!") else: print("[STATE " + str(STATE) + "] " + "Name returned: " + str(object_name) + "\n") response_list = ["Let me see. I think this is a ", "Let me think. It's a ", "Just a second. 
It may be a ", "It should be a "] response_string = response_list[random.randint(0, len(response_list) - 1)] my_icub.say_something(response_string + str(object_name)) STATE = 'key' elif STATE == 'key': key_pressed = cv2.waitKey(10) # delay in millisecond if key_pressed==113: #q=QUIT print("[STATE " + str(STATE) + "] " + "Button (q)uit pressed..." + "\n") STATE = "close" elif key_pressed==110: #n= print("[STATE " + str(STATE) + "] " + "Button (n) pressed..." + "\n") elif key_pressed==102: #f= print("[STATE " + str(STATE) + "] " + "Button (f) pressed..." + "\n") elif key_pressed == 114: # r=RECORD print("[STATE " + str(STATE) + "] " + "Button (r)ecord pressed..." + "\n") STATE = "record" else: STATE = 'show' elif STATE == 'close': my_icub.say_something(text="See you soon, bye bye!") my_icub.stop_movement_detection() my_icub.close() cv2.destroyAllWindows() break if __name__ == "__main__": main()
44.913534
133
0.601657
from speech_recognition import SpeechRecognizer from icub import iCub import cv2 import random import time import os import sys def initialise(): my_speech = SpeechRecognizer( hmm_path="/home/massimiliano/pyERA/examples/ex_icub_trust_cognitive_architecture/sphinx/model/en-us/en-us", language_model_path="/home/massimiliano/pyERA/examples/ex_icub_trust_cognitive_architecture/sphinx/model/en-us/en-us.lm.bin", dictionary_path="/home/massimiliano/pyERA/examples/ex_icub_trust_cognitive_architecture/sphinx/data/icub.dic", grammar_path="/home/massimiliano/pyERA/examples/ex_icub_trust_cognitive_architecture/sphinx/data/icub.gram", rule_name='icub.basicCmd', fsg_name="icub") my_icub = iCub(icub_root='/icubSim') my_icub.set_acapela_credential("./acapela_config.csv") account_login, application_login, application_password, service_url = my_icub.get_acapela_credential() print("[ACAPELA]Acapela configuration parameters:") print("Account Login: " + str(account_login)) print("Application Login: " + str(application_login)) print("Account Password: " + str(application_password)) print("Service URL: " + str(service_url)) print("") return my_speech, my_icub def speech_to_action(speech_string): if speech_string.find('learn') > -1 or speech_string.find('this is a') > -1: response_list = ['I like to learn! This is a ', 'Ok, this is a ', 'I learned a new object, ', ''] object_name = speech_string.rsplit(None, 1)[-1] response_string = response_list[random.randint(0, len(response_list)-1)] + object_name state = 'learn' elif speech_string.find('what is this') > -1: response_string = "" state = 'what' elif speech_string.find('find the') > -1 or speech_string.find('search the') > -1: object_name = speech_string.rsplit(None, 1)[-1] object_path = "./objects/" + str(object_name) + ".png" if not os.path.isfile(object_path): print("[SPEECH-TO-ACTION][WARNING] " + "this file does not exist: " + str(object_path) + "\n") response_string = "Sorry I do not know this object!" 
state = 'key' else: response_list = ["Ok, now I'm looking for a ", 'Ok I will track the ', 'Ready to track the '] response_string = response_list[random.randint(0, len(response_list)-1)] + object_name state = 'movedetect on' elif speech_string.find('stop detection') > -1: response_list = ["Ok, no more movements", 'Ok I will stop it', "I'm gonna stop it!"] response_string = response_list[random.randint(0, len(response_list)-1)] state = 'movedetect off' elif speech_string.find('look at me') > -1: response_list = ["Ok!", 'Sure!'] response_string = response_list[random.randint(0, len(response_list)-1)] state = 'look' else: response_list = ["Sorry I did not understand.", 'Sorry, can you repeat?', 'Repeat again please.'] response_string = response_list[random.randint(0,len(response_list)-1)] state = 'key' return response_string, state def main(): inputfile = '' outputfile = '' informant_name = '' if len(sys.argv) == 1 or len(sys.argv) > 4: print("python familiarization.py <inputfile> <outputfilename> <informant_name>") elif len(sys.argv) == 4: inputfile = sys.argv[1] outputfile = sys.argv[2] informant_name = sys.argv[3] print("Input file: " + str(inputfile)) print("Output file: " + str(outputfile)) print("Informant Name: " + str(informant_name)) STATE = 'show' speech_string = "" fovea_offset = 40 my_speech, my_icub = initialise() is_connected = my_icub.check_connection() if is_connected: print("[STATE Init] intenet connection present.") else: print("[STATE Init][ERROR] internet connection not present!!!") my_icub.say_something(text="I'm ready!") cv2.namedWindow('main') while True: if STATE == 'record': #image = my_icub.return_left_camera_image(mode='BGR') my_speech.record_audio("/tmp/audio.wav", seconds=3, extension='wav', harddev='3,0') raw_file_path = my_speech.convert_to_raw(file_name="/tmp/audio.wav", file_name_raw="/tmp/audio.raw", extension='wav') speech_string = my_speech.return_text_from_audio("/tmp/audio.raw") print("[STATE " + str(STATE) + "] " + "Speech 
recognised: " + speech_string) STATE = 'understand' elif STATE == 'understand': response_string, local_state = speech_to_action(speech_string) print("[STATE " + str(STATE) + "] " + "Speech recognised: " + speech_string) print("[STATE " + str(STATE) + "] " + "Next state: " + local_state) my_icub.say_something(text=response_string) STATE = local_state elif STATE == 'show': left_image = my_icub.return_left_camera_image(mode='BGR') img_cx = int(left_image.shape[1] / 2) img_cy = int(left_image.shape[0] / 2) cv2.rectangle(left_image, (img_cx-fovea_offset, img_cy-fovea_offset), (img_cx+fovea_offset, img_cy+fovea_offset), (0, 255, 0), 1) cv2.imshow('main', left_image) STATE = 'key' elif STATE == 'movedetect on': object_name = response_string.rsplit(None, 1)[-1] print("[STATE " + str(STATE) + "] " + "start tracking of: " + str(object_name) + "\n") object_path = "./objects/" + str(object_name) + ".png" if my_icub.is_movement_detection(): my_icub.stop_movement_detection() time.sleep(0.5) my_icub.start_movement_detection(template_path=object_path, delay=1.0) else: my_icub.start_movement_detection(template_path=object_path, delay=1.0) STATE = 'key' elif STATE == 'movedetect off': print("[STATE " + str(STATE) + "] " + "stop movement tracking" + "\n") my_icub.stop_movement_detection() time.sleep(0.5) my_icub.reset_head_pose() STATE = 'key' elif STATE == 'look': print("[STATE " + str(STATE) + "] " + "gaze reset" + "\n") my_icub.reset_head_pose() STATE = 'key' elif STATE == 'learn': object_name = response_string.rsplit(None, 1)[-1] print("[STATE " + str(STATE) + "] " + "Learning new object: " + object_name + "\n") left_image = my_icub.return_left_camera_image(mode='BGR') #left_image = image img_cx = int(left_image.shape[1] / 2) img_cy = int(left_image.shape[0] / 2) left_image = left_image[img_cy-fovea_offset:img_cy+fovea_offset, img_cx-fovea_offset:img_cx+fovea_offset] my_icub.learn_object_from_histogram(left_image, object_name) print("[STATE " + str(STATE) + "] " + "Writing new 
template in ./objects/" + object_name + ".png" + "\n") cv2.imwrite('./objects/' + str(object_name) + '.png', left_image) STATE = 'key' elif STATE == 'what': print("[STATE " + str(STATE) + "] " + "Recalling object from memory..." + "\n") left_image = my_icub.return_left_camera_image(mode='BGR') #left_image = image img_cx = int(left_image.shape[1] / 2) img_cy = int(left_image.shape[0] / 2) left_image = left_image[img_cy-25:img_cy+25, img_cx-25:img_cx+25] object_name = my_icub.recall_object_from_histogram(left_image) if object_name is None: my_icub.say_something("My memory is empty. Teach me something!") else: print("[STATE " + str(STATE) + "] " + "Name returned: " + str(object_name) + "\n") response_list = ["Let me see. I think this is a ", "Let me think. It's a ", "Just a second. It may be a ", "It should be a "] response_string = response_list[random.randint(0, len(response_list) - 1)] my_icub.say_something(response_string + str(object_name)) STATE = 'key' elif STATE == 'key': key_pressed = cv2.waitKey(10) if key_pressed==113: print("[STATE " + str(STATE) + "] " + "Button (q)uit pressed..." + "\n") STATE = "close" elif key_pressed==110: print("[STATE " + str(STATE) + "] " + "Button (n) pressed..." + "\n") elif key_pressed==102: print("[STATE " + str(STATE) + "] " + "Button (f) pressed..." + "\n") elif key_pressed == 114: print("[STATE " + str(STATE) + "] " + "Button (r)ecord pressed..." + "\n") STATE = "record" else: STATE = 'show' elif STATE == 'close': my_icub.say_something(text="See you soon, bye bye!") my_icub.stop_movement_detection() my_icub.close() cv2.destroyAllWindows() break if __name__ == "__main__": main()
true
true
f72cb22b484e4768378d4a3b0201733382c540d7
2,332
py
Python
tests/integration/test_sdv.py
joanvaquer/SDV
83e4fdf0ff72e6c5b72cfc8c6ec9584dbd34de28
[ "MIT" ]
null
null
null
tests/integration/test_sdv.py
joanvaquer/SDV
83e4fdf0ff72e6c5b72cfc8c6ec9584dbd34de28
[ "MIT" ]
null
null
null
tests/integration/test_sdv.py
joanvaquer/SDV
83e4fdf0ff72e6c5b72cfc8c6ec9584dbd34de28
[ "MIT" ]
null
null
null
from sdv import SDV, load_demo def test_sdv(): metadata, tables = load_demo(metadata=True) sdv = SDV() sdv.fit(metadata, tables) # Sample all sampled = sdv.sample_all() assert set(sampled.keys()) == {'users', 'sessions', 'transactions'} assert len(sampled['users']) == 10 # Sample with children sampled = sdv.sample('users', reset_primary_keys=True) assert set(sampled.keys()) == {'users', 'sessions', 'transactions'} assert len(sampled['users']) == 10 # Sample without children users = sdv.sample('users', sample_children=False) assert users.shape == tables['users'].shape assert set(users.columns) == set(tables['users'].columns) sessions = sdv.sample('sessions', sample_children=False) assert sessions.shape == tables['sessions'].shape assert set(sessions.columns) == set(tables['sessions'].columns) transactions = sdv.sample('transactions', sample_children=False) assert transactions.shape == tables['transactions'].shape assert set(transactions.columns) == set(tables['transactions'].columns) def test_sdv_multiparent(): metadata, tables = load_demo('got_families', metadata=True) sdv = SDV() sdv.fit(metadata, tables) # Sample all sampled = sdv.sample_all() assert set(sampled.keys()) == {'characters', 'families', 'character_families'} assert len(sampled['characters']) == 7 # Sample with children sampled = sdv.sample('characters', reset_primary_keys=True) assert set(sampled.keys()) == {'characters', 'character_families'} assert len(sampled['characters']) == 7 assert 'family_id' in sampled['character_families'] # Sample without children characters = sdv.sample('characters', sample_children=False) assert characters.shape == tables['characters'].shape assert set(characters.columns) == set(tables['characters'].columns) families = sdv.sample('families', sample_children=False) assert families.shape == tables['families'].shape assert set(families.columns) == set(tables['families'].columns) character_families = sdv.sample('character_families', sample_children=False) assert 
character_families.shape == tables['character_families'].shape assert set(character_families.columns) == set(tables['character_families'].columns)
31.945205
87
0.694683
from sdv import SDV, load_demo def test_sdv(): metadata, tables = load_demo(metadata=True) sdv = SDV() sdv.fit(metadata, tables) sampled = sdv.sample_all() assert set(sampled.keys()) == {'users', 'sessions', 'transactions'} assert len(sampled['users']) == 10 sampled = sdv.sample('users', reset_primary_keys=True) assert set(sampled.keys()) == {'users', 'sessions', 'transactions'} assert len(sampled['users']) == 10 users = sdv.sample('users', sample_children=False) assert users.shape == tables['users'].shape assert set(users.columns) == set(tables['users'].columns) sessions = sdv.sample('sessions', sample_children=False) assert sessions.shape == tables['sessions'].shape assert set(sessions.columns) == set(tables['sessions'].columns) transactions = sdv.sample('transactions', sample_children=False) assert transactions.shape == tables['transactions'].shape assert set(transactions.columns) == set(tables['transactions'].columns) def test_sdv_multiparent(): metadata, tables = load_demo('got_families', metadata=True) sdv = SDV() sdv.fit(metadata, tables) sampled = sdv.sample_all() assert set(sampled.keys()) == {'characters', 'families', 'character_families'} assert len(sampled['characters']) == 7 sampled = sdv.sample('characters', reset_primary_keys=True) assert set(sampled.keys()) == {'characters', 'character_families'} assert len(sampled['characters']) == 7 assert 'family_id' in sampled['character_families'] characters = sdv.sample('characters', sample_children=False) assert characters.shape == tables['characters'].shape assert set(characters.columns) == set(tables['characters'].columns) families = sdv.sample('families', sample_children=False) assert families.shape == tables['families'].shape assert set(families.columns) == set(tables['families'].columns) character_families = sdv.sample('character_families', sample_children=False) assert character_families.shape == tables['character_families'].shape assert set(character_families.columns) == 
set(tables['character_families'].columns)
true
true
f72cb255bbd9dbaa14f82003586431b14c8cdf93
340
py
Python
WebApp/admin.py
divij-pherwani/PythonProject
3ba262be580022cffc840f4cf967363eb7d3417b
[ "MIT" ]
null
null
null
WebApp/admin.py
divij-pherwani/PythonProject
3ba262be580022cffc840f4cf967363eb7d3417b
[ "MIT" ]
null
null
null
WebApp/admin.py
divij-pherwani/PythonProject
3ba262be580022cffc840f4cf967363eb7d3417b
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import StudentDetail, UniversityDetail, CourseDetail, CourseName, ApplicationDetail admin.site.register(StudentDetail) admin.site.register(UniversityDetail) admin.site.register(CourseDetail) admin.site.register(CourseName) admin.site.register(ApplicationDetail) # Register your models here.
28.333333
96
0.844118
from django.contrib import admin from .models import StudentDetail, UniversityDetail, CourseDetail, CourseName, ApplicationDetail admin.site.register(StudentDetail) admin.site.register(UniversityDetail) admin.site.register(CourseDetail) admin.site.register(CourseName) admin.site.register(ApplicationDetail)
true
true
f72cb2583a8f94f5dbbfd81abcd00d5e5a7903fa
2,490
py
Python
serpcord/models/guild.py
PgBiel/serpcord
482736dc691027417edcd6500cdfbf9053f92b63
[ "MIT" ]
null
null
null
serpcord/models/guild.py
PgBiel/serpcord
482736dc691027417edcd6500cdfbf9053f92b63
[ "MIT" ]
null
null
null
serpcord/models/guild.py
PgBiel/serpcord
482736dc691027417edcd6500cdfbf9053f92b63
[ "MIT" ]
null
null
null
import typing import datetime from typing import Mapping, Any, Optional, Iterable, List from .model_abc import JsonAPIModel from .snowflake import Snowflake from .user import User from .enums import PermissionFlags from .permissions import Role from serpcord.utils.model import _init_model_from_mapping_json_data if typing.TYPE_CHECKING: from serpcord.botclient import BotClient class GuildMember(JsonAPIModel[Mapping[str, Any]]): # TODO: Optional[Guild] - make sure the guild itself adds itself def __init__(self, client: "BotClient", user: User, # TODO: docs + slots *, nick: Optional[str] = None, guild_avatar_hash: Optional[str] = None, role_ids: Iterable[Snowflake], roles: Iterable[Role], joined_at: datetime.datetime, premium_since: Optional[datetime.datetime] = None, is_deaf: bool, is_muted: bool, is_pending: bool = False, permissions: Optional[PermissionFlags] = None, communication_disabled_until: Optional[datetime.datetime] = None): self.client: "BotClient" = client self.user: User = user # NOTE: Must be injected in MESSAGE_CREATE / MESSAGE_UPDATE events (not provided by API) self.nick: Optional[str] = str(nick) if nick is not None else None self.guild_avatar_hash: Optional[str] = str(guild_avatar_hash) if guild_avatar_hash is not None else None self.role_ids: List[Snowflake] = list(role_ids) self.joined_at: datetime.datetime = joined_at self.premium_since: Optional[datetime.datetime] = premium_since self.is_deaf = bool(is_deaf) self.is_muted = bool(is_muted) self.is_pending = bool(is_pending) self.permissions = PermissionFlags(permissions) if permissions is not None else None self.communication_disabled_until: Optional[datetime.datetime] = communication_disabled_until @property def id(self) -> Snowflake: return self.user.id @property def username(self) -> str: return self.user.username @property def display_name(self) -> str: return self.nick or self.username @classmethod def _from_json_data(cls, client: "BotClient", json_data: Mapping[str, Any]): return 
_init_model_from_mapping_json_data(cls, client, json_data, rename=dict( avatar="guild_avatar_hash", roles="role_ids", deaf="is_deaf", muted="is_muted", pending="is_pending" ), type_check_types=True)
46.111111
120
0.701606
import typing import datetime from typing import Mapping, Any, Optional, Iterable, List from .model_abc import JsonAPIModel from .snowflake import Snowflake from .user import User from .enums import PermissionFlags from .permissions import Role from serpcord.utils.model import _init_model_from_mapping_json_data if typing.TYPE_CHECKING: from serpcord.botclient import BotClient class GuildMember(JsonAPIModel[Mapping[str, Any]]): def __init__(self, client: "BotClient", user: User, *, nick: Optional[str] = None, guild_avatar_hash: Optional[str] = None, role_ids: Iterable[Snowflake], roles: Iterable[Role], joined_at: datetime.datetime, premium_since: Optional[datetime.datetime] = None, is_deaf: bool, is_muted: bool, is_pending: bool = False, permissions: Optional[PermissionFlags] = None, communication_disabled_until: Optional[datetime.datetime] = None): self.client: "BotClient" = client self.user: User = user self.nick: Optional[str] = str(nick) if nick is not None else None self.guild_avatar_hash: Optional[str] = str(guild_avatar_hash) if guild_avatar_hash is not None else None self.role_ids: List[Snowflake] = list(role_ids) self.joined_at: datetime.datetime = joined_at self.premium_since: Optional[datetime.datetime] = premium_since self.is_deaf = bool(is_deaf) self.is_muted = bool(is_muted) self.is_pending = bool(is_pending) self.permissions = PermissionFlags(permissions) if permissions is not None else None self.communication_disabled_until: Optional[datetime.datetime] = communication_disabled_until @property def id(self) -> Snowflake: return self.user.id @property def username(self) -> str: return self.user.username @property def display_name(self) -> str: return self.nick or self.username @classmethod def _from_json_data(cls, client: "BotClient", json_data: Mapping[str, Any]): return _init_model_from_mapping_json_data(cls, client, json_data, rename=dict( avatar="guild_avatar_hash", roles="role_ids", deaf="is_deaf", muted="is_muted", pending="is_pending" ), 
type_check_types=True)
true
true
f72cb27896211cd7a2fb7552b1e8abcbeb59a726
713
py
Python
dns/migrations/0016_autozones_path.py
prorevizor/noc
37e44b8afc64318b10699c06a1138eee9e7d6a4e
[ "BSD-3-Clause" ]
84
2017-10-22T11:01:39.000Z
2022-02-27T03:43:48.000Z
dns/migrations/0016_autozones_path.py
prorevizor/noc
37e44b8afc64318b10699c06a1138eee9e7d6a4e
[ "BSD-3-Clause" ]
22
2017-12-11T07:21:56.000Z
2021-09-23T02:53:50.000Z
dns/migrations/0016_autozones_path.py
prorevizor/noc
37e44b8afc64318b10699c06a1138eee9e7d6a4e
[ "BSD-3-Clause" ]
23
2017-12-06T06:59:52.000Z
2022-02-24T00:02:25.000Z
# ---------------------------------------------------------------------- # autozones_path # ---------------------------------------------------------------------- # Copyright (C) 2007-2019 The NOC Project # See LICENSE for details # ---------------------------------------------------------------------- # Third-party modules from django.db import models # NOC modules from noc.core.migration.base import BaseMigration class Migration(BaseMigration): def migrate(self): self.db.add_column( "dns_dnsserver", "autozones_path", models.CharField( "Autozones path", max_length=256, blank=True, null=True, default="autozones" ), )
29.708333
92
0.4446
from django.db import models from noc.core.migration.base import BaseMigration class Migration(BaseMigration): def migrate(self): self.db.add_column( "dns_dnsserver", "autozones_path", models.CharField( "Autozones path", max_length=256, blank=True, null=True, default="autozones" ), )
true
true
f72cb379e5c099506c5177d3a7d4578f63d14794
8,765
py
Python
models/resnet_cifar_quant.py
mengjian0502/GroupLasso_Quant
1c54c940739babf86e362ffc57752c2aa4c8986d
[ "MIT" ]
null
null
null
models/resnet_cifar_quant.py
mengjian0502/GroupLasso_Quant
1c54c940739babf86e362ffc57752c2aa4c8986d
[ "MIT" ]
null
null
null
models/resnet_cifar_quant.py
mengjian0502/GroupLasso_Quant
1c54c940739babf86e362ffc57752c2aa4c8986d
[ "MIT" ]
null
null
null
""" ResNet on CIFAR10 """ import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init from .quant import ClippedReLU, int_conv2d, int_linear from .mpdr_score import get_mpdr_score import math class DownsampleA(nn.Module): def __init__(self, nIn, nOut, stride): super(DownsampleA, self).__init__() assert stride == 2 self.avg = nn.AvgPool2d(kernel_size=1, stride=stride) def forward(self, x): x = self.avg(x) return torch.cat((x, x.mul(0)), 1) class ResNetBasicblock(nn.Module): expansion = 1 """ RexNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua) """ def __init__(self, inplanes, planes, stride=1, downsample=None, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False): super(ResNetBasicblock, self).__init__() # self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False) # quantization self.conv_a = int_conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=push) # quantization self.bn_a = nn.BatchNorm2d(planes) self.relu1 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) # Clipped ReLU function 4 - bits # self.relu1 = nn.ReLU(inplace=True) self.conv_b = int_conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=push) # quantization # self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) # quantization self.bn_b = nn.BatchNorm2d(planes) self.relu2 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) # Clipped ReLU function 4 - bits self.downsample = downsample def forward(self, x): residual = x basicblock = self.conv_a(x) basicblock = self.bn_a(basicblock) basicblock = self.relu1(basicblock) basicblock = self.conv_b(basicblock) basicblock = self.bn_b(basicblock) if self.downsample is not None: residual = self.downsample(x) return self.relu2(residual + 
basicblock) class CifarResNet(nn.Module): """ ResNet optimized for the Cifar dataset, as specified in https://arxiv.org/abs/1512.03385.pdf """ def __init__(self, depth, num_classes, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False): """ Constructor Args: depth: number of layers. num_classes: number of classes base_width: base width """ super(CifarResNet, self).__init__() block = ResNetBasicblock #Model type specifies number of layers for CIFAR-10 and CIFAR-100 model assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110' layer_blocks = (depth - 2) // 6 print ('CifarResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks)) self.num_classes = num_classes self.ch_group = ch_group # self.conv_1_3x3 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False) self.conv_1_3x3 = int_conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False) # skip the push process for the first conv layer self.relu0 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) self.bn_1 = nn.BatchNorm2d(16) self.inplanes = 16 self.stage_1 = self._make_layer(block, 16, layer_blocks, 1, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push) self.stage_2 = self._make_layer(block, 32, layer_blocks, 2, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push) self.stage_3 = self._make_layer(block, 64, layer_blocks, 2, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push) self.avgpool = nn.AvgPool2d(8) self.classifier = int_linear(64*block.expansion, num_classes, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False) # skip the push process for the last fc layer for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) #m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): init.kaiming_normal_(m.weight) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( int_conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)) return nn.Sequential(*layers) def forward(self, x): x = self.conv_1_3x3(x) x = self.relu0(self.bn_1(x)) x = self.stage_1(x) x = self.stage_2(x) x = self.stage_3(x) x = self.avgpool(x) x = x.view(x.size(0), -1) return self.classifier(x) def get_group_val(self): val = torch.Tensor() if torch.cuda.is_available(): val = val.cuda() count = 0 for m in self.modules(): if isinstance(m, int_conv2d): kw = m.weight.size(2) if kw != 1: if not count in [0]: w_l = m.weight num_group = w_l.size(0) * w_l.size(1) // self.ch_group w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw) w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw)) g = w_l.pow(2).sum(dim=1).pow(1/2) val = torch.cat((val.view(-1), g.view(-1))) count += 1 return val def get_global_thre(self, ratio): grp_val = self.get_group_val() # grp_mean = grp_val.mean() # threshold = ratio * grp_mean sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1)) thre_index = int(grp_val.data.numel() * ratio) 
threshold = sorted_block_values[thre_index] return threshold def get_group_mp(self): val = torch.Tensor() if torch.cuda.is_available(): val = val.cuda() count = 0 for m in self.modules(): if isinstance(m, int_conv2d): kw = m.weight.size(2) if kw != 1: if not count in [0]: w_l = m.weight num_group = w_l.size(0) * w_l.size(1) // self.ch_group w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw) w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw)) g = w_l.abs().mean(dim=1) val = torch.cat((val.view(-1), g.view(-1))) count += 1 return val def get_global_mp_thre(self, ratio): grp_val = self.get_group_mp() sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1)) thre_index = int(grp_val.data.numel() * ratio) threshold = sorted_block_values[thre_index] return threshold def get_group_mpdr(self): val = torch.Tensor() if torch.cuda.is_available(): val = val.cuda() count = 0 for m in self.modules(): if isinstance(m, int_conv2d): kw = m.weight.size(2) if kw != 1: if not count in [0]: w_l = get_mpdr_score(m.weight) num_group = w_l.size(0) * w_l.size(1) // self.ch_group w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw) w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw)) g = w_l.mean(dim=1) # compute the mean of the mpdr score val = torch.cat((val.view(-1), g.view(-1))) count += 1 return val def get_global_mpdr_thre(self, ratio): grp_val = self.get_group_mpdr() sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1)) thre_index = int(grp_val.data.numel() * ratio) threshold = sorted_block_values[thre_index] return threshold class resnet20_quant: base=CifarResNet args = list() kwargs = {'depth': 20} class resnet32_quant: base=CifarResNet args = list() kwargs = {'depth': 32}
37.780172
196
0.650542
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init from .quant import ClippedReLU, int_conv2d, int_linear from .mpdr_score import get_mpdr_score import math class DownsampleA(nn.Module): def __init__(self, nIn, nOut, stride): super(DownsampleA, self).__init__() assert stride == 2 self.avg = nn.AvgPool2d(kernel_size=1, stride=stride) def forward(self, x): x = self.avg(x) return torch.cat((x, x.mul(0)), 1) class ResNetBasicblock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False): super(ResNetBasicblock, self).__init__() _a = int_conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=push) self.bn_a = nn.BatchNorm2d(planes) self.relu1 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) self.conv_b = int_conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=push) = nn.BatchNorm2d(planes) self.relu2 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) self.downsample = downsample def forward(self, x): residual = x basicblock = self.conv_a(x) basicblock = self.bn_a(basicblock) basicblock = self.relu1(basicblock) basicblock = self.conv_b(basicblock) basicblock = self.bn_b(basicblock) if self.downsample is not None: residual = self.downsample(x) return self.relu2(residual + basicblock) class CifarResNet(nn.Module): def __init__(self, depth, num_classes, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False): super(CifarResNet, self).__init__() block = ResNetBasicblock assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110' layer_blocks = (depth - 2) // 6 print ('CifarResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks)) self.num_classes = num_classes self.ch_group = ch_group self.conv_1_3x3 = 
int_conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False) self.relu0 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) self.bn_1 = nn.BatchNorm2d(16) self.inplanes = 16 self.stage_1 = self._make_layer(block, 16, layer_blocks, 1, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push) self.stage_2 = self._make_layer(block, 32, layer_blocks, 2, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push) self.stage_3 = self._make_layer(block, 64, layer_blocks, 2, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push) self.avgpool = nn.AvgPool2d(8) self.classifier = int_linear(64*block.expansion, num_classes, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): init.kaiming_normal_(m.weight) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( int_conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)) return nn.Sequential(*layers) 
def forward(self, x): x = self.conv_1_3x3(x) x = self.relu0(self.bn_1(x)) x = self.stage_1(x) x = self.stage_2(x) x = self.stage_3(x) x = self.avgpool(x) x = x.view(x.size(0), -1) return self.classifier(x) def get_group_val(self): val = torch.Tensor() if torch.cuda.is_available(): val = val.cuda() count = 0 for m in self.modules(): if isinstance(m, int_conv2d): kw = m.weight.size(2) if kw != 1: if not count in [0]: w_l = m.weight num_group = w_l.size(0) * w_l.size(1) // self.ch_group w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw) w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw)) g = w_l.pow(2).sum(dim=1).pow(1/2) val = torch.cat((val.view(-1), g.view(-1))) count += 1 return val def get_global_thre(self, ratio): grp_val = self.get_group_val() sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1)) thre_index = int(grp_val.data.numel() * ratio) threshold = sorted_block_values[thre_index] return threshold def get_group_mp(self): val = torch.Tensor() if torch.cuda.is_available(): val = val.cuda() count = 0 for m in self.modules(): if isinstance(m, int_conv2d): kw = m.weight.size(2) if kw != 1: if not count in [0]: w_l = m.weight num_group = w_l.size(0) * w_l.size(1) // self.ch_group w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw) w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw)) g = w_l.abs().mean(dim=1) val = torch.cat((val.view(-1), g.view(-1))) count += 1 return val def get_global_mp_thre(self, ratio): grp_val = self.get_group_mp() sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1)) thre_index = int(grp_val.data.numel() * ratio) threshold = sorted_block_values[thre_index] return threshold def get_group_mpdr(self): val = torch.Tensor() if torch.cuda.is_available(): val = val.cuda() count = 0 for m in self.modules(): if isinstance(m, int_conv2d): kw = m.weight.size(2) if kw != 1: if not count in [0]: w_l = get_mpdr_score(m.weight) num_group 
= w_l.size(0) * w_l.size(1) // self.ch_group w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw) w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw)) g = w_l.mean(dim=1) val = torch.cat((val.view(-1), g.view(-1))) count += 1 return val def get_global_mpdr_thre(self, ratio): grp_val = self.get_group_mpdr() sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1)) thre_index = int(grp_val.data.numel() * ratio) threshold = sorted_block_values[thre_index] return threshold class resnet20_quant: base=CifarResNet args = list() kwargs = {'depth': 20} class resnet32_quant: base=CifarResNet args = list() kwargs = {'depth': 32}
true
true
f72cb40930dc9e29198e8bc1f4a2818b2e161a8f
449
py
Python
util.py
codefordc/us-congress-pizza-flag-tracker
766c72e01e2c01342d4c6dbe2108fded2022ee74
[ "CC0-1.0" ]
5
2021-01-31T14:29:43.000Z
2021-07-15T16:22:30.000Z
util.py
rajindermavi/us-congress-pizza-flag-tracker
10827f3d6f2ef0cc434a475fc9782fc840cb81ab
[ "CC0-1.0" ]
85
2021-05-12T23:31:29.000Z
2022-03-30T21:23:58.000Z
util.py
rajindermavi/us-congress-pizza-flag-tracker
10827f3d6f2ef0cc434a475fc9782fc840cb81ab
[ "CC0-1.0" ]
8
2021-04-11T16:44:15.000Z
2021-10-30T21:14:17.000Z
import json from config import db from models import UserModel def table_record_to_json(record): modelClass = type(record) columns = [record for record in filter(lambda item: not item.startswith('_'),modelClass.__dict__)] json_value = {column_name: str(getattr(record, column_name))for column_name in columns} return json_value def table_to_json(table): return { "data": [table_record_to_json(record) for record in table] }
28.0625
102
0.752784
import json from config import db from models import UserModel def table_record_to_json(record): modelClass = type(record) columns = [record for record in filter(lambda item: not item.startswith('_'),modelClass.__dict__)] json_value = {column_name: str(getattr(record, column_name))for column_name in columns} return json_value def table_to_json(table): return { "data": [table_record_to_json(record) for record in table] }
true
true
f72cb478099ad21f4b980eaa5ef8fdbe1740ca81
519
py
Python
models/utils.py
clabrugere/numpy-basics
81efb4b8ac58fc17dc8f6c676004bbc3a99a92c3
[ "MIT" ]
1
2020-10-27T18:05:26.000Z
2020-10-27T18:05:26.000Z
models/utils.py
clabrugere/numpy-basics
81efb4b8ac58fc17dc8f6c676004bbc3a99a92c3
[ "MIT" ]
null
null
null
models/utils.py
clabrugere/numpy-basics
81efb4b8ac58fc17dc8f6c676004bbc3a99a92c3
[ "MIT" ]
null
null
null
import numpy as np def confusion_matrix(y_true, y_hat, threshold=.5): def _to_class(y): return np.array([1 if i >= threshold else 0 for i in y]) n_classes = len(np.unique(y_true)) cm = np.zeros((n_classes, n_classes)) y_hat = _to_class(y_hat) for a, p in zip(y_true, y_hat): cm[a, p] += 1 return cm def f1_score(cm): precision = cm[0, 0] / cm[0, :].sum() recall = cm[0, 0] / cm[:, 0].sum() return 2 * (precision * recall) / (precision + recall)
24.714286
64
0.572254
import numpy as np def confusion_matrix(y_true, y_hat, threshold=.5): def _to_class(y): return np.array([1 if i >= threshold else 0 for i in y]) n_classes = len(np.unique(y_true)) cm = np.zeros((n_classes, n_classes)) y_hat = _to_class(y_hat) for a, p in zip(y_true, y_hat): cm[a, p] += 1 return cm def f1_score(cm): precision = cm[0, 0] / cm[0, :].sum() recall = cm[0, 0] / cm[:, 0].sum() return 2 * (precision * recall) / (precision + recall)
true
true
f72cb4e3d578253909cb6f62152c5f20859236b5
276
py
Python
translator/app/modules/speech.py
sharad461/nepali-translator
d35ba1586e4ad14ddae71b24caf49aac66d63a2e
[ "Apache-2.0" ]
29
2019-08-04T03:05:23.000Z
2021-12-14T14:09:57.000Z
translator/app/modules/speech.py
sharad461/nepali-translator
d35ba1586e4ad14ddae71b24caf49aac66d63a2e
[ "Apache-2.0" ]
3
2020-10-09T01:35:45.000Z
2021-06-02T12:24:31.000Z
translator/app/modules/speech.py
sharad461/nepali-translator
d35ba1586e4ad14ddae71b24caf49aac66d63a2e
[ "Apache-2.0" ]
9
2019-11-04T10:01:34.000Z
2021-12-20T02:03:40.000Z
import speech_recognition as sr def rec(): r = sr.Recognizer() with sr.Microphone() as source: audio = r.listen(source) try: text = r.recognize_google(audio) return(text) except: return("Sorry, couldn't recognize your voice. Please try again.")
21.230769
68
0.666667
import speech_recognition as sr def rec(): r = sr.Recognizer() with sr.Microphone() as source: audio = r.listen(source) try: text = r.recognize_google(audio) return(text) except: return("Sorry, couldn't recognize your voice. Please try again.")
true
true
f72cb63e07c6ebb1781cffd6e5ba78d6f5d59509
1,201
py
Python
sonata/datamodules/base_datamodule.py
sergevkim/sonata
2250b60174628ee76fb7d54bf50e4b8b07b505d5
[ "MIT" ]
1
2021-03-15T19:01:43.000Z
2021-03-15T19:01:43.000Z
sonata/datamodules/base_datamodule.py
sergevkim/sonata
2250b60174628ee76fb7d54bf50e4b8b07b505d5
[ "MIT" ]
null
null
null
sonata/datamodules/base_datamodule.py
sergevkim/sonata
2250b60174628ee76fb7d54bf50e4b8b07b505d5
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod from pathlib import Path import torch from torch import Tensor from torch.utils.data import Dataset, DataLoader class BaseDataModule(ABC): def __init__( self, data_path: Path, batch_size: int, num_workers: int, ): super().__init__() self.data_path = data_path self.batch_size = batch_size self.num_workers = num_workers @staticmethod def prepare_data( data_path: Path, ): pass @abstractmethod def setup( self, val_ratio: float, ) -> None: pass def train_dataloader(self) -> DataLoader: train_dataloader = DataLoader( dataset=self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, ) return train_dataloader def val_dataloader(self) -> DataLoader: val_dataloader = DataLoader( dataset=self.val_dataset, batch_size=self.batch_size, num_workers=self.num_workers, ) return val_dataloader def test_dataloader(self): pass
21.836364
48
0.587843
from abc import ABC, abstractmethod from pathlib import Path import torch from torch import Tensor from torch.utils.data import Dataset, DataLoader class BaseDataModule(ABC): def __init__( self, data_path: Path, batch_size: int, num_workers: int, ): super().__init__() self.data_path = data_path self.batch_size = batch_size self.num_workers = num_workers @staticmethod def prepare_data( data_path: Path, ): pass @abstractmethod def setup( self, val_ratio: float, ) -> None: pass def train_dataloader(self) -> DataLoader: train_dataloader = DataLoader( dataset=self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, ) return train_dataloader def val_dataloader(self) -> DataLoader: val_dataloader = DataLoader( dataset=self.val_dataset, batch_size=self.batch_size, num_workers=self.num_workers, ) return val_dataloader def test_dataloader(self): pass
true
true
f72cb68570a41741af7a25b02a5d19503e0f3386
3,806
py
Python
netket/operator/boson.py
gpescia/MyNetKet
958510966a5870d9d491de0628903cf1fc210921
[ "Apache-2.0" ]
null
null
null
netket/operator/boson.py
gpescia/MyNetKet
958510966a5870d9d491de0628903cf1fc210921
[ "Apache-2.0" ]
11
2021-07-12T15:20:14.000Z
2022-01-17T09:40:41.000Z
netket/operator/boson.py
gpescia/MyNetKet
958510966a5870d9d491de0628903cf1fc210921
[ "Apache-2.0" ]
1
2021-04-25T15:47:32.000Z
2021-04-25T15:47:32.000Z
# Copyright 2021 The NetKet Authors - All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from netket.utils.types import DType from netket.hilbert import AbstractHilbert from ._local_operator import LocalOperator as _LocalOperator def destroy( hilbert: AbstractHilbert, site: int, dtype: DType = float ) -> _LocalOperator: """ Builds the boson destruction operator :math:`\\hat{a}` acting on the `site`-th of the Hilbert space `hilbert`. If `hilbert` is a non-Bosonic space of local dimension M, it is considered as a bosonic space of local dimension M. Args: hilbert: The hilbert space site: the site on which this operator acts Returns: The resulting Local Operator """ import numpy as np N = hilbert.size_at_index(site) D = np.array([np.sqrt(m) for m in np.arange(1, N)]) mat = np.diag(D, 1) return _LocalOperator(hilbert, mat, [site], dtype=dtype) def create(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator: """ Builds the boson creation operator :math:`\\hat{a}^\\dagger` acting on the `site`-th of the Hilbert space `hilbert`. If `hilbert` is a non-Bosonic space of local dimension M, it is considered as a bosonic space of local dimension M. 
Args: hilbert: The hilbert space site: the site on which this operator acts Returns: The resulting Local Operator """ import numpy as np N = hilbert.size_at_index(site) D = np.array([np.sqrt(m) for m in np.arange(1, N)]) mat = np.diag(D, -1) return _LocalOperator(hilbert, mat, [site], dtype=dtype) def number(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator: """ Builds the number operator :math:`\\hat{a}^\\dagger\\hat{a}` acting on the `site`-th of the Hilbert space `hilbert`. If `hilbert` is a non-Bosonic space of local dimension M, it is considered as a bosonic space of local dimension M. Args: hilbert: The hilbert space site: the site on which this operator acts Returns: The resulting Local Operator """ import numpy as np N = hilbert.size_at_index(site) D = np.array([m for m in np.arange(0, N)]) mat = np.diag(D, 0) return _LocalOperator(hilbert, mat, [site], dtype=dtype) def proj( hilbert: AbstractHilbert, site: int, n: int, dtype: DType = float ) -> _LocalOperator: """ Builds the projector operator :math:`|n\\rangle\\langle n |` acting on the `site`-th of the Hilbert space `hilbert` and collapsing on the state with `n` bosons. If `hilbert` is a non-Bosonic space of local dimension M, it is considered as a bosonic space of local dimension M. Args: hilbert: The hilbert space site: the site on which this operator acts n: the state on which to project Returns: the resulting operator """ import numpy as np N = hilbert.size_at_index(site) if n >= N: raise ValueError("Cannot project on a state above the cutoff.") D = np.array([0 for m in np.arange(0, N)]) D[n] = 1 mat = np.diag(D, 0) return _LocalOperator(hilbert, mat, [site], dtype=dtype) # clean up the module del AbstractHilbert, DType
29.503876
96
0.672622
from netket.utils.types import DType from netket.hilbert import AbstractHilbert from ._local_operator import LocalOperator as _LocalOperator def destroy( hilbert: AbstractHilbert, site: int, dtype: DType = float ) -> _LocalOperator: import numpy as np N = hilbert.size_at_index(site) D = np.array([np.sqrt(m) for m in np.arange(1, N)]) mat = np.diag(D, 1) return _LocalOperator(hilbert, mat, [site], dtype=dtype) def create(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator: import numpy as np N = hilbert.size_at_index(site) D = np.array([np.sqrt(m) for m in np.arange(1, N)]) mat = np.diag(D, -1) return _LocalOperator(hilbert, mat, [site], dtype=dtype) def number(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator: import numpy as np N = hilbert.size_at_index(site) D = np.array([m for m in np.arange(0, N)]) mat = np.diag(D, 0) return _LocalOperator(hilbert, mat, [site], dtype=dtype) def proj( hilbert: AbstractHilbert, site: int, n: int, dtype: DType = float ) -> _LocalOperator: import numpy as np N = hilbert.size_at_index(site) if n >= N: raise ValueError("Cannot project on a state above the cutoff.") D = np.array([0 for m in np.arange(0, N)]) D[n] = 1 mat = np.diag(D, 0) return _LocalOperator(hilbert, mat, [site], dtype=dtype) del AbstractHilbert, DType
true
true
f72cb81ea991aa6ce3d971ea1b6e47347518c4cb
31
py
Python
day2/oddno1.py
nikhilsamninan/python-files
15198459081097058a939b40b5e8ef754e578fe0
[ "Apache-2.0" ]
null
null
null
day2/oddno1.py
nikhilsamninan/python-files
15198459081097058a939b40b5e8ef754e578fe0
[ "Apache-2.0" ]
null
null
null
day2/oddno1.py
nikhilsamninan/python-files
15198459081097058a939b40b5e8ef754e578fe0
[ "Apache-2.0" ]
null
null
null
print(tuple(range(201,400,2)))
15.5
30
0.709677
print(tuple(range(201,400,2)))
true
true
f72cb9eb47ac3d1bf036724169c33be5cd5d5d60
338
py
Python
dogstatsd/__init__.py
ian28223/datadog-unix-agent
09c75778b512361c83ff10e7cdb37b887bcaa8fe
[ "Apache-2.0" ]
13
2018-08-11T01:40:51.000Z
2022-01-02T09:07:43.000Z
dogstatsd/__init__.py
ian28223/datadog-unix-agent
09c75778b512361c83ff10e7cdb37b887bcaa8fe
[ "Apache-2.0" ]
21
2018-05-28T13:16:23.000Z
2021-08-19T15:43:40.000Z
dogstatsd/__init__.py
ian28223/datadog-unix-agent
09c75778b512361c83ff10e7cdb37b887bcaa8fe
[ "Apache-2.0" ]
15
2018-05-10T15:09:41.000Z
2022-03-21T06:46:21.000Z
# Unless explicitly stated otherwise all files in this repository are licensed # under the Apache License Version 2.0. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2018 Datadog, Inc. from .server import Server from .reporter import Reporter __all__ = [ "Server", "Reporter", ]
26
83
0.748521
from .server import Server from .reporter import Reporter __all__ = [ "Server", "Reporter", ]
true
true
f72cba691951b7828f5ece31e4d5727f90f7fb13
428
py
Python
gallery/migrations/0006_image_image.py
dennis027/Gallery
282c1807087beb2e2a5ea1d51b5b6891145c20a0
[ "MIT" ]
null
null
null
gallery/migrations/0006_image_image.py
dennis027/Gallery
282c1807087beb2e2a5ea1d51b5b6891145c20a0
[ "MIT" ]
null
null
null
gallery/migrations/0006_image_image.py
dennis027/Gallery
282c1807087beb2e2a5ea1d51b5b6891145c20a0
[ "MIT" ]
null
null
null
# Generated by Django 2.2 on 2021-07-03 12:00 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('gallery', '0005_remove_image_image'), ] operations = [ migrations.AddField( model_name='image', name='image', field=models.CharField(default=1, max_length=255), preserve_default=False, ), ]
21.4
62
0.598131
from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('gallery', '0005_remove_image_image'), ] operations = [ migrations.AddField( model_name='image', name='image', field=models.CharField(default=1, max_length=255), preserve_default=False, ), ]
true
true
f72cbb0f0e9ed11c2ce819bd187907cbc6229269
1,190
py
Python
examples/pure_jax.py
kingoflolz/DALL-E
d3f3e9a57a31b1e1cc74a449a9e6e5a0442f0ac7
[ "MIT" ]
7
2021-04-10T15:03:37.000Z
2021-07-05T02:49:51.000Z
examples/pure_jax.py
kingoflolz/DALL-E
d3f3e9a57a31b1e1cc74a449a9e6e5a0442f0ac7
[ "MIT" ]
null
null
null
examples/pure_jax.py
kingoflolz/DALL-E
d3f3e9a57a31b1e1cc74a449a9e6e5a0442f0ac7
[ "MIT" ]
1
2021-10-01T07:47:41.000Z
2021-10-01T07:47:41.000Z
import io import jax import requests import PIL from PIL import ImageOps import numpy as np import jax.numpy as jnp from dall_e_jax import get_encoder, get_decoder, map_pixels, unmap_pixels target_image_size = 256 def download_image(url): resp = requests.get(url) resp.raise_for_status() return PIL.Image.open(io.BytesIO(resp.content)) def preprocess(img): img = ImageOps.fit(img, [target_image_size,] * 2, method=0, bleed=0.0, centering=(0.5, 0.5)) img = np.expand_dims(np.transpose(np.array(img).astype(np.float32)/255, (2, 0, 1)), 0) return map_pixels(img) jax_enc_fn, jax_enc_params = get_encoder("encoder.pkl") jax_dec_fn, jax_dec_params = get_decoder("decoder.pkl") x = preprocess(download_image('https://assets.bwbx.io/images/users/iqjWHBFdfxIU/iKIWgaiJUtss/v2/1000x-1.jpg')) z_logits = jax_enc_fn(jax_enc_params, x) z = jnp.argmax(z_logits, axis=1) z = jnp.transpose(jax.nn.one_hot(z, num_classes=8192), (0, 3, 1, 2)) x_stats = jax_dec_fn(jax_dec_params, z) x_rec = unmap_pixels(jax.nn.sigmoid(x_stats[:, :3])) x_rec = np.transpose((np.array(x_rec[0]) * 255).astype(np.uint8), (1, 2, 0)) PIL.Image.fromarray(x_rec).save('reconstructed.png')
26.444444
110
0.730252
import io import jax import requests import PIL from PIL import ImageOps import numpy as np import jax.numpy as jnp from dall_e_jax import get_encoder, get_decoder, map_pixels, unmap_pixels target_image_size = 256 def download_image(url): resp = requests.get(url) resp.raise_for_status() return PIL.Image.open(io.BytesIO(resp.content)) def preprocess(img): img = ImageOps.fit(img, [target_image_size,] * 2, method=0, bleed=0.0, centering=(0.5, 0.5)) img = np.expand_dims(np.transpose(np.array(img).astype(np.float32)/255, (2, 0, 1)), 0) return map_pixels(img) jax_enc_fn, jax_enc_params = get_encoder("encoder.pkl") jax_dec_fn, jax_dec_params = get_decoder("decoder.pkl") x = preprocess(download_image('https://assets.bwbx.io/images/users/iqjWHBFdfxIU/iKIWgaiJUtss/v2/1000x-1.jpg')) z_logits = jax_enc_fn(jax_enc_params, x) z = jnp.argmax(z_logits, axis=1) z = jnp.transpose(jax.nn.one_hot(z, num_classes=8192), (0, 3, 1, 2)) x_stats = jax_dec_fn(jax_dec_params, z) x_rec = unmap_pixels(jax.nn.sigmoid(x_stats[:, :3])) x_rec = np.transpose((np.array(x_rec[0]) * 255).astype(np.uint8), (1, 2, 0)) PIL.Image.fromarray(x_rec).save('reconstructed.png')
true
true
f72cbbad2bdf77b532dac0c510c9856f9ed9388e
12,421
py
Python
src/run_joint_confidence_cdcOriginalGan.py
williamsashbee/Confident_classifier
cba3ef862b310afc3af6c4a62b524f032f45549e
[ "MIT" ]
null
null
null
src/run_joint_confidence_cdcOriginalGan.py
williamsashbee/Confident_classifier
cba3ef862b310afc3af6c4a62b524f032f45549e
[ "MIT" ]
null
null
null
src/run_joint_confidence_cdcOriginalGan.py
williamsashbee/Confident_classifier
cba3ef862b310afc3af6c4a62b524f032f45549e
[ "MIT" ]
null
null
null
############################################## # This code is based on samples from pytorch # ############################################## # Writer: Kimin Lee from __future__ import print_function import argparse import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import data_loader import numpy as np import torchvision.utils as vutils import models from torchvision import datasets, transforms from torch.autograd import Variable import os os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152 os.environ["CUDA_VISIBLE_DEVICES"] = "5" # Training settings parser = argparse.ArgumentParser(description='Training code - joint confidence') parser.add_argument('--batch-size', type=int, default=128, help='input batch size for training') parser.add_argument('--epochs', type=int, default=100, help='number of epochs to train') parser.add_argument('--lr', type=float, default=0.0002, help='learning rate') parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training') parser.add_argument('--seed', type=int, default=1, help='random seed') parser.add_argument('--log-interval', type=int, default=100, help='how many batches to wait before logging training status') parser.add_argument('--dataset', default='mnist', help='cifar10 | svhn') parser.add_argument('--dataroot', required=True, help='path to dataset') parser.add_argument('--imageSize', type=int, default=32, help='the height / width of the input image to network') parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints') parser.add_argument('--wd', type=float, default=0.0, help='weight decay') parser.add_argument('--droprate', type=float, default=0.1, help='learning rate decay') parser.add_argument('--decreasing_lr', default='60', help='decreasing strategy') parser.add_argument('--num_classes', type=int, default=10, help='the # of classes') parser.add_argument('--beta', type=float, default=1, 
help='penalty parameter for KL term') args = parser.parse_args() if args.dataset == 'cifar10': args.beta = 0.1 args.batch_size = 64 print(args) args.cuda = not args.no_cuda and torch.cuda.is_available() print("Random Seed: ", args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {} print('load data: ', args.dataset) if args.dataset=='mnist': transform = transforms.Compose([ transforms.Scale(32), transforms.ToTensor(), transforms.Lambda(lambda x: x.repeat(3, 1, 1)), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) ]) train_loader = torch.utils.data.DataLoader( datasets.MNIST('data', train=True, download=True, transform=transform), batch_size=128, shuffle=True) test_loader = torch.utils.data.DataLoader( datasets.MNIST('data', train=False, download=True, transform=transform), batch_size=128, shuffle=True) else: train_loader, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size, args.imageSize, args.dataroot) transform = transforms.Compose([ transforms.Scale(32), transforms.ToTensor(), transforms.Lambda(lambda x: x.repeat(3, 1, 1)), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) ]) train_loader_mnist = torch.utils.data.DataLoader( datasets.MNIST('data', train=True, download=True, transform=transform), batch_size=128, shuffle=True) print('Load model') model = models.vgg13() print(model) print('load GAN') nz = 100 G = models.cdcOriginalGenerator(1, nz, 64, 3) # ngpu, nz, ngf, nc D = models.cdcOriginalDiscriminator(1, 3, 64) # ngpu, nc, ndf G.weight_init(mean=0.0, std=0.02) D.weight_init(mean=0.0, std=0.02) # Initial setup for GAN real_label = 1 fake_label = 0 criterion = nn.BCELoss() nz = 100 print('Setup optimizer') lr = 0.0002 batch_size = 128 optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd) G_optimizer = optim.Adam(G.parameters(), lr=lr, betas=(0.5, 0.999)) D_optimizer = 
optim.Adam(D.parameters(), lr=lr, betas=(0.5, 0.999)) decreasing_lr = list(map(int, args.decreasing_lr.split(','))) onehot = torch.zeros(10, 10).cuda() onehot = onehot.scatter_(1, torch.cuda.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).view(10, 1), 1).view(10, 10, 1, 1) img_size = 32 num_labels = 10 fraction = 1 fill = torch.zeros([num_labels, num_labels, img_size / fraction, img_size / fraction]).cuda() for i in range(num_labels): fill[i, i, :, :] = 1 fill = fill.cuda() # os.environ["CUDA_LAUNCH_BLOCKING"]="1" # Binary Cross Entropy loss BCE_loss = nn.BCELoss() # fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1) fixed_noise = torch.randn((64, 100)).view(-1, 100, 1, 1) fixed_label = None if args.cuda: model.cuda() D.cuda() G.cuda() criterion.cuda() fixed_noise = fixed_noise.cuda() first = True def train(epoch): model.train() # D_train_loss = 0 # G_train_loss = 3 trg = 0 trd = 0 i = 0 for batch_idx, (data, y_labels) in enumerate(train_loader): uniform_dist = torch.Tensor(data.size(0), args.num_classes).fill_((1. 
/ args.num_classes)).cuda() x_ = data.cuda() assert x_[0, :, :, :].shape == (3, 32, 32) global first if first: global fixed_noise global fixed_label first = False fixed_label = onehot[y_labels.squeeze()[:64]] print("saving fixed_label!") vutils.save_image(data[:64], '{}/{}jointConfidencerealReference{}.png'.format(args.outf, args.dataset, epoch), normalize=True) # train discriminator D D.zero_grad() y_ = y_labels mini_batch = x_.size()[0] y_real_ = torch.ones(mini_batch) y_fake_ = torch.zeros(mini_batch) y_real_, y_fake_ = Variable(y_real_.cuda()), Variable(y_fake_.cuda()) y_fill_ = fill[y_.squeeze().tolist()] # y_fill_ = fill[y_] assert y_fill_[0, y_.squeeze().tolist()[0], :, :].sum() == (img_size / fraction) ** 2 assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch x_, y_fill_ = Variable(x_.cuda()), Variable(y_fill_.cuda()) D_result = D(x_, y_fill_).squeeze() D_real_loss = BCE_loss(D_result, y_real_) z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1) y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.LongTensor).squeeze() y_label_ = onehot[y_] y_fill_ = fill[y_] assert y_label_[0, y_[0]] == 1 assert y_label_.shape == (mini_batch, 10, 1, 1) assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2 assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch z_, y_label_, y_fill_ = Variable(z_.cuda()), Variable(y_label_.cuda()), Variable(y_fill_.cuda()) G_result = G(z_, y_label_) D_result = D(G_result, y_fill_).squeeze() D_fake_loss = BCE_loss(D_result, y_fake_) D_fake_score = D_result.data.mean() D_train_loss = D_real_loss + D_fake_loss trg += 1 if D_train_loss > .1: trd += 1 D_train_loss.backward() D_optimizer.step() # D_losses.append(D_train_loss.item()) # train generator G G.zero_grad() z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1) y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.LongTensor).squeeze() y_label_ = onehot[y_] y_fill_ = fill[y_] z_, y_label_, y_fill_ = Variable(z_.cuda()), 
Variable(y_label_.cuda()), Variable(y_fill_.cuda()) assert y_label_[0, y_[0]] == 1 assert y_label_.shape == (mini_batch, 10, 1, 1) assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2 assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch G_result = G(z_, y_label_) D_result = D(G_result, y_fill_).squeeze() G_train_loss = BCE_loss(D_result, y_real_) # minimize the true distribution KL_fake_output = F.log_softmax(model(G_result)) errG_KL = F.kl_div(KL_fake_output, uniform_dist) * args.num_classes generator_loss = G_train_loss + args.beta * errG_KL # 12.0, .65, 0e-8 generator_loss.backward() G_optimizer.step() # G_losses.append(G_train_loss.item()) ########################### # (3) Update classifier # ########################### # cross entropy loss optimizer.zero_grad() x_ = Variable(x_) output = F.log_softmax(model(x_)) loss = F.nll_loss(output.cuda(), y_labels.type(torch.cuda.LongTensor).squeeze()) # KL divergence #### z_ = torch.randn((data.shape[0], 100)).view(-1, 100, 1, 1).cuda() y_ = (torch.rand(data.shape[0], 1) * num_labels).type(torch.LongTensor).squeeze().cuda() y_label_ = onehot[y_] y_fill_ = fill[y_] assert y_label_[0, y_[0]] == 1 assert y_label_.shape == (data.shape[0], 10, 1, 1) assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2 assert y_fill_.sum() == (img_size / fraction) ** 2 * data.shape[0] G_result = G(z_, y_label_) # !!!#D_result = D(G_result, y_fill_).squeeze() #### KL_fake_output = F.log_softmax(model(G_result)) KL_loss_fake = F.kl_div(KL_fake_output, uniform_dist) * args.num_classes total_loss = loss + args.beta * KL_loss_fake # total_loss = loss total_loss.backward() optimizer.step() if batch_idx % args.log_interval == 0: print( "Epoch {} , Descriminator loss {:.6f} Generator loss {:.6f} traingenerator {:.6f} traindiscriminator {:.6f}".format( epoch, D_train_loss, G_train_loss, trg, trd)) print('Classification Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, KL fake Loss: {:.6f}'.format( epoch, batch_idx 
* len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.data.item(), KL_loss_fake.data.item())) # print('Classification Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, KL fake Loss: {:.6f}'.format( # epoch, batch_idx * len(data), len(train_loader.dataset), # 100. * batch_idx / len(train_loader), loss.data.item(), KL_loss_fake.data.item())) fake = G(fixed_noise.cuda(), fixed_label) vutils.save_image(fake.data, '%s/MNISTcDCgan_samples_epoch_%03d.png' % (args.outf, epoch), normalize=True) def test(epoch): model.eval() test_loss = 0 correct = 0 total = 0 for data, target in test_loader: total += data.size(0) if args.cuda: data, target = data.cuda(), target.cuda() # data, target = Variable(data, volatile=True), Variable(target) output = F.log_softmax(model(data)) target = target.type( torch.LongTensor) # https://discuss.pytorch.org/t/runtimeerror-multi-target-not-supported-newbie/10216/4 if args.cuda: output = output.cuda() target = target.cuda() target = torch.squeeze(target) test_loss += F.nll_loss(output, target).data.item() pred = output.data.max(1)[1] # get the index of the max log-probability correct += pred.eq(target.data).cpu().sum() test_loss = test_loss test_loss /= len(test_loader) # loss function already averages over batch size print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, correct, total, 100. * correct / total)) for epoch in range(1, args.epochs + 1): train(epoch) test(epoch) if epoch in decreasing_lr: G_optimizer.param_groups[0]['lr'] *= args.droprate D_optimizer.param_groups[0]['lr'] *= args.droprate optimizer.param_groups[0]['lr'] *= args.droprate if epoch % 20 == 0: # do checkpointing torch.save(G.state_dict(), '%s/netG_epoch_%d.pth' % (args.outf, epoch)) torch.save(D.state_dict(), '%s/netD_epoch_%d.pth' % (args.outf, epoch)) torch.save(model.state_dict(), '%s/model_epoch_%d.pth' % (args.outf, epoch))
37.3003
132
0.622494
h.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {} print('load data: ', args.dataset) if args.dataset=='mnist': transform = transforms.Compose([ transforms.Scale(32), transforms.ToTensor(), transforms.Lambda(lambda x: x.repeat(3, 1, 1)), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) ]) train_loader = torch.utils.data.DataLoader( datasets.MNIST('data', train=True, download=True, transform=transform), batch_size=128, shuffle=True) test_loader = torch.utils.data.DataLoader( datasets.MNIST('data', train=False, download=True, transform=transform), batch_size=128, shuffle=True) else: train_loader, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size, args.imageSize, args.dataroot) transform = transforms.Compose([ transforms.Scale(32), transforms.ToTensor(), transforms.Lambda(lambda x: x.repeat(3, 1, 1)), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) ]) train_loader_mnist = torch.utils.data.DataLoader( datasets.MNIST('data', train=True, download=True, transform=transform), batch_size=128, shuffle=True) print('Load model') model = models.vgg13() print(model) print('load GAN') nz = 100 G = models.cdcOriginalGenerator(1, nz, 64, 3) D = models.cdcOriginalDiscriminator(1, 3, 64) G.weight_init(mean=0.0, std=0.02) D.weight_init(mean=0.0, std=0.02) real_label = 1 fake_label = 0 criterion = nn.BCELoss() nz = 100 print('Setup optimizer') lr = 0.0002 batch_size = 128 optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd) G_optimizer = optim.Adam(G.parameters(), lr=lr, betas=(0.5, 0.999)) D_optimizer = optim.Adam(D.parameters(), lr=lr, betas=(0.5, 0.999)) decreasing_lr = list(map(int, args.decreasing_lr.split(','))) onehot = torch.zeros(10, 10).cuda() onehot = onehot.scatter_(1, torch.cuda.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).view(10, 1), 1).view(10, 10, 1, 1) img_size = 32 num_labels = 10 fraction = 1 fill = 
torch.zeros([num_labels, num_labels, img_size / fraction, img_size / fraction]).cuda() for i in range(num_labels): fill[i, i, :, :] = 1 fill = fill.cuda() BCE_loss = nn.BCELoss() fixed_noise = torch.randn((64, 100)).view(-1, 100, 1, 1) fixed_label = None if args.cuda: model.cuda() D.cuda() G.cuda() criterion.cuda() fixed_noise = fixed_noise.cuda() first = True def train(epoch): model.train() trg = 0 trd = 0 i = 0 for batch_idx, (data, y_labels) in enumerate(train_loader): uniform_dist = torch.Tensor(data.size(0), args.num_classes).fill_((1. / args.num_classes)).cuda() x_ = data.cuda() assert x_[0, :, :, :].shape == (3, 32, 32) global first if first: global fixed_noise global fixed_label first = False fixed_label = onehot[y_labels.squeeze()[:64]] print("saving fixed_label!") vutils.save_image(data[:64], '{}/{}jointConfidencerealReference{}.png'.format(args.outf, args.dataset, epoch), normalize=True) D.zero_grad() y_ = y_labels mini_batch = x_.size()[0] y_real_ = torch.ones(mini_batch) y_fake_ = torch.zeros(mini_batch) y_real_, y_fake_ = Variable(y_real_.cuda()), Variable(y_fake_.cuda()) y_fill_ = fill[y_.squeeze().tolist()] assert y_fill_[0, y_.squeeze().tolist()[0], :, :].sum() == (img_size / fraction) ** 2 assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch x_, y_fill_ = Variable(x_.cuda()), Variable(y_fill_.cuda()) D_result = D(x_, y_fill_).squeeze() D_real_loss = BCE_loss(D_result, y_real_) z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1) y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.LongTensor).squeeze() y_label_ = onehot[y_] y_fill_ = fill[y_] assert y_label_[0, y_[0]] == 1 assert y_label_.shape == (mini_batch, 10, 1, 1) assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2 assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch z_, y_label_, y_fill_ = Variable(z_.cuda()), Variable(y_label_.cuda()), Variable(y_fill_.cuda()) G_result = G(z_, y_label_) D_result = D(G_result, y_fill_).squeeze() D_fake_loss = 
BCE_loss(D_result, y_fake_) D_fake_score = D_result.data.mean() D_train_loss = D_real_loss + D_fake_loss trg += 1 if D_train_loss > .1: trd += 1 D_train_loss.backward() D_optimizer.step() G.zero_grad() z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1) y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.LongTensor).squeeze() y_label_ = onehot[y_] y_fill_ = fill[y_] z_, y_label_, y_fill_ = Variable(z_.cuda()), Variable(y_label_.cuda()), Variable(y_fill_.cuda()) assert y_label_[0, y_[0]] == 1 assert y_label_.shape == (mini_batch, 10, 1, 1) assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2 assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch G_result = G(z_, y_label_) D_result = D(G_result, y_fill_).squeeze() G_train_loss = BCE_loss(D_result, y_real_) KL_fake_output = F.log_softmax(model(G_result)) errG_KL = F.kl_div(KL_fake_output, uniform_dist) * args.num_classes generator_loss = G_train_loss + args.beta * errG_KL generator_loss.backward() G_optimizer.step() pe[0] G_result = G(z_, y_label_) x(model(G_result)) KL_loss_fake = F.kl_div(KL_fake_output, uniform_dist) * args.num_classes total_loss = loss + args.beta * KL_loss_fake total_loss.backward() optimizer.step() if batch_idx % args.log_interval == 0: print( "Epoch {} , Descriminator loss {:.6f} Generator loss {:.6f} traingenerator {:.6f} traindiscriminator {:.6f}".format( epoch, D_train_loss, G_train_loss, trg, trd)) print('Classification Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, KL fake Loss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), loss.data.item(), KL_loss_fake.data.item())) fake = G(fixed_noise.cuda(), fixed_label) vutils.save_image(fake.data, '%s/MNISTcDCgan_samples_epoch_%03d.png' % (args.outf, epoch), normalize=True) def test(epoch): model.eval() test_loss = 0 correct = 0 total = 0 for data, target in test_loader: total += data.size(0) if args.cuda: data, target = data.cuda(), target.cuda() output = F.log_softmax(model(data)) target = target.type( torch.LongTensor) if args.cuda: output = output.cuda() target = target.cuda() target = torch.squeeze(target) test_loss += F.nll_loss(output, target).data.item() pred = output.data.max(1)[1] correct += pred.eq(target.data).cpu().sum() test_loss = test_loss test_loss /= len(test_loader) print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, correct, total, 100. * correct / total)) for epoch in range(1, args.epochs + 1): train(epoch) test(epoch) if epoch in decreasing_lr: G_optimizer.param_groups[0]['lr'] *= args.droprate D_optimizer.param_groups[0]['lr'] *= args.droprate optimizer.param_groups[0]['lr'] *= args.droprate if epoch % 20 == 0: torch.save(G.state_dict(), '%s/netG_epoch_%d.pth' % (args.outf, epoch)) torch.save(D.state_dict(), '%s/netD_epoch_%d.pth' % (args.outf, epoch)) torch.save(model.state_dict(), '%s/model_epoch_%d.pth' % (args.outf, epoch))
true
true
f72cbd007d1006b7c1318b34026adba9042de0cd
5,497
py
Python
tb_rest_client/models/models_ce/page_data_ota_package_info.py
jernkuan/thingsboard-python-rest-client
3fb25272507494e6d494b27ca2380d3c543562e5
[ "Apache-2.0" ]
null
null
null
tb_rest_client/models/models_ce/page_data_ota_package_info.py
jernkuan/thingsboard-python-rest-client
3fb25272507494e6d494b27ca2380d3c543562e5
[ "Apache-2.0" ]
null
null
null
tb_rest_client/models/models_ce/page_data_ota_package_info.py
jernkuan/thingsboard-python-rest-client
3fb25272507494e6d494b27ca2380d3c543562e5
[ "Apache-2.0" ]
1
2021-11-26T11:24:56.000Z
2021-11-26T11:24:56.000Z
# coding: utf-8 """ ThingsBoard REST API For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>. # noqa: E501 OpenAPI spec version: 2.0 Contact: info@thingsboard.io Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class PageDataOtaPackageInfo(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'data': 'list[OtaPackageInfo]', 'has_next': 'bool', 'total_elements': 'int', 'total_pages': 'int' } attribute_map = { 'data': 'data', 'has_next': 'hasNext', 'total_elements': 'totalElements', 'total_pages': 'totalPages' } def __init__(self, data=None, has_next=None, total_elements=None, total_pages=None): # noqa: E501 """PageDataOtaPackageInfo - a model defined in Swagger""" # noqa: E501 self._data = None self._has_next = None self._total_elements = None self._total_pages = None self.discriminator = None if data is not None: self.data = data if has_next is not None: self.has_next = has_next if total_elements is not None: self.total_elements = total_elements if total_pages is not None: self.total_pages = total_pages @property def data(self): """Gets the data of this PageDataOtaPackageInfo. # noqa: E501 :return: The data of this PageDataOtaPackageInfo. # noqa: E501 :rtype: list[OtaPackageInfo] """ return self._data @data.setter def data(self, data): """Sets the data of this PageDataOtaPackageInfo. :param data: The data of this PageDataOtaPackageInfo. # noqa: E501 :type: list[OtaPackageInfo] """ self._data = data @property def has_next(self): """Gets the has_next of this PageDataOtaPackageInfo. 
# noqa: E501 :return: The has_next of this PageDataOtaPackageInfo. # noqa: E501 :rtype: bool """ return self._has_next @has_next.setter def has_next(self, has_next): """Sets the has_next of this PageDataOtaPackageInfo. :param has_next: The has_next of this PageDataOtaPackageInfo. # noqa: E501 :type: bool """ self._has_next = has_next @property def total_elements(self): """Gets the total_elements of this PageDataOtaPackageInfo. # noqa: E501 :return: The total_elements of this PageDataOtaPackageInfo. # noqa: E501 :rtype: int """ return self._total_elements @total_elements.setter def total_elements(self, total_elements): """Sets the total_elements of this PageDataOtaPackageInfo. :param total_elements: The total_elements of this PageDataOtaPackageInfo. # noqa: E501 :type: int """ self._total_elements = total_elements @property def total_pages(self): """Gets the total_pages of this PageDataOtaPackageInfo. # noqa: E501 :return: The total_pages of this PageDataOtaPackageInfo. # noqa: E501 :rtype: int """ return self._total_pages @total_pages.setter def total_pages(self, total_pages): """Sets the total_pages of this PageDataOtaPackageInfo. :param total_pages: The total_pages of this PageDataOtaPackageInfo. 
# noqa: E501 :type: int """ self._total_pages = total_pages def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(PageDataOtaPackageInfo, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, PageDataOtaPackageInfo): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
29.084656
163
0.593778
import pprint import re import six class PageDataOtaPackageInfo(object): swagger_types = { 'data': 'list[OtaPackageInfo]', 'has_next': 'bool', 'total_elements': 'int', 'total_pages': 'int' } attribute_map = { 'data': 'data', 'has_next': 'hasNext', 'total_elements': 'totalElements', 'total_pages': 'totalPages' } def __init__(self, data=None, has_next=None, total_elements=None, total_pages=None): self._data = None self._has_next = None self._total_elements = None self._total_pages = None self.discriminator = None if data is not None: self.data = data if has_next is not None: self.has_next = has_next if total_elements is not None: self.total_elements = total_elements if total_pages is not None: self.total_pages = total_pages @property def data(self): return self._data @data.setter def data(self, data): self._data = data @property def has_next(self): return self._has_next @has_next.setter def has_next(self, has_next): self._has_next = has_next @property def total_elements(self): return self._total_elements @total_elements.setter def total_elements(self, total_elements): self._total_elements = total_elements @property def total_pages(self): return self._total_pages @total_pages.setter def total_pages(self, total_pages): self._total_pages = total_pages def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(PageDataOtaPackageInfo, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, 
PageDataOtaPackageInfo): return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
true
true
f72cbd8032bfba00a07e989b6b537df95ff4361b
8,128
py
Python
Chapter2/LFM.py
7125messi/rencommend_system_learning
4a8bcef241c4c0357cfbe4d1a9828b847974b69c
[ "Apache-2.0" ]
3
2019-10-10T15:49:42.000Z
2020-05-31T07:39:10.000Z
Chapter2/LFM.py
7125messi/rencommend_system_learning
4a8bcef241c4c0357cfbe4d1a9828b847974b69c
[ "Apache-2.0" ]
null
null
null
Chapter2/LFM.py
7125messi/rencommend_system_learning
4a8bcef241c4c0357cfbe4d1a9828b847974b69c
[ "Apache-2.0" ]
2
2019-09-18T07:59:48.000Z
2020-01-16T15:00:48.000Z
# 导入包 import random import math import numpy as np import time from tqdm import tqdm from tqdm import trange # 1 通用函数定义 ## 定义装饰器,监控运行时间 def timmer(func): def wrapper(*args, **kwargs): start_time = time.time() res = func(*args, **kwargs) stop_time = time.time() print('Func {},run time:{}'.format(func.__name__,stop_time - start_time)) return res return wrapper ## 数据处理相关 ### load data ### split data class Dataset(): def __init__(self,fp): self.data = self.loadData(fp) @timmer def loadData(self,fp): data = [] for l in open(fp): data.append(tuple(map(int, l.strip().split('::')[:2]))) return data @timmer def splitData(self, M, k, seed=1): ''' :params: data, 加载的所有(user, item)数据条目 :params: M, 划分的数目,最后需要取M折的平均 :params: k, 本次是第几次划分,k~[0, M) :params: seed, random的种子数,对于不同的k应设置成一样的 :return: train, test ''' train , test = [], [] random.seed(seed) for user, item in self.data: # 这里与书中的不一致,本人认为取M-1较为合理,因randint是左右都覆盖的 if random.randint(0, M-1) == k: test.append((user, item)) else: train.append((user, item)) # 处理成字典的形式,user->set(items) def convert_dict(data): data_dict = {} for user, item in data: if user not in data_dict: data_dict[user] = set() data_dict[user].add(item) data_dict = {k: list(data_dict[k]) for k in data_dict} return data_dict return convert_dict(train), convert_dict(test) ## 评价指标 ### Precision ### Recall ### Coverage ### Popularity(Novelty) class Metric(): def __init__(self, train, test, GetRecommendation): ''' :params: train, 训练数据 :params: test, 测试数据 :params: GetRecommendation, 为某个用户获取推荐物品的接口函数 ''' self.train = train self.test = test self.GetRecommendation = GetRecommendation self.recs = self.getRec() # 为test中的每个用户进行推荐 def getRec(self): recs = {} for user in self.test: rank = self.GetRecommendation(user) recs[user] = rank return recs # 定义精确率指标计算方式 def precision(self): all, hit = 0, 0 for user in self.test: test_items = set(self.test[user]) rank = self.recs[user] for item, score in rank: if item in test_items: hit += 1 all += len(rank) return round(hit / all * 100, 
2) # 定义召回率指标计算方式 def recall(self): all, hit = 0, 0 for user in self.test: test_items = set(self.test[user]) rank = self.recs[user] for item, score in rank: if item in test_items: hit += 1 all += len(test_items) return round(hit / all * 100, 2) # 定义覆盖率指标计算方式 def coverage(self): all_item, recom_item = set(), set() for user in self.test: for item in self.train[user]: all_item.add(item) rank = self.recs[user] for item, score in rank: recom_item.add(item) return round(len(recom_item) / len(all_item) * 100, 2) # 定义新颖度指标计算方式 def popularity(self): # 计算物品的流行度 item_pop = {} for user in self.train: for item in self.train[user]: if item not in item_pop: item_pop[item] = 0 item_pop[item] += 1 num, pop = 0, 0 for user in self.test: rank = self.recs[user] for item, score in rank: # 取对数,防止因长尾问题带来的被流行物品所主导 pop += math.log(1 + item_pop[item]) num += 1 return round(pop / num, 6) def eval(self): metric = {'Precision': self.precision(), 'Recall': self.recall(), 'Coverage': self.coverage(), 'Popularity': self.popularity()} print('Metric:', metric) return metric # 2 LFM算法实现 def LFM(train,ratio,K,lr,step,lmbda,N): ''' :params: train, 训练数据 :params: ratio, 负采样的正负比例 :params: K, 隐语义个数 :params: lr, 初始学习率 :params: step, 迭代次数 :params: lmbda, 正则化系数 :params: N, 推荐TopN物品的个数 :return: GetRecommendation, 获取推荐结果的接口 ''' all_items = {} for user in train: for item in train[user]: if item not in all_items: all_items[item] = 0 all_items[item] += 1 all_items = list(all_items.items()) items = [x[0] for x in all_items] pops = [x[1] for x in all_items] # 负采样函数(按照流行度就行采样) def nSample(data,ratio): new_data = {} # 正样本 for user in data: if user not in new_data: new_data[user] = {} for item in data[user]: new_data[user][item] = 1 # 负样本 for user in new_data: seen = set(new_data[user]) pos_num = len(seen) item = np.random.choice(items, int(pos_num * ratio * 3), pops) item = [x for x in item if x not in seen][:int(pos_num * ratio)] new_data[user].update({x: 0 for x in item}) return new_data # 训练 P, Q = {}, {} for user 
in train: P[user] = np.random.random(K) for item in items: Q[item] = np.random.random(K) for s in trange(step): data = nSample(train, ratio) for user in data: for item in data[user]: eui = data[user][item] - (P[user] * Q[item]).sum() P[user] += lr * (Q[item] * eui - lmbda * P[user]) Q[item] += lr * (P[user] * eui - lmbda * Q[item]) lr *= 0.9 # 调整学习率 # 获取接口函数 def GetRecommendation(user): seen_items = set(train[user]) recs = {} for item in items: if item not in seen_items: recs[item] = (P[user] * Q[item]).sum() recs = list(sorted(recs.items(), key=lambda x: x[1], reverse=True))[:N] return recs return GetRecommendation # 3 LFM实验 ## M=8, N=10, ratio=[1, 2, 3, 5, 10, 20] class Experiment(): def __init__(self, M, N, ratio=1, K=100, lr=0.02, step=100, lmbda=0.01, fp='../dataset/ml-1m/ratings.dat'): ''' :params: M, 进行多少次实验 :params: N, TopN推荐物品的个数 :params: ratio, 正负样本比例 :params: K, 隐语义个数 :params: lr, 学习率 :params: step, 训练步数 :params: lmbda, 正则化系数 :params: fp, 数据文件路径 ''' self.M = M self.K = K self.N = N self.ratio = ratio self.lr = lr self.step = step self.lmbda = lmbda self.fp = fp self.alg = LFM # 定义单次实验 @timmer def worker(self, train, test): ''' :params: train, 训练数据集 :params: test, 测试数据集 :return: 各指标的值 ''' getRecommendation = self.alg(train, self.ratio, self.K, self.lr, self.step, self.lmbda, self.N) metric = Metric(train, test, getRecommendation) return metric.eval() # 多次实验取平均 @timmer def run(self): metrics = {'Precision': 0, 'Recall': 0, 'Coverage': 0, 'Popularity': 0} dataset = Dataset(self.fp) for ii in range(self.M): train, test = dataset.splitData(self.M, ii) print('Experiment {}:'.format(ii)) metric = self.worker(train, test) metrics = {k: metrics[k]+metric[k] for k in metrics} metrics = {k: metrics[k] / self.M for k in metrics} print('Average Result (M={}, N={}, ratio={}): {}'.format(\ self.M, self.N, self.ratio, metrics)) # LFM实验(运行时间较长,这里没贴实验结果) M, N = 8, 10 for r in [1, 2, 3, 5, 10, 20]: exp = Experiment(M, N, ratio=r) exp.run()
29.028571
93
0.506275
import random import math import numpy as np import time from tqdm import tqdm from tqdm import trange nc): def wrapper(*args, **kwargs): start_time = time.time() res = func(*args, **kwargs) stop_time = time.time() print('Func {},run time:{}'.format(func.__name__,stop_time - start_time)) return res return wrapper elf.data = self.loadData(fp) @timmer def loadData(self,fp): data = [] for l in open(fp): data.append(tuple(map(int, l.strip().split('::')[:2]))) return data @timmer def splitData(self, M, k, seed=1): train , test = [], [] random.seed(seed) for user, item in self.data: if random.randint(0, M-1) == k: test.append((user, item)) else: train.append((user, item)) def convert_dict(data): data_dict = {} for user, item in data: if user not in data_dict: data_dict[user] = set() data_dict[user].add(item) data_dict = {k: list(data_dict[k]) for k in data_dict} return data_dict return convert_dict(train), convert_dict(test) self.test = test self.GetRecommendation = GetRecommendation self.recs = self.getRec() def getRec(self): recs = {} for user in self.test: rank = self.GetRecommendation(user) recs[user] = rank return recs def precision(self): all, hit = 0, 0 for user in self.test: test_items = set(self.test[user]) rank = self.recs[user] for item, score in rank: if item in test_items: hit += 1 all += len(rank) return round(hit / all * 100, 2) def recall(self): all, hit = 0, 0 for user in self.test: test_items = set(self.test[user]) rank = self.recs[user] for item, score in rank: if item in test_items: hit += 1 all += len(test_items) return round(hit / all * 100, 2) def coverage(self): all_item, recom_item = set(), set() for user in self.test: for item in self.train[user]: all_item.add(item) rank = self.recs[user] for item, score in rank: recom_item.add(item) return round(len(recom_item) / len(all_item) * 100, 2) def popularity(self): item_pop = {} for user in self.train: for item in self.train[user]: if item not in item_pop: item_pop[item] = 0 item_pop[item] += 1 num, 
pop = 0, 0 for user in self.test: rank = self.recs[user] for item, score in rank: pop += math.log(1 + item_pop[item]) num += 1 return round(pop / num, 6) def eval(self): metric = {'Precision': self.precision(), 'Recall': self.recall(), 'Coverage': self.coverage(), 'Popularity': self.popularity()} print('Metric:', metric) return metric def LFM(train,ratio,K,lr,step,lmbda,N): all_items = {} for user in train: for item in train[user]: if item not in all_items: all_items[item] = 0 all_items[item] += 1 all_items = list(all_items.items()) items = [x[0] for x in all_items] pops = [x[1] for x in all_items] def nSample(data,ratio): new_data = {} for user in data: if user not in new_data: new_data[user] = {} for item in data[user]: new_data[user][item] = 1 for user in new_data: seen = set(new_data[user]) pos_num = len(seen) item = np.random.choice(items, int(pos_num * ratio * 3), pops) item = [x for x in item if x not in seen][:int(pos_num * ratio)] new_data[user].update({x: 0 for x in item}) return new_data P, Q = {}, {} for user in train: P[user] = np.random.random(K) for item in items: Q[item] = np.random.random(K) for s in trange(step): data = nSample(train, ratio) for user in data: for item in data[user]: eui = data[user][item] - (P[user] * Q[item]).sum() P[user] += lr * (Q[item] * eui - lmbda * P[user]) Q[item] += lr * (P[user] * eui - lmbda * Q[item]) lr *= 0.9 def GetRecommendation(user): seen_items = set(train[user]) recs = {} for item in items: if item not in seen_items: recs[item] = (P[user] * Q[item]).sum() recs = list(sorted(recs.items(), key=lambda x: x[1], reverse=True))[:N] return recs return GetRecommendation self, M, N, ratio=1, K=100, lr=0.02, step=100, lmbda=0.01, fp='../dataset/ml-1m/ratings.dat'): self.M = M self.K = K self.N = N self.ratio = ratio self.lr = lr self.step = step self.lmbda = lmbda self.fp = fp self.alg = LFM @timmer def worker(self, train, test): getRecommendation = self.alg(train, self.ratio, self.K, self.lr, self.step, self.lmbda, 
self.N) metric = Metric(train, test, getRecommendation) return metric.eval() @timmer def run(self): metrics = {'Precision': 0, 'Recall': 0, 'Coverage': 0, 'Popularity': 0} dataset = Dataset(self.fp) for ii in range(self.M): train, test = dataset.splitData(self.M, ii) print('Experiment {}:'.format(ii)) metric = self.worker(train, test) metrics = {k: metrics[k]+metric[k] for k in metrics} metrics = {k: metrics[k] / self.M for k in metrics} print('Average Result (M={}, N={}, ratio={}): {}'.format(\ self.M, self.N, self.ratio, metrics)) M, N = 8, 10 for r in [1, 2, 3, 5, 10, 20]: exp = Experiment(M, N, ratio=r) exp.run()
true
true
f72cbd82ce65ea7deeb9b12673a6fa17f65eaeaa
2,015
py
Python
intake_questgdal/base.py
Aquaveo/intake_questgdal
c11cd111a53b7270391c6923d0e252c4abbbc56b
[ "BSD-3-Clause" ]
null
null
null
intake_questgdal/base.py
Aquaveo/intake_questgdal
c11cd111a53b7270391c6923d0e252c4abbbc56b
[ "BSD-3-Clause" ]
1
2019-06-06T15:28:15.000Z
2019-06-06T15:28:15.000Z
intake_questgdal/base.py
Aquaveo/intake_questgdal
c11cd111a53b7270391c6923d0e252c4abbbc56b
[ "BSD-3-Clause" ]
null
null
null
from intake.source.base import DataSource, Schema import rasterio import xarray as xr import warnings # from . import __version__ class quest_gdal_base(DataSource): """Reads an HDF5 table Parameters ---------- path: str File to load. tablename: str Name of table to load. metadata: Arbitrary information to associate with this source. """ #version = __version__ version = '0.0.1' container = 'dataframe' partition_access = False path = '' # def _get_schema(self): # self._schema = Schema( # datashape=None, # dtype=None, # shape=None, # npartitions=1, # extra_metadata={} # ) # return self._schema def _get_schema(self): if self.path is not '': xarr = xr.open_rasterio(self.path) ds2 = xr.Dataset({'raster': xarr}) metadata = { 'dims': dict(ds2.dims), 'data_vars': {k: list(ds2[k].coords) for k in ds2.data_vars.keys()}, 'coords': tuple(ds2.coords.keys()), 'array': 'raster' } atts = ['transform', 'crs', 'res', 'is_tiled', 'nodatavals'] for att in atts: if att in xarr.attrs: metadata[att] = xarr.attrs[att] return Schema( datashape=None, dtype = str(xarr.dtype), shape=xarr.shape, npartitions=1, extra_metadata=metadata ) else: self._schema = Schema( datashape=None, dtype=None, shape=None, npartitions=1, extra_metadata={} ) return self._schema def _get_partition(self, _): return None def _close(self): pass def raster_data(self, path): return rasterio.open(path)
26.168831
72
0.4933
from intake.source.base import DataSource, Schema import rasterio import xarray as xr import warnings class quest_gdal_base(DataSource): version = '0.0.1' container = 'dataframe' partition_access = False path = '' def _get_schema(self): if self.path is not '': xarr = xr.open_rasterio(self.path) ds2 = xr.Dataset({'raster': xarr}) metadata = { 'dims': dict(ds2.dims), 'data_vars': {k: list(ds2[k].coords) for k in ds2.data_vars.keys()}, 'coords': tuple(ds2.coords.keys()), 'array': 'raster' } atts = ['transform', 'crs', 'res', 'is_tiled', 'nodatavals'] for att in atts: if att in xarr.attrs: metadata[att] = xarr.attrs[att] return Schema( datashape=None, dtype = str(xarr.dtype), shape=xarr.shape, npartitions=1, extra_metadata=metadata ) else: self._schema = Schema( datashape=None, dtype=None, shape=None, npartitions=1, extra_metadata={} ) return self._schema def _get_partition(self, _): return None def _close(self): pass def raster_data(self, path): return rasterio.open(path)
true
true
f72cbdde941379a53be16076b51cf17c429ca67d
6,353
py
Python
mooringlicensing/management/commands/approval_renewal_notices.py
jawaidm/mooringlicensing
b22e74209da8655c8ad3af99e00f36d17c8ef73f
[ "Apache-2.0" ]
null
null
null
mooringlicensing/management/commands/approval_renewal_notices.py
jawaidm/mooringlicensing
b22e74209da8655c8ad3af99e00f36d17c8ef73f
[ "Apache-2.0" ]
2
2021-03-05T06:48:11.000Z
2021-03-26T08:14:17.000Z
mooringlicensing/management/commands/approval_renewal_notices.py
jawaidm/mooringlicensing
b22e74209da8655c8ad3af99e00f36d17c8ef73f
[ "Apache-2.0" ]
2
2021-09-19T15:45:19.000Z
2021-10-05T05:07:41.000Z
from django.core.management.base import BaseCommand from django.utils import timezone from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db.models import Q from mooringlicensing.components.approvals.models import ( Approval, WaitingListAllocation, AnnualAdmissionPermit, AuthorisedUserPermit, MooringLicence, DcvPermit, ) from ledger.accounts.models import EmailUser from datetime import timedelta from mooringlicensing.components.proposals.email import send_approval_renewal_email_notification import logging from mooringlicensing.components.main.models import NumberOfDaysType, NumberOfDaysSetting from mooringlicensing.settings import ( CODE_DAYS_FOR_RENEWAL_WLA, CODE_DAYS_FOR_RENEWAL_AAP, CODE_DAYS_FOR_RENEWAL_AUP, CODE_DAYS_FOR_RENEWAL_ML, CODE_DAYS_FOR_RENEWAL_DCVP, ) logger = logging.getLogger(__name__) class Command(BaseCommand): help = 'Send Approval renewal notice when approval is due to expire in 30 days' def perform_per_type(self, number_of_days_code, approval_class, updates, errors): today = timezone.localtime(timezone.now()).date() # Retrieve the number of days before expiry date of the approvals to email days_type = NumberOfDaysType.objects.get(code=number_of_days_code) days_setting = NumberOfDaysSetting.get_setting_by_date(days_type, today) if not days_setting: # No number of days found raise ImproperlyConfigured("NumberOfDays: {} is not defined for the date: {}".format(days_type.name, today)) expiry_notification_date = today + timedelta(days=days_setting.number_of_days) logger.info('Running command {}'.format(__name__)) # Construct queries queries = Q() if number_of_days_code == CODE_DAYS_FOR_RENEWAL_DCVP: queries &= Q(end_date__lte=expiry_notification_date) queries &= Q(renewal_sent=False) queries &= Q(status__in=[DcvPermit.DCV_PERMIT_STATUS_CURRENT,]) else: queries &= Q(expiry_date__lte=expiry_notification_date) queries &= Q(renewal_sent=False) queries &= Q(replaced_by__isnull=True) queries &= 
Q(status__in=(Approval.APPROVAL_STATUS_CURRENT, Approval.APPROVAL_STATUS_SUSPENDED)) approvals = approval_class.objects.filter(queries) for a in approvals: try: if approval_class == DcvPermit: # send_approval_renewal_email_notification_dcvp(a) pass else: a.generate_renewal_doc() send_approval_renewal_email_notification(a) a.renewal_sent = True a.save() logger.info('Renewal notice sent for Approval {}'.format(a.id)) updates.append(a.lodgement_number) except Exception as e: err_msg = 'Error sending renewal notice for Approval {}'.format(a.lodgement_number) logger.error('{}\n{}'.format(err_msg, str(e))) errors.append(err_msg) def handle(self, *args, **options): try: user = EmailUser.objects.get(email=settings.CRON_EMAIL) except: user = EmailUser.objects.create(email=settings.CRON_EMAIL, password='') updates, errors = [], [] self.perform_per_type(CODE_DAYS_FOR_RENEWAL_WLA, WaitingListAllocation, updates, errors) self.perform_per_type(CODE_DAYS_FOR_RENEWAL_AAP, AnnualAdmissionPermit, updates, errors) self.perform_per_type(CODE_DAYS_FOR_RENEWAL_AUP, AuthorisedUserPermit, updates, errors) self.perform_per_type(CODE_DAYS_FOR_RENEWAL_ML, MooringLicence, updates, errors) # today = timezone.localtime(timezone.now()).date() # # # Retrieve the number of days before expiry date of the approvals to email # days_type = NumberOfDaysType.objects.get(code=CODE_DAYS_FOR_RENEWAL) # days_setting = NumberOfDaysSetting.get_setting_by_date(days_type, today) # if not days_setting: # # No number of days found # raise ImproperlyConfigured("NumberOfDays: {} is not defined for the date: {}".format(days_type.name, today)) # # expiry_notification_date = today + timedelta(days=days_setting.number_of_days) # # # Construct queries # queries = Q() # queries &= Q(expiry_date__lte=expiry_notification_date) # queries &= Q(renewal_sent=False) # queries &= Q(replaced_by__isnull=True) # queries &= Q(status__in=(Approval.APPROVAL_STATUS_CURRENT, Approval.APPROVAL_STATUS_SUSPENDED)) # # # For debug # # 
params = options.get('params') # # debug = True if params.get('debug', 'f').lower() in ['true', 't', 'yes', 'y'] else False # # approval_lodgement_number = params.get('approval_renewal_notices_lodgement_number', 'no-lodgement-number') # # if debug: # # queries = queries | Q(lodgement_number__iexact=approval_lodgement_number) # # logger.info('Running command {}'.format(__name__)) # # for a in Approval.objects.filter(**renewal_conditions): # for a in Approval.objects.filter(queries): # # if a.status == Approval.APPROVAL_STATUS_CURRENT or a.status == Approval.APPROVAL_STATUS_SUSPENDED: # try: # a.generate_renewal_doc() # send_approval_renewal_email_notification(a) # a.renewal_sent = True # a.save() # logger.info('Renewal notice sent for Approval {}'.format(a.id)) # updates.append(a.lodgement_number) # except Exception as e: # err_msg = 'Error sending renewal notice for Approval {}'.format(a.lodgement_number) # logger.error('{}\n{}'.format(err_msg, str(e))) # errors.append(err_msg) cmd_name = __name__.split('.')[-1].replace('_', ' ').upper() err_str = '<strong style="color: red;">Errors: {}</strong>'.format(len(errors)) if len(errors)>0 else '<strong style="color: green;">Errors: 0</strong>' msg = '<p>{} completed. {}. IDs updated: {}.</p>'.format(cmd_name, err_str, updates) logger.info(msg) print(msg) # will redirect to cron_tasks.log file, by the parent script
45.705036
160
0.668346
from django.core.management.base import BaseCommand from django.utils import timezone from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db.models import Q from mooringlicensing.components.approvals.models import ( Approval, WaitingListAllocation, AnnualAdmissionPermit, AuthorisedUserPermit, MooringLicence, DcvPermit, ) from ledger.accounts.models import EmailUser from datetime import timedelta from mooringlicensing.components.proposals.email import send_approval_renewal_email_notification import logging from mooringlicensing.components.main.models import NumberOfDaysType, NumberOfDaysSetting from mooringlicensing.settings import ( CODE_DAYS_FOR_RENEWAL_WLA, CODE_DAYS_FOR_RENEWAL_AAP, CODE_DAYS_FOR_RENEWAL_AUP, CODE_DAYS_FOR_RENEWAL_ML, CODE_DAYS_FOR_RENEWAL_DCVP, ) logger = logging.getLogger(__name__) class Command(BaseCommand): help = 'Send Approval renewal notice when approval is due to expire in 30 days' def perform_per_type(self, number_of_days_code, approval_class, updates, errors): today = timezone.localtime(timezone.now()).date() days_type = NumberOfDaysType.objects.get(code=number_of_days_code) days_setting = NumberOfDaysSetting.get_setting_by_date(days_type, today) if not days_setting: raise ImproperlyConfigured("NumberOfDays: {} is not defined for the date: {}".format(days_type.name, today)) expiry_notification_date = today + timedelta(days=days_setting.number_of_days) logger.info('Running command {}'.format(__name__)) queries = Q() if number_of_days_code == CODE_DAYS_FOR_RENEWAL_DCVP: queries &= Q(end_date__lte=expiry_notification_date) queries &= Q(renewal_sent=False) queries &= Q(status__in=[DcvPermit.DCV_PERMIT_STATUS_CURRENT,]) else: queries &= Q(expiry_date__lte=expiry_notification_date) queries &= Q(renewal_sent=False) queries &= Q(replaced_by__isnull=True) queries &= Q(status__in=(Approval.APPROVAL_STATUS_CURRENT, Approval.APPROVAL_STATUS_SUSPENDED)) approvals = 
approval_class.objects.filter(queries) for a in approvals: try: if approval_class == DcvPermit: pass else: a.generate_renewal_doc() send_approval_renewal_email_notification(a) a.renewal_sent = True a.save() logger.info('Renewal notice sent for Approval {}'.format(a.id)) updates.append(a.lodgement_number) except Exception as e: err_msg = 'Error sending renewal notice for Approval {}'.format(a.lodgement_number) logger.error('{}\n{}'.format(err_msg, str(e))) errors.append(err_msg) def handle(self, *args, **options): try: user = EmailUser.objects.get(email=settings.CRON_EMAIL) except: user = EmailUser.objects.create(email=settings.CRON_EMAIL, password='') updates, errors = [], [] self.perform_per_type(CODE_DAYS_FOR_RENEWAL_WLA, WaitingListAllocation, updates, errors) self.perform_per_type(CODE_DAYS_FOR_RENEWAL_AAP, AnnualAdmissionPermit, updates, errors) self.perform_per_type(CODE_DAYS_FOR_RENEWAL_AUP, AuthorisedUserPermit, updates, errors) self.perform_per_type(CODE_DAYS_FOR_RENEWAL_ML, MooringLicence, updates, errors)
true
true
f72cbe35893af2f1b2c363e8fe4e587be57b909c
6,321
py
Python
InterventionsMIP/main.py
haoxiangyang89/COVID_Staged_Alert
4c2cc5ef1d38c140875380a5f10a0fe1eaf8a47a
[ "MIT" ]
1
2021-06-24T19:27:01.000Z
2021-06-24T19:27:01.000Z
InterventionsMIP/main.py
haoxiangyang89/COVID_Staged_Alert
4c2cc5ef1d38c140875380a5f10a0fe1eaf8a47a
[ "MIT" ]
null
null
null
InterventionsMIP/main.py
haoxiangyang89/COVID_Staged_Alert
4c2cc5ef1d38c140875380a5f10a0fe1eaf8a47a
[ "MIT" ]
3
2021-12-15T13:32:25.000Z
2022-02-24T13:57:07.000Z
from InterventionsMIP import project_path, instances_path import multiprocessing as mp from threshold_policy import threshold_policy_search from interventions import Intervension from epi_params import EpiSetup, ParamDistribution from utils import parse_arguments from reporting.plotting import plot_stoch_simulations from instances import load_instance if __name__ == '__main__': # Parse arguments args = parse_arguments() # Parse city and get corresponding instance instance = load_instance(args.city, setup_file_name=args.f) # TODO Read command line args for n_proc for better integration with crunch n_proc = args.n_proc # TODO: pull out n_replicas_train and n_replicas_test to a config file n_replicas_train = args.train_reps n_replicas_test = args.test_reps # Create the pool (Note: pool needs to be created only once to run on a cluster) mp_pool = mp.Pool(n_proc) if n_proc > 1 else None for sc in [0]: for co in [0.95]: for base_line_train in [0.4]: for base_line_test in [0.4]: for const in ['test']: #[10 * i for i in range(0, 21)] + [215, 1000]: policy_class = 'step' instance_name = f'local_{instance.city}_SC{sc}_CO{co}_BLTrain{base_line_train}_BLTest_{base_line_test}_{policy_class}_{const}' print('\n============================================') print(instance_name) #TODO: This list should be longe to include all possible transmission reduction values # that might come in the instance file interventions_train = [ Intervension(0, 0, 0, instance.epi, instance.N), Intervension(1, 0, 0, instance.epi, instance.N), Intervension(0, 0, base_line_train, instance.epi, instance.N), Intervension(1, 0, base_line_train, instance.epi, instance.N), Intervension(1, 0, 0.9, instance.epi, instance.N), Intervension(0, co, base_line_train, instance.epi, instance.N), Intervension(1, co, base_line_train, instance.epi, instance.N), Intervension(1, co, 0.9, instance.epi, instance.N), Intervension(1, 0, 0.95, instance.epi, instance.N), Intervension(0, 0, 0.95, instance.epi, instance.N) ] 
interventions_test = [ Intervension(0, 0, 0, instance.epi, instance.N), Intervension(1, 0, 0, instance.epi, instance.N), Intervension(0, 0, base_line_test, instance.epi, instance.N), Intervension(1, 0, base_line_test, instance.epi, instance.N), Intervension(1, 0, 0.9, instance.epi, instance.N), Intervension(0, co, base_line_test, instance.epi, instance.N), Intervension(1, co, base_line_test, instance.epi, instance.N), Intervension(1, co, 0.9, instance.epi, instance.N), Intervension(1, 0, 0.95, instance.epi, instance.N), Intervension(0, 0, 0.95, instance.epi, instance.N) ] sd_levels_train = {'H': 0.9, 'L': base_line_train} sd_levels_test = {'H': 0.9, 'L': base_line_test} best_policy_replicas, policy_params = threshold_policy_search(instance, interventions_train, interventions_test, sd_levels_train, sd_levels_test, cocooning=co, school_closure=sc, mp_pool=mp_pool, n_replicas_train=n_replicas_train, n_replicas_test=n_replicas_test, instance_name=instance_name, policy={ 'class': policy_class, 'vals': [120, 216, 9] }, policy_class=policy_class) n_replicas = len(best_policy_replicas) plot_stoch_simulations( instance_name, best_policy_replicas, ['sim'] * n_replicas, plot_left_axis=['IH'], plot_right_axis=[], T=instance.T, #437, hosp_beds=instance.hosp_beds, population=instance.N.sum(), interventions=interventions_test, calendar=instance.cal, policy_params=policy_params, plot_triggers=True, plot_legend=True, show=True, align_axes=True, n_replicas=5, BL=base_line_test)
64.5
150
0.424933
from InterventionsMIP import project_path, instances_path import multiprocessing as mp from threshold_policy import threshold_policy_search from interventions import Intervension from epi_params import EpiSetup, ParamDistribution from utils import parse_arguments from reporting.plotting import plot_stoch_simulations from instances import load_instance if __name__ == '__main__': args = parse_arguments() instance = load_instance(args.city, setup_file_name=args.f) n_proc = args.n_proc n_replicas_train = args.train_reps n_replicas_test = args.test_reps mp_pool = mp.Pool(n_proc) if n_proc > 1 else None for sc in [0]: for co in [0.95]: for base_line_train in [0.4]: for base_line_test in [0.4]: for const in ['test']: policy_class = 'step' instance_name = f'local_{instance.city}_SC{sc}_CO{co}_BLTrain{base_line_train}_BLTest_{base_line_test}_{policy_class}_{const}' print('\n============================================') print(instance_name) interventions_train = [ Intervension(0, 0, 0, instance.epi, instance.N), Intervension(1, 0, 0, instance.epi, instance.N), Intervension(0, 0, base_line_train, instance.epi, instance.N), Intervension(1, 0, base_line_train, instance.epi, instance.N), Intervension(1, 0, 0.9, instance.epi, instance.N), Intervension(0, co, base_line_train, instance.epi, instance.N), Intervension(1, co, base_line_train, instance.epi, instance.N), Intervension(1, co, 0.9, instance.epi, instance.N), Intervension(1, 0, 0.95, instance.epi, instance.N), Intervension(0, 0, 0.95, instance.epi, instance.N) ] interventions_test = [ Intervension(0, 0, 0, instance.epi, instance.N), Intervension(1, 0, 0, instance.epi, instance.N), Intervension(0, 0, base_line_test, instance.epi, instance.N), Intervension(1, 0, base_line_test, instance.epi, instance.N), Intervension(1, 0, 0.9, instance.epi, instance.N), Intervension(0, co, base_line_test, instance.epi, instance.N), Intervension(1, co, base_line_test, instance.epi, instance.N), Intervension(1, co, 0.9, instance.epi, 
instance.N), Intervension(1, 0, 0.95, instance.epi, instance.N), Intervension(0, 0, 0.95, instance.epi, instance.N) ] sd_levels_train = {'H': 0.9, 'L': base_line_train} sd_levels_test = {'H': 0.9, 'L': base_line_test} best_policy_replicas, policy_params = threshold_policy_search(instance, interventions_train, interventions_test, sd_levels_train, sd_levels_test, cocooning=co, school_closure=sc, mp_pool=mp_pool, n_replicas_train=n_replicas_train, n_replicas_test=n_replicas_test, instance_name=instance_name, policy={ 'class': policy_class, 'vals': [120, 216, 9] }, policy_class=policy_class) n_replicas = len(best_policy_replicas) plot_stoch_simulations( instance_name, best_policy_replicas, ['sim'] * n_replicas, plot_left_axis=['IH'], plot_right_axis=[], T=instance.T, hosp_beds=instance.hosp_beds, population=instance.N.sum(), interventions=interventions_test, calendar=instance.cal, policy_params=policy_params, plot_triggers=True, plot_legend=True, show=True, align_axes=True, n_replicas=5, BL=base_line_test)
true
true
f72cbe73762b18771ed1651cd35031464722fae9
19,152
py
Python
official/nlp/transformer/transformer_main.py
873040/Abhishek
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
[ "Apache-2.0" ]
4
2020-03-13T14:01:32.000Z
2021-05-31T17:17:32.000Z
official/nlp/transformer/transformer_main.py
873040/Abhishek
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
[ "Apache-2.0" ]
10
2019-12-28T21:31:19.000Z
2020-04-12T20:01:58.000Z
official/nlp/transformer/transformer_main.py
873040/Abhishek
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
[ "Apache-2.0" ]
8
2020-04-12T04:30:33.000Z
2021-09-17T20:54:44.000Z
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Train and evaluate the Transformer model. See README for description of setting the training schedule and evaluating the BLEU score. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile from absl import app from absl import flags from absl import logging import tensorflow as tf from official.modeling import performance from official.nlp.transformer import compute_bleu from official.nlp.transformer import data_pipeline from official.nlp.transformer import metrics from official.nlp.transformer import misc from official.nlp.transformer import optimizer from official.nlp.transformer import transformer from official.nlp.transformer import translate from official.nlp.transformer.utils import tokenizer from official.utils.flags import core as flags_core from official.utils.logs import logger from official.utils.misc import distribution_utils from official.utils.misc import keras_utils INF = int(1e9) BLEU_DIR = "bleu" _SINGLE_SAMPLE = 1 def translate_and_compute_bleu(model, params, subtokenizer, bleu_source, bleu_ref, distribution_strategy=None): """Translate file and report the cased and uncased bleu scores. Args: model: A Keras model, used to generate the translations. 
params: A dictionary, containing the translation related parameters. subtokenizer: A subtokenizer object, used for encoding and decoding source and translated lines. bleu_source: A file containing source sentences for translation. bleu_ref: A file containing the reference for the translated sentences. distribution_strategy: A platform distribution strategy, used for TPU based translation. Returns: uncased_score: A float, the case insensitive BLEU score. cased_score: A float, the case sensitive BLEU score. """ # Create temporary file to store translation. tmp = tempfile.NamedTemporaryFile(delete=False) tmp_filename = tmp.name translate.translate_file( model, params, subtokenizer, bleu_source, output_file=tmp_filename, print_all_translations=False, distribution_strategy=distribution_strategy) # Compute uncased and cased bleu scores. uncased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, False) cased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, True) os.remove(tmp_filename) return uncased_score, cased_score def evaluate_and_log_bleu(model, params, bleu_source, bleu_ref, vocab_file, distribution_strategy=None): """Calculate and record the BLEU score. Args: model: A Keras model, used to generate the translations. params: A dictionary, containing the translation related parameters. bleu_source: A file containing source sentences for translation. bleu_ref: A file containing the reference for the translated sentences. vocab_file: A file containing the vocabulary for translation. distribution_strategy: A platform distribution strategy, used for TPU based translation. Returns: uncased_score: A float, the case insensitive BLEU score. cased_score: A float, the case sensitive BLEU score. 
""" subtokenizer = tokenizer.Subtokenizer(vocab_file) uncased_score, cased_score = translate_and_compute_bleu( model, params, subtokenizer, bleu_source, bleu_ref, distribution_strategy) logging.info("Bleu score (uncased): %s", uncased_score) logging.info("Bleu score (cased): %s", cased_score) return uncased_score, cased_score class TransformerTask(object): """Main entry of Transformer model.""" def __init__(self, flags_obj): """Init function of TransformerMain. Args: flags_obj: Object containing parsed flag values, i.e., FLAGS. Raises: ValueError: if not using static batch for input data on TPU. """ self.flags_obj = flags_obj self.predict_model = None # Add flag-defined parameters to params object num_gpus = flags_core.get_num_gpus(flags_obj) self.params = params = misc.get_model_params(flags_obj.param_set, num_gpus) params["num_gpus"] = num_gpus params["use_ctl"] = flags_obj.use_ctl params["data_dir"] = flags_obj.data_dir params["model_dir"] = flags_obj.model_dir params["static_batch"] = flags_obj.static_batch params["max_length"] = flags_obj.max_length params["decode_batch_size"] = flags_obj.decode_batch_size params["decode_max_length"] = flags_obj.decode_max_length params["padded_decode"] = flags_obj.padded_decode params["num_parallel_calls"] = ( flags_obj.num_parallel_calls or tf.data.experimental.AUTOTUNE) params["use_synthetic_data"] = flags_obj.use_synthetic_data params["batch_size"] = flags_obj.batch_size or params["default_batch_size"] params["repeat_dataset"] = None params["dtype"] = flags_core.get_tf_dtype(flags_obj) params["enable_tensorboard"] = flags_obj.enable_tensorboard params["enable_metrics_in_training"] = flags_obj.enable_metrics_in_training params["steps_between_evals"] = flags_obj.steps_between_evals params["enable_checkpointing"] = flags_obj.enable_checkpointing self.distribution_strategy = distribution_utils.get_distribution_strategy( distribution_strategy=flags_obj.distribution_strategy, num_gpus=num_gpus, 
all_reduce_alg=flags_obj.all_reduce_alg, num_packs=flags_obj.num_packs, tpu_address=flags_obj.tpu or "") if self.use_tpu: params["num_replicas"] = self.distribution_strategy.num_replicas_in_sync if not params["static_batch"]: raise ValueError("TPU requires static batch for input data.") else: logging.info("Running transformer with num_gpus = %d", num_gpus) if self.distribution_strategy: logging.info("For training, using distribution strategy: %s", self.distribution_strategy) else: logging.info("Not using any distribution strategy.") performance.set_mixed_precision_policy( params["dtype"], flags_core.get_loss_scale(flags_obj, default_for_fp16="dynamic")) @property def use_tpu(self): if self.distribution_strategy: return isinstance(self.distribution_strategy, tf.distribute.experimental.TPUStrategy) return False def train(self): """Trains the model.""" params = self.params flags_obj = self.flags_obj # Sets config options. keras_utils.set_session_config(enable_xla=flags_obj.enable_xla) _ensure_dir(flags_obj.model_dir) with distribution_utils.get_strategy_scope(self.distribution_strategy): model = transformer.create_model(params, is_train=True) opt = self._create_optimizer() current_step = 0 checkpoint = tf.train.Checkpoint(model=model, optimizer=opt) latest_checkpoint = tf.train.latest_checkpoint(flags_obj.model_dir) if latest_checkpoint: checkpoint.restore(latest_checkpoint) logging.info("Loaded checkpoint %s", latest_checkpoint) current_step = opt.iterations.numpy() if params["use_ctl"]: train_loss_metric = tf.keras.metrics.Mean( "training_loss", dtype=tf.float32) if params["enable_tensorboard"]: summary_writer = tf.compat.v2.summary.create_file_writer( flags_obj.model_dir) else: summary_writer = tf.compat.v2.summary.create_noop_writer() train_metrics = [train_loss_metric] if params["enable_metrics_in_training"]: train_metrics = train_metrics + model.metrics else: model.compile(opt) model.summary() if self.use_tpu: # Different from experimental_distribute_dataset, # 
experimental_distribute_datasets_from_function requires # per-replica/local batch size. params["batch_size"] /= self.distribution_strategy.num_replicas_in_sync train_ds = ( self.distribution_strategy .experimental_distribute_datasets_from_function( lambda ctx: data_pipeline.train_input_fn(params, ctx))) else: train_ds = data_pipeline.train_input_fn(params) map_data_fn = data_pipeline.map_data_for_transformer_fn train_ds = train_ds.map( map_data_fn, num_parallel_calls=params["num_parallel_calls"]) if params["use_ctl"]: train_ds_iterator = iter(train_ds) callbacks = self._create_callbacks(flags_obj.model_dir, 0, params) # Only TimeHistory callback is supported for CTL if params["use_ctl"]: callbacks = [cb for cb in callbacks if isinstance(cb, keras_utils.TimeHistory)] # TODO(b/139418525): Refactor the custom training loop logic. @tf.function def train_steps(iterator, steps): """Training steps function for TPU runs. Args: iterator: The input iterator of the training dataset. steps: An integer, the number of training steps. Returns: A float, the loss value. """ def _step_fn(inputs): """Per-replica step function.""" inputs, targets = inputs with tf.GradientTape() as tape: logits = model([inputs, targets], training=True) loss = metrics.transformer_loss(logits, targets, params["label_smoothing"], params["vocab_size"]) # Scales the loss, which results in using the average loss across all # of the replicas for backprop. scaled_loss = loss / self.distribution_strategy.num_replicas_in_sync # De-dupes variables due to keras tracking issues. tvars = list({id(v): v for v in model.trainable_variables}.values()) grads = tape.gradient(scaled_loss, tvars) opt.apply_gradients(zip(grads, tvars)) # For reporting, the metric takes the mean of losses. 
train_loss_metric.update_state(loss) for _ in tf.range(steps): train_loss_metric.reset_states() self.distribution_strategy.run( _step_fn, args=(next(iterator),)) cased_score, uncased_score = None, None cased_score_history, uncased_score_history = [], [] while current_step < flags_obj.train_steps: remaining_steps = flags_obj.train_steps - current_step train_steps_per_eval = ( remaining_steps if remaining_steps < flags_obj.steps_between_evals else flags_obj.steps_between_evals) current_iteration = current_step // flags_obj.steps_between_evals logging.info( "Start train iteration at global step:{}".format(current_step)) history = None if params["use_ctl"]: if not self.use_tpu: raise NotImplementedError( "Custom training loop on GPUs is not implemented.") # Runs training steps. with summary_writer.as_default(): for cb in callbacks: cb.on_epoch_begin(current_iteration) cb.on_batch_begin(0) train_steps( train_ds_iterator, tf.convert_to_tensor(train_steps_per_eval, dtype=tf.int32)) current_step += train_steps_per_eval train_loss = train_loss_metric.result().numpy().astype(float) logging.info("Train Step: %d/%d / loss = %s", current_step, flags_obj.train_steps, train_loss) for cb in callbacks: cb.on_batch_end(train_steps_per_eval - 1) cb.on_epoch_end(current_iteration) if params["enable_tensorboard"]: for metric_obj in train_metrics: tf.compat.v2.summary.scalar(metric_obj.name, metric_obj.result(), current_step) summary_writer.flush() for cb in callbacks: cb.on_train_end() if flags_obj.enable_checkpointing: # avoid check-pointing when running for benchmarking. 
checkpoint_name = checkpoint.save( os.path.join(flags_obj.model_dir, "ctl_step_{}.ckpt".format(current_step))) logging.info("Saved checkpoint to %s", checkpoint_name) else: if self.use_tpu: raise NotImplementedError( "Keras model.fit on TPUs is not implemented.") history = model.fit( train_ds, initial_epoch=current_iteration, epochs=current_iteration + 1, steps_per_epoch=train_steps_per_eval, callbacks=callbacks, # If TimeHistory is enabled, progress bar would be messy. Increase # the verbose level to get rid of it. verbose=(2 if flags_obj.enable_time_history else 1)) current_step += train_steps_per_eval logging.info("Train history: {}".format(history.history)) logging.info("End train iteration at global step:{}".format(current_step)) if (flags_obj.bleu_source and flags_obj.bleu_ref): uncased_score, cased_score = self.eval() cased_score_history.append([current_iteration + 1, cased_score]) uncased_score_history.append([current_iteration + 1, uncased_score]) stats = ({ "loss": train_loss } if history is None else misc.build_stats(history, callbacks)) if uncased_score and cased_score: stats["bleu_uncased"] = uncased_score stats["bleu_cased"] = cased_score stats["bleu_uncased_history"] = uncased_score_history stats["bleu_cased_history"] = cased_score_history return stats def eval(self): """Evaluates the model.""" distribution_strategy = self.distribution_strategy if self.use_tpu else None # We only want to create the model under DS scope for TPU case. # When 'distribution_strategy' is None, a no-op DummyContextManager will # be used. 
with distribution_utils.get_strategy_scope(distribution_strategy): if not self.predict_model: self.predict_model = transformer.create_model(self.params, False) self._load_weights_if_possible( self.predict_model, tf.train.latest_checkpoint(self.flags_obj.model_dir)) self.predict_model.summary() return evaluate_and_log_bleu( self.predict_model, self.params, self.flags_obj.bleu_source, self.flags_obj.bleu_ref, self.flags_obj.vocab_file, distribution_strategy) def predict(self): """Predicts result from the model.""" params = self.params flags_obj = self.flags_obj with tf.name_scope("model"): model = transformer.create_model(params, is_train=False) self._load_weights_if_possible( model, tf.train.latest_checkpoint(self.flags_obj.model_dir)) model.summary() subtokenizer = tokenizer.Subtokenizer(flags_obj.vocab_file) ds = data_pipeline.eval_input_fn(params) ds = ds.map(lambda x, y: x).take(_SINGLE_SAMPLE) ret = model.predict(ds) val_outputs, _ = ret length = len(val_outputs) for i in range(length): translate.translate_from_input(val_outputs[i], subtokenizer) def _create_callbacks(self, cur_log_dir, init_steps, params): """Creates a list of callbacks.""" sfunc = optimizer.LearningRateFn(params["learning_rate"], params["hidden_size"], params["learning_rate_warmup_steps"]) scheduler_callback = optimizer.LearningRateScheduler(sfunc, init_steps) callbacks = misc.get_callbacks(params["steps_between_evals"]) callbacks.append(scheduler_callback) if params["enable_checkpointing"]: ckpt_full_path = os.path.join(cur_log_dir, "cp-{epoch:04d}.ckpt") callbacks.append( tf.keras.callbacks.ModelCheckpoint( ckpt_full_path, save_weights_only=True)) return callbacks def _load_weights_if_possible(self, model, init_weight_path=None): """Loads model weights when it is provided.""" if init_weight_path: logging.info("Load weights: {}".format(init_weight_path)) # TODO(b/139414977): Having the same variable restoring method for both # TPU and GPU. 
if self.use_tpu: checkpoint = tf.train.Checkpoint( model=model, optimizer=self._create_optimizer()) checkpoint.restore(init_weight_path) else: model.load_weights(init_weight_path) else: logging.info("Weights not loaded from path:{}".format(init_weight_path)) def _create_optimizer(self): """Creates optimizer.""" params = self.params lr_schedule = optimizer.LearningRateSchedule( params["learning_rate"], params["hidden_size"], params["learning_rate_warmup_steps"]) opt = tf.keras.optimizers.Adam( lr_schedule if self.use_tpu else params["learning_rate"], params["optimizer_adam_beta1"], params["optimizer_adam_beta2"], epsilon=params["optimizer_adam_epsilon"]) opt = performance.configure_optimizer( opt, use_float16=params["dtype"] == tf.float16, use_graph_rewrite=self.flags_obj.fp16_implementation == "graph_rewrite", loss_scale=flags_core.get_loss_scale( self.flags_obj, default_for_fp16="dynamic")) return opt def _ensure_dir(log_dir): """Makes log dir if not existed.""" if not tf.io.gfile.exists(log_dir): tf.io.gfile.makedirs(log_dir) def main(_): flags_obj = flags.FLAGS with logger.benchmark_context(flags_obj): task = TransformerTask(flags_obj) # Execute flag override logic for better model performance if flags_obj.tf_gpu_thread_mode: keras_utils.set_gpu_thread_mode_and_count( per_gpu_thread_count=flags_obj.per_gpu_thread_count, gpu_thread_mode=flags_obj.tf_gpu_thread_mode, num_gpus=flags_obj.num_gpus, datasets_num_private_threads=flags_obj.datasets_num_private_threads) if flags_obj.mode == "train": task.train() elif flags_obj.mode == "predict": task.predict() elif flags_obj.mode == "eval": task.eval() else: raise ValueError("Invalid mode {}".format(flags_obj.mode)) if __name__ == "__main__": logging.set_verbosity(logging.INFO) misc.define_transformer_flags() app.run(main)
38.457831
80
0.688753
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile from absl import app from absl import flags from absl import logging import tensorflow as tf from official.modeling import performance from official.nlp.transformer import compute_bleu from official.nlp.transformer import data_pipeline from official.nlp.transformer import metrics from official.nlp.transformer import misc from official.nlp.transformer import optimizer from official.nlp.transformer import transformer from official.nlp.transformer import translate from official.nlp.transformer.utils import tokenizer from official.utils.flags import core as flags_core from official.utils.logs import logger from official.utils.misc import distribution_utils from official.utils.misc import keras_utils INF = int(1e9) BLEU_DIR = "bleu" _SINGLE_SAMPLE = 1 def translate_and_compute_bleu(model, params, subtokenizer, bleu_source, bleu_ref, distribution_strategy=None): tmp = tempfile.NamedTemporaryFile(delete=False) tmp_filename = tmp.name translate.translate_file( model, params, subtokenizer, bleu_source, output_file=tmp_filename, print_all_translations=False, distribution_strategy=distribution_strategy) uncased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, False) cased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, True) os.remove(tmp_filename) return uncased_score, cased_score def evaluate_and_log_bleu(model, params, bleu_source, bleu_ref, vocab_file, distribution_strategy=None): subtokenizer = tokenizer.Subtokenizer(vocab_file) uncased_score, cased_score = translate_and_compute_bleu( model, params, subtokenizer, bleu_source, bleu_ref, distribution_strategy) logging.info("Bleu score (uncased): %s", uncased_score) logging.info("Bleu score (cased): %s", cased_score) return uncased_score, cased_score class TransformerTask(object): def __init__(self, flags_obj): self.flags_obj = flags_obj self.predict_model = None num_gpus 
= flags_core.get_num_gpus(flags_obj) self.params = params = misc.get_model_params(flags_obj.param_set, num_gpus) params["num_gpus"] = num_gpus params["use_ctl"] = flags_obj.use_ctl params["data_dir"] = flags_obj.data_dir params["model_dir"] = flags_obj.model_dir params["static_batch"] = flags_obj.static_batch params["max_length"] = flags_obj.max_length params["decode_batch_size"] = flags_obj.decode_batch_size params["decode_max_length"] = flags_obj.decode_max_length params["padded_decode"] = flags_obj.padded_decode params["num_parallel_calls"] = ( flags_obj.num_parallel_calls or tf.data.experimental.AUTOTUNE) params["use_synthetic_data"] = flags_obj.use_synthetic_data params["batch_size"] = flags_obj.batch_size or params["default_batch_size"] params["repeat_dataset"] = None params["dtype"] = flags_core.get_tf_dtype(flags_obj) params["enable_tensorboard"] = flags_obj.enable_tensorboard params["enable_metrics_in_training"] = flags_obj.enable_metrics_in_training params["steps_between_evals"] = flags_obj.steps_between_evals params["enable_checkpointing"] = flags_obj.enable_checkpointing self.distribution_strategy = distribution_utils.get_distribution_strategy( distribution_strategy=flags_obj.distribution_strategy, num_gpus=num_gpus, all_reduce_alg=flags_obj.all_reduce_alg, num_packs=flags_obj.num_packs, tpu_address=flags_obj.tpu or "") if self.use_tpu: params["num_replicas"] = self.distribution_strategy.num_replicas_in_sync if not params["static_batch"]: raise ValueError("TPU requires static batch for input data.") else: logging.info("Running transformer with num_gpus = %d", num_gpus) if self.distribution_strategy: logging.info("For training, using distribution strategy: %s", self.distribution_strategy) else: logging.info("Not using any distribution strategy.") performance.set_mixed_precision_policy( params["dtype"], flags_core.get_loss_scale(flags_obj, default_for_fp16="dynamic")) @property def use_tpu(self): if self.distribution_strategy: return 
isinstance(self.distribution_strategy, tf.distribute.experimental.TPUStrategy) return False def train(self): params = self.params flags_obj = self.flags_obj keras_utils.set_session_config(enable_xla=flags_obj.enable_xla) _ensure_dir(flags_obj.model_dir) with distribution_utils.get_strategy_scope(self.distribution_strategy): model = transformer.create_model(params, is_train=True) opt = self._create_optimizer() current_step = 0 checkpoint = tf.train.Checkpoint(model=model, optimizer=opt) latest_checkpoint = tf.train.latest_checkpoint(flags_obj.model_dir) if latest_checkpoint: checkpoint.restore(latest_checkpoint) logging.info("Loaded checkpoint %s", latest_checkpoint) current_step = opt.iterations.numpy() if params["use_ctl"]: train_loss_metric = tf.keras.metrics.Mean( "training_loss", dtype=tf.float32) if params["enable_tensorboard"]: summary_writer = tf.compat.v2.summary.create_file_writer( flags_obj.model_dir) else: summary_writer = tf.compat.v2.summary.create_noop_writer() train_metrics = [train_loss_metric] if params["enable_metrics_in_training"]: train_metrics = train_metrics + model.metrics else: model.compile(opt) model.summary() if self.use_tpu: params["batch_size"] /= self.distribution_strategy.num_replicas_in_sync train_ds = ( self.distribution_strategy .experimental_distribute_datasets_from_function( lambda ctx: data_pipeline.train_input_fn(params, ctx))) else: train_ds = data_pipeline.train_input_fn(params) map_data_fn = data_pipeline.map_data_for_transformer_fn train_ds = train_ds.map( map_data_fn, num_parallel_calls=params["num_parallel_calls"]) if params["use_ctl"]: train_ds_iterator = iter(train_ds) callbacks = self._create_callbacks(flags_obj.model_dir, 0, params) if params["use_ctl"]: callbacks = [cb for cb in callbacks if isinstance(cb, keras_utils.TimeHistory)] @tf.function def train_steps(iterator, steps): def _step_fn(inputs): inputs, targets = inputs with tf.GradientTape() as tape: logits = model([inputs, targets], training=True) loss = 
metrics.transformer_loss(logits, targets, params["label_smoothing"], params["vocab_size"]) scaled_loss = loss / self.distribution_strategy.num_replicas_in_sync tvars = list({id(v): v for v in model.trainable_variables}.values()) grads = tape.gradient(scaled_loss, tvars) opt.apply_gradients(zip(grads, tvars)) train_loss_metric.update_state(loss) for _ in tf.range(steps): train_loss_metric.reset_states() self.distribution_strategy.run( _step_fn, args=(next(iterator),)) cased_score, uncased_score = None, None cased_score_history, uncased_score_history = [], [] while current_step < flags_obj.train_steps: remaining_steps = flags_obj.train_steps - current_step train_steps_per_eval = ( remaining_steps if remaining_steps < flags_obj.steps_between_evals else flags_obj.steps_between_evals) current_iteration = current_step // flags_obj.steps_between_evals logging.info( "Start train iteration at global step:{}".format(current_step)) history = None if params["use_ctl"]: if not self.use_tpu: raise NotImplementedError( "Custom training loop on GPUs is not implemented.") with summary_writer.as_default(): for cb in callbacks: cb.on_epoch_begin(current_iteration) cb.on_batch_begin(0) train_steps( train_ds_iterator, tf.convert_to_tensor(train_steps_per_eval, dtype=tf.int32)) current_step += train_steps_per_eval train_loss = train_loss_metric.result().numpy().astype(float) logging.info("Train Step: %d/%d / loss = %s", current_step, flags_obj.train_steps, train_loss) for cb in callbacks: cb.on_batch_end(train_steps_per_eval - 1) cb.on_epoch_end(current_iteration) if params["enable_tensorboard"]: for metric_obj in train_metrics: tf.compat.v2.summary.scalar(metric_obj.name, metric_obj.result(), current_step) summary_writer.flush() for cb in callbacks: cb.on_train_end() if flags_obj.enable_checkpointing: checkpoint_name = checkpoint.save( os.path.join(flags_obj.model_dir, "ctl_step_{}.ckpt".format(current_step))) logging.info("Saved checkpoint to %s", checkpoint_name) else: if 
self.use_tpu: raise NotImplementedError( "Keras model.fit on TPUs is not implemented.") history = model.fit( train_ds, initial_epoch=current_iteration, epochs=current_iteration + 1, steps_per_epoch=train_steps_per_eval, callbacks=callbacks, verbose=(2 if flags_obj.enable_time_history else 1)) current_step += train_steps_per_eval logging.info("Train history: {}".format(history.history)) logging.info("End train iteration at global step:{}".format(current_step)) if (flags_obj.bleu_source and flags_obj.bleu_ref): uncased_score, cased_score = self.eval() cased_score_history.append([current_iteration + 1, cased_score]) uncased_score_history.append([current_iteration + 1, uncased_score]) stats = ({ "loss": train_loss } if history is None else misc.build_stats(history, callbacks)) if uncased_score and cased_score: stats["bleu_uncased"] = uncased_score stats["bleu_cased"] = cased_score stats["bleu_uncased_history"] = uncased_score_history stats["bleu_cased_history"] = cased_score_history return stats def eval(self): distribution_strategy = self.distribution_strategy if self.use_tpu else None with distribution_utils.get_strategy_scope(distribution_strategy): if not self.predict_model: self.predict_model = transformer.create_model(self.params, False) self._load_weights_if_possible( self.predict_model, tf.train.latest_checkpoint(self.flags_obj.model_dir)) self.predict_model.summary() return evaluate_and_log_bleu( self.predict_model, self.params, self.flags_obj.bleu_source, self.flags_obj.bleu_ref, self.flags_obj.vocab_file, distribution_strategy) def predict(self): params = self.params flags_obj = self.flags_obj with tf.name_scope("model"): model = transformer.create_model(params, is_train=False) self._load_weights_if_possible( model, tf.train.latest_checkpoint(self.flags_obj.model_dir)) model.summary() subtokenizer = tokenizer.Subtokenizer(flags_obj.vocab_file) ds = data_pipeline.eval_input_fn(params) ds = ds.map(lambda x, y: x).take(_SINGLE_SAMPLE) ret = model.predict(ds) 
val_outputs, _ = ret length = len(val_outputs) for i in range(length): translate.translate_from_input(val_outputs[i], subtokenizer) def _create_callbacks(self, cur_log_dir, init_steps, params): sfunc = optimizer.LearningRateFn(params["learning_rate"], params["hidden_size"], params["learning_rate_warmup_steps"]) scheduler_callback = optimizer.LearningRateScheduler(sfunc, init_steps) callbacks = misc.get_callbacks(params["steps_between_evals"]) callbacks.append(scheduler_callback) if params["enable_checkpointing"]: ckpt_full_path = os.path.join(cur_log_dir, "cp-{epoch:04d}.ckpt") callbacks.append( tf.keras.callbacks.ModelCheckpoint( ckpt_full_path, save_weights_only=True)) return callbacks def _load_weights_if_possible(self, model, init_weight_path=None): if init_weight_path: logging.info("Load weights: {}".format(init_weight_path)) if self.use_tpu: checkpoint = tf.train.Checkpoint( model=model, optimizer=self._create_optimizer()) checkpoint.restore(init_weight_path) else: model.load_weights(init_weight_path) else: logging.info("Weights not loaded from path:{}".format(init_weight_path)) def _create_optimizer(self): params = self.params lr_schedule = optimizer.LearningRateSchedule( params["learning_rate"], params["hidden_size"], params["learning_rate_warmup_steps"]) opt = tf.keras.optimizers.Adam( lr_schedule if self.use_tpu else params["learning_rate"], params["optimizer_adam_beta1"], params["optimizer_adam_beta2"], epsilon=params["optimizer_adam_epsilon"]) opt = performance.configure_optimizer( opt, use_float16=params["dtype"] == tf.float16, use_graph_rewrite=self.flags_obj.fp16_implementation == "graph_rewrite", loss_scale=flags_core.get_loss_scale( self.flags_obj, default_for_fp16="dynamic")) return opt def _ensure_dir(log_dir): if not tf.io.gfile.exists(log_dir): tf.io.gfile.makedirs(log_dir) def main(_): flags_obj = flags.FLAGS with logger.benchmark_context(flags_obj): task = TransformerTask(flags_obj) if flags_obj.tf_gpu_thread_mode: 
keras_utils.set_gpu_thread_mode_and_count( per_gpu_thread_count=flags_obj.per_gpu_thread_count, gpu_thread_mode=flags_obj.tf_gpu_thread_mode, num_gpus=flags_obj.num_gpus, datasets_num_private_threads=flags_obj.datasets_num_private_threads) if flags_obj.mode == "train": task.train() elif flags_obj.mode == "predict": task.predict() elif flags_obj.mode == "eval": task.eval() else: raise ValueError("Invalid mode {}".format(flags_obj.mode)) if __name__ == "__main__": logging.set_verbosity(logging.INFO) misc.define_transformer_flags() app.run(main)
true
true
f72cbea6b5b5fb4a9f0c9efd4d8092605bb087d6
18,884
py
Python
src/sentry/models/dsymfile.py
percipient/sentry
84c6f75ab40e12677c81d9210c3fe8ad66d7a0c3
[ "BSD-3-Clause" ]
null
null
null
src/sentry/models/dsymfile.py
percipient/sentry
84c6f75ab40e12677c81d9210c3fe8ad66d7a0c3
[ "BSD-3-Clause" ]
8
2019-12-28T23:49:55.000Z
2022-03-02T04:34:18.000Z
src/sentry/models/dsymfile.py
percipient/sentry
84c6f75ab40e12677c81d9210c3fe8ad66d7a0c3
[ "BSD-3-Clause" ]
null
null
null
""" sentry.models.dsymfile ~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2016 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import import os import shutil import hashlib import six import tempfile from requests.exceptions import RequestException from jsonfield import JSONField from itertools import chain from django.db import models, router, transaction, connection, IntegrityError from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from symsynd.macho.arch import get_macho_uuids from sentry.db.models import FlexibleForeignKey, Model, BoundedBigIntegerField, \ sane_repr, BaseManager, BoundedPositiveIntegerField from sentry.models.file import File from sentry.utils.zip import safe_extract_zip from sentry.utils.db import is_sqlite from sentry.utils.native import parse_addr from sentry.constants import KNOWN_DSYM_TYPES from sentry.reprocessing import resolve_processing_issue class VersionDSymFile(Model): __core__ = False objects = BaseManager() dsym_file = FlexibleForeignKey('sentry.ProjectDSymFile', null=True) dsym_app = FlexibleForeignKey('sentry.DSymApp') version = models.CharField(max_length=32) build = models.CharField(max_length=32, null=True) date_added = models.DateTimeField(default=timezone.now) class Meta: app_label = 'sentry' db_table = 'sentry_versiondsymfile' unique_together = (('dsym_file', 'version', 'build'),) # TODO(dcramer): pull in enum library class DSymPlatform(object): GENERIC = 0 APPLE = 1 ANDROID = 2 DSYM_PLATFORMS = { 'generic': DSymPlatform.GENERIC, 'apple': DSymPlatform.APPLE, 'android': DSymPlatform.ANDROID, } def _auto_enrich_data(data, app_id, platform): # If we don't have an icon URL we can try to fetch one from iTunes if 'icon_url' not in data and platform == DSymPlatform.APPLE: from sentry.http import safe_urlopen try: rv = safe_urlopen('http://itunes.apple.com/lookup', params={ 'bundleId': app_id, }) except 
RequestException: pass else: if rv.ok: rv = rv.json() if rv.get('results'): data['icon_url'] = rv['results'][0]['artworkUrl512'] class DSymAppManager(BaseManager): def create_or_update_app(self, sync_id, app_id, project, data=None, platform=DSymPlatform.GENERIC): if data is None: data = {} _auto_enrich_data(data, app_id, platform) existing_app = DSymApp.objects.filter( app_id=app_id, project=project).first() if existing_app is not None: now = timezone.now() existing_app.update( sync_id=sync_id, data=data, last_synced=now, ) return existing_app return BaseManager.create(self, sync_id=sync_id, app_id=app_id, data=data, project=project, platform=platform ) class DSymApp(Model): __core__ = False objects = DSymAppManager() project = FlexibleForeignKey('sentry.Project') app_id = models.CharField(max_length=64) sync_id = models.CharField(max_length=64, null=True) data = JSONField() platform = BoundedPositiveIntegerField(default=0, choices=( (DSymPlatform.GENERIC, _('Generic')), (DSymPlatform.APPLE, _('Apple')), (DSymPlatform.ANDROID, _('Android')), )) last_synced = models.DateTimeField(default=timezone.now) date_added = models.DateTimeField(default=timezone.now) class Meta: app_label = 'sentry' db_table = 'sentry_dsymapp' unique_together = (('project', 'platform', 'app_id'),) class DSymSDKManager(BaseManager): def enumerate_sdks(self, sdk=None, version=None): """Return a grouped list of SDKs.""" filter = '' args = [] if version is not None: for col, val in zip(['major', 'minor', 'patchlevel'], version.split('.')): if not val.isdigit(): return [] filter += ' and k.version_%s = %d' % ( col, int(val) ) if sdk is not None: filter += ' and k.sdk_name = %s' args.append(sdk) cur = connection.cursor() cur.execute(''' select distinct k.*, count(*) as bundle_count, o.cpu_name from sentry_dsymsdk k, sentry_dsymbundle b, sentry_dsymobject o where b.sdk_id = k.id and b.object_id = o.id %s group by k.id, k.sdk_name, o.cpu_name ''' % filter, args) rv = [] for row in cur.fetchall(): row 
= dict(zip([x[0] for x in cur.description], row)) ver = '%s.%s.%s' % ( row['version_major'], row['version_minor'], row['version_patchlevel'] ) rv.append({ 'sdk_name': row['sdk_name'], 'version': ver, 'build': row['version_build'], 'bundle_count': row['bundle_count'], 'cpu_name': row['cpu_name'], }) return sorted(rv, key=lambda x: (x['sdk_name'], x['version'], x['build'], x['cpu_name'])) class DSymSDK(Model): __core__ = False dsym_type = models.CharField(max_length=20, db_index=True) sdk_name = models.CharField(max_length=20) version_major = models.IntegerField() version_minor = models.IntegerField() version_patchlevel = models.IntegerField() version_build = models.CharField(max_length=40) objects = DSymSDKManager() class Meta: app_label = 'sentry' db_table = 'sentry_dsymsdk' index_together = [ ('version_major', 'version_minor', 'version_patchlevel', 'version_build'), ] class DSymObject(Model): __core__ = False cpu_name = models.CharField(max_length=40) object_path = models.TextField(db_index=True) uuid = models.CharField(max_length=36, db_index=True) vmaddr = BoundedBigIntegerField(null=True) vmsize = BoundedBigIntegerField(null=True) class Meta: app_label = 'sentry' db_table = 'sentry_dsymobject' class DSymBundle(Model): __core__ = False sdk = FlexibleForeignKey('sentry.DSymSDK') object = FlexibleForeignKey('sentry.DSymObject') class Meta: app_label = 'sentry' db_table = 'sentry_dsymbundle' class DSymSymbolManager(BaseManager): def bulk_insert(self, items): db = router.db_for_write(DSymSymbol) items = list(items) if not items: return # On SQLite we don't do this. Two reasons: one, it does not # seem significantly faster and you're an idiot if you import # huge amounts of system symbols into sqlite anyways. 
secondly # because of the low parameter limit if not is_sqlite(): try: with transaction.atomic(using=db): cur = connection.cursor() cur.execute(''' insert into sentry_dsymsymbol (object_id, address, symbol) values %s ''' % ', '.join(['(%s, %s, %s)'] * len(items)), list(chain(*items))) cur.close() return except IntegrityError: pass cur = connection.cursor() for item in items: cur.execute(''' insert into sentry_dsymsymbol (object_id, address, symbol) select %(object_id)s, %(address)s, %(symbol)s where not exists ( select 1 from sentry_dsymsymbol where object_id = %(object_id)s and address = %(address)s); ''', { 'object_id': item[0], 'address': item[1], 'symbol': item[2], }) cur.close() def lookup_symbol(self, instruction_addr, image_addr, uuid, cpu_name=None, object_path=None, sdk_info=None, image_vmaddr=None): """Finds a system symbol.""" # If we use the "none" dsym type we never return a symbol here. if sdk_info is not None and sdk_info['dsym_type'] == 'none': return instruction_addr = parse_addr(instruction_addr) image_addr = parse_addr(image_addr) addr_abs = None if image_vmaddr is not None: image_vmaddr = parse_addr(image_vmaddr) addr_abs = image_vmaddr + instruction_addr - image_addr addr_rel = instruction_addr - image_addr uuid = six.text_type(uuid).lower() cur = connection.cursor() try: # First try: exact match on uuid (addr_rel) cur.execute(''' select s.symbol from sentry_dsymsymbol s, sentry_dsymobject o where o.uuid = %s and s.object_id = o.id and s.address <= o.vmaddr + %s and s.address >= o.vmaddr order by address desc limit 1; ''', [uuid, addr_rel]) rv = cur.fetchone() if rv: return rv[0] # Second try: exact match on uuid (addr_abs) if addr_abs is not None: cur.execute(''' select s.symbol from sentry_dsymsymbol s, sentry_dsymobject o where o.uuid = %s and s.object_id = o.id and s.address <= %s and s.address >= %s order by address desc limit 1; ''', [uuid, addr_abs, image_vmaddr]) rv = cur.fetchone() if rv: return rv[0] # Third try: exact match on path 
and arch (addr_rel) if sdk_info is None or \ cpu_name is None or \ object_path is None: return cur.execute(''' select s.symbol from sentry_dsymsymbol s, sentry_dsymobject o, sentry_dsymsdk k, sentry_dsymbundle b where b.sdk_id = k.id and b.object_id = o.id and s.object_id = o.id and k.sdk_name = %s and k.dsym_type = %s and k.version_major = %s and k.version_minor = %s and k.version_patchlevel = %s and o.cpu_name = %s and o.object_path = %s and s.address <= o.vmaddr + %s and s.address >= o.vmaddr order by address desc limit 1; ''', [sdk_info['sdk_name'], sdk_info['dsym_type'], sdk_info['version_major'], sdk_info['version_minor'], sdk_info['version_patchlevel'], cpu_name, object_path, addr_rel]) rv = cur.fetchone() if rv: return rv[0] # Fourth try: exact match on path and arch (addr_abs) if addr_abs is not None: cur.execute(''' select s.symbol from sentry_dsymsymbol s, sentry_dsymobject o, sentry_dsymsdk k, sentry_dsymbundle b where b.sdk_id = k.id and b.object_id = o.id and s.object_id = o.id and k.sdk_name = %s and k.dsym_type = %s and k.version_major = %s and k.version_minor = %s and k.version_patchlevel = %s and o.cpu_name = %s and o.object_path = %s and s.address <= %s and s.address >= %s order by address desc limit 1; ''', [sdk_info['sdk_name'], sdk_info['dsym_type'], sdk_info['version_major'], sdk_info['version_minor'], sdk_info['version_patchlevel'], cpu_name, object_path, addr_abs, image_vmaddr]) rv = cur.fetchone() if rv: return rv[0] finally: cur.close() class DSymSymbol(Model): __core__ = False object = FlexibleForeignKey('sentry.DSymObject') address = BoundedBigIntegerField(db_index=True) symbol = models.TextField() objects = DSymSymbolManager() class Meta: app_label = 'sentry' db_table = 'sentry_dsymsymbol' unique_together = [ ('object', 'address'), ] class CommonDSymFile(Model): """ A single dsym file that is associated with a project. 
""" __core__ = False file = FlexibleForeignKey('sentry.File') object_name = models.TextField() cpu_name = models.CharField(max_length=40) __repr__ = sane_repr('object_name', 'cpu_name', 'uuid') class Meta: abstract = True app_label = 'sentry' @property def dsym_type(self): ct = self.file.headers.get('Content-Type').lower() return KNOWN_DSYM_TYPES.get(ct, 'unknown') class ProjectDSymFileManager(BaseManager): def find_missing(self, checksums, project): if not checksums: return[] checksums = [x.lower() for x in checksums] missing = set(checksums) found = ProjectDSymFile.objects.filter( file__checksum__in=checksums, project=project ).values('file__checksum') for values in found: missing.discard(values.values()[0]) return sorted(missing) def find_by_checksums(self, checksums, project): if not checksums: return [] checksums = [x.lower() for x in checksums] return ProjectDSymFile.objects.filter( file__checksum__in=checksums, project=project ) class ProjectDSymFile(CommonDSymFile): project = FlexibleForeignKey('sentry.Project', null=True) uuid = models.CharField(max_length=36) is_global = False objects = ProjectDSymFileManager() class Meta(CommonDSymFile.Meta): unique_together = (('project', 'uuid'),) db_table = 'sentry_projectdsymfile' class GlobalDSymFile(CommonDSymFile): uuid = models.CharField(max_length=36, unique=True) is_global = True class Meta(CommonDSymFile.Meta): db_table = 'sentry_globaldsymfile' def _create_macho_dsym_from_uuid(project, cpu_name, uuid, fileobj, object_name): """This creates a mach dsym file from the given uuid and open file object to a dsym file. This will not verify the uuid. Use `create_files_from_macho_zip` for doing everything. 
""" extra = {} if project is None: cls = GlobalDSymFile file_type = 'global.dsym' else: cls = ProjectDSymFile extra['project'] = project file_type = 'project.dsym' h = hashlib.sha1() while 1: chunk = fileobj.read(16384) if not chunk: break h.update(chunk) checksum = h.hexdigest() fileobj.seek(0, 0) try: rv = cls.objects.get(uuid=uuid, **extra) if rv.file.checksum == checksum: return rv except cls.DoesNotExist: pass else: # The checksum mismatches. In this case we delete the old object # and perform a re-upload. rv.delete() file = File.objects.create( name=uuid, type=file_type, headers={ 'Content-Type': 'application/x-mach-binary' }, ) file.putfile(fileobj) try: with transaction.atomic(): rv = cls.objects.create( file=file, uuid=uuid, cpu_name=cpu_name, object_name=object_name, **extra ) except IntegrityError: file.delete() rv = cls.objects.get(uuid=uuid, **extra) resolve_processing_issue( project=project, scope='native', object='dsym:%s' % uuid, ) return rv def create_files_from_macho_zip(fileobj, project=None): """Creates all missing dsym files from the given zip file. This returns a list of all files created. """ scratchpad = tempfile.mkdtemp() try: safe_extract_zip(fileobj, scratchpad) to_create = [] for dirpath, dirnames, filenames in os.walk(scratchpad): for fn in filenames: fn = os.path.join(dirpath, fn) try: uuids = get_macho_uuids(fn) except (IOError, ValueError): # Whatever was contained there, was probably not a # macho file. continue for cpu, uuid in uuids: to_create.append((cpu, uuid, fn)) rv = [] for cpu, uuid, filename in to_create: with open(filename, 'rb') as f: rv.append((_create_macho_dsym_from_uuid( project, cpu, uuid, f, os.path.basename(filename)))) return rv finally: shutil.rmtree(scratchpad) def find_dsym_file(project, image_uuid): """Finds a dsym file for the given uuid. Looks both within the project as well the global store. 
""" image_uuid = image_uuid.lower() try: return ProjectDSymFile.objects.filter( uuid=image_uuid, project=project ).select_related('file').get() except ProjectDSymFile.DoesNotExist: pass try: return GlobalDSymFile.objects.filter( uuid=image_uuid ).select_related('file').get() except GlobalDSymFile.DoesNotExist: return None
32.061121
81
0.54157
from __future__ import absolute_import import os import shutil import hashlib import six import tempfile from requests.exceptions import RequestException from jsonfield import JSONField from itertools import chain from django.db import models, router, transaction, connection, IntegrityError from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from symsynd.macho.arch import get_macho_uuids from sentry.db.models import FlexibleForeignKey, Model, BoundedBigIntegerField, \ sane_repr, BaseManager, BoundedPositiveIntegerField from sentry.models.file import File from sentry.utils.zip import safe_extract_zip from sentry.utils.db import is_sqlite from sentry.utils.native import parse_addr from sentry.constants import KNOWN_DSYM_TYPES from sentry.reprocessing import resolve_processing_issue class VersionDSymFile(Model): __core__ = False objects = BaseManager() dsym_file = FlexibleForeignKey('sentry.ProjectDSymFile', null=True) dsym_app = FlexibleForeignKey('sentry.DSymApp') version = models.CharField(max_length=32) build = models.CharField(max_length=32, null=True) date_added = models.DateTimeField(default=timezone.now) class Meta: app_label = 'sentry' db_table = 'sentry_versiondsymfile' unique_together = (('dsym_file', 'version', 'build'),) class DSymPlatform(object): GENERIC = 0 APPLE = 1 ANDROID = 2 DSYM_PLATFORMS = { 'generic': DSymPlatform.GENERIC, 'apple': DSymPlatform.APPLE, 'android': DSymPlatform.ANDROID, } def _auto_enrich_data(data, app_id, platform): if 'icon_url' not in data and platform == DSymPlatform.APPLE: from sentry.http import safe_urlopen try: rv = safe_urlopen('http://itunes.apple.com/lookup', params={ 'bundleId': app_id, }) except RequestException: pass else: if rv.ok: rv = rv.json() if rv.get('results'): data['icon_url'] = rv['results'][0]['artworkUrl512'] class DSymAppManager(BaseManager): def create_or_update_app(self, sync_id, app_id, project, data=None, platform=DSymPlatform.GENERIC): if data is None: data = {} 
_auto_enrich_data(data, app_id, platform) existing_app = DSymApp.objects.filter( app_id=app_id, project=project).first() if existing_app is not None: now = timezone.now() existing_app.update( sync_id=sync_id, data=data, last_synced=now, ) return existing_app return BaseManager.create(self, sync_id=sync_id, app_id=app_id, data=data, project=project, platform=platform ) class DSymApp(Model): __core__ = False objects = DSymAppManager() project = FlexibleForeignKey('sentry.Project') app_id = models.CharField(max_length=64) sync_id = models.CharField(max_length=64, null=True) data = JSONField() platform = BoundedPositiveIntegerField(default=0, choices=( (DSymPlatform.GENERIC, _('Generic')), (DSymPlatform.APPLE, _('Apple')), (DSymPlatform.ANDROID, _('Android')), )) last_synced = models.DateTimeField(default=timezone.now) date_added = models.DateTimeField(default=timezone.now) class Meta: app_label = 'sentry' db_table = 'sentry_dsymapp' unique_together = (('project', 'platform', 'app_id'),) class DSymSDKManager(BaseManager): def enumerate_sdks(self, sdk=None, version=None): filter = '' args = [] if version is not None: for col, val in zip(['major', 'minor', 'patchlevel'], version.split('.')): if not val.isdigit(): return [] filter += ' and k.version_%s = %d' % ( col, int(val) ) if sdk is not None: filter += ' and k.sdk_name = %s' args.append(sdk) cur = connection.cursor() cur.execute(''' select distinct k.*, count(*) as bundle_count, o.cpu_name from sentry_dsymsdk k, sentry_dsymbundle b, sentry_dsymobject o where b.sdk_id = k.id and b.object_id = o.id %s group by k.id, k.sdk_name, o.cpu_name ''' % filter, args) rv = [] for row in cur.fetchall(): row = dict(zip([x[0] for x in cur.description], row)) ver = '%s.%s.%s' % ( row['version_major'], row['version_minor'], row['version_patchlevel'] ) rv.append({ 'sdk_name': row['sdk_name'], 'version': ver, 'build': row['version_build'], 'bundle_count': row['bundle_count'], 'cpu_name': row['cpu_name'], }) return sorted(rv, key=lambda 
x: (x['sdk_name'], x['version'], x['build'], x['cpu_name'])) class DSymSDK(Model): __core__ = False dsym_type = models.CharField(max_length=20, db_index=True) sdk_name = models.CharField(max_length=20) version_major = models.IntegerField() version_minor = models.IntegerField() version_patchlevel = models.IntegerField() version_build = models.CharField(max_length=40) objects = DSymSDKManager() class Meta: app_label = 'sentry' db_table = 'sentry_dsymsdk' index_together = [ ('version_major', 'version_minor', 'version_patchlevel', 'version_build'), ] class DSymObject(Model): __core__ = False cpu_name = models.CharField(max_length=40) object_path = models.TextField(db_index=True) uuid = models.CharField(max_length=36, db_index=True) vmaddr = BoundedBigIntegerField(null=True) vmsize = BoundedBigIntegerField(null=True) class Meta: app_label = 'sentry' db_table = 'sentry_dsymobject' class DSymBundle(Model): __core__ = False sdk = FlexibleForeignKey('sentry.DSymSDK') object = FlexibleForeignKey('sentry.DSymObject') class Meta: app_label = 'sentry' db_table = 'sentry_dsymbundle' class DSymSymbolManager(BaseManager): def bulk_insert(self, items): db = router.db_for_write(DSymSymbol) items = list(items) if not items: return # On SQLite we don't do this. Two reasons: one, it does not # huge amounts of system symbols into sqlite anyways. 
secondly # because of the low parameter limit if not is_sqlite(): try: with transaction.atomic(using=db): cur = connection.cursor() cur.execute(''' insert into sentry_dsymsymbol (object_id, address, symbol) values %s ''' % ', '.join(['(%s, %s, %s)'] * len(items)), list(chain(*items))) cur.close() return except IntegrityError: pass cur = connection.cursor() for item in items: cur.execute(''' insert into sentry_dsymsymbol (object_id, address, symbol) select %(object_id)s, %(address)s, %(symbol)s where not exists ( select 1 from sentry_dsymsymbol where object_id = %(object_id)s and address = %(address)s); ''', { 'object_id': item[0], 'address': item[1], 'symbol': item[2], }) cur.close() def lookup_symbol(self, instruction_addr, image_addr, uuid, cpu_name=None, object_path=None, sdk_info=None, image_vmaddr=None): # If we use the "none" dsym type we never return a symbol here. if sdk_info is not None and sdk_info['dsym_type'] == 'none': return instruction_addr = parse_addr(instruction_addr) image_addr = parse_addr(image_addr) addr_abs = None if image_vmaddr is not None: image_vmaddr = parse_addr(image_vmaddr) addr_abs = image_vmaddr + instruction_addr - image_addr addr_rel = instruction_addr - image_addr uuid = six.text_type(uuid).lower() cur = connection.cursor() try: # First try: exact match on uuid (addr_rel) cur.execute(''' select s.symbol from sentry_dsymsymbol s, sentry_dsymobject o where o.uuid = %s and s.object_id = o.id and s.address <= o.vmaddr + %s and s.address >= o.vmaddr order by address desc limit 1; ''', [uuid, addr_rel]) rv = cur.fetchone() if rv: return rv[0] # Second try: exact match on uuid (addr_abs) if addr_abs is not None: cur.execute(''' select s.symbol from sentry_dsymsymbol s, sentry_dsymobject o where o.uuid = %s and s.object_id = o.id and s.address <= %s and s.address >= %s order by address desc limit 1; ''', [uuid, addr_abs, image_vmaddr]) rv = cur.fetchone() if rv: return rv[0] # Third try: exact match on path and arch (addr_rel) if 
sdk_info is None or \ cpu_name is None or \ object_path is None: return cur.execute(''' select s.symbol from sentry_dsymsymbol s, sentry_dsymobject o, sentry_dsymsdk k, sentry_dsymbundle b where b.sdk_id = k.id and b.object_id = o.id and s.object_id = o.id and k.sdk_name = %s and k.dsym_type = %s and k.version_major = %s and k.version_minor = %s and k.version_patchlevel = %s and o.cpu_name = %s and o.object_path = %s and s.address <= o.vmaddr + %s and s.address >= o.vmaddr order by address desc limit 1; ''', [sdk_info['sdk_name'], sdk_info['dsym_type'], sdk_info['version_major'], sdk_info['version_minor'], sdk_info['version_patchlevel'], cpu_name, object_path, addr_rel]) rv = cur.fetchone() if rv: return rv[0] # Fourth try: exact match on path and arch (addr_abs) if addr_abs is not None: cur.execute(''' select s.symbol from sentry_dsymsymbol s, sentry_dsymobject o, sentry_dsymsdk k, sentry_dsymbundle b where b.sdk_id = k.id and b.object_id = o.id and s.object_id = o.id and k.sdk_name = %s and k.dsym_type = %s and k.version_major = %s and k.version_minor = %s and k.version_patchlevel = %s and o.cpu_name = %s and o.object_path = %s and s.address <= %s and s.address >= %s order by address desc limit 1; ''', [sdk_info['sdk_name'], sdk_info['dsym_type'], sdk_info['version_major'], sdk_info['version_minor'], sdk_info['version_patchlevel'], cpu_name, object_path, addr_abs, image_vmaddr]) rv = cur.fetchone() if rv: return rv[0] finally: cur.close() class DSymSymbol(Model): __core__ = False object = FlexibleForeignKey('sentry.DSymObject') address = BoundedBigIntegerField(db_index=True) symbol = models.TextField() objects = DSymSymbolManager() class Meta: app_label = 'sentry' db_table = 'sentry_dsymsymbol' unique_together = [ ('object', 'address'), ] class CommonDSymFile(Model): __core__ = False file = FlexibleForeignKey('sentry.File') object_name = models.TextField() cpu_name = models.CharField(max_length=40) __repr__ = sane_repr('object_name', 'cpu_name', 'uuid') class 
Meta: abstract = True app_label = 'sentry' @property def dsym_type(self): ct = self.file.headers.get('Content-Type').lower() return KNOWN_DSYM_TYPES.get(ct, 'unknown') class ProjectDSymFileManager(BaseManager): def find_missing(self, checksums, project): if not checksums: return[] checksums = [x.lower() for x in checksums] missing = set(checksums) found = ProjectDSymFile.objects.filter( file__checksum__in=checksums, project=project ).values('file__checksum') for values in found: missing.discard(values.values()[0]) return sorted(missing) def find_by_checksums(self, checksums, project): if not checksums: return [] checksums = [x.lower() for x in checksums] return ProjectDSymFile.objects.filter( file__checksum__in=checksums, project=project ) class ProjectDSymFile(CommonDSymFile): project = FlexibleForeignKey('sentry.Project', null=True) uuid = models.CharField(max_length=36) is_global = False objects = ProjectDSymFileManager() class Meta(CommonDSymFile.Meta): unique_together = (('project', 'uuid'),) db_table = 'sentry_projectdsymfile' class GlobalDSymFile(CommonDSymFile): uuid = models.CharField(max_length=36, unique=True) is_global = True class Meta(CommonDSymFile.Meta): db_table = 'sentry_globaldsymfile' def _create_macho_dsym_from_uuid(project, cpu_name, uuid, fileobj, object_name): extra = {} if project is None: cls = GlobalDSymFile file_type = 'global.dsym' else: cls = ProjectDSymFile extra['project'] = project file_type = 'project.dsym' h = hashlib.sha1() while 1: chunk = fileobj.read(16384) if not chunk: break h.update(chunk) checksum = h.hexdigest() fileobj.seek(0, 0) try: rv = cls.objects.get(uuid=uuid, **extra) if rv.file.checksum == checksum: return rv except cls.DoesNotExist: pass else: # The checksum mismatches. In this case we delete the old object # and perform a re-upload. 
rv.delete() file = File.objects.create( name=uuid, type=file_type, headers={ 'Content-Type': 'application/x-mach-binary' }, ) file.putfile(fileobj) try: with transaction.atomic(): rv = cls.objects.create( file=file, uuid=uuid, cpu_name=cpu_name, object_name=object_name, **extra ) except IntegrityError: file.delete() rv = cls.objects.get(uuid=uuid, **extra) resolve_processing_issue( project=project, scope='native', object='dsym:%s' % uuid, ) return rv def create_files_from_macho_zip(fileobj, project=None): scratchpad = tempfile.mkdtemp() try: safe_extract_zip(fileobj, scratchpad) to_create = [] for dirpath, dirnames, filenames in os.walk(scratchpad): for fn in filenames: fn = os.path.join(dirpath, fn) try: uuids = get_macho_uuids(fn) except (IOError, ValueError): # Whatever was contained there, was probably not a # macho file. continue for cpu, uuid in uuids: to_create.append((cpu, uuid, fn)) rv = [] for cpu, uuid, filename in to_create: with open(filename, 'rb') as f: rv.append((_create_macho_dsym_from_uuid( project, cpu, uuid, f, os.path.basename(filename)))) return rv finally: shutil.rmtree(scratchpad) def find_dsym_file(project, image_uuid): image_uuid = image_uuid.lower() try: return ProjectDSymFile.objects.filter( uuid=image_uuid, project=project ).select_related('file').get() except ProjectDSymFile.DoesNotExist: pass try: return GlobalDSymFile.objects.filter( uuid=image_uuid ).select_related('file').get() except GlobalDSymFile.DoesNotExist: return None
true
true
f72cbf17e64a21584865047b98978bd2193a31f9
53,060
py
Python
graphics/basic_plot_functions.py
JCSDA/mpas-jedi
e0780d1fd295912ee4cfb758854c52b6764d4ab9
[ "Apache-2.0" ]
2
2021-09-25T01:20:10.000Z
2021-12-17T18:44:53.000Z
graphics/basic_plot_functions.py
JCSDA/mpas-jedi
e0780d1fd295912ee4cfb758854c52b6764d4ab9
[ "Apache-2.0" ]
null
null
null
graphics/basic_plot_functions.py
JCSDA/mpas-jedi
e0780d1fd295912ee4cfb758854c52b6764d4ab9
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 from copy import deepcopy import cartopy.crs as ccrs import datetime as dt import logging from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() import matplotlib matplotlib.use('AGG') import matplotlib.axes as maxes import matplotlib.cm as cm import matplotlib.colors as colors from matplotlib.colors import BoundaryNorm import matplotlib.pyplot as plt import matplotlib.ticker as mticker from mpl_toolkits.axes_grid1 import make_axes_locatable import numpy as np import plot_utils as pu import var_utils as vu import os _logger = logging.getLogger(__name__) cmGray = plt.cm.get_cmap("gist_gray") cmRainbow = plt.cm.get_cmap("gist_rainbow") cmSpectral = plt.cm.get_cmap("nipy_spectral") cmHeat = plt.cm.get_cmap("gist_heat") cmOcean = plt.cm.get_cmap("ocean") cmNCAR = plt.cm.get_cmap("gist_ncar") WhiteBlack1 = cmGray(np.linspace(1.0,0.0,17)) # white to black (-90 to -74 C) BlackRed = cmHeat(np.linspace(0.0,0.5,10)) #black to red (-74 to -65 C) ROYG = cmSpectral(np.linspace(0.9,0.43,27)) # red, orange, yellow, green, blue (-65 to -39 C) #GreenBlue = cmNCAR(np.linspace(0.05,0.1,8)) # green to blue (-39 to -32 C) #BlueCyan = cmRainbow(np.linspace(0.8,0.6,13)) # blue to cyan (-32 to -20 C) GreenBlueCyan = cmNCAR(np.linspace(0.05,0.2,20)) # green to blue (-39 to -20 C) #WhiteBlack2 = cmGray(np.linspace(0.9,0.0,51)) # white to black (-20 to 30 C) MVW = cmNCAR(np.linspace(0.8,0.98,21)) # magenta to violet to white (-20 to 0 C) WhiteBlack2 = cmGray(np.linspace(0.9,0.0,31)) # white to black (0 to 30 C) #btcolors = np.concatenate((WhiteBlack1, BlackRed, ROYG, GreenBlue, BlueCyan, WhiteBlack2)) #btcolors = np.concatenate((WhiteBlack1, BlackRed, ROYG, GreenBlueCyan, WhiteBlack2)) btcolors = np.concatenate((WhiteBlack1, BlackRed, ROYG, GreenBlueCyan, MVW, WhiteBlack2)) btCMap = colors.ListedColormap(btcolors) #This script includes basic plotting functions. 
distriZooms = {} #Full Earth distriZooms['default'] = { 'cLon': None, 'minLon': -180, 'maxLon': 180, 'minLat': -90, 'maxLat': 90, } distriZooms['abi'] = { 'cLon': -75.2, 'minLon': None, 'maxLon': None, 'minLat': None, 'maxLat': None, } distriZooms['ahi'] = { 'cLon': 140.7, 'minLon': None, 'maxLon': None, 'minLat': None, 'maxLat': None, } def plotDistri(lats,lons,values, \ ObsType,VarName,var_unit,out_name,nstation,levbin, \ dmin=None,dmax=None,dotsize=6,color="rainbow"): #================================================================ #INPUTS: # lats - latitude # lons - longitude # values - values will be plotted # ObsType - observation type # VarName - variable name # var_unit - variable units # out_name - will be included in output file name. It can be experiment name. # nstation - station numbers for sondes. # levbin - plot all levels together (levbin=all); or plot every level. # dmin, dmax - min/max values of colorbars, optional # dotsize - dot size, optional # color - color scheme, optional #================================================================ # For some plots that need to change longitude from [-180,180] to [0,360] # tmp = np.logical_not(lons > 0) # lons[tmp] = lons[tmp] + 360 #set map======================================================================= cLon = distriZooms['default']['cLon'] minLon = distriZooms['default']['minLon'] maxLon = distriZooms['default']['maxLon'] minLat = distriZooms['default']['minLat'] maxLat = distriZooms['default']['maxLat'] for key, val in distriZooms.items(): if key in ObsType: cLon = val['cLon'] minLon = val['minLon'] maxLon = val['maxLon'] minLat = val['minLat'] maxLat = val['maxLat'] if cLon is not None: fig = plt.figure(figsize=(5,5)) ax = fig.add_subplot(projection=ccrs.Orthographic(cLon)) else: fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(projection=ccrs.PlateCarree()) ax.set_global() #draw points onto map ========================================================= if color == "BT": if ("abi" in 
ObsType or "ahi" in ObsType): cm = btCMap if dmin is None: dmin = 183 if dmax is None: dmax = 303 else: cm = plt.cm.get_cmap("gist_ncar") if dmin is None: dmin = 190 if dmax is None: dmax = 270 else: cm = plt.cm.get_cmap(color) finite = np.isfinite(values) if ((("abi" in ObsType or "ahi" in ObsType) and finite.sum() > 4e4) or "model" in ObsType): # option 1: smoothed contours (note: color bar is not quite right) # sc=m.contourf(lons[finite], lats[finite], values[finite], # cm.N, cmap = cm, vmin = dmin, vmax = dmax, # latlon = True, tri = True, extend='both') # option 2: pixel contours # first sort by longitude to avoid bug for cyclic projections in basemap lonsPlot = lons[finite] lonsPlot[lonsPlot > 180.0] -= 360.0 # fixes latitude swap bug for cyclic projections latsPlot = lats[finite] valuesPlot = values[finite] lonSort = np.argsort(lonsPlot) p = plt.pcolor(lonsPlot[lonSort], latsPlot[lonSort], valuesPlot[lonSort], transform = ccrs.PlateCarree(), cmap = cm, vmin = dmin, vmax = dmax, latlon = True, tri = True) else: p=ax.scatter(lons[finite], lats[finite], c=values[finite], transform = ccrs.PlateCarree(), cmap= cm, s = dotsize) ax.gridlines(draw_labels=True, xlocs=np.arange(-180,180,60),linestyle='--') ax.coastlines() divider = make_axes_locatable(ax) cax = divider.append_axes("bottom",size="5%", pad=0.3,axes_class=plt.Axes) #fig.add_axes(cax) plt.colorbar(p,cax=cax,orientation='horizontal') #,cax=cax,ax=ax,orientation='horizontal') #set title =================================================================== if nstation == 0 or ObsType == 'satwind': plt.text(0.5, 1.15, '%s %s %s nlocs:%s' \ %(ObsType,VarName,var_unit,len(values[~np.isnan(values)])), \ horizontalalignment='center', \ fontsize=12, transform = ax.transAxes) else: if ObsType[:6] == 'gnssro': plt.text(0.5, 1.15, '%s %s %s nlocs:%s nprofile:%s' \ %(ObsType,VarName,var_unit,len(values[~np.isnan(values)]),nstation), \ horizontalalignment='center', \ fontsize=12, transform = ax.transAxes) elif ObsType == 
'aircraft': plt.text(0.5, 1.15, '%s %s %s nlocs:%s nflight:%s' \ %(ObsType,VarName,var_unit,len(values[~np.isnan(values)]),nstation), \ horizontalalignment='center', \ fontsize=12, transform = ax.transAxes) else: plt.text(0.5, 1.15, '%s %s %s nlocs:%s nstation:%s' \ %(ObsType,VarName,var_unit,len(values[~np.isnan(values)]),nstation), \ horizontalalignment='center', \ fontsize=12, transform = ax.transAxes) plt.savefig('distri_%s_%s_%s.png'%(VarName,out_name,levbin),dpi=200,bbox_inches='tight') plt.close() def scatterMapFields( lonVals, latVals, fields, filename, minLon = -180., maxLon = 180., minLat = -90., maxLat = 90., cLon = None, projection = 'default', dmin = None, dmax = None, markers = {}, sizes = {}, cmap = 'gist_ncar', cbarType = None, c = {}, logVLim = 1.e-12, ): # setup map cLons = np.asarray([]) lonVals_180 = {} for name in lonVals.keys(): cLon = None # 0 < longitude <= 360 lonVals_360 = deepcopy(lonVals[name]) while np.max(lonVals_360) >= 360.0: lonVals_360[lonVals_360 >= 360.0] -= 360.0 while np.min(lonVals_360) < 0.0: lonVals_360[lonVals_360 < 0.0] += 360.0 # -180 < longitude <= 180 lonVals_180[name] = deepcopy(lonVals_360) lonVals_180[name][lonVals_180[name] > 180.0] -= 360.0 for lon in [lonVals_360, lonVals_180[name]]: if np.max(lon) - np.min(lon) <= 180.0: cLon = 0.5*(np.max(lon) + np.min(lon)) cLons = np.append(cLons, cLon) anycLonNone = np.any([c is None for c in cLons]) if anycLonNone: # plot entire Earth fig = plt.figure(figsize=(5,5)) ax = fig.add_subplot(projection=ccrs.Mollweide(0.0)) else: # plot single projected side of Earth cLon = cLons[0] if cLon > 180.0: cLon-=360.0 fig = plt.figure(figsize=(5,5)) ax = fig.add_subplot(projection=ccrs.Orthographic(cLon)) assert (cbarType is None or cbarType in ['Log', 'SymLog']), \ 'scatterMapFields: invalid cbarType: '+cbarType for name, field in fields.items(): f = c=c.get(name, field) finite = np.isfinite(f) lons = lonVals_180[name][finite] lats = latVals[name][finite] f = f[finite] ## transform to 
pcolormesh and cartopy conventions # longitude monotonically increasing lonSort = np.argsort(lons) lons = lons[lonSort] lats = lats[lonSort] f = f[lonSort] if dmin is None: vmin = f.min() else: vmin = dmin if dmax is None: vmax = f.max() else: vmax = dmax if cbarType is None: norm = None elif cbarType == 'Log': if vmin <= logVLim: vmin = logVLim f[f < vmin] = vmin norm=colors.LogNorm(vmin=vmin, vmax=vmax) elif cbarType == 'SymLog': norm=colors.SymLogNorm(vmin=vmin, vmax=vmax, linthresh=1.e-4*vmax, linscale=1.0, base=10) sc = ax.scatter(lons, lats, c=f, s = sizes.get(name, 1), cmap = cmap, norm = norm, marker = markers.get(name, '.'), linewidth = 0, transform=ccrs.PlateCarree(), ) # show full projection extent ax.set_global() # add coastlines ax.coastlines() divider = make_axes_locatable(ax) cax = divider.append_axes("bottom",size="5%", pad=0.3,axes_class=plt.Axes) cb = plt.colorbar(sc, cax=cax, orientation='horizontal') plt.savefig(filename, dpi=200, bbox_inches='tight') plt.close() def plotTimeserial2D(Stats,xlabeltime,ylevels,VarName): #================================================================ #INPUTS: # Stats - statistics # xlabeltime - time labels for x-axis # ylevels - vertical levels for y-axis # VarName - variable name #================================================================ zgrid = np.loadtxt("/glade/work/jban/pandac/fix_input/graphics/zgrid_v55.txt") fig, ax1 = plt.subplots() xarray = range(len(xlabeltime)) valuemin = np.amin(Stats) valuemax = np.amax(Stats) # yonggangyu introduce epsilon and xi for plotting absolutely zero field, # solving vmin, vcenter, vmax ascending order issue epsilon = 1.e-8 if (valuemin > 0 or valuemax < 0): color = 'rainbow' plt.contourf(xarray,ylevels,Stats,40,vmin=valuemin, vmax=valuemax,cmap=color) xi=-1 else: cmap = 'coolwarm' if ( -valuemin < epsilon and valuemax < epsilon ): xi=1 valuemin = -epsilon valuemax = epsilon elif ( -valuemin < epsilon and valuemax > epsilon ): xi=2 valuemin = -epsilon elif ( 
-valuemin > epsilon and valuemax < epsilon ): xi=3 valuemax = epsilon else: xi=4 #print('xi= '+str(xi)+' valuemin= ',str(valuemin)+' valuemax= ',str(valuemax)) norm = matplotlib.colors.DivergingNorm(vmin=valuemin, vcenter=0, vmax=valuemax) plt.contourf(xarray,ylevels,Stats,40,vmin=valuemin, vmax=valuemax,norm=norm,cmap=cmap) xarray = range(len(xlabeltime)) major_ticks = np.arange(0, 56, 5) ax1.set_yticks(major_ticks) ax1.set_ylim([0,54]) ax1.set_ylabel('Vertical level',fontsize=15) ax2 = ax1.twinx() ax2.set_yticks(major_ticks-1) ax2.set_yticklabels((zgrid[::5]).astype(int)) ax2.set_ylabel('Height (m)',fontsize=13) FCDay = ''.join(VarName.split("_")[1:][:-3]) if (FCDay == 'day0.0'): ax1.set_xlabel('Analysis Time',fontsize=15) ax1.set_xticks(xarray[::4]) ax1.set_xticklabels(xlabeltime[::4],rotation=90) elif (FCDay == 'day0.25'): ax1.set_xlabel( '6h Forecast',fontsize=15) ax1.set_xticks(xarray[::4]) ax1.set_xticklabels(xlabeltime[::4],rotation=90) else: ax1.set_xlabel( 'Lead Time',fontsize=15) plt.colorbar(extend='both',orientation="horizontal",pad=0.2) ax1.grid(True) region = ''.join(VarName.split("_")[2:][:-2]) var = ''.join(VarName.split("_")[3:][:-1]) stats = ''.join(VarName.split("_")[4:]) plt.title(stats+' variable:'+vu.varDictModel[var][1]+'('+ vu.varDictModel[var][0]+') '+region, fontsize = 12) plt.savefig(VarName+'_TS_2d.png',dpi=200,bbox_inches='tight') plt.close() maxLegendEntries = 12 ############################################################################### lenWarnSer = 0 nanWarnSer = 0 def plotSeries(fig, \ linesVals, xVals, \ linesLabel, \ title="", dataLabel="y", \ sciticks=False, logscale= False, signdef=False, \ indepLabel="x", invert_ind_axis=False, \ ny=1, nx=1, nplots=1, iplot=0, \ linesValsMinCI=None, linesValsMaxCI=None, \ dmin=np.NaN, dmax=np.NaN, \ lineAttribOffset=0, \ legend_inside=True, interiorLabels=True): # ARGUMENTS # fig - matplotlib figure object # linesVals - dependent variable (list of arrays) # xVals - independent variable on 
x-axis (array) # linesLabel - legend label for linesVals (list) # title - subplot title, optional # dataLabel - label for linesVals, optional # sciticks - whether linesVals needs scientific formatting for ticks, optional # logscale - y-axis is scaled logarithmically, optional, overrides sciticks # signdef - whether linesVals is positive/negative definite, optional # indepLabel - label for xVals, optional # invert_ind_axis - whether to invert x-axis orientation, optional # ny, nx - number of subplots in x/y direction, optional # nplots - total number of subplots, optional # iplot - this subplot index (starting at 0), optional # linesValsMinCI - minimum error bound for linesVals (list of arrays), optional # linesValsMaxCI - maximum error bound for linesVals (list of arrays), optional # Note: linesValsMinCI and linesValsMaxCI must be specified together # lineAttribOffset - offset for selecting line attributes, optional # dmin, dmax - min/max values of linesVals, optional # legend_inside - whether legend should be placed inside the subplot, optional ax = fig.add_subplot(ny, nx, iplot+1) #title ax.set_title(title,fontsize=5) #add lines plotVals = np.asarray([]) nLines = 0 for iline, lineVals in enumerate(linesVals): if np.all(np.isnan(lineVals)): global nanWarnSer if nanWarnSer==0: _logger.warning("skipping all-NaN data") _logger.warning(title+"; "+indepLabel+"; "+linesLabel[iline]) nanWarnSer=nanWarnSer+1 continue if len(lineVals)!=len(xVals): global lenWarnSer if lenWarnSer==0: _logger.warning("skipping data where len(x)!=len(y)") _logger.warning(title+"; "+indepLabel+"; "+linesLabel[iline]) lenWarnSer=lenWarnSer+1 continue # Plot line for each lineVals that has non-missing data pColor = pu.plotColor(len(linesVals),iline+lineAttribOffset) ax.plot(xVals, lineVals, \ color=pColor, \ label=linesLabel[iline], \ ls=pu.plotLineStyle(len(linesVals),iline+lineAttribOffset), \ linewidth=0.5) nLines += 1 plotVals = np.append(plotVals, lineVals) # Add shaded error regions if 
specified if linesValsMinCI is not None and \ linesValsMaxCI is not None: # test statistical significance versus zero if signdef: significant = np.empty(len(lineVals)) significant[:] = np.NaN else: significant = np.multiply(linesValsMinCI[iline], linesValsMaxCI[iline]) significant = np.array([x if np.isfinite(x) else -1.0 for x in significant]) lineArr = np.array(lineVals) xArr = np.array(xVals) negsiginds = np.array([i for i,x in enumerate(significant) if (x > 0.0 and lineArr[i] < 0.0)],dtype=int) if len(negsiginds) > 0: ax.plot(xArr[negsiginds], lineArr[negsiginds], \ color=pColor, \ ls='', \ marker='v', \ markersize=1.5) possiginds = np.array([i for i,x in enumerate(significant) if (x > 0.0 and lineArr[i] > 0.0)],dtype=int) if len(possiginds) > 0: ax.plot(xArr[possiginds], lineArr[possiginds], \ color=pColor, \ ls='', \ marker='^', \ markersize=1.5) ax.plot(xVals, linesValsMinCI[iline], \ color=pColor, \ alpha=0.4, \ ls='-', \ linewidth=0.5) ax.plot(xVals, linesValsMaxCI[iline], \ color=pColor, \ alpha=0.4, \ ls='-', \ linewidth=0.5) ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \ color=pColor, \ edgecolor=pColor, \ linewidth=0.0, alpha = 0.1) ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \ where=significant > 0.0, \ color=pColor, \ edgecolor=pColor, \ linewidth=0.2, alpha = 0.3) if nLines == 0: ax.tick_params(axis='x',labelbottom=False) ax.tick_params(axis='y',labelleft=False) return # add horizontal zero line for unbounded quantities if not signdef: ax.plot([xVals[0], xVals[-1]], [0., 0.], ls="--", c=".3", \ linewidth=0.7,markersize=0) # standardize x-limits mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,plotVals,signdef) #axes settings ax.xaxis.set_tick_params(labelsize=3) ax.yaxis.set_tick_params(labelsize=3) isLogScale = logscale if logscale: nonzero = np.logical_and(np.greater(np.abs(plotVals), 0.), np.isfinite(plotVals)) if nonzero.sum() > 0: vmin = np.nanmin(np.abs(plotVals[nonzero])) vmax = 
np.nanmax(np.abs(plotVals[nonzero])) if signdef: # log tick labels look bad for single decade if vmax / vmin > 10.0: ax.set_yscale('log') else: isLogScale = False else: ax.set_yscale('symlog') else: isLogScale = False if isLogScale and np.isfinite(maxdval) and maxdval > 0.: ax.set_ylim(None, maxdval) if np.abs(vmin) > 0.: ax.set_ylim(vmin, None) if not isLogScale: if sciticks: ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) if (np.isfinite(mindval) and np.isfinite(maxdval)): ax.set_ylim(mindval,maxdval) if maxdval-mindval < 1.0 or \ maxdval-mindval > 100.0: ax.tick_params(axis='y',rotation=-35) ax.yaxis.get_offset_text().set_fontsize(3) #handle interior subplot ticks/labels ix = int(iplot)%int(nx) iy = int(iplot)/int(nx) if not interiorLabels \ and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )): ax.tick_params(axis='x',labelbottom=False) if interiorLabels or ix == 0: ax.set_xlabel(indepLabel,fontsize=4) if interiorLabels or iy == ny-1: ax.set_ylabel(dataLabel,fontsize=4) #legend if nLines <= maxLegendEntries: if legend_inside: #INSIDE AXES lh = ax.legend(loc='best',fontsize=3,frameon=True,\ framealpha=0.4,ncol=1) lh.get_frame().set_linewidth(0.0) elif ix==nx-1 or iplot==nplots-1: #OUTSIDE AXES ax.legend(loc='upper left',fontsize=3,frameon=False, \ bbox_to_anchor=(1.02, 1), borderaxespad=0) if invert_ind_axis: ax.invert_xaxis() ax.grid() return ############################################################################### lenWarnProf = 0 nanWarnProf = 0 def plotProfile(fig, \ linesVals, yVals, \ linesLabel, \ title="", dataLabel="x", \ sciticks=False, logscale=False, signdef=False, \ indepLabel="y", invert_ind_axis=False, \ ny=1, nx=1, nplots=1, iplot=0, \ linesValsMinCI=None, linesValsMaxCI=None, \ dmin=np.NaN, dmax=np.NaN, \ lineAttribOffset=0, \ legend_inside=True, interiorLabels=True): # ARGUMENTS # fig - matplotlib figure object # linesVals - dependent variable (list of arrays) # yVals - independent 
variable on y-axis (array) # linesLabel - legend label for linesVals (list) # title - subplot title, optional # dataLabel - label for linesVals, optional # sciticks - whether linesVals needs scientific formatting for ticks, optional # logscale - x-axis is scaled logarithmically, optional, overrides sciticks # signdef - whether linesVals is positive/negative definite, optional # indepLabel - label for yVals, optional # invert_ind_axis - whether to invert y-axis orientation, optional # ny, nx - number of subplots in x/y direction, optional # nplots - total number of subplots, optional # iplot - this subplot index (starting at 0), optional # linesValsMinCI - minimum error bound for linesVals (list of arrays), optional # linesValsMaxCI - maximum error bound for linesVals (list of arrays), optional # Note: linesValsMinCI and linesValsMaxCI must be specified together # lineAttribOffset - offset for selecting line attributes, optional # dmin, dmax - min/max values of linesVals, optional # legend_inside - whether legend should be placed inside the subplot, optional ax = fig.add_subplot(ny, nx, iplot+1) #title ax.set_title(title,fontsize=5) #add lines plotVals = np.asarray([]) nLines = 0 for iline, lineVals in enumerate(linesVals): if np.all(np.isnan(lineVals)): global nanWarnProf if nanWarnProf==0: _logger.warning("skipping all-NaN data") _logger.warning(title+"; "+dataLabel+"; "+linesLabel[iline]) nanWarnProf=nanWarnProf+1 continue if len(lineVals)!=len(yVals): global lenWarnProf if lenWarnProf==0: _logger.warning("skipping data where len(x)!=len(y)") _logger.warning(title+"; "+dataLabel+"; "+linesLabel[iline]) lenWarnProf=lenWarnProf+1 continue # Plot line for each lineVals that has non-missing data pColor = pu.plotColor(len(linesVals),iline+lineAttribOffset) ax.plot(lineVals, yVals, \ color=pColor, \ label=linesLabel[iline], \ ls=pu.plotLineStyle(len(linesVals),iline+lineAttribOffset), \ linewidth=0.5) nLines += 1 plotVals = np.append(plotVals,lineVals) # Add shaded 
error regions if specified if linesValsMinCI is not None and \ linesValsMaxCI is not None: # test statistical significance versus zero if signdef: significant = np.empty(len(lineVals)) significant[:] = np.NaN else: significant = np.multiply(linesValsMinCI[iline], linesValsMaxCI[iline]) significant = np.array([x if np.isfinite(x) else -1.0 for x in significant]) lineArr = np.array(lineVals) yArr = np.array(yVals) negsiginds = np.array([i for i,x in enumerate(significant) if (x > 0.0 and lineArr[i] < 0.0)],dtype=int) if len(negsiginds) > 0: ax.plot(lineArr[negsiginds], yArr[negsiginds], \ color=pColor, \ ls='', \ marker='<', \ markersize=1.5) possiginds = np.array([i for i,x in enumerate(significant) if (x > 0.0 and lineArr[i] > 0.0)],dtype=int) if len(possiginds) > 0: ax.plot(lineArr[possiginds], yArr[possiginds], \ color=pColor, \ ls='', \ marker='>', \ markersize=1.5) ax.plot(linesValsMinCI[iline], yVals, \ color=pColor, \ alpha=0.4, \ ls='-', \ linewidth=0.5) ax.plot(linesValsMaxCI[iline], yVals, \ color=pColor, \ alpha=0.4, \ ls='-', \ linewidth=0.5) ax.fill_betweenx(yVals, linesValsMinCI[iline], linesValsMaxCI[iline], \ color=pColor, \ edgecolor=pColor, \ linewidth=0.0, alpha = 0.1) ax.fill_betweenx(yVals, linesValsMinCI[iline], linesValsMaxCI[iline], \ where=significant > 0.0, \ color=pColor, \ edgecolor=pColor, \ linewidth=0.2, alpha = 0.3) if nLines == 0: ax.tick_params(axis='x',labelbottom=False) ax.tick_params(axis='y',labelleft=False) return # add vertical zero line for unbounded quantities if not signdef: ax.plot([0., 0.], [yVals[0], yVals[-1]], ls="--", c=".3", \ linewidth=0.7,markersize=0) # standardize x-limits mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,plotVals,signdef) #axes settings ax.xaxis.set_tick_params(labelsize=3) ax.yaxis.set_tick_params(labelsize=3) isLogScale = logscale if logscale: nonzero = np.logical_and(np.greater(np.abs(plotVals), 0.), np.isfinite(plotVals)) if nonzero.sum() > 0: vmin = np.nanmin(np.abs(plotVals[nonzero])) 
vmax = np.nanmax(np.abs(plotVals[nonzero])) if signdef: # log tick labels look bad for single decade if vmax / vmin > 10.0: ax.set_xscale('log') else: isLogScale = False else: ax.set_xscale('symlog') else: isLogScale = False if isLogScale and np.isfinite(maxdval) and maxdval > 0.: ax.set_xlim(None, maxdval) if np.abs(mindval) > 0.: ax.set_xlim(mindval, None) if not isLogScale: if sciticks: ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0)) if (np.isfinite(mindval) and np.isfinite(maxdval)): ax.set_xlim(mindval,maxdval) if maxdval-mindval < 1.0 or \ maxdval-mindval > 100.0: ax.tick_params(axis='x',rotation=-35) ax.xaxis.get_offset_text().set_fontsize(3) #handle interior subplot ticks/labels ix = int(iplot)%int(nx) iy = int(iplot)/int(nx) if not interiorLabels \ and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )): ax.tick_params(axis='x',labelbottom=False) if interiorLabels or ix == 0: ax.set_xlabel(dataLabel,fontsize=4) if interiorLabels or iy == ny-1: ax.set_ylabel(indepLabel,fontsize=4) #legend if nLines <= maxLegendEntries: if legend_inside: #INSIDE AXES lh = ax.legend(loc='best',fontsize=3,frameon=True,\ framealpha=0.4,ncol=1) lh.get_frame().set_linewidth(0.0) elif ix==nx-1 or iplot==nplots-1: #OUTSIDE AXES ax.legend(loc='upper left',fontsize=3,frameon=False, \ bbox_to_anchor=(1.02, 1), borderaxespad=0) if invert_ind_axis: ax.invert_yaxis() ax.grid() return ############################################################################### lenWarnTS=0 nanWarnTS=0 def plotTimeSeries(fig, \ xsDates, linesVals, \ linesLabel, \ title="", dataLabel="", \ sciticks=False, logscale = False, signdef=False, \ ny=1, nx=1, nplots=1, iplot=0, \ linesValsMinCI=None, linesValsMaxCI=None, \ dmin=np.NaN, dmax=np.NaN, \ lineAttribOffset=0, \ legend_inside=True, interiorLabels=True): # ARGUMENTS # fig - matplotlib figure object # xsDates - date x-values (list/array or list of lists/arrays # of float seconds, dt.timedelta, 
dt.datetime) # linesVals - dependent variable (list of arrays) # linesLabel - legend label for linesVals (list) # title - subplot title, optional # dataLabel - label for linesVals, optional # sciticks - whether linesVals needs scientific formatting for ticks, optional # logscale - y-axis is scaled logarithmically, optional, overrides sciticks # signdef - whether linesVals is positive/negative definite, optional # ny, nx - number of subplots in x/y direction, optional # nplots - total number of subplots, optional # iplot - this subplot index (starting at 0), optional # linesValsMinCI - minimum error bound for linesVals (list of arrays), optional # linesValsMaxCI - maximum error bound for linesVals (list of arrays), optional # Note: linesValsMinCI and linesValsMaxCI must be specified together # lineAttribOffset - offset for selecting line attributes, optional # dmin, dmax - min/max values of linesVals, optional # legend_inside - whether legend should be placed inside the subplot, optional ax = fig.add_subplot(ny, nx, iplot+1) #title ax.set_title(title,fontsize=5) #add lines plotVals = np.asarray([]) nLines = 0 jline = 0 for iline, lineVals in enumerate(linesVals): if np.all(np.isnan(lineVals)): global nanWarnTS if nanWarnTS==0: _logger.warning("skipping all-NaN data") _logger.warning(title+"; "+dataLabel+"; "+linesLabel[iline]) nanWarnTS=nanWarnTS+1 continue #float xVals if isinstance(xsDates[0],(list,np.ndarray)): xVals = pu.TDeltas2Seconds(xsDates[min([iline,len(xsDates)-1])]) else: xVals = pu.TDeltas2Seconds(xsDates) if len(lineVals)!=len(xVals): global lenWarnTS if lenWarnTS==0: _logger.warning("skipping data where len(x)!=len(y)") _logger.warning(title+"; "+dataLabel+"; "+linesLabel[iline]) lenWarnTS=lenWarnTS+1 continue if jline == 0: minX = xVals[0] maxX = xVals[-1] else: minX = min([xVals[0], minX]) maxX = max([xVals[-1], maxX]) jline += 1 # Plot line for each lineVals that has non-missing data pColor = pu.plotColor(len(linesVals),iline+lineAttribOffset) 
ax.plot(xVals, lineVals, \ label=linesLabel[iline], \ color=pColor, \ ls=pu.plotLineStyle(len(linesVals),iline+lineAttribOffset), \ linewidth=0.5) nLines += 1 plotVals = np.append(plotVals, lineVals) # Add shaded CI regions if specified if linesValsMinCI is not None and \ linesValsMaxCI is not None: # test statistical significance versus zero if signdef: significant = np.empty(len(lineVals)) significant[:] = np.NaN else: significant = np.multiply(linesValsMinCI[iline], linesValsMaxCI[iline]) significant = np.array([x if np.isfinite(x) else -1.0 for x in significant]) lineArr = np.array(lineVals) xArr = np.array(xVals) negsiginds = np.array([i for i,x in enumerate(significant) if (x > 0.0 and lineArr[i] < 0.0)],dtype=int) if len(negsiginds) > 0: ax.plot(xArr[negsiginds], lineArr[negsiginds], \ color=pColor, \ ls='', \ marker='v', \ markersize=1.5) possiginds = np.array([i for i,x in enumerate(significant) if (x > 0.0 and lineArr[i] > 0.0)],dtype=int) if len(possiginds) > 0: ax.plot(xArr[possiginds], lineArr[possiginds], \ color=pColor, \ ls='', \ marker='^', \ markersize=1.5) ax.plot(xVals, linesValsMinCI[iline], \ color=pColor, \ alpha=0.4, \ ls='-', \ linewidth=0.5) ax.plot(xVals, linesValsMaxCI[iline], \ color=pColor, \ alpha=0.4, \ ls='-', \ linewidth=0.5) ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \ color=pColor, \ edgecolor=pColor, \ linewidth=0.0, alpha = 0.1) ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \ where=significant > 0.0, \ color=pColor, \ edgecolor=pColor, \ linewidth=0.2, alpha = 0.3) if nLines == 0: ax.tick_params(axis='x',labelbottom=False) ax.tick_params(axis='y',labelleft=False) return # standardize y-limits mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,plotVals,signdef) # add horizontal zero line for unbounded quantities if not signdef: ax.plot([minX, maxX], [0., 0.], ls="--", c=".3", \ linewidth=0.7,markersize=0) #axes settings if isinstance(xsDates[0],(list,np.ndarray)): 
pu.format_x_for_dates(ax, xsDates[0]) else: pu.format_x_for_dates(ax, xsDates) ax.xaxis.set_tick_params(labelsize=3) ax.yaxis.set_tick_params(labelsize=3) isLogScale = logscale if logscale: nonzero = np.logical_and(np.greater(np.abs(plotVals), 0.), np.isfinite(plotVals)) if nonzero.sum() > 0: vmin = np.nanmin(np.abs(plotVals[nonzero])) vmax = np.nanmax(np.abs(plotVals[nonzero])) if signdef: # log tick labels look bad for single decade if vmax / vmin > 10.0: ax.set_yscale('log') else: isLogScale = False else: ax.set_yscale('symlog') else: isLogScale = False if isLogScale and np.isfinite(maxdval) and maxdval > 0.: ax.set_ylim(None, maxdval) if np.abs(vmin) > 0.: ax.set_ylim(vmin, None) if not isLogScale: if sciticks: ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) if (np.isfinite(mindval) and np.isfinite(maxdval)): ax.set_ylim(mindval,maxdval) if maxdval-mindval < 1.0 or \ maxdval-mindval > 100.0: ax.tick_params(axis='y',rotation=-35) ax.yaxis.get_offset_text().set_fontsize(3) ax.grid() #handle interior subplot ticks/labels ix = int(iplot)%int(nx) iy = int(iplot)/int(nx) if not interiorLabels \ and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )): ax.tick_params(axis='x',labelbottom=False) if interiorLabels or ix == 0: ax.set_ylabel(dataLabel,fontsize=4) #legend if nLines <= maxLegendEntries: if legend_inside: #INSIDE AXES nlcol = np.int(np.ceil(np.sqrt(nLines))) lh = ax.legend(loc='best',fontsize=3,frameon=True,\ framealpha=0.4,ncol=nlcol) lh.get_frame().set_linewidth(0.0) elif ix==nx-1 or iplot==nplots-1: #OUTSIDE AXES ax.legend(loc='upper left',fontsize=3,frameon=False, \ bbox_to_anchor=(1.02, 1), borderaxespad=0) return ############################################################################### def plotTimeSeries2D(fig, \ xDates, yVals, contourVals, \ title="", clabel="", \ sciticks=False, logscale=False, signdef=False, \ dataLabel="y", invert_ind_axis=False, \ ny=1, nx=1, nplots=1, iplot=0, \ 
dmin=np.NaN, dmax=np.NaN, interiorLabels=True): # ARGUMENTS # fig - matplotlib figure object # xDates - date x-values (array of float seconds, dt.timedelta, dt.datetime) # yVals - second independent variable # contourVals - dependent variable (2d array) # title - subplot title, optional # clabel - label for dependent variable, optional # sciticks - whether contourVals needs scientific formatting for ticks, optional # logscale - whether contours are spaced logarithmically, optional, overrides sciticks # signdef - whether contourVals is positive/negative definite, optional # dataLabel - label for yVals, optional # invert_ind_axis - whether to invert y-axis orientation, optional # ny, nx - number of subplots in x/y direction, optional # nplots - total number of subplots, optional # iplot - this subplot index (starting at 0), optional # dmin, dmax - min/max values of contourVals, optional ax = fig.add_subplot(ny, nx, iplot+1) if (np.isnan(contourVals)).all(): ax.tick_params(axis='x',labelbottom=False) ax.tick_params(axis='y',labelleft=False) return xVals = pu.TDeltas2Seconds(xDates) # standardize c-limits mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,contourVals,signdef) if signdef: cmapName = 'BuPu' nlevs = 18 # scientific contours cint = contourVals.astype(int) isInt = np.all((contourVals - cint) == 0) if isInt: minscid = np.nanmax(np.array([1., dmin])) else: minscid = maxdval*1.e-5 lognorm = colors.LogNorm(vmin=minscid, vmax=maxdval) else: cmapName = 'seismic' nlevs = 28 # scientific contours lognorm = colors.SymLogNorm(vmin=mindval, vmax=maxdval, linthresh=1.e-3*maxdval, linscale=1.3, base=10) # plot contour # option 1: smoothed contours #cp = ax.contourf(xVals, yVals, contourVals, nlevs, cmap=cmapName, extend='both', \ # vmin=mindval, vmax=maxdval) # option 2: pixel contours cmap = plt.get_cmap(cmapName) cmap.set_bad(color = 'k', alpha = 1.0) if logscale: norm = lognorm else: levels = mticker.MaxNLocator(nbins=nlevs).tick_values(mindval,maxdval) norm = 
BoundaryNorm(levels, ncolors=cmap.N, clip=True) xVals_pcolor, yVals_pcolor = transformXY_for_pcolor(xVals,yVals) cp = ax.pcolormesh(xVals_pcolor, yVals_pcolor, contourVals, cmap=cmap, norm=norm) #title ax.set_title(title,fontsize=5) #axes settings pu.format_x_for_dates(ax, xDates) ax.xaxis.set_tick_params(labelsize=3) ax.yaxis.set_tick_params(labelsize=3) #handle interior subplot ticks/labels ix = int(iplot)%int(nx) iy = int(iplot)/int(nx) if not interiorLabels \ and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )): ax.tick_params(axis='x',labelbottom=False) if interiorLabels or ix == 0: ax.set_ylabel(dataLabel,fontsize=4) if interiorLabels or ix == nx-1: #colorbar m = plt.cm.ScalarMappable(cmap=cmap) m.set_array(contourVals) m.set_norm(norm) if (np.isfinite(mindval) and np.isfinite(maxdval) and not logscale): m.set_clim(mindval,maxdval) cb = plt.colorbar(m, ax=ax) #scientific formatting if sciticks and not logscale: cb.ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) cb.ax.yaxis.get_offset_text().set_fontsize(3) cb.ax.tick_params(labelsize=3) cb.set_label(clabel,fontsize=5) if invert_ind_axis: ax.invert_yaxis() # optionally add a grid #ax.grid() return ############################################################################### def transformXY_for_pcolor(xs,ys): # adjust centered x and y values to edges to work with pcolormesh # note: works best for regularly spaced data xs_diff = xs[1] - xs[0] # extend xs by 2 # fill in first endpoint xs_extend = [xs[0]-xs_diff] # fill in internal values for x in xs: xs_extend.append(x) # fill in last endpoint xs_extend.append(xs_extend[-1]+(xs[-1]-xs[-2])) # calculate the midpoints xs_pcolormesh_midpoints = [] for ii, x in enumerate(xs_extend[:-1]): xs_pcolormesh_midpoints.append(x+0.5*(xs_extend[ii+1] - xs_extend[ii])) ys_diff = ys[1] - ys[0] # extend ys by 2 # fill in first endpoint ys_extend = [ys[0]-ys_diff] # fill in internal values for y in ys: ys_extend.append(y) 
# fill in last endpoint ys_extend.append(ys_extend[-1]+(ys[-1]-ys[-2])) # calculate the midpoints ys_pcolormesh_midpoints = [] for ii, y in enumerate(ys_extend[:-1]): ys_pcolormesh_midpoints.append(y+0.5*(ys_extend[ii+1] - ys_extend[ii])) return xs_pcolormesh_midpoints, ys_pcolormesh_midpoints ############################################################################### lenWarnPDF = 0 nanWarnPDF = 0 def plotPDF(fig, countsVals, xVals, countsLabel, title="", indepLabel="x", ny=1, nx=1, nplots=1, iplot=0, lineAttribOffset=1, legend_inside=True, interiorLabels=True): # ARGUMENTS # fig - matplotlib figure object # countsVals - list of arrays, each containing counts across xVals # xVals - independent variable on x-axis (array) # countsLabel - legend label for countsVals (list) # title - subplot title, optional # indepLabel - label for xVals, optional # ny, nx - number of subplots in x/y direction, optional # nplots - total number of subplots, optional # iplot - this subplot index (starting at 0), optional # lineAttribOffset - offset for selecting line attributes, optional # legend_inside - whether legend should be placed inside the subplot, optional ax = fig.add_subplot(ny, nx, iplot+1) #title ax.set_title(title,fontsize=5) #add counts plotVals = [] nPDFs = 0 for ihist, countVals in enumerate(countsVals): if np.all(np.isnan(countVals)): global nanWarnPDF if nanWarnPDF==0: _logger.warning("skipping all-NaN data") _logger.warning(title+"; "+indepLabel+"; "+countsLabel[ihist]) nanWarnPDF=nanWarnPDF+1 continue if len(countVals)!=len(xVals): global lenWarnPDF if lenWarnPDF==0: _logger.warning("skipping data where len(x)!=len(y)") _logger.warning(title+"; "+indepLabel+"; "+countsLabel[ihist]) lenWarnPDF=lenWarnPDF+1 continue # Plot line for each countVals that has non-missing data # assume constant dx between bins dx = xVals[1] - xVals[0] ax.plot(xVals, np.divide(countVals,np.sum(countVals)*dx), color=pu.plotColor(len(countsVals),ihist+lineAttribOffset), 
label=countsLabel[ihist], ls=pu.plotLineStyle(len(countsVals),ihist+lineAttribOffset), linewidth=0.5) nPDFs = nPDFs + 1 plotVals.append(countVals) if nPDFs == 0: ax.tick_params(axis='x',labelbottom=False) ax.tick_params(axis='y',labelleft=False) return # add a standard normal pdf from scipy.stats import norm ax.plot(xVals, norm.pdf(xVals), color='k', ls='-', linewidth=0.35, label='N(0,1)' ) #axes settings ax.xaxis.set_tick_params(labelsize=3) ax.yaxis.set_tick_params(labelsize=3) plt.yscale('log') ax.set_ylim(bottom=1.e-6) #handle interior subplot ticks/labels ix = int(iplot)%int(nx) iy = int(iplot)/int(nx) if not interiorLabels \ and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )): ax.tick_params(axis='x',labelbottom=False) if interiorLabels or ix == 0: ax.set_xlabel(indepLabel,fontsize=4) ax.set_ylabel('PDF',fontsize=4) #legend if legend_inside: #INSIDE AXES lh = ax.legend(loc='best',fontsize=3,frameon=True,\ framealpha=0.4,ncol=1) lh.get_frame().set_linewidth(0.0) elif ix==nx-1 or iplot==nplots-1: #OUTSIDE AXES ax.legend(loc='upper left',fontsize=3,frameon=False, \ bbox_to_anchor=(1.02, 1), borderaxespad=0) ax.grid() return ############################################################################### lenWarnRamp = 0 nanWarnRamp = 0 def plotfitRampComposite(fig, xVals, countVals, meanVals, rmsVals, stdVals, title="", dataLabel="y", \ indepLabel="x", ny=1, nx=1, nplots=1, iplot=0, lineAttribOffset=1, legend_inside=True, interiorLabels=True): # ARGUMENTS # fig - matplotlib figure object # countVals - Count of quantity (array) # meanVals - Mean of quantity (array) # rmsVals - RMS of quantity (array) # stdVals - STD of quantity (array) # xVals - independent variable on x-axis (array) # title - subplot title, optional # dataLabel - label for y-axis, optional # indepLabel - label for xVals, optional # ny, nx - number of subplots in x/y direction, optional # nplots - total number of subplots, optional # iplot - this 
subplot index (starting at 0), optional # lineAttribOffset - offset for selecting line attributes, optional # legend_inside - whether legend should be placed inside the subplot, optional ax = fig.add_subplot(ny, nx, iplot+1) ix = int(iplot)%int(nx) iy = int(iplot)/int(nx) #title ax.set_title(title,fontsize=5) #add lines plotVals = [] nLines = 0 linesLabel = ['RMS','STD','Mean'] for iline, lineVals in enumerate([rmsVals,stdVals,meanVals]): if np.all(np.isnan(lineVals)): global nanWarnRamp if nanWarnRamp==0: _logger.warning("skipping all-NaN data") _logger.warning(title+"; "+indepLabel+"; "+linesLabel[iline]) nanWarnRamp=nanWarnRamp+1 continue if len(lineVals)!=len(xVals): global lenWarnRamp if lenWarnRamp==0: _logger.warning("skipping data where len(x)!=len(y)") _logger.warning(title+"; "+indepLabel+"; "+linesLabel[iline]) lenWarnRamp=lenWarnRamp+1 continue # Plot line for each lineVals that has non-missing data pColor = pu.plotColor(4,iline+lineAttribOffset) ax.plot(xVals, lineVals, color=pColor, label=linesLabel[iline], ls=pu.plotLineStyle(4,iline+lineAttribOffset), linewidth=0.6) nLines += 1 plotVals.append(lineVals) if nLines == 0: ax.tick_params(axis='x',labelbottom=False) ax.tick_params(axis='y',labelleft=False) return # Add fit for stdVals here using info from countVals ind0 = np.argmax(countVals) indexMaxX4Std = 0 for ii, std in enumerate(stdVals): if np.isfinite(std): indexMaxX4Std = ii indexMaxX = indexMaxX4Std maxCount = 0 for ii, count in enumerate(countVals): if count > maxCount: maxCount = count if count < 0.002*maxCount: indexMaxX = ii break if indexMaxX > indexMaxX4Std: ind1 = np.argmax(stdVals[0:indexMaxX4Std]) else: ind1 = np.argmax(stdVals[0:indexMaxX]) weights = [0.2]*(ind1-ind0+1) weights[0] = 1.0 p = np.polyfit(xVals[ind0:ind1+1],stdVals[ind0:ind1+1],1, w=weights) X0 = xVals[ind0] ERR0 = X0 * p[0] + p[1] # X1 = xVals[ind1] # ERR1 = X1 * p[0] + p[1] ERR1 = stdVals[ind1] X1 = (ERR1 - p[1]) / p[0] ERRfitDict = { 'bu':{ 'X': [round(X0,2), 
round(X1,2)], 'ERR': [round(ERR0,2), round(ERR1,2)], }, 'YAML':{ 'X0': [round(X0,2)], 'X1': [round(X1,2)], 'ERR0': [round(ERR0,2)], 'ERR1': [round(ERR1,2)], }, } fitX = np.asarray([0.0] + ERRfitDict['bu']['X'] + [xVals[indexMaxX4Std]]) fitERR = np.asarray([ERR0] + ERRfitDict['bu']['ERR'] + [ERR1]) plotVals.append(fitERR) pColor = pu.plotColor(4,1+lineAttribOffset) ax.plot(fitX, fitERR, color=pColor, label='Fit-STD', ls='-.', linewidth=1.2, marker='+', ms=1.5 ) #axes settings ax.xaxis.set_tick_params(labelsize=3) ax.yaxis.set_tick_params(labelsize=3) # standardize x-limits mindval, maxdval = pu.get_clean_ax_limits(plotVals=plotVals) if (np.isfinite(mindval) and np.isfinite(maxdval)): ax.set_ylim(mindval,maxdval) #handle interior subplot ticks/labels if not interiorLabels \ and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )): ax.tick_params(axis='x',labelbottom=False) if interiorLabels or ix == 0: ax.set_xlabel(indepLabel,fontsize=4) if interiorLabels or iy == ny-1: ax.set_ylabel(dataLabel,fontsize=4) #legend if legend_inside: #INSIDE AXES lh = ax.legend(loc='best',fontsize=3,frameon=True,\ framealpha=0.4,ncol=1) lh.get_frame().set_linewidth(0.0) elif ix==nx-1 or iplot==nplots-1: #OUTSIDE AXES ax.legend(loc='upper left',fontsize=3,frameon=False, \ bbox_to_anchor=(1.02, 1), borderaxespad=0) ax.grid() # Add count on RHS y-axis ax2 = ax.twinx() color = 'black' if interiorLabels or ix == nx: ax2.set_ylabel('Count',fontsize=4,color=color) ax2.plot(xVals[:indexMaxX4Std], countVals[:indexMaxX4Std], color=color, label='Count', ls=':', linewidth=0.5) ax2.tick_params(axis='y', labelcolor=color) ax2.yaxis.set_tick_params(labelsize=3) plt.yscale('log') ax2.set_ylim(bottom=100.) return ERRfitDict
35.827144
115
0.561006
from copy import deepcopy import cartopy.crs as ccrs import datetime as dt import logging from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() import matplotlib matplotlib.use('AGG') import matplotlib.axes as maxes import matplotlib.cm as cm import matplotlib.colors as colors from matplotlib.colors import BoundaryNorm import matplotlib.pyplot as plt import matplotlib.ticker as mticker from mpl_toolkits.axes_grid1 import make_axes_locatable import numpy as np import plot_utils as pu import var_utils as vu import os _logger = logging.getLogger(__name__) cmGray = plt.cm.get_cmap("gist_gray") cmRainbow = plt.cm.get_cmap("gist_rainbow") cmSpectral = plt.cm.get_cmap("nipy_spectral") cmHeat = plt.cm.get_cmap("gist_heat") cmOcean = plt.cm.get_cmap("ocean") cmNCAR = plt.cm.get_cmap("gist_ncar") WhiteBlack1 = cmGray(np.linspace(1.0,0.0,17)) BlackRed = cmHeat(np.linspace(0.0,0.5,10)) ROYG = cmSpectral(np.linspace(0.9,0.43,27)) )) WhiteBlack2 = cmGray(np.linspace(0.9,0.0,31)) btcolors = np.concatenate((WhiteBlack1, BlackRed, ROYG, GreenBlueCyan, MVW, WhiteBlack2)) btCMap = colors.ListedColormap(btcolors) distriZooms = {} distriZooms['default'] = { 'cLon': None, 'minLon': -180, 'maxLon': 180, 'minLat': -90, 'maxLat': 90, } distriZooms['abi'] = { 'cLon': -75.2, 'minLon': None, 'maxLon': None, 'minLat': None, 'maxLat': None, } distriZooms['ahi'] = { 'cLon': 140.7, 'minLon': None, 'maxLon': None, 'minLat': None, 'maxLat': None, } def plotDistri(lats,lons,values, \ ObsType,VarName,var_unit,out_name,nstation,levbin, \ dmin=None,dmax=None,dotsize=6,color="rainbow"): cLon = distriZooms['default']['cLon'] minLon = distriZooms['default']['minLon'] maxLon = distriZooms['default']['maxLon'] minLat = distriZooms['default']['minLat'] maxLat = distriZooms['default']['maxLat'] for key, val in distriZooms.items(): if key in ObsType: cLon = val['cLon'] minLon = val['minLon'] maxLon = val['maxLon'] minLat = val['minLat'] maxLat = val['maxLat'] if cLon is 
not None: fig = plt.figure(figsize=(5,5)) ax = fig.add_subplot(projection=ccrs.Orthographic(cLon)) else: fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(projection=ccrs.PlateCarree()) ax.set_global() if color == "BT": if ("abi" in ObsType or "ahi" in ObsType): cm = btCMap if dmin is None: dmin = 183 if dmax is None: dmax = 303 else: cm = plt.cm.get_cmap("gist_ncar") if dmin is None: dmin = 190 if dmax is None: dmax = 270 else: cm = plt.cm.get_cmap(color) finite = np.isfinite(values) if ((("abi" in ObsType or "ahi" in ObsType) and finite.sum() > 4e4) or "model" in ObsType): lonsPlot = lons[finite] lonsPlot[lonsPlot > 180.0] -= 360.0 latsPlot = lats[finite] valuesPlot = values[finite] lonSort = np.argsort(lonsPlot) p = plt.pcolor(lonsPlot[lonSort], latsPlot[lonSort], valuesPlot[lonSort], transform = ccrs.PlateCarree(), cmap = cm, vmin = dmin, vmax = dmax, latlon = True, tri = True) else: p=ax.scatter(lons[finite], lats[finite], c=values[finite], transform = ccrs.PlateCarree(), cmap= cm, s = dotsize) ax.gridlines(draw_labels=True, xlocs=np.arange(-180,180,60),linestyle='--') ax.coastlines() divider = make_axes_locatable(ax) cax = divider.append_axes("bottom",size="5%", pad=0.3,axes_class=plt.Axes) plt.colorbar(p,cax=cax,orientation='horizontal') if nstation == 0 or ObsType == 'satwind': plt.text(0.5, 1.15, '%s %s %s nlocs:%s' \ %(ObsType,VarName,var_unit,len(values[~np.isnan(values)])), \ horizontalalignment='center', \ fontsize=12, transform = ax.transAxes) else: if ObsType[:6] == 'gnssro': plt.text(0.5, 1.15, '%s %s %s nlocs:%s nprofile:%s' \ %(ObsType,VarName,var_unit,len(values[~np.isnan(values)]),nstation), \ horizontalalignment='center', \ fontsize=12, transform = ax.transAxes) elif ObsType == 'aircraft': plt.text(0.5, 1.15, '%s %s %s nlocs:%s nflight:%s' \ %(ObsType,VarName,var_unit,len(values[~np.isnan(values)]),nstation), \ horizontalalignment='center', \ fontsize=12, transform = ax.transAxes) else: plt.text(0.5, 1.15, '%s %s %s nlocs:%s nstation:%s' \ 
%(ObsType,VarName,var_unit,len(values[~np.isnan(values)]),nstation), \ horizontalalignment='center', \ fontsize=12, transform = ax.transAxes) plt.savefig('distri_%s_%s_%s.png'%(VarName,out_name,levbin),dpi=200,bbox_inches='tight') plt.close() def scatterMapFields( lonVals, latVals, fields, filename, minLon = -180., maxLon = 180., minLat = -90., maxLat = 90., cLon = None, projection = 'default', dmin = None, dmax = None, markers = {}, sizes = {}, cmap = 'gist_ncar', cbarType = None, c = {}, logVLim = 1.e-12, ): cLons = np.asarray([]) lonVals_180 = {} for name in lonVals.keys(): cLon = None lonVals_360 = deepcopy(lonVals[name]) while np.max(lonVals_360) >= 360.0: lonVals_360[lonVals_360 >= 360.0] -= 360.0 while np.min(lonVals_360) < 0.0: lonVals_360[lonVals_360 < 0.0] += 360.0 lonVals_180[name] = deepcopy(lonVals_360) lonVals_180[name][lonVals_180[name] > 180.0] -= 360.0 for lon in [lonVals_360, lonVals_180[name]]: if np.max(lon) - np.min(lon) <= 180.0: cLon = 0.5*(np.max(lon) + np.min(lon)) cLons = np.append(cLons, cLon) anycLonNone = np.any([c is None for c in cLons]) if anycLonNone: fig = plt.figure(figsize=(5,5)) ax = fig.add_subplot(projection=ccrs.Mollweide(0.0)) else: cLon = cLons[0] if cLon > 180.0: cLon-=360.0 fig = plt.figure(figsize=(5,5)) ax = fig.add_subplot(projection=ccrs.Orthographic(cLon)) assert (cbarType is None or cbarType in ['Log', 'SymLog']), \ 'scatterMapFields: invalid cbarType: '+cbarType for name, field in fields.items(): f = c=c.get(name, field) finite = np.isfinite(f) lons = lonVals_180[name][finite] lats = latVals[name][finite] f = f[finite] ons[lonSort] lats = lats[lonSort] f = f[lonSort] if dmin is None: vmin = f.min() else: vmin = dmin if dmax is None: vmax = f.max() else: vmax = dmax if cbarType is None: norm = None elif cbarType == 'Log': if vmin <= logVLim: vmin = logVLim f[f < vmin] = vmin norm=colors.LogNorm(vmin=vmin, vmax=vmax) elif cbarType == 'SymLog': norm=colors.SymLogNorm(vmin=vmin, vmax=vmax, linthresh=1.e-4*vmax, 
linscale=1.0, base=10) sc = ax.scatter(lons, lats, c=f, s = sizes.get(name, 1), cmap = cmap, norm = norm, marker = markers.get(name, '.'), linewidth = 0, transform=ccrs.PlateCarree(), ) ax.set_global() ax.coastlines() divider = make_axes_locatable(ax) cax = divider.append_axes("bottom",size="5%", pad=0.3,axes_class=plt.Axes) cb = plt.colorbar(sc, cax=cax, orientation='horizontal') plt.savefig(filename, dpi=200, bbox_inches='tight') plt.close() def plotTimeserial2D(Stats,xlabeltime,ylevels,VarName): zgrid = np.loadtxt("/glade/work/jban/pandac/fix_input/graphics/zgrid_v55.txt") fig, ax1 = plt.subplots() xarray = range(len(xlabeltime)) valuemin = np.amin(Stats) valuemax = np.amax(Stats) epsilon = 1.e-8 if (valuemin > 0 or valuemax < 0): color = 'rainbow' plt.contourf(xarray,ylevels,Stats,40,vmin=valuemin, vmax=valuemax,cmap=color) xi=-1 else: cmap = 'coolwarm' if ( -valuemin < epsilon and valuemax < epsilon ): xi=1 valuemin = -epsilon valuemax = epsilon elif ( -valuemin < epsilon and valuemax > epsilon ): xi=2 valuemin = -epsilon elif ( -valuemin > epsilon and valuemax < epsilon ): xi=3 valuemax = epsilon else: xi=4 norm = matplotlib.colors.DivergingNorm(vmin=valuemin, vcenter=0, vmax=valuemax) plt.contourf(xarray,ylevels,Stats,40,vmin=valuemin, vmax=valuemax,norm=norm,cmap=cmap) xarray = range(len(xlabeltime)) major_ticks = np.arange(0, 56, 5) ax1.set_yticks(major_ticks) ax1.set_ylim([0,54]) ax1.set_ylabel('Vertical level',fontsize=15) ax2 = ax1.twinx() ax2.set_yticks(major_ticks-1) ax2.set_yticklabels((zgrid[::5]).astype(int)) ax2.set_ylabel('Height (m)',fontsize=13) FCDay = ''.join(VarName.split("_")[1:][:-3]) if (FCDay == 'day0.0'): ax1.set_xlabel('Analysis Time',fontsize=15) ax1.set_xticks(xarray[::4]) ax1.set_xticklabels(xlabeltime[::4],rotation=90) elif (FCDay == 'day0.25'): ax1.set_xlabel( '6h Forecast',fontsize=15) ax1.set_xticks(xarray[::4]) ax1.set_xticklabels(xlabeltime[::4],rotation=90) else: ax1.set_xlabel( 'Lead Time',fontsize=15) 
plt.colorbar(extend='both',orientation="horizontal",pad=0.2) ax1.grid(True) region = ''.join(VarName.split("_")[2:][:-2]) var = ''.join(VarName.split("_")[3:][:-1]) stats = ''.join(VarName.split("_")[4:]) plt.title(stats+' variable:'+vu.varDictModel[var][1]+'('+ vu.varDictModel[var][0]+') '+region, fontsize = 12) plt.savefig(VarName+'_TS_2d.png',dpi=200,bbox_inches='tight') plt.close() maxLegendEntries = 12 color=pColor, \ alpha=0.4, \ ls='-', \ linewidth=0.5) ax.plot(xVals, linesValsMaxCI[iline], \ color=pColor, \ alpha=0.4, \ ls='-', \ linewidth=0.5) ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \ color=pColor, \ edgecolor=pColor, \ linewidth=0.0, alpha = 0.1) ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \ where=significant > 0.0, \ color=pColor, \ edgecolor=pColor, \ linewidth=0.2, alpha = 0.3) if nLines == 0: ax.tick_params(axis='x',labelbottom=False) ax.tick_params(axis='y',labelleft=False) return if not signdef: ax.plot([xVals[0], xVals[-1]], [0., 0.], ls="--", c=".3", \ linewidth=0.7,markersize=0) mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,plotVals,signdef) ax.xaxis.set_tick_params(labelsize=3) ax.yaxis.set_tick_params(labelsize=3) isLogScale = logscale if logscale: nonzero = np.logical_and(np.greater(np.abs(plotVals), 0.), np.isfinite(plotVals)) if nonzero.sum() > 0: vmin = np.nanmin(np.abs(plotVals[nonzero])) vmax = np.nanmax(np.abs(plotVals[nonzero])) if signdef: if vmax / vmin > 10.0: ax.set_yscale('log') else: isLogScale = False else: ax.set_yscale('symlog') else: isLogScale = False if isLogScale and np.isfinite(maxdval) and maxdval > 0.: ax.set_ylim(None, maxdval) if np.abs(vmin) > 0.: ax.set_ylim(vmin, None) if not isLogScale: if sciticks: ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) if (np.isfinite(mindval) and np.isfinite(maxdval)): ax.set_ylim(mindval,maxdval) if maxdval-mindval < 1.0 or \ maxdval-mindval > 100.0: ax.tick_params(axis='y',rotation=-35) 
ax.yaxis.get_offset_text().set_fontsize(3) ix = int(iplot)%int(nx) iy = int(iplot)/int(nx) if not interiorLabels \ and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )): ax.tick_params(axis='x',labelbottom=False) if interiorLabels or ix == 0: ax.set_xlabel(indepLabel,fontsize=4) if interiorLabels or iy == ny-1: ax.set_ylabel(dataLabel,fontsize=4) if nLines <= maxLegendEntries: if legend_inside: lh = ax.legend(loc='best',fontsize=3,frameon=True,\ framealpha=0.4,ncol=1) lh.get_frame().set_linewidth(0.0) elif ix==nx-1 or iplot==nplots-1: ax.legend(loc='upper left',fontsize=3,frameon=False, \ bbox_to_anchor=(1.02, 1), borderaxespad=0) if invert_ind_axis: ax.invert_xaxis() ax.grid() return als, \ color=pColor, \ alpha=0.4, \ ls='-', \ linewidth=0.5) ax.plot(linesValsMaxCI[iline], yVals, \ color=pColor, \ alpha=0.4, \ ls='-', \ linewidth=0.5) ax.fill_betweenx(yVals, linesValsMinCI[iline], linesValsMaxCI[iline], \ color=pColor, \ edgecolor=pColor, \ linewidth=0.0, alpha = 0.1) ax.fill_betweenx(yVals, linesValsMinCI[iline], linesValsMaxCI[iline], \ where=significant > 0.0, \ color=pColor, \ edgecolor=pColor, \ linewidth=0.2, alpha = 0.3) if nLines == 0: ax.tick_params(axis='x',labelbottom=False) ax.tick_params(axis='y',labelleft=False) return if not signdef: ax.plot([0., 0.], [yVals[0], yVals[-1]], ls="--", c=".3", \ linewidth=0.7,markersize=0) mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,plotVals,signdef) ax.xaxis.set_tick_params(labelsize=3) ax.yaxis.set_tick_params(labelsize=3) isLogScale = logscale if logscale: nonzero = np.logical_and(np.greater(np.abs(plotVals), 0.), np.isfinite(plotVals)) if nonzero.sum() > 0: vmin = np.nanmin(np.abs(plotVals[nonzero])) vmax = np.nanmax(np.abs(plotVals[nonzero])) if signdef: if vmax / vmin > 10.0: ax.set_xscale('log') else: isLogScale = False else: ax.set_xscale('symlog') else: isLogScale = False if isLogScale and np.isfinite(maxdval) and maxdval > 0.: ax.set_xlim(None, maxdval) if 
np.abs(mindval) > 0.: ax.set_xlim(mindval, None) if not isLogScale: if sciticks: ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0)) if (np.isfinite(mindval) and np.isfinite(maxdval)): ax.set_xlim(mindval,maxdval) if maxdval-mindval < 1.0 or \ maxdval-mindval > 100.0: ax.tick_params(axis='x',rotation=-35) ax.xaxis.get_offset_text().set_fontsize(3) ix = int(iplot)%int(nx) iy = int(iplot)/int(nx) if not interiorLabels \ and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )): ax.tick_params(axis='x',labelbottom=False) if interiorLabels or ix == 0: ax.set_xlabel(dataLabel,fontsize=4) if interiorLabels or iy == ny-1: ax.set_ylabel(indepLabel,fontsize=4) if nLines <= maxLegendEntries: if legend_inside: lh = ax.legend(loc='best',fontsize=3,frameon=True,\ framealpha=0.4,ncol=1) lh.get_frame().set_linewidth(0.0) elif ix==nx-1 or iplot==nplots-1: ax.legend(loc='upper left',fontsize=3,frameon=False, \ bbox_to_anchor=(1.02, 1), borderaxespad=0) if invert_ind_axis: ax.invert_yaxis() ax.grid() return if (x > 0.0 and lineArr[i] > 0.0)],dtype=int) if len(possiginds) > 0: ax.plot(xArr[possiginds], lineArr[possiginds], \ color=pColor, \ ls='', \ marker='^', \ markersize=1.5) ax.plot(xVals, linesValsMinCI[iline], \ color=pColor, \ alpha=0.4, \ ls='-', \ linewidth=0.5) ax.plot(xVals, linesValsMaxCI[iline], \ color=pColor, \ alpha=0.4, \ ls='-', \ linewidth=0.5) ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \ color=pColor, \ edgecolor=pColor, \ linewidth=0.0, alpha = 0.1) ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \ where=significant > 0.0, \ color=pColor, \ edgecolor=pColor, \ linewidth=0.2, alpha = 0.3) if nLines == 0: ax.tick_params(axis='x',labelbottom=False) ax.tick_params(axis='y',labelleft=False) return mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,plotVals,signdef) if not signdef: ax.plot([minX, maxX], [0., 0.], ls="--", c=".3", \ linewidth=0.7,markersize=0) if 
isinstance(xsDates[0],(list,np.ndarray)): pu.format_x_for_dates(ax, xsDates[0]) else: pu.format_x_for_dates(ax, xsDates) ax.xaxis.set_tick_params(labelsize=3) ax.yaxis.set_tick_params(labelsize=3) isLogScale = logscale if logscale: nonzero = np.logical_and(np.greater(np.abs(plotVals), 0.), np.isfinite(plotVals)) if nonzero.sum() > 0: vmin = np.nanmin(np.abs(plotVals[nonzero])) vmax = np.nanmax(np.abs(plotVals[nonzero])) if signdef: if vmax / vmin > 10.0: ax.set_yscale('log') else: isLogScale = False else: ax.set_yscale('symlog') else: isLogScale = False if isLogScale and np.isfinite(maxdval) and maxdval > 0.: ax.set_ylim(None, maxdval) if np.abs(vmin) > 0.: ax.set_ylim(vmin, None) if not isLogScale: if sciticks: ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) if (np.isfinite(mindval) and np.isfinite(maxdval)): ax.set_ylim(mindval,maxdval) if maxdval-mindval < 1.0 or \ maxdval-mindval > 100.0: ax.tick_params(axis='y',rotation=-35) ax.yaxis.get_offset_text().set_fontsize(3) ax.grid() ix = int(iplot)%int(nx) iy = int(iplot)/int(nx) if not interiorLabels \ and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )): ax.tick_params(axis='x',labelbottom=False) if interiorLabels or ix == 0: ax.set_ylabel(dataLabel,fontsize=4) if nLines <= maxLegendEntries: if legend_inside: nlcol = np.int(np.ceil(np.sqrt(nLines))) lh = ax.legend(loc='best',fontsize=3,frameon=True,\ framealpha=0.4,ncol=nlcol) lh.get_frame().set_linewidth(0.0) elif ix==nx-1 or iplot==nplots-1: ax.legend(loc='upper left',fontsize=3,frameon=False, \ bbox_to_anchor=(1.02, 1), borderaxespad=0) return
true
true
f72cbfd0caae91239053996913ba8621fe6047da
636
py
Python
RestaurantReview/manage.py
sehyun-seankim/Django_project_restaurant_review
5d2eb90486f8064aec16538a71c667d830d3db37
[ "MIT" ]
null
null
null
RestaurantReview/manage.py
sehyun-seankim/Django_project_restaurant_review
5d2eb90486f8064aec16538a71c667d830d3db37
[ "MIT" ]
null
null
null
RestaurantReview/manage.py
sehyun-seankim/Django_project_restaurant_review
5d2eb90486f8064aec16538a71c667d830d3db37
[ "MIT" ]
null
null
null
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'RestaurantReview.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
28.909091
80
0.687107
import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'RestaurantReview.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
true
true
f72cbfdf7f7c4f11bbe1601b54f70466e1e3e688
5,321
py
Python
slackclient/_response.py
lovexi/CodingMonkey-Bot-Python-version
52561d2b15a78119769099d304a98f80da53a010
[ "MIT" ]
null
null
null
slackclient/_response.py
lovexi/CodingMonkey-Bot-Python-version
52561d2b15a78119769099d304a98f80da53a010
[ "MIT" ]
null
null
null
slackclient/_response.py
lovexi/CodingMonkey-Bot-Python-version
52561d2b15a78119769099d304a98f80da53a010
[ "MIT" ]
null
null
null
import json import random import math import os from crawling._twitter import twitter_crawling class Response(object): def __init__(self, token): self.name = "" self.token = token self.greetingList = ['Hello {}, welcome to the Equifax Hackathon channel! Have fun :). You can type help for more details!' , 'Nice to see you here, {} ! What can I do for you (please type help!)' , 'I am willing to do anything for you {} ! Type help so I can help you!'] self.help_msg = {"text": 'Don\'t Worry {} ! I will show you how to communicate with me :).', "attachments":[{"pretext": "Command line:", "color": "#36a64f", "text": "hi: Say hello to me, so that I know you are here!"}, {"color": "#36a64f", "text": "print message: I will grab all detailed ID message for you, such as channel id or user id :)"}, {"color": "#e2ffb6", "text": "help: I can show you all commands I can understand :)"}, {"color": "#415677", "text": "show name or nameID: I can know that your target ID"}, {"color": "#b27485", "text": "select dataLocation: I can know where I can grab data for you"} ]} self.select_msg = {"text": "Where do you want to grab personal information for {} ?", "attachments": [{"pretext": "You can choose:", "color": "#36a64f", "text": "Facebook + limits"}, {"color": "#36a64f", "text": "Twitter + limits"}, {"color": "#415677", "text": "Craigslist"} ]} def response(self, data, channel, sc, user): type = data["type"] user_info = sc.api_read("users.info", token = self.token, user = user) username = user_info["user"]["name"] if type == "hello": sc.rtm_send_message(channel, self.greetingList[int(math.floor(random.random()*3))].format(username)) if "user" in data.keys() and data["user"] == user: if (type == "message"): text = data["text"].lower() if (text.startswith("hi")): sc.rtm_send_message(channel, "I am CodingMonkey Bot. 
Nice to meet you here {0}!".format(username)) if (text.startswith("print")): sc.rtm_send_message(channel, data[text[5:].strip()]) if (text.startswith("help")): sc.api_call("chat.postMessage", token = self.token, channel = channel, username = "codingmonkey", text = self.help_msg["text"].format(username), attachments = self.help_msg["attachments"]) if (text.startswith("show")): command_msg = str(text).split(' ') self.name = command_msg[1] sc.api_call("chat.postMessage", token = self.token, channel = channel, username = "codingmonkey", text = self.select_msg["text"].format(username), attachments = self.select_msg["attachments"]) if (text.startswith("select")): command_msg = str(text).split(' ') if (command_msg[1].lower() == "twitter"): twi = twitter_crawling() limits = 5 if len(command_msg) == 3: limits = int(command_msg[2]) twitter_info = json.dumps(twi.spiderInfo(self.name, limits)) sc.api_call("chat.postMessage", token = self.token, channel = channel, username = "codingmonkey", text = "Here are the results in Twitter:", attachments = twitter_info) elif (command_msg[1].lower() == "facebook"): root = os.getcwd() relative_path = "slackclient/data/facebookY.json" abs_path = os.path.join(root, relative_path) with open(abs_path) as facebook_file: facebook_info = json.load(facebook_file) facebook_info = json.dumps(facebook_info) sc.api_call("chat.postMessage", token = self.token, channel = channel, username = "codingmonkey", text = "Here are the results in Facebook:", attachments = facebook_info) elif (command_msg[1].lower() == "craigslist"): root = os.getcwd() relative_path = "slackclient/data/craigslist.json" abs_path = os.path.join(root, relative_path) with open(abs_path) as craigslist_file: craigslist_info = json.load(craigslist_file) craigslist_info = json.dumps(craigslist_info) craigslist_info = craigslist_info.replace("'", "%100") sc.api_call("chat.postMessage", token = self.token, channel = channel, username = "codingmonkey", text = "Here are the results in 
Craigslist:", attachments = craigslist_info)
58.472527
153
0.516256
import json import random import math import os from crawling._twitter import twitter_crawling class Response(object): def __init__(self, token): self.name = "" self.token = token self.greetingList = ['Hello {}, welcome to the Equifax Hackathon channel! Have fun :). You can type help for more details!' , 'Nice to see you here, {} ! What can I do for you (please type help!)' , 'I am willing to do anything for you {} ! Type help so I can help you!'] self.help_msg = {"text": 'Don\'t Worry {} ! I will show you how to communicate with me :).', "attachments":[{"pretext": "Command line:", "color": "#36a64f", "text": "hi: Say hello to me, so that I know you are here!"}, {"color": "#36a64f", "text": "print message: I will grab all detailed ID message for you, such as channel id or user id :)"}, {"color": "#e2ffb6", "text": "help: I can show you all commands I can understand :)"}, {"color": "#415677", "text": "show name or nameID: I can know that your target ID"}, {"color": "#b27485", "text": "select dataLocation: I can know where I can grab data for you"} ]} self.select_msg = {"text": "Where do you want to grab personal information for {} ?", "attachments": [{"pretext": "You can choose:", "color": "#36a64f", "text": "Facebook + limits"}, {"color": "#36a64f", "text": "Twitter + limits"}, {"color": "#415677", "text": "Craigslist"} ]} def response(self, data, channel, sc, user): type = data["type"] user_info = sc.api_read("users.info", token = self.token, user = user) username = user_info["user"]["name"] if type == "hello": sc.rtm_send_message(channel, self.greetingList[int(math.floor(random.random()*3))].format(username)) if "user" in data.keys() and data["user"] == user: if (type == "message"): text = data["text"].lower() if (text.startswith("hi")): sc.rtm_send_message(channel, "I am CodingMonkey Bot. 
Nice to meet you here {0}!".format(username)) if (text.startswith("print")): sc.rtm_send_message(channel, data[text[5:].strip()]) if (text.startswith("help")): sc.api_call("chat.postMessage", token = self.token, channel = channel, username = "codingmonkey", text = self.help_msg["text"].format(username), attachments = self.help_msg["attachments"]) if (text.startswith("show")): command_msg = str(text).split(' ') self.name = command_msg[1] sc.api_call("chat.postMessage", token = self.token, channel = channel, username = "codingmonkey", text = self.select_msg["text"].format(username), attachments = self.select_msg["attachments"]) if (text.startswith("select")): command_msg = str(text).split(' ') if (command_msg[1].lower() == "twitter"): twi = twitter_crawling() limits = 5 if len(command_msg) == 3: limits = int(command_msg[2]) twitter_info = json.dumps(twi.spiderInfo(self.name, limits)) sc.api_call("chat.postMessage", token = self.token, channel = channel, username = "codingmonkey", text = "Here are the results in Twitter:", attachments = twitter_info) elif (command_msg[1].lower() == "facebook"): root = os.getcwd() relative_path = "slackclient/data/facebookY.json" abs_path = os.path.join(root, relative_path) with open(abs_path) as facebook_file: facebook_info = json.load(facebook_file) facebook_info = json.dumps(facebook_info) sc.api_call("chat.postMessage", token = self.token, channel = channel, username = "codingmonkey", text = "Here are the results in Facebook:", attachments = facebook_info) elif (command_msg[1].lower() == "craigslist"): root = os.getcwd() relative_path = "slackclient/data/craigslist.json" abs_path = os.path.join(root, relative_path) with open(abs_path) as craigslist_file: craigslist_info = json.load(craigslist_file) craigslist_info = json.dumps(craigslist_info) craigslist_info = craigslist_info.replace("'", "%100") sc.api_call("chat.postMessage", token = self.token, channel = channel, username = "codingmonkey", text = "Here are the results in 
Craigslist:", attachments = craigslist_info)
true
true
f72cc038fe01f625fd75044fc25d9c661707b934
241
py
Python
fabfile/__init__.py
lem-usp/Bio507
67b8f8f677e6c1f39ad257d456f0cc0cac289022
[ "MIT" ]
null
null
null
fabfile/__init__.py
lem-usp/Bio507
67b8f8f677e6c1f39ad257d456f0cc0cac289022
[ "MIT" ]
null
null
null
fabfile/__init__.py
lem-usp/Bio507
67b8f8f677e6c1f39ad257d456f0cc0cac289022
[ "MIT" ]
null
null
null
from fabric.state import output from .development import * # # Fabric configuration # output['debug'] = False # see full command list def help(): ''' Fabfile documentation ''' local('python -c "import fabfile; help(fabfile)"')
16.066667
54
0.680498
from fabric.state import output from .development import * output['debug'] = False def help(): local('python -c "import fabfile; help(fabfile)"')
true
true
f72cc0e34bd07cf91c3cd084ab5e50132bdbe531
5,036
py
Python
sahara/plugins/vanilla/hadoop2/validation.py
hortonworksqe/sahara
b8edeaf2b6a475728bf9fd2ddc3a860dc6c23270
[ "Apache-2.0" ]
1
2016-04-13T17:07:05.000Z
2016-04-13T17:07:05.000Z
sahara/plugins/vanilla/hadoop2/validation.py
hortonworksqe/sahara
b8edeaf2b6a475728bf9fd2ddc3a860dc6c23270
[ "Apache-2.0" ]
null
null
null
sahara/plugins/vanilla/hadoop2/validation.py
hortonworksqe/sahara
b8edeaf2b6a475728bf9fd2ddc3a860dc6c23270
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.plugins.general import exceptions as ex from sahara.plugins.general import utils as u from sahara.plugins.vanilla.hadoop2 import config_helper as cu from sahara.plugins.vanilla import utils as vu from sahara.utils import general as gu def validate_cluster_creating(pctx, cluster): nn_count = _get_inst_count(cluster, 'namenode') if nn_count != 1: raise ex.InvalidComponentCountException('namenode', 1, nn_count) snn_count = _get_inst_count(cluster, 'secondarynamenode') if snn_count not in [0, 1]: raise ex.InvalidComponentCountException('secondarynamenode', '0 or 1', snn_count) rm_count = _get_inst_count(cluster, 'resourcemanager') if rm_count not in [0, 1]: raise ex.InvalidComponentCountException('resourcemanager', '0 or 1', rm_count) hs_count = _get_inst_count(cluster, 'historyserver') if hs_count not in [0, 1]: raise ex.InvalidComponentCountException('historyserver', '0 or 1', hs_count) nm_count = _get_inst_count(cluster, 'nodemanager') if rm_count == 0: if nm_count > 0: raise ex.RequiredServiceMissingException('resourcemanager', required_by='nodemanager') oo_count = _get_inst_count(cluster, 'oozie') dn_count = _get_inst_count(cluster, 'datanode') if oo_count not in [0, 1]: raise ex.InvalidComponentCountException('oozie', '0 or 1', oo_count) if oo_count == 1: if dn_count < 1: raise ex.RequiredServiceMissingException('datanode', required_by='oozie') if nm_count < 1: raise 
ex.RequiredServiceMissingException('nodemanager', required_by='oozie') if hs_count != 1: raise ex.RequiredServiceMissingException('historyserver', required_by='oozie') rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster) if dn_count < rep_factor: raise ex.InvalidComponentCountException( 'datanode', rep_factor, dn_count, 'Number of datanodes must be not' ' less than dfs.replication.') def validate_additional_ng_scaling(cluster, additional): rm = vu.get_resourcemanager(cluster) scalable_processes = _get_scalable_processes() for ng_id in additional: ng = gu.get_by_id(cluster.node_groups, ng_id) if not set(ng.node_processes).issubset(scalable_processes): msg = "Vanilla plugin cannot scale nodegroup with processes: %s" raise ex.NodeGroupCannotBeScaled(ng.name, msg % ' '.join(ng.node_processes)) if not rm and 'nodemanager' in ng.node_processes: msg = ("Vanilla plugin cannot scale node group with processes " "which have no master-processes run in cluster") raise ex.NodeGroupCannotBeScaled(ng.name, msg) def validate_existing_ng_scaling(pctx, cluster, existing): scalable_processes = _get_scalable_processes() dn_to_delete = 0 for ng in cluster.node_groups: if ng.id in existing: if ng.count > existing[ng.id] and "datanode" in ng.node_processes: dn_to_delete += ng.count - existing[ng.id] if not set(ng.node_processes).issubset(scalable_processes): msg = ("Vanilla plugin cannot scale nodegroup " "with processes: %s") raise ex.NodeGroupCannotBeScaled( ng.name, msg % ' '.join(ng.node_processes)) dn_amount = len(vu.get_datanodes(cluster)) rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster) if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor: msg = ("Vanilla plugin cannot shrink cluster because it would be not " "enough nodes for replicas (replication factor is %s)") raise ex.ClusterCannotBeScaled( cluster.name, msg % rep_factor) def _get_scalable_processes(): return ['datanode', 'nodemanager'] def _get_inst_count(cluster, 
process): return sum([ng.count for ng in u.get_node_groups(cluster, process)])
41.619835
79
0.633241
from sahara.plugins.general import exceptions as ex from sahara.plugins.general import utils as u from sahara.plugins.vanilla.hadoop2 import config_helper as cu from sahara.plugins.vanilla import utils as vu from sahara.utils import general as gu def validate_cluster_creating(pctx, cluster): nn_count = _get_inst_count(cluster, 'namenode') if nn_count != 1: raise ex.InvalidComponentCountException('namenode', 1, nn_count) snn_count = _get_inst_count(cluster, 'secondarynamenode') if snn_count not in [0, 1]: raise ex.InvalidComponentCountException('secondarynamenode', '0 or 1', snn_count) rm_count = _get_inst_count(cluster, 'resourcemanager') if rm_count not in [0, 1]: raise ex.InvalidComponentCountException('resourcemanager', '0 or 1', rm_count) hs_count = _get_inst_count(cluster, 'historyserver') if hs_count not in [0, 1]: raise ex.InvalidComponentCountException('historyserver', '0 or 1', hs_count) nm_count = _get_inst_count(cluster, 'nodemanager') if rm_count == 0: if nm_count > 0: raise ex.RequiredServiceMissingException('resourcemanager', required_by='nodemanager') oo_count = _get_inst_count(cluster, 'oozie') dn_count = _get_inst_count(cluster, 'datanode') if oo_count not in [0, 1]: raise ex.InvalidComponentCountException('oozie', '0 or 1', oo_count) if oo_count == 1: if dn_count < 1: raise ex.RequiredServiceMissingException('datanode', required_by='oozie') if nm_count < 1: raise ex.RequiredServiceMissingException('nodemanager', required_by='oozie') if hs_count != 1: raise ex.RequiredServiceMissingException('historyserver', required_by='oozie') rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster) if dn_count < rep_factor: raise ex.InvalidComponentCountException( 'datanode', rep_factor, dn_count, 'Number of datanodes must be not' ' less than dfs.replication.') def validate_additional_ng_scaling(cluster, additional): rm = vu.get_resourcemanager(cluster) scalable_processes = _get_scalable_processes() for ng_id in additional: ng = 
gu.get_by_id(cluster.node_groups, ng_id) if not set(ng.node_processes).issubset(scalable_processes): msg = "Vanilla plugin cannot scale nodegroup with processes: %s" raise ex.NodeGroupCannotBeScaled(ng.name, msg % ' '.join(ng.node_processes)) if not rm and 'nodemanager' in ng.node_processes: msg = ("Vanilla plugin cannot scale node group with processes " "which have no master-processes run in cluster") raise ex.NodeGroupCannotBeScaled(ng.name, msg) def validate_existing_ng_scaling(pctx, cluster, existing): scalable_processes = _get_scalable_processes() dn_to_delete = 0 for ng in cluster.node_groups: if ng.id in existing: if ng.count > existing[ng.id] and "datanode" in ng.node_processes: dn_to_delete += ng.count - existing[ng.id] if not set(ng.node_processes).issubset(scalable_processes): msg = ("Vanilla plugin cannot scale nodegroup " "with processes: %s") raise ex.NodeGroupCannotBeScaled( ng.name, msg % ' '.join(ng.node_processes)) dn_amount = len(vu.get_datanodes(cluster)) rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster) if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor: msg = ("Vanilla plugin cannot shrink cluster because it would be not " "enough nodes for replicas (replication factor is %s)") raise ex.ClusterCannotBeScaled( cluster.name, msg % rep_factor) def _get_scalable_processes(): return ['datanode', 'nodemanager'] def _get_inst_count(cluster, process): return sum([ng.count for ng in u.get_node_groups(cluster, process)])
true
true
f72cc0fbea74a83a8775b2c4a4948f97cf3aff29
5,959
py
Python
DeepReinforcementLearning/funcs.py
Christoper-Harvey/1st-Capstone
93630a4d5f4a2d939c8b5f74f11b5b33052e3f72
[ "MIT" ]
1
2019-06-13T13:11:52.000Z
2019-06-13T13:11:52.000Z
DeepReinforcementLearning/funcs.py
Christoper-Harvey/1st-Capstone
93630a4d5f4a2d939c8b5f74f11b5b33052e3f72
[ "MIT" ]
null
null
null
DeepReinforcementLearning/funcs.py
Christoper-Harvey/1st-Capstone
93630a4d5f4a2d939c8b5f74f11b5b33052e3f72
[ "MIT" ]
2
2019-04-30T19:14:11.000Z
2019-06-13T13:11:57.000Z
import numpy as np import random import loggers as lg from game import Game, GameState from model import Residual_CNN from agent import Agent, User import config def playMatchesBetweenVersions(env, run_version, player1version, player2version, EPISODES, logger, turns_until_tau0, goes_first = 0): if player1version == -1: player1 = User('player1', env.state_size, env.action_size) else: player1_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE, env.input_shape, env.action_size, config.HIDDEN_CNN_LAYERS) if player1version > 0: player1_network = player1_NN.read(env.name, run_version, player1version) player1_NN.model.set_weights(player1_network.get_weights()) player1 = Agent('player1', env.state_size, env.action_size, config.p1_MCTS_SIMS, config.CPUCT, player1_NN) if player2version == -1: player2 = User('player2', env.state_size, env.action_size) else: player2_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE, env.input_shape, env.action_size, config.HIDDEN_CNN_LAYERS) if player2version > 0: player2_network = player2_NN.read(env.name, run_version, player2version) player2_NN.model.set_weights(player2_network.get_weights()) player2 = Agent('player2', env.state_size, env.action_size, config.p2_MCTS_SIMS, config.CPUCT, player2_NN) scores, memory, points, sp_scores = playMatches(player1, player2, EPISODES, logger, turns_until_tau0, None, goes_first) return (scores, memory, points, sp_scores) def playMatches(player1, player2, EPISODES, logger, turns_until_tau0, memory = None, goes_first = 0): env = Game() scores = {player1.name:0, "drawn": 0, player2.name:0} sp_scores = {'sp':0, "drawn": 0, 'nsp':0} points = {player1.name:[], player2.name:[]} for e in range(EPISODES): logger.info('====================') logger.info('EPISODE %d OF %d', e+1, EPISODES) logger.info('====================') print (str(e+1) + ' ', end='') state = env.reset() done = 0 turn = 0 player1.mcts = None player2.mcts = None if goes_first == 0: player1Starts = random.randint(0,1) * 2 - 1 else: 
player1Starts = goes_first if player1Starts == 1: players = {1:{"agent": player1, "name":player1.name} , -1: {"agent": player2, "name":player2.name} } logger.info(player1.name + ' plays as X') else: players = {1:{"agent": player2, "name":player2.name} , -1: {"agent": player1, "name":player1.name} } logger.info(player2.name + ' plays as X') logger.info('--------------') env.gameState.render(logger) while done == 0: turn = turn + 1 #### Run the MCTS algo and return an action if turn < turns_until_tau0: action, pi, MCTS_value, NN_value = players[state.playerTurn]['agent'].act(state, 1) else: action, pi, MCTS_value, NN_value = players[state.playerTurn]['agent'].act(state, 0) if memory != None: ####Commit the move to memory memory.commit_stmemory(env.identities, state, pi) logger.info('action: %d', action) for r in range(env.grid_shape[0]): logger.info(['----' if x == 0 else '{0:.2f}'.format(np.round(x,2)) for x in pi[env.grid_shape[1]*r : (env.grid_shape[1]*r + env.grid_shape[1])]]) logger.info('MCTS perceived value for %s: %f', state.pieces[str(state.playerTurn)] ,np.round(MCTS_value,2)) logger.info('NN perceived value for %s: %f', state.pieces[str(state.playerTurn)] ,np.round(NN_value,2)) logger.info('====================') ### Do the action state, value, done, _ = env.step(action) #the value of the newState from the POV of the new playerTurn i.e. 
-1 if the previous player played a winning move env.gameState.render(logger) if done == 1: if memory != None: #### If the game is finished, assign the values correctly to the game moves for move in memory.stmemory: if move['playerTurn'] == state.playerTurn: move['value'] = value else: move['value'] = -value memory.commit_ltmemory() if value == 1: logger.info('%s WINS!', players[state.playerTurn]['name']) scores[players[state.playerTurn]['name']] = scores[players[state.playerTurn]['name']] + 1 if state.playerTurn == 1: sp_scores['sp'] = sp_scores['sp'] + 1 else: sp_scores['nsp'] = sp_scores['nsp'] + 1 elif value == -1: logger.info('%s WINS!', players[-state.playerTurn]['name']) scores[players[-state.playerTurn]['name']] = scores[players[-state.playerTurn]['name']] + 1 if state.playerTurn == 1: sp_scores['nsp'] = sp_scores['nsp'] + 1 else: sp_scores['sp'] = sp_scores['sp'] + 1 else: logger.info('DRAW...') scores['drawn'] = scores['drawn'] + 1 sp_scores['drawn'] = sp_scores['drawn'] + 1 pts = state.score points[players[state.playerTurn]['name']].append(pts[0]) points[players[-state.playerTurn]['name']].append(pts[1]) return (scores, memory, points, sp_scores)
41.096552
167
0.545058
import numpy as np import random import loggers as lg from game import Game, GameState from model import Residual_CNN from agent import Agent, User import config def playMatchesBetweenVersions(env, run_version, player1version, player2version, EPISODES, logger, turns_until_tau0, goes_first = 0): if player1version == -1: player1 = User('player1', env.state_size, env.action_size) else: player1_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE, env.input_shape, env.action_size, config.HIDDEN_CNN_LAYERS) if player1version > 0: player1_network = player1_NN.read(env.name, run_version, player1version) player1_NN.model.set_weights(player1_network.get_weights()) player1 = Agent('player1', env.state_size, env.action_size, config.p1_MCTS_SIMS, config.CPUCT, player1_NN) if player2version == -1: player2 = User('player2', env.state_size, env.action_size) else: player2_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE, env.input_shape, env.action_size, config.HIDDEN_CNN_LAYERS) if player2version > 0: player2_network = player2_NN.read(env.name, run_version, player2version) player2_NN.model.set_weights(player2_network.get_weights()) player2 = Agent('player2', env.state_size, env.action_size, config.p2_MCTS_SIMS, config.CPUCT, player2_NN) scores, memory, points, sp_scores = playMatches(player1, player2, EPISODES, logger, turns_until_tau0, None, goes_first) return (scores, memory, points, sp_scores) def playMatches(player1, player2, EPISODES, logger, turns_until_tau0, memory = None, goes_first = 0): env = Game() scores = {player1.name:0, "drawn": 0, player2.name:0} sp_scores = {'sp':0, "drawn": 0, 'nsp':0} points = {player1.name:[], player2.name:[]} for e in range(EPISODES): logger.info('====================') logger.info('EPISODE %d OF %d', e+1, EPISODES) logger.info('====================') print (str(e+1) + ' ', end='') state = env.reset() done = 0 turn = 0 player1.mcts = None player2.mcts = None if goes_first == 0: player1Starts = random.randint(0,1) * 2 - 1 else: 
player1Starts = goes_first if player1Starts == 1: players = {1:{"agent": player1, "name":player1.name} , -1: {"agent": player2, "name":player2.name} } logger.info(player1.name + ' plays as X') else: players = {1:{"agent": player2, "name":player2.name} , -1: {"agent": player1, "name":player1.name} } logger.info(player2.name + ' plays as X') logger.info('--------------') env.gameState.render(logger) while done == 0: turn = turn + 1 t'].act(state, 1) else: action, pi, MCTS_value, NN_value = players[state.playerTurn]['agent'].act(state, 0) if memory != None: logger.info('action: %d', action) for r in range(env.grid_shape[0]): logger.info(['----' if x == 0 else '{0:.2f}'.format(np.round(x,2)) for x in pi[env.grid_shape[1]*r : (env.grid_shape[1]*r + env.grid_shape[1])]]) logger.info('MCTS perceived value for %s: %f', state.pieces[str(state.playerTurn)] ,np.round(MCTS_value,2)) logger.info('NN perceived value for %s: %f', state.pieces[str(state.playerTurn)] ,np.round(NN_value,2)) logger.info('====================') , _ = env.step(action) env.gameState.render(logger) if done == 1: if memory != None: move['value'] = -value memory.commit_ltmemory() if value == 1: logger.info('%s WINS!', players[state.playerTurn]['name']) scores[players[state.playerTurn]['name']] = scores[players[state.playerTurn]['name']] + 1 if state.playerTurn == 1: sp_scores['sp'] = sp_scores['sp'] + 1 else: sp_scores['nsp'] = sp_scores['nsp'] + 1 elif value == -1: logger.info('%s WINS!', players[-state.playerTurn]['name']) scores[players[-state.playerTurn]['name']] = scores[players[-state.playerTurn]['name']] + 1 if state.playerTurn == 1: sp_scores['nsp'] = sp_scores['nsp'] + 1 else: sp_scores['sp'] = sp_scores['sp'] + 1 else: logger.info('DRAW...') scores['drawn'] = scores['drawn'] + 1 sp_scores['drawn'] = sp_scores['drawn'] + 1 pts = state.score points[players[state.playerTurn]['name']].append(pts[0]) points[players[-state.playerTurn]['name']].append(pts[1]) return (scores, memory, points, sp_scores)
true
true
f72cc221951afaa2a1888c4d748a5068c84f56dc
3,916
py
Python
topasgraphsim/src/functions/dp.py
sebasj13/topas-create-graphs
5ccdbcbbe39461917cc015aa59805e518421431c
[ "MIT" ]
1
2021-12-20T10:56:40.000Z
2021-12-20T10:56:40.000Z
topasgraphsim/src/functions/dp.py
sebasj13/topas-create-graphs
5ccdbcbbe39461917cc015aa59805e518421431c
[ "MIT" ]
null
null
null
topasgraphsim/src/functions/dp.py
sebasj13/topas-create-graphs
5ccdbcbbe39461917cc015aa59805e518421431c
[ "MIT" ]
1
2021-12-26T06:29:22.000Z
2021-12-26T06:29:22.000Z
import numpy as np import scipy.integrate as integrate import scipy.interpolate as interpolate def calculate_parameters(axis, dose, cax=False): """ A function to calculate the relevant descriptive parameters of dose profiles. """ interpolated_axis = np.linspace(axis[0], axis[-1], len(axis) * 100) akima_dose_interpolator = interpolate.Akima1DInterpolator(axis, dose) interpolated_dose = np.flip(akima_dose_interpolator.__call__(interpolated_axis)) D0 = ( interpolated_dose[int(len(interpolated_dose) / 2)] + interpolated_dose[int(len(interpolated_dose) / 2) - 1] ) / 2 XL20 = interpolated_axis[: int(len(interpolated_axis) / 2)][ ( np.abs( interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.2 * max(dose) ) ).argmin() ] XL50 = interpolated_axis[: int(len(interpolated_axis) / 2)][ ( np.abs( interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.5 * max(dose) ) ).argmin() ] XL80 = interpolated_axis[: int(len(interpolated_axis) / 2)][ ( np.abs( interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.8 * max(dose) ) ).argmin() ] XR20 = interpolated_axis[int(len(interpolated_axis) / 2) :][ ( np.abs( interpolated_dose[ int(len(interpolated_axis) / 2) : len(interpolated_axis) ] - 0.2 * max(dose) ) ).argmin() ] XR50 = interpolated_axis[int(len(interpolated_axis) / 2) :][ ( np.abs( interpolated_dose[ int(len(interpolated_axis) / 2) : len(interpolated_axis) ] - 0.5 * max(dose) ) ).argmin() ] XR80 = interpolated_axis[int(len(interpolated_axis) / 2) :][ ( np.abs( interpolated_dose[ int(len(interpolated_axis) / 2) : len(interpolated_axis) ] - 0.8 * max(dose) ) ).argmin() ] HWB = round(abs(XR50 - XL50), 3) CAXdev = round(XL50 + 0.5 * HWB, 3) Dose80 = [value for value in dose if value >= 0.8 * max(dose)] if cax == True: return CAXdev flat_krieger = round( max([value for value in dose if value >= 0.95 * max(dose)]) - min([value for value in dose if value >= 0.95 * max(dose)]) / D0, 5, ) flat_stddev = round(np.std(Dose80), 3) if len(Dose80) % 2 != 0: Dose80 = ( Dose80[0 : 
int(len(Dose80) / 2)] + Dose80[int(len(Dose80) / 2) + 1 : len(Dose80)] ) S = round( max( [Dose80[i - 1] / Dose80[len(Dose80) - i] for i in range(1, len(Dose80) + 1)] ), 3, ) Lpenumbra = round(abs(XL80 - XL20 + CAXdev), 3) Rpenumbra = round(abs(XR80 - XR20 + CAXdev), 3) XL20index = np.where(interpolated_axis == XL20)[0][0] XL80index = np.where(interpolated_axis == XL80)[0][0] XR20index = np.where(interpolated_axis == XR20)[0][0] XR80index = np.where(interpolated_axis == XR80)[0][0] Lintegral = round( abs( integrate.simps( interpolated_dose[XL20index:XL80index], interpolated_axis[XL20index:XL80index], ) ), 3, ) Rintegral = round( abs( integrate.simps( interpolated_dose[XR80index:XR20index], interpolated_axis[XR80index:XR20index], ) ), 3, ) if CAXdev > 150: raise Exception return [ HWB, CAXdev, flat_krieger, flat_stddev, S, Lpenumbra, Rpenumbra, Lintegral, Rintegral, ]
27.77305
88
0.516854
import numpy as np import scipy.integrate as integrate import scipy.interpolate as interpolate def calculate_parameters(axis, dose, cax=False): interpolated_axis = np.linspace(axis[0], axis[-1], len(axis) * 100) akima_dose_interpolator = interpolate.Akima1DInterpolator(axis, dose) interpolated_dose = np.flip(akima_dose_interpolator.__call__(interpolated_axis)) D0 = ( interpolated_dose[int(len(interpolated_dose) / 2)] + interpolated_dose[int(len(interpolated_dose) / 2) - 1] ) / 2 XL20 = interpolated_axis[: int(len(interpolated_axis) / 2)][ ( np.abs( interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.2 * max(dose) ) ).argmin() ] XL50 = interpolated_axis[: int(len(interpolated_axis) / 2)][ ( np.abs( interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.5 * max(dose) ) ).argmin() ] XL80 = interpolated_axis[: int(len(interpolated_axis) / 2)][ ( np.abs( interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.8 * max(dose) ) ).argmin() ] XR20 = interpolated_axis[int(len(interpolated_axis) / 2) :][ ( np.abs( interpolated_dose[ int(len(interpolated_axis) / 2) : len(interpolated_axis) ] - 0.2 * max(dose) ) ).argmin() ] XR50 = interpolated_axis[int(len(interpolated_axis) / 2) :][ ( np.abs( interpolated_dose[ int(len(interpolated_axis) / 2) : len(interpolated_axis) ] - 0.5 * max(dose) ) ).argmin() ] XR80 = interpolated_axis[int(len(interpolated_axis) / 2) :][ ( np.abs( interpolated_dose[ int(len(interpolated_axis) / 2) : len(interpolated_axis) ] - 0.8 * max(dose) ) ).argmin() ] HWB = round(abs(XR50 - XL50), 3) CAXdev = round(XL50 + 0.5 * HWB, 3) Dose80 = [value for value in dose if value >= 0.8 * max(dose)] if cax == True: return CAXdev flat_krieger = round( max([value for value in dose if value >= 0.95 * max(dose)]) - min([value for value in dose if value >= 0.95 * max(dose)]) / D0, 5, ) flat_stddev = round(np.std(Dose80), 3) if len(Dose80) % 2 != 0: Dose80 = ( Dose80[0 : int(len(Dose80) / 2)] + Dose80[int(len(Dose80) / 2) + 1 : len(Dose80)] ) S = round( max( 
[Dose80[i - 1] / Dose80[len(Dose80) - i] for i in range(1, len(Dose80) + 1)] ), 3, ) Lpenumbra = round(abs(XL80 - XL20 + CAXdev), 3) Rpenumbra = round(abs(XR80 - XR20 + CAXdev), 3) XL20index = np.where(interpolated_axis == XL20)[0][0] XL80index = np.where(interpolated_axis == XL80)[0][0] XR20index = np.where(interpolated_axis == XR20)[0][0] XR80index = np.where(interpolated_axis == XR80)[0][0] Lintegral = round( abs( integrate.simps( interpolated_dose[XL20index:XL80index], interpolated_axis[XL20index:XL80index], ) ), 3, ) Rintegral = round( abs( integrate.simps( interpolated_dose[XR80index:XR20index], interpolated_axis[XR80index:XR20index], ) ), 3, ) if CAXdev > 150: raise Exception return [ HWB, CAXdev, flat_krieger, flat_stddev, S, Lpenumbra, Rpenumbra, Lintegral, Rintegral, ]
true
true
f72cc242a75bff056fc4182f50f291db178b0519
5,780
py
Python
sonarqube/community/user_groups.py
0x646e78/python-sonarqube-api
c641ab4dd180b4184f2663bd28277aa796b36417
[ "MIT" ]
null
null
null
sonarqube/community/user_groups.py
0x646e78/python-sonarqube-api
c641ab4dd180b4184f2663bd28277aa796b36417
[ "MIT" ]
null
null
null
sonarqube/community/user_groups.py
0x646e78/python-sonarqube-api
c641ab4dd180b4184f2663bd28277aa796b36417
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding:utf-8 -*- # @Author: Jialiang Shi from sonarqube.utils.rest_client import RestClient from sonarqube.utils.config import ( API_USER_GROUPS_SEARCH_ENDPOINT, API_USER_GROUPS_CREATE_ENDPOINT, API_USER_GROUPS_DELETE_ENDPOINT, API_USER_GROUPS_UPDATE_ENDPOINT, API_USER_GROUPS_USERS_ENDPOINT, API_USER_GROUPS_ADD_USER_ENDPOINT, API_USER_GROUPS_REMOVE_USER_ENDPOINT ) class SonarQubeUserGroups(RestClient): """ SonarQube user_groups Operations """ def __init__(self, **kwargs): """ :param kwargs: """ super(SonarQubeUserGroups, self).__init__(**kwargs) def __getitem__(self, name): result = list(self.search_user_groups(q=name)) for group in result: if group['name'] == name: return group def search_user_groups(self, fields=None, q=None): """ Search for user groups. :param fields: Comma-separated list of the fields to be returned in response. All the fields are returned by default. Possible values are for: * name * description * membersCount :param q: Limit search to names that contain the supplied string. :return: """ params = {} if fields: params.update({"f": fields}) page_num = 1 page_size = 1 total = 2 if q: params['q'] = q while page_num * page_size < total: resp = self.get(API_USER_GROUPS_SEARCH_ENDPOINT, params=params) response = resp.json() page_num = response['paging']['pageIndex'] page_size = response['paging']['pageSize'] total = response['paging']['total'] params['p'] = page_num + 1 for group in response['groups']: yield group def create_group(self, group_name, description=None): """ Create a group. :param group_name: Name for the new group. A group name cannot be larger than 255 characters and must be unique. The value 'anyone' (whatever the case) is reserved and cannot be used. :param description: Description for the new group. A group description cannot be larger than 200 characters. 
:return: request response """ params = { 'name': group_name } if description: params.update({'description': description}) return self.post(API_USER_GROUPS_CREATE_ENDPOINT, params=params) def delete_group(self, group_name): """ Delete a group. The default groups cannot be deleted. :param group_name: :return: """ params = { 'name': group_name } self.post(API_USER_GROUPS_DELETE_ENDPOINT, params=params) def update_group(self, group_id, group_name=None, description=None): """ Update a group. :param group_id: Identifier of the group. :param group_name: New optional name for the group. A group name cannot be larger than 255 characters and must be unique. Value 'anyone' (whatever the case) is reserved and cannot be used. If value is empty or not defined, then name is not changed. :param description: New optional description for the group. A group description cannot be larger than 200 characters. If value is not defined, then description is not changed. :return: """ params = {'id': group_id} if group_name: params.update({'name': group_name}) if description: params.update({'description': description}) self.post(API_USER_GROUPS_UPDATE_ENDPOINT, params=params) def add_user_to_group(self, group_name, user_login): """ Add a user to a group. :param group_name: Group name :param user_login: User login :return: """ params = { 'login': user_login, 'name': group_name } self.post(API_USER_GROUPS_ADD_USER_ENDPOINT, params=params) def remove_user_from_group(self, group_name, user_login): """ Remove a user from a group. :param group_name: Group name :param user_login: User login :return: """ params = { 'login': user_login, 'name': group_name } self.post(API_USER_GROUPS_REMOVE_USER_ENDPOINT, params=params) def search_users_belong_to_group(self, group_name, q=None, selected="selected"): """ Search for users with membership information with respect to a group. :param group_name: Group name :param q: Limit search to names or logins that contain the supplied string. 
:param selected: Depending on the value, show only selected items (selected=selected), deselected items (selected=deselected), or all items with their selection status (selected=all).Possible values are for: * all * deselected * selected default value is selected. :return: """ params = { 'name': group_name, 'selected': selected } page_num = 1 page_size = 1 total = 2 if q: params.update({'q': q}) while page_num * page_size < total: resp = self.get(API_USER_GROUPS_USERS_ENDPOINT, params=params) response = resp.json() page_num = response['p'] page_size = response['ps'] total = response['total'] params['p'] = page_num + 1 for user in response['users']: yield user
30.582011
120
0.59654
from sonarqube.utils.rest_client import RestClient from sonarqube.utils.config import ( API_USER_GROUPS_SEARCH_ENDPOINT, API_USER_GROUPS_CREATE_ENDPOINT, API_USER_GROUPS_DELETE_ENDPOINT, API_USER_GROUPS_UPDATE_ENDPOINT, API_USER_GROUPS_USERS_ENDPOINT, API_USER_GROUPS_ADD_USER_ENDPOINT, API_USER_GROUPS_REMOVE_USER_ENDPOINT ) class SonarQubeUserGroups(RestClient): def __init__(self, **kwargs): super(SonarQubeUserGroups, self).__init__(**kwargs) def __getitem__(self, name): result = list(self.search_user_groups(q=name)) for group in result: if group['name'] == name: return group def search_user_groups(self, fields=None, q=None): params = {} if fields: params.update({"f": fields}) page_num = 1 page_size = 1 total = 2 if q: params['q'] = q while page_num * page_size < total: resp = self.get(API_USER_GROUPS_SEARCH_ENDPOINT, params=params) response = resp.json() page_num = response['paging']['pageIndex'] page_size = response['paging']['pageSize'] total = response['paging']['total'] params['p'] = page_num + 1 for group in response['groups']: yield group def create_group(self, group_name, description=None): params = { 'name': group_name } if description: params.update({'description': description}) return self.post(API_USER_GROUPS_CREATE_ENDPOINT, params=params) def delete_group(self, group_name): params = { 'name': group_name } self.post(API_USER_GROUPS_DELETE_ENDPOINT, params=params) def update_group(self, group_id, group_name=None, description=None): params = {'id': group_id} if group_name: params.update({'name': group_name}) if description: params.update({'description': description}) self.post(API_USER_GROUPS_UPDATE_ENDPOINT, params=params) def add_user_to_group(self, group_name, user_login): params = { 'login': user_login, 'name': group_name } self.post(API_USER_GROUPS_ADD_USER_ENDPOINT, params=params) def remove_user_from_group(self, group_name, user_login): params = { 'login': user_login, 'name': group_name } self.post(API_USER_GROUPS_REMOVE_USER_ENDPOINT, 
params=params) def search_users_belong_to_group(self, group_name, q=None, selected="selected"): params = { 'name': group_name, 'selected': selected } page_num = 1 page_size = 1 total = 2 if q: params.update({'q': q}) while page_num * page_size < total: resp = self.get(API_USER_GROUPS_USERS_ENDPOINT, params=params) response = resp.json() page_num = response['p'] page_size = response['ps'] total = response['total'] params['p'] = page_num + 1 for user in response['users']: yield user
true
true
f72cc2a756c43756ba71fb67aa4ae3e1efa74f2f
5,550
py
Python
userbot/modules/locks.py
RiSecID/Auto
d06ef712666a35ddbf0c123dbb86705096cbbb56
[ "Naumen", "Condor-1.1", "MS-PL" ]
1
2020-04-10T13:11:46.000Z
2020-04-10T13:11:46.000Z
userbot/modules/locks.py
RiSecID/Auto
d06ef712666a35ddbf0c123dbb86705096cbbb56
[ "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
userbot/modules/locks.py
RiSecID/Auto
d06ef712666a35ddbf0c123dbb86705096cbbb56
[ "Naumen", "Condor-1.1", "MS-PL" ]
1
2020-12-02T14:59:04.000Z
2020-12-02T14:59:04.000Z
# Copyright (C) 2019 The Raphielscape Company LLC. # # Licensed under the Raphielscape Public License, Version 1.c (the "License"); # you may not use this file except in compliance with the License. from telethon.tl.functions.messages import EditChatDefaultBannedRightsRequest from telethon.tl.types import ChatBannedRights from userbot import CMD_HELP from userbot.events import register @register(outgoing=True, pattern=r"^.lock ?(.*)") async def locks(event): input_str = event.pattern_match.group(1).lower() peer_id = event.chat_id msg = None media = None sticker = None gif = None gamee = None ainline = None gpoll = None adduser = None cpin = None changeinfo = None if input_str == "msg": msg = True what = "messages" elif input_str == "media": media = True what = "media" elif input_str == "sticker": sticker = True what = "stickers" elif input_str == "gif": gif = True what = "GIFs" elif input_str == "game": gamee = True what = "games" elif input_str == "inline": ainline = True what = "inline bots" elif input_str == "poll": gpoll = True what = "polls" elif input_str == "invite": adduser = True what = "invites" elif input_str == "pin": cpin = True what = "pins" elif input_str == "info": changeinfo = True what = "chat info" elif input_str == "all": msg = True media = True sticker = True gif = True gamee = True ainline = True gpoll = True adduser = True cpin = True changeinfo = True what = "everything" else: if not input_str: return await event.edit("`I can't lock nothing !!`") else: return await event.edit(f"`Invalid lock type:` {input_str}") lock_rights = ChatBannedRights( until_date=None, send_messages=msg, send_media=media, send_stickers=sticker, send_gifs=gif, send_games=gamee, send_inline=ainline, send_polls=gpoll, invite_users=adduser, pin_messages=cpin, change_info=changeinfo, ) try: await event.client( EditChatDefaultBannedRightsRequest(peer=peer_id, banned_rights=lock_rights)) await event.edit(f"`Locked {what} for this chat !!`") except BaseException as e: return 
await event.edit( f"`Do I have proper rights for that ??`\n**Error:** {str(e)}") @register(outgoing=True, pattern=r"^.unlock ?(.*)") async def rem_locks(event): input_str = event.pattern_match.group(1).lower() peer_id = event.chat_id msg = None media = None sticker = None gif = None gamee = None ainline = None gpoll = None adduser = None cpin = None changeinfo = None if input_str == "msg": msg = False what = "messages" elif input_str == "media": media = False what = "media" elif input_str == "sticker": sticker = False what = "stickers" elif input_str == "gif": gif = False what = "GIFs" elif input_str == "game": gamee = False what = "games" elif input_str == "inline": ainline = False what = "inline bots" elif input_str == "poll": gpoll = False what = "polls" elif input_str == "invite": adduser = False what = "invites" elif input_str == "pin": cpin = False what = "pins" elif input_str == "info": changeinfo = False what = "chat info" elif input_str == "all": msg = False media = False sticker = False gif = False gamee = False ainline = False gpoll = False adduser = False cpin = False changeinfo = False what = "everything" else: if not input_str: return await event.edit("`I can't unlock nothing !!`") else: return await event.edit(f"`Invalid unlock type:` {input_str}") unlock_rights = ChatBannedRights( until_date=None, send_messages=msg, send_media=media, send_stickers=sticker, send_gifs=gif, send_games=gamee, send_inline=ainline, send_polls=gpoll, invite_users=adduser, pin_messages=cpin, change_info=changeinfo, ) try: await event.client( EditChatDefaultBannedRightsRequest(peer=peer_id, banned_rights=unlock_rights)) await event.edit(f"`Unlocked {what} for this chat !!`") except BaseException as e: return await event.edit( f"`Do I have proper rights for that ??`\n**Error:** {str(e)}") CMD_HELP.update({ "locks": ">`.lock <all (or) type(s)>` or >`.unlock <all (or) type(s)>`" "\nUsage: Allows you to lock/unlock some common message types in the chat." 
"\n[NOTE: Requires proper admin rights in the chat !!]" "\n\nAvailable message types to lock/unlock are: " "\n`all, msg, media, sticker, gif, game, inline, poll, invite, pin, info`" })
29.057592
80
0.544505
from telethon.tl.functions.messages import EditChatDefaultBannedRightsRequest from telethon.tl.types import ChatBannedRights from userbot import CMD_HELP from userbot.events import register @register(outgoing=True, pattern=r"^.lock ?(.*)") async def locks(event): input_str = event.pattern_match.group(1).lower() peer_id = event.chat_id msg = None media = None sticker = None gif = None gamee = None ainline = None gpoll = None adduser = None cpin = None changeinfo = None if input_str == "msg": msg = True what = "messages" elif input_str == "media": media = True what = "media" elif input_str == "sticker": sticker = True what = "stickers" elif input_str == "gif": gif = True what = "GIFs" elif input_str == "game": gamee = True what = "games" elif input_str == "inline": ainline = True what = "inline bots" elif input_str == "poll": gpoll = True what = "polls" elif input_str == "invite": adduser = True what = "invites" elif input_str == "pin": cpin = True what = "pins" elif input_str == "info": changeinfo = True what = "chat info" elif input_str == "all": msg = True media = True sticker = True gif = True gamee = True ainline = True gpoll = True adduser = True cpin = True changeinfo = True what = "everything" else: if not input_str: return await event.edit("`I can't lock nothing !!`") else: return await event.edit(f"`Invalid lock type:` {input_str}") lock_rights = ChatBannedRights( until_date=None, send_messages=msg, send_media=media, send_stickers=sticker, send_gifs=gif, send_games=gamee, send_inline=ainline, send_polls=gpoll, invite_users=adduser, pin_messages=cpin, change_info=changeinfo, ) try: await event.client( EditChatDefaultBannedRightsRequest(peer=peer_id, banned_rights=lock_rights)) await event.edit(f"`Locked {what} for this chat !!`") except BaseException as e: return await event.edit( f"`Do I have proper rights for that ??`\n**Error:** {str(e)}") @register(outgoing=True, pattern=r"^.unlock ?(.*)") async def rem_locks(event): input_str = 
event.pattern_match.group(1).lower() peer_id = event.chat_id msg = None media = None sticker = None gif = None gamee = None ainline = None gpoll = None adduser = None cpin = None changeinfo = None if input_str == "msg": msg = False what = "messages" elif input_str == "media": media = False what = "media" elif input_str == "sticker": sticker = False what = "stickers" elif input_str == "gif": gif = False what = "GIFs" elif input_str == "game": gamee = False what = "games" elif input_str == "inline": ainline = False what = "inline bots" elif input_str == "poll": gpoll = False what = "polls" elif input_str == "invite": adduser = False what = "invites" elif input_str == "pin": cpin = False what = "pins" elif input_str == "info": changeinfo = False what = "chat info" elif input_str == "all": msg = False media = False sticker = False gif = False gamee = False ainline = False gpoll = False adduser = False cpin = False changeinfo = False what = "everything" else: if not input_str: return await event.edit("`I can't unlock nothing !!`") else: return await event.edit(f"`Invalid unlock type:` {input_str}") unlock_rights = ChatBannedRights( until_date=None, send_messages=msg, send_media=media, send_stickers=sticker, send_gifs=gif, send_games=gamee, send_inline=ainline, send_polls=gpoll, invite_users=adduser, pin_messages=cpin, change_info=changeinfo, ) try: await event.client( EditChatDefaultBannedRightsRequest(peer=peer_id, banned_rights=unlock_rights)) await event.edit(f"`Unlocked {what} for this chat !!`") except BaseException as e: return await event.edit( f"`Do I have proper rights for that ??`\n**Error:** {str(e)}") CMD_HELP.update({ "locks": ">`.lock <all (or) type(s)>` or >`.unlock <all (or) type(s)>`" "\nUsage: Allows you to lock/unlock some common message types in the chat." "\n[NOTE: Requires proper admin rights in the chat !!]" "\n\nAvailable message types to lock/unlock are: " "\n`all, msg, media, sticker, gif, game, inline, poll, invite, pin, info`" })
true
true
f72cc5c07d47e87c78a7d4236d54674e5f436c66
230
py
Python
pycones/sponsorship/managers.py
python-spain/PyConES2015
af78ad7f1d7df747a2f5428be87a5b061457dd24
[ "MIT" ]
null
null
null
pycones/sponsorship/managers.py
python-spain/PyConES2015
af78ad7f1d7df747a2f5428be87a5b061457dd24
[ "MIT" ]
null
null
null
pycones/sponsorship/managers.py
python-spain/PyConES2015
af78ad7f1d7df747a2f5428be87a5b061457dd24
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models class SponsorManager(models.Manager): def active(self): return self.get_query_set().filter(active=True).order_by("level")
23
73
0.726087
from __future__ import unicode_literals from django.db import models class SponsorManager(models.Manager): def active(self): return self.get_query_set().filter(active=True).order_by("level")
true
true
f72cc5f2ca3bea87b59576ba3da7939aab82e2af
116
py
Python
URI/1 - INICIANTE/Python/1759 - HoHoHo.py
william-james-pj/LogicaProgramacao
629f746e34da2e829dc7ea2e489ac36bb1b1fb13
[ "MIT" ]
1
2020-04-14T16:48:16.000Z
2020-04-14T16:48:16.000Z
URI/1 - INICIANTE/Python/1759 - HoHoHo.py
william-james-pj/LogicaProgramacao
629f746e34da2e829dc7ea2e489ac36bb1b1fb13
[ "MIT" ]
null
null
null
URI/1 - INICIANTE/Python/1759 - HoHoHo.py
william-james-pj/LogicaProgramacao
629f746e34da2e829dc7ea2e489ac36bb1b1fb13
[ "MIT" ]
null
null
null
n = int(input()) for y in range(0, n): if(y == n-1): print('Ho!') else: print('Ho', end=' ')
19.333333
28
0.413793
n = int(input()) for y in range(0, n): if(y == n-1): print('Ho!') else: print('Ho', end=' ')
true
true