Dataset schema (column name, dtype, and observed range or number of distinct values):

| Column | Type | Values / range |
|---|---|---|
| blob_id | string | length 40 (fixed) |
| directory_id | string | length 40 (fixed) |
| path | string | length 4 to 721 |
| content_id | string | length 40 (fixed) |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 distinct values |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 (fixed) |
| revision_id | string | length 40 (fixed) |
| branch_name | string | 321 distinct values |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 distinct values |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 distinct values |
| src_encoding | string | 26 distinct values |
| language | string | 2 distinct values |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 distinct values |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
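The records below are individual rows with this schema. As a minimal sketch of how such rows could be streamed with the `datasets` library (the dataset's actual Hub id is not given in this dump, so `DATASET_ID` is a placeholder; `take()` requires a recent `datasets` version):

```python
# Sketch only: DATASET_ID is a placeholder for the dataset's Hub id.
from datasets import load_dataset

ds = load_dataset("DATASET_ID", split="train", streaming=True)
for row in ds.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])
```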
blob_id: f106f0dd2f35b12a8d622c6fb2ff9b2b2d8443aa
directory_id: d6aae799e18e907fb413b715200c7832252a87e5
path: /responsible_ai/prejudice_remover_regularizer_images/utils/utils.py
content_id: 0383d6ce7ad7824e74d7f66dd2d1030c5691e573
detected_licenses: [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-proprietary-license", "Apache-2.0", "CC-BY-NC-4.0", "LicenseRef-scancode-unknown-license-reference" ]
license_type: permissive
repo_name: sony/nnabla-examples
snapshot_id: 0d0bbd5df3028996e790bcf07248fdb0932697d1
revision_id: 41f71faa6efff7774a76bbd5af3198322a90a6ab
branch_name: refs/heads/master
visit_date: 2023-09-04T03:45:54.023899
revision_date: 2023-08-22T03:31:21
committer_date: 2023-08-22T03:31:21
github_id: 109,625,584
star_events_count: 308
fork_events_count: 108
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-22T03:31:23
gha_created_at: 2017-11-05T23:30:40
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,144
extension: py
filename: utils.py
content:
```python
# Copyright 2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import average_precision_score, f1_score, recall_score


def get_auc_bias(gt_label, pred_label):
    """
    compute bias amplification for attribute-task
    References:
        [1] Directional Bias Amplification (https://arxiv.org/abs/2102.12594)
        [2] Fair Attribute Classification through Latent Space De-biasing
            (https://arxiv.org/abs/2012.01469)
    Args:
        gt_label: ground truth labels
        pred_label: predicted labels
    Returns:
        bias amplification
    """
    bog_tilde = np.zeros((2, 2))
    bog_gt_g = np.zeros((2, 2))
    for i, objs in enumerate([gt_label, pred_label]):
        female = np.where(objs[:, 1] == 0)[0]  # Unprivileged class
        male = np.where(objs[:, 1] == 1)[0]  # Privileged class
        kitchen = np.where(objs[:, 0] == 0)[0]  # Unfavourable class
        sports = np.where(objs[:, 0] == 1)[0]  # Favourable class
        if i == 0:
            bog_tilde[0][0] = len(set(kitchen) & set(female))
            bog_tilde[0][1] = len(set(kitchen) & set(male))
            bog_tilde[1][0] = len(set(sports) & set(female))
            bog_tilde[1][1] = len(set(sports) & set(male))
        elif i == 1:
            bog_gt_g[0][0] = len(set(kitchen) & set(female))
            bog_gt_g[0][1] = len(set(kitchen) & set(male))
            bog_gt_g[1][0] = len(set(sports) & set(female))
            bog_gt_g[1][1] = len(set(sports) & set(male))

    total_images_train = np.sum(bog_tilde)
    data_bog = bog_tilde / np.sum(bog_tilde, axis=0)
    pred_bog = bog_gt_g / np.sum(bog_tilde, axis=0)
    p_t_a = bog_tilde / np.sum(bog_tilde, axis=0)
    p_t = np.sum(bog_tilde, axis=1) / total_images_train

    diff = np.zeros_like(data_bog)
    for i in range(len(data_bog)):
        for j in range(len(data_bog[0])):
            diff[i][j] = pred_bog[i][j] - data_bog[i][j]
            indicator = np.sign(p_t_a[i][j] - p_t[i])
            if indicator == 0:
                diff[i][j] = 0
            elif indicator == -1:
                diff[i][j] = -diff[i][j]
    value = np.nanmean(diff)
    return value


def get_bias_amplification(domain, targets, pred):
    """
    BA measures how much more often a target attribute is predicted with a
    protected attribute than the ground truth value.
    References:
        [1] Directional Bias Amplification (https://arxiv.org/abs/2102.12594)
        [2] Fair Attribute Classification through Latent Space De-biasing
            (https://arxiv.org/abs/2012.01469)
    Args:
        domain (numpy.ndarray): protected attribute
        targets (numpy.ndarray): target label
        pred (numpy.ndarray): predicted label
    Returns:
        Bias amplification
    """
    test_labels = np.zeros((targets.shape[0], 2))
    test_labels[:, 0] = targets
    test_labels[:, 1] = domain
    test_pred = np.zeros((targets.shape[0], 2))
    test_pred[:, 0] = pred
    test_pred[:, 1] = domain
    auc_bias = get_auc_bias(test_labels, test_pred)
    return auc_bias


def get_diff_in_equal_opportunity(domain, targets, pred):
    """
    Compute the absolute difference in FNR between the protected attribute groups.
    Args:
        domain (numpy.ndarray): actual protected attribute
        targets (numpy.ndarray): actual target label
        pred (numpy.ndarray): predicted label
    Returns:
        difference_in_equal_opportunity (float)
    """
    g0 = np.argwhere(domain == 0)
    g1 = np.argwhere(domain == 1)
    deo = np.abs((1 - recall_score(targets[g0], pred[g0])) -
                 (1 - recall_score(targets[g1], pred[g1])))
    return np.median(deo)


def get_average_precision(targets, scores):
    """
    Compute average precision score
    Args:
        targets (numpy.ndarray): actual target label
        scores (numpy.ndarray): predicted scores
    Returns:
        Average precision
    """
    avg_prec = average_precision_score(targets, scores)
    return np.median(avg_prec)


def get_f1_threshold(targets_all, scores_all):
    """
    get the f1 threshold and accuracy
    Args:
        targets_all (numpy.ndarray): actual target label
        scores: predicted scores
    Returns:
        best_acc: best accuracy
        best_t: best threshold
    """
    best_t = -1.0
    best_acc = 0.0
    for t in range(1, 10):
        thresh = 0.1 * t
        curr_scores = np.where(scores_all > thresh, 1, 0)
        acc = f1_score(targets_all, curr_scores)
        if acc > best_acc:
            best_acc = acc
            best_t = thresh
    one_dec = best_t
    for t in range(1, 20):
        thresh = (one_dec - 0.1) + 0.01 * t
        curr_scores = np.where(scores_all > thresh, 1, 0)
        acc = f1_score(targets_all, curr_scores)
        # print(thresh, acc, best_acc, flush=True)
        if acc > best_acc:
            best_acc = acc
            best_t = thresh
    return best_acc, best_t


def calibrated_threshold(targets, scores):
    """
    Calibrated threshold
    Args:
        targets (numpy.ndarray): actual target label
        scores (numpy.ndarray): predicted scores
    Returns:
        calibrated threshold
    """
    cp = int(targets.sum())
    scores_copy = np.copy(scores)
    scores_copy.sort()
    thresh = scores_copy[-cp]
    return thresh


def get_cvs(output_f, output_m, cal_thresh):
    """
    Calders and Verwer defined a discrimination score, by subtracting the
    conditional probability of the positive class given a sensitive value
    from that given a non-sensitive value.
    Args:
        output_f (list): output of unprivileged class
        output_m (list): output of privileged class
        cal_thresh (float): calibrated threshold
    Returns:
        CV Score (float)
    """
    yf_pred = (output_f >= cal_thresh)
    ym_pred = (output_m >= cal_thresh)
    corr_f = np.sum(yf_pred == True)
    corr_m = np.sum(ym_pred == True)
    P_y1_s1 = corr_f / output_f.shape[0]
    P_y1_s0 = corr_m / output_m.shape[0]
    CV_score = np.abs(P_y1_s0 - P_y1_s1)
    return round(CV_score.item(), 4)


def plot_fairness(fairness, ax, metric="DPD", title="fairness metric",
                  bar_x_axis="Original"):
    """ plot single fairness metric """
    ax.set_ylim([-0.6, 0.6])
    ax.axhline(y=0.0, color='r', linestyle='-')
    ax.bar([bar_x_axis], fairness, color="blue", width=2)
    ax.set_ylabel(metric)
    ax.set_title(title, fontsize=10)
    for index, value in enumerate(fairness):
        if value < 0:
            ax.text(index, value - 0.1, str(round(value, 3)), fontweight='bold',
                    color='red', bbox=dict(facecolor='red', alpha=0.4))
        else:
            ax.text(index, value + 0.1, str(round(value, 3)), fontweight='bold',
                    color='red', bbox=dict(facecolor='red', alpha=0.4))


def plot_fairness_multi(DEO, CV_score, BA, accuracy, bar_x_axis="original"):
    """ plot fairness metrics """
    fig, axes = plt.subplots(1, 3, figsize=(10, 4), sharey=True)
    fig.suptitle("Model Fairness", fontsize=16)
    plot_fairness([DEO], ax=axes[0], metric="DEO",
                  title="Difference in Equal opportunity (DEO)",
                  bar_x_axis=bar_x_axis)
    plot_fairness([CV_score], ax=axes[1], metric="CV Score",
                  title="CV score", bar_x_axis=bar_x_axis)
    plot_fairness([BA], ax=axes[2], metric="BA",
                  title="Bias Amplification (BA)", bar_x_axis=bar_x_axis)
    fig.text(0.92, 0.65,
             '\n'.join(["Average precision score:", f"- AP : {accuracy:.3f}"]),
             fontsize='15')
    plt.show()
```
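A quick smoke test of the metrics above on synthetic labels; the arrays and sizes below are made up purely for illustration:

```python
# Hypothetical usage of the fairness helpers with random binary labels.
import numpy as np

rng = np.random.default_rng(0)
domain = rng.integers(0, 2, size=200)   # protected attribute (0/1)
targets = rng.integers(0, 2, size=200)  # ground-truth task label
pred = rng.integers(0, 2, size=200)     # model predictions

print("BA :", get_bias_amplification(domain, targets, pred))
print("DEO:", get_diff_in_equal_opportunity(domain, targets, pred))
```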
blob_id: 441edf929b91f5317349d8d0fa0f5f762d903359
directory_id: a3d6556180e74af7b555f8d47d3fea55b94bcbda
path: /chrome/updater/test/service/win/answer_uac.py
content_id: 5691da6f7d624f74695aa78250dcf9a602d253d5
detected_licenses: [ "BSD-3-Clause" ]
license_type: permissive
repo_name: chromium/chromium
snapshot_id: aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
revision_id: a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
branch_name: refs/heads/main
visit_date: 2023-08-24T00:35:12.585945
revision_date: 2023-08-23T22:01:11
committer_date: 2023-08-23T22:01:11
github_id: 120,360,765
star_events_count: 17,408
fork_events_count: 7,102
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-10T23:44:27
gha_created_at: 2018-02-05T20:55:32
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,937
extension: py
filename: answer_uac.py
content:
```python
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A simple command to answer UAC prompt.

To interact with UAC, we create this standalone program so that the process
can run with the same security token as the desired winlogon.exe, and on the
same desktop that UAC prompts.

Sample usage:
  # Accepts UAC twice, denies once, and then accepts twice, with each
  # operation timing out in 20 seconds.
  python answer_uac.py --timeout=20 --actions=AADAA
"""

import argparse
import logging
import os
import sys

import uac


def _ParseCommandLine():
  """Parse the command line arguments."""
  cmd_parser = argparse.ArgumentParser(description='Window UAC prompt handler')
  cmd_parser.add_argument(
      '--actions',
      dest='actions',
      type=str,
      default='A',
      help='How to handle UAC prompt, A for accept, D for deny.')
  cmd_parser.add_argument(
      '--timeout',
      default=30,
      type=float,
      help='Time to wait for each UAC prompt before giving up.')
  cmd_parser.add_argument(
      '--source',
      default='',
      help='Name of the source that triggers UAC, optional (for logging).')
  return cmd_parser.parse_args()


def main():
  flags = _ParseCommandLine()
  logging.info('Command run: %s', sys.argv)
  actions = flags.actions.upper()
  for action in actions:
    if action == 'A':  # Perform action 'Accept'
      logging.info('Next UAC prompt will be accepted.')
      accept = True
    elif action == 'D':  # Perform action 'Deny'
      logging.error('Next UAC prompt will be denied.')
      accept = False
    else:
      logging.error('Unknown action for UAC prompt: [%s]', action)
      continue
    uac.AnswerUpcomingUACPrompt(accept, flags.timeout)


if __name__ == '__main__':
  main()
```
blob_id: 0c77908762865bcbc74b2a863bebe274eb50ae29
directory_id: a1bc4de84d65226e5b2e9d06818dd407a5fb4d20
path: /flinck.py
content_id: 277cc5f9fc06013f120d6f1917a40764499b6728
detected_licenses: [ "MIT" ]
license_type: permissive
repo_name: Kraymer/flinck
snapshot_id: 80961a6776900bfd7a1bf1b954bccdd0625dc91a
revision_id: b3d0076e2b3ab74c0a8f4a3c8abf5631ee362438
branch_name: refs/heads/master
visit_date: 2021-01-18T22:10:51.422411
revision_date: 2021-01-08T19:25:32
committer_date: 2021-01-08T19:25:32
github_id: 54,346,527
star_events_count: 155
fork_events_count: 9
gha_license_id: null
gha_event_created_at: 2016-06-08T07:01:40
gha_created_at: 2016-03-20T23:42:26
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 231
extension: py
filename: flinck.py
content:
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2016 Fabrice Laporte - kray.me
# The MIT License http://www.opensource.org/licenses/mit-license.php

import flinck

if __name__ == "__main__":
    flinck.flinck_cli()
```
blob_id: 71c7062c0485a8854bb2976d9a2f4c12d9063178
directory_id: 40dd8330e5f78c4348bbddc2c5acfd59d793dd51
path: /configs/upernet/upernet_r18_4xb4-160k_ade20k-512x512.py
content_id: 9ac6c35527b588a8cd3abd3a63f913896a9c5b07
detected_licenses: [ "Apache-2.0" ]
license_type: permissive
repo_name: open-mmlab/mmsegmentation
snapshot_id: 0d12092312e2c465ede1fd7dd9847b6f2b37049c
revision_id: 30a3f94f3e2916e27fa38c67cc3b8c69c1893fe8
branch_name: refs/heads/main
visit_date: 2023-09-04T10:54:52.299711
revision_date: 2023-07-24T07:28:21
committer_date: 2023-07-24T07:28:21
github_id: 272,133,018
star_events_count: 6,534
fork_events_count: 2,375
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T01:22:32
gha_created_at: 2020-06-14T04:32:33
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 377
extension: py
filename: upernet_r18_4xb4-160k_ade20k-512x512.py
content:
```python
_base_ = [
    '../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    pretrained='open-mmlab://resnet18_v1c',
    backbone=dict(depth=18),
    decode_head=dict(in_channels=[64, 128, 256, 512], num_classes=150),
    auxiliary_head=dict(in_channels=256, num_classes=150))
```
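This config only overrides fields on top of its `_base_` files. As a rough stand-alone illustration of that override semantics (a simplified recursive dict merge, not MMEngine's actual implementation; the base values below are invented):

```python
# Illustration only: simplified recursive merge in the spirit of _base_ configs.
def merge_config(base: dict, override: dict) -> dict:
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_config(merged[key], value)  # merge nested dicts
        else:
            merged[key] = value  # scalar/list overrides replace outright
    return merged

base_model = dict(backbone=dict(depth=50),
                  decode_head=dict(in_channels=[256, 512, 1024, 2048]))
override = dict(backbone=dict(depth=18),
                decode_head=dict(in_channels=[64, 128, 256, 512], num_classes=150))
print(merge_config(base_model, override))
```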
blob_id: 92764efefc130a62ab768a3adca7d020f1f4e070
directory_id: 2181883c8faac55bfc969a97d22d9b24a3e81ab3
path: /Pythonwin/pywin/mfc/activex.py
content_id: a9828e78dcfbc6cbdeec68abb1f0d2518ea9043b
detected_licenses: [ "PSF-2.0" ]
license_type: permissive
repo_name: mhammond/pywin32
snapshot_id: 574bf121cfeac8c7a9d28f94ee0f2069a425e8ab
revision_id: 2a7137f21965013020ef9e4f27565db6dea59003
branch_name: refs/heads/main
visit_date: 2023-09-02T13:16:52.307262
revision_date: 2023-08-17T19:42:26
committer_date: 2023-08-17T19:42:26
github_id: 108,187,130
star_events_count: 4,757
fork_events_count: 907
gha_license_id: null
gha_event_created_at: 2023-08-23T01:45:49
gha_created_at: 2017-10-24T21:44:27
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,611
extension: py
filename: activex.py
content:
"""Support for ActiveX control hosting in Pythonwin. """ import win32ui import win32uiole from . import window class Control(window.Wnd): """An ActiveX control base class. A new class must be derived from both this class and the Events class. See the demos for more details. """ def __init__(self): self.__dict__["_dispobj_"] = None window.Wnd.__init__(self) def _GetControlCLSID(self): return self.CLSID def _GetDispatchClass(self): return self.default_interface def _GetEventMap(self): return self.default_source._dispid_to_func_ def CreateControl(self, windowTitle, style, rect, parent, id, lic_string=None): clsid = str(self._GetControlCLSID()) self.__dict__["_obj_"] = win32ui.CreateControl( clsid, windowTitle, style, rect, parent, id, None, False, lic_string ) klass = self._GetDispatchClass() dispobj = klass(win32uiole.GetIDispatchForWindow(self._obj_)) self.HookOleEvents() self.__dict__["_dispobj_"] = dispobj def HookOleEvents(self): dict = self._GetEventMap() for dispid, methodName in dict.items(): if hasattr(self, methodName): self._obj_.HookOleEvent(getattr(self, methodName), dispid) def __getattr__(self, attr): # Delegate attributes to the windows and the Dispatch object for this class try: return window.Wnd.__getattr__(self, attr) except AttributeError: pass return getattr(self._dispobj_, attr) def __setattr__(self, attr, value): if hasattr(self.__dict__, attr): self.__dict__[attr] = value return try: if self._dispobj_: self._dispobj_.__setattr__(attr, value) return except AttributeError: pass self.__dict__[attr] = value def MakeControlClass(controlClass, name=None): """Given a CoClass in a generated .py file, this function will return a Class object which can be used as an OCX control. This function is used when you do not want to handle any events from the OCX control. If you need events, then you should derive a class from both the activex.Control class and the CoClass """ if name is None: name = controlClass.__name__ return type("OCX" + name, (Control, controlClass), {}) def MakeControlInstance(controlClass, name=None): """As for MakeControlClass(), but returns an instance of the class.""" return MakeControlClass(controlClass, name)()
blob_id: a0762b17c279878ed09cc2aa41bf19fa44152496
directory_id: 8188f026dcfa3ca6c4e2d58e6c56d04d24e37a18
path: /projectq/meta/_util_test.py
content_id: a60d4ac51dd0b7c221c9ad2c6d5229a74fe1fcb1
detected_licenses: [ "Apache-2.0" ]
license_type: permissive
repo_name: ProjectQ-Framework/ProjectQ
snapshot_id: 2e342da0622d4b5d513c15504556e95d3d0e2aea
revision_id: 67c660ca18725d23ab0b261a45e34873b6a58d03
branch_name: refs/heads/develop
visit_date: 2023-09-04T02:18:25.581119
revision_date: 2023-03-09T16:03:57
committer_date: 2023-03-09T16:03:57
github_id: 77,520,796
star_events_count: 886
fork_events_count: 335
gha_license_id: Apache-2.0
gha_event_created_at: 2023-07-24T07:07:15
gha_created_at: 2016-12-28T09:31:53
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,928
extension: py
filename: _util_test.py
content:
```python
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest

from projectq import MainEngine
from projectq.cengines import DummyEngine

from . import _util


def test_insert_then_drop():
    d1 = DummyEngine()
    d2 = DummyEngine()
    d3 = DummyEngine()
    eng = MainEngine(backend=d3, engine_list=[d1])
    assert d1.next_engine is d3
    assert d2.next_engine is None
    assert d3.next_engine is None
    assert d1.main_engine is eng
    assert d2.main_engine is None
    assert d3.main_engine is eng
    assert eng.n_engines == 2

    _util.insert_engine(d1, d2)
    assert d1.next_engine is d2
    assert d2.next_engine is d3
    assert d3.next_engine is None
    assert d1.main_engine is eng
    assert d2.main_engine is eng
    assert d3.main_engine is eng
    assert eng.n_engines == 3

    _util.drop_engine_after(d1)
    assert d1.next_engine is d3
    assert d2.next_engine is None
    assert d3.next_engine is None
    assert d1.main_engine is eng
    assert d2.main_engine is None
    assert d3.main_engine is eng
    assert eng.n_engines == 2


def test_too_many_engines():
    N = 10
    eng = MainEngine(backend=DummyEngine(), engine_list=[])
    eng.n_engines_max = N
    for _ in range(N - 1):
        _util.insert_engine(eng, DummyEngine())
    with pytest.raises(RuntimeError):
        _util.insert_engine(eng, DummyEngine())
```
blob_id: 7b9d1f5be22fa7937cb3012a2a3cfcd9a9752c65
directory_id: a61bf859ceeb1ba98de3863225e07b29e1d7ce8a
path: /thonny/plugins/goto_definition.py
content_id: bd9f4ec32f016bf90720adfc5c15e15149d5e3ce
detected_licenses: [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ]
license_type: permissive
repo_name: thonny/thonny
snapshot_id: 3974b1860703e8450b837863682117f525a886c6
revision_id: 8fc9f5c7cbbe1d1c82aa5503ec4b684e28aa608c
branch_name: refs/heads/master
visit_date: 2023-08-31T03:04:34.685140
revision_date: 2023-08-24T11:38:36
committer_date: 2023-08-24T11:38:36
github_id: 163,728,962
star_events_count: 2,788
fork_events_count: 1,048
gha_license_id: MIT
gha_event_created_at: 2023-08-10T18:59:37
gha_created_at: 2019-01-01T10:29:50
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,520
extension: py
filename: goto_definition.py
content:
```python
import os.path
import tkinter as tk
from logging import getLogger
from tkinter import messagebox
from typing import Set, cast

from thonny import get_runner, get_workbench
from thonny.codeview import CodeViewText, SyntaxText
from thonny.common import InlineCommand
from thonny.editor_helpers import get_relevant_source_and_cursor_position, get_text_filename
from thonny.languages import tr
from thonny.misc_utils import running_on_mac_os
from thonny.ui_utils import command_is_pressed, control_is_pressed, get_hyperlink_cursor

logger = getLogger(__name__)


class GotoHandler:
    def __init__(self):
        wb = get_workbench()
        wb.bind_class("EditorCodeViewText", "<1>", self.request_definitions, True)
        wb.bind_class("EditorCodeViewText", "<Any-Motion>", self.on_motion, True)
        wb.bind_class("EditorCodeViewText", "<Any-Leave>", self.remove_underline, True)
        if running_on_mac_os():
            wb.bind_class("EditorCodeViewText", "<Command-KeyRelease>", self.remove_underline, True)
        else:
            wb.bind_class("EditorCodeViewText", "<Control-KeyRelease>", self.remove_underline, True)
        wb.bind("get_definitions_response", self.handle_definitions_response, True)

    def request_definitions(self, event=None):
        if not self.proper_modifier_is_pressed(event):
            return
        assert isinstance(event.widget, CodeViewText)
        text = event.widget
        source, row, column = get_relevant_source_and_cursor_position(text)
        filename = get_text_filename(text)
        if not get_runner() or not get_runner().get_backend_proxy():
            return
        get_runner().send_command(
            InlineCommand(
                "get_definitions", source=source, row=row, column=column, filename=filename
            )
        )

    def proper_modifier_is_pressed(self, event: tk.Event) -> bool:
        if running_on_mac_os():
            return command_is_pressed(event)
        else:
            return control_is_pressed(event)

    def handle_definitions_response(self, msg):
        defs = msg.definitions
        if len(defs) != 1:
            messagebox.showerror(
                tr("Problem"), tr("Could not find definition"), master=get_workbench()
            )
            return
        # TODO: handle multiple results like PyCharm
        module_path = str(defs[0].module_path)
        if not os.path.isfile(module_path):
            logger.warning("%s is not a file", module_path)
            return
        module_name = defs[0].module_name
        row = defs[0].row
        if module_path and row is not None:
            get_workbench().get_editor_notebook().show_file(module_path, row)
        elif module_name == "" and row is not None:
            # current editor
            get_workbench().get_editor_notebook().get_current_editor().select_range(row)

    def on_motion(self, event):
        text = cast(SyntaxText, event.widget)
        if self.proper_modifier_is_pressed(event):
            self.remove_underline(event)
            start_index = text.index(f"@{event.x},{event.y} wordstart")
            end_index = text.index(f"@{event.x},{event.y} wordend")
            # sometimes, start_index will contain wrong line number
            start_line, start_col = start_index.split(".")
            end_line, end_col = end_index.split(".")
            if start_line != end_line:
                start_index = end_line + "." + start_col
            word = text.get(start_index, end_index)
            if (
                word
                and (word[0].isalpha() or word[0] == "_")
                # and not iskeyword(word)
                and self._index_doesnt_have_tags(
                    text,
                    start_index,
                    {"string", "string3", "open_string", "open_string3", "comment"},
                )
            ):
                text.tag_add("name_link", start_index, end_index)
                text["cursor"] = get_hyperlink_cursor()
                text.underlined = True
            else:
                if getattr(text, "underlined", False):
                    self.remove_underline(event)

    def _index_doesnt_have_tags(self, text, index, tags: Set[str]) -> bool:
        return not (set(text.tag_names(index)) & tags)

    def remove_underline(self, event=None):
        text = cast(SyntaxText, event.widget)
        text.tag_remove("name_link", "1.0", "end")
        text["cursor"] = ""
        text.underlined = False


def load_plugin() -> None:
    goto_handler = GotoHandler()
```
blob_id: c8bb6235160a2e3b163f3904b6f31d933f5bce6a
directory_id: a198aa98679ae1fc70388f8376a9a41444040319
path: /deprecated-tools/better-bus-buffers/BBB_CountTripsAtStopsByRouteAndDirection.py
content_id: f057bf54e87ea291947c17f3ed2ad26f5311f468
detected_licenses: [ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ]
license_type: permissive
repo_name: Esri/public-transit-tools
snapshot_id: 8437831328e94fa9fe74469922c204eb6fb74b22
revision_id: 47cbc3de67a7b1bf9255e07e88cba7b051db0505
branch_name: refs/heads/master
visit_date: 2023-09-05T17:01:23.375328
revision_date: 2023-08-30T19:58:50
committer_date: 2023-08-30T19:58:50
github_id: 42,553,165
star_events_count: 155
fork_events_count: 67
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-30T19:58:51
gha_created_at: 2015-09-15T23:38:22
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 14,437
extension: py
filename: BBB_CountTripsAtStopsByRouteAndDirection.py
content:
```python
############################################################################
## Tool name: BetterBusBuffers - Count Trips at Stops by Route and Direction
## Created by: David Wasserman, https://github.com/d-wasserman and Melinda Morang, Esri
## This tool was developed as part of Transit R&D Efforts from Fehr & Peers.
## Fehr & Peers contributes this tool to the BBB Toolset to further more
## informed planning.
## Last updated: 25 September 2021
############################################################################
'''BetterBusBuffers - Count Trips at Stops by Route and Direction

BetterBusBuffers provides a quantitative measure of access to public transit
in your city by counting the transit trip frequency at various locations. The
Count Trips at Stops by Route and Direction outputs a feature class where
every GTFS stop is duplicated for every route-direction combination that uses
that stop during the analysis time windows. Each point will represent a unique
combination of stop id, route id, and direction id, and the frequency
statistics that relate to each of them for the analyzed time window.
'''
################################################################################
'''Copyright 2021 Esri
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.'''
################################################################################
"""Copyright 2020 Fehr & Peers
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
"""
################################################################################
"""Copyright 2020 David Wasserman
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
"""
################################################################################

import arcpy
import BBB_SharedFunctions
import sqlite3


def runTool(output_stop_file, SQLDbase, time_window_value_table, snap_to_nearest_5_minutes):

    def RetrieveFrequencyStatsForStop(stop_id, stoptimedict, start_sec, end_sec):
        '''For a given stop, query the dictionary and return the NumTrips,
        NumTripsPerHr, MaxWaitTime, and AvgHeadway given a specific route_id
        and direction. If snap to nearest five minutes is true, then this
        function will return headways snapped to the closest 5 minute interval.'''
        # Make a list of stop_times
        StopTimesAtThisPoint = []
        try:
            for trip in stoptimedict[stop_id]:
                StopTimesAtThisPoint.append(trip[1])
        except KeyError:
            pass
        StopTimesAtThisPoint.sort()

        # Calculate the number of trips
        NumTrips = len(StopTimesAtThisPoint)
        NumTripsPerHr = round(float(NumTrips) / ((end_sec - start_sec) / 3600), 2)
        # Get the max wait time and the average headway
        MaxWaitTime = BBB_SharedFunctions.CalculateMaxWaitTime(StopTimesAtThisPoint, start_sec, end_sec)
        if snap_to_nearest_5_minutes:
            round_to = 5
        else:
            round_to = None
        AvgHeadway = BBB_SharedFunctions.CalculateAvgHeadway(StopTimesAtThisPoint, round_to)
        return NumTrips, NumTripsPerHr, MaxWaitTime, AvgHeadway

    # ----- Get input parameters and set things up. -----

    # Check software version and fail out quickly if it's not sufficient.
    BBB_SharedFunctions.CheckArcVersion(min_version_pro="1.2")

    arcpy.AddMessage("Reading data...")

    # Connect to SQL database of preprocessed GTFS from Step 1
    conn = BBB_SharedFunctions.conn = sqlite3.connect(SQLDbase)
    c = BBB_SharedFunctions.c = conn.cursor()

    # Store frequencies if relevant
    frequencies_dict = BBB_SharedFunctions.MakeFrequenciesDict()

    # Get unique route_id/direction_id pairs and calculate the trips used in each
    # Some GTFS datasets use the same route_id to identify trips traveling in
    # either direction along a route. Others identify it as a different route.
    # We will consider each direction separately if there is more than one.
    trip_route_dict = {}  # {(route_id, direction_id): [(trip_id, service_id),..]}
    triproutefetch = '''SELECT DISTINCT route_id, direction_id FROM trips;'''
    c.execute(triproutefetch)
    for rtpair in c.fetchall():
        route_id = rtpair[0]
        direction_id = rtpair[1]
        if str(direction_id).strip() == "":  # Handle blanks
            direction_id = None
        # Get list of trips
        # Ignore direction if this route doesn't have a direction
        if direction_id is not None and str(direction_id).strip():
            triproutefetch = '''
                SELECT trip_id, service_id FROM trips
                WHERE route_id = '{0}' AND direction_id = {1};'''.format(route_id, direction_id)
        else:
            triproutefetch = '''
                SELECT trip_id, service_id FROM trips
                WHERE route_id = '{0}';'''.format(route_id)
        c.execute(triproutefetch)
        triproutelist = c.fetchall()
        key = (route_id, direction_id)
        trip_route_dict[key] = triproutelist

    # ----- For each time window, calculate the stop frequency -----

    final_stop_freq_dict = {}  # {(stop_id, route_id, direction_id): {prefix: (NumTrips, NumTripsPerHour, MaxWaitTimeSec, AvgHeadwayMin)}}
    # The time_window_value_table will be a list of nested lists of strings like:
    # [[Weekday name or YYYYMMDD date, HH:MM, HH:MM, Departures/Arrivals, Prefix], [], ...]
    for time_window in time_window_value_table:
        # Prefix/identifier associated with this time window
        prefix = time_window[4]
        arcpy.AddMessage("Calculating statistics for time window %s..." % prefix)

        # Clean up date and determine whether it's a date or a weekday
        Specific, day = BBB_SharedFunctions.CheckSpecificDate(time_window[0])

        # Convert times to seconds
        start_time = time_window[1]
        end_time = time_window[2]
        if not start_time:
            start_time = "00:00"
        if not end_time:
            end_time = "23:59"
        start_sec, end_sec = BBB_SharedFunctions.ConvertTimeWindowToSeconds(start_time, end_time)

        # Clean up arrival/departure time choice
        DepOrArr = BBB_SharedFunctions.CleanUpDepOrArr(time_window[3])

        # Get the trips running in this time window for each route/direction pair
        # Get the service_ids serving the correct days
        serviceidlist, serviceidlist_yest, serviceidlist_tom = \
            BBB_SharedFunctions.GetServiceIDListsAndNonOverlaps(day, start_sec, end_sec, DepOrArr, Specific)

        # Retrieve the stop_times for the time window broken out by route/direction
        for rtdirpair in trip_route_dict:
            # Get trips running with these service_ids
            trip_serv_list = trip_route_dict[rtdirpair]
            triplist = []
            for tripserv in trip_serv_list:
                # Only keep trips running on the correct day
                if tripserv[1] in serviceidlist or tripserv[1] in serviceidlist_tom or \
                        tripserv[1] in serviceidlist_yest:
                    triplist.append(tripserv[0])

            # Get the stop_times that occur during this time window for these trips
            try:
                stoptimedict = BBB_SharedFunctions.GetStopTimesForStopsInTimeWindow(
                    start_sec, end_sec, DepOrArr, triplist, "today", frequencies_dict)
            except KeyError:  # No trips
                pass
            try:
                stoptimedict_yest = BBB_SharedFunctions.GetStopTimesForStopsInTimeWindow(
                    start_sec, end_sec, DepOrArr, triplist, "yesterday", frequencies_dict)
            except KeyError:  # No trips
                pass
            try:
                stoptimedict_tom = BBB_SharedFunctions.GetStopTimesForStopsInTimeWindow(
                    start_sec, end_sec, DepOrArr, triplist, "tomorrow", frequencies_dict)
            except KeyError:  # No trips
                pass

            # Combine the three dictionaries into one master
            for stop in stoptimedict_yest:
                stoptimedict[stop] = stoptimedict.setdefault(stop, []) + stoptimedict_yest[stop]
            for stop in stoptimedict_tom:
                stoptimedict[stop] = stoptimedict.setdefault(stop, []) + stoptimedict_tom[stop]

            for stop in stoptimedict.keys():
                # Get Stop-Route-Dir Frequencies by time period
                vals = RetrieveFrequencyStatsForStop(stop, stoptimedict, start_sec, end_sec)
                key = (stop, rtdirpair[0], rtdirpair[1],)
                if key not in final_stop_freq_dict:
                    final_stop_freq_dict[key] = {prefix: vals}
                else:
                    final_stop_freq_dict[key][prefix] = vals

    # ----- Write the stops and stats to the output feature class -----

    arcpy.AddMessage("Writing outputs...")
    # Make the basic feature class for stops with correct gtfs fields
    with arcpy.EnvManager(overwriteOutput=True):
        output_coords = BBB_SharedFunctions.CreateStopsFeatureClass(output_stop_file)

    # Add fields specific to this tool's outputs
    arcpy.management.AddField(output_stop_file, 'route_id', "TEXT")
    arcpy.management.AddField(output_stop_file, 'direction_id', "SHORT")
    # Create fields for stats for each time window using prefix
    base_field_names = ['_NumTrips', '_NumTripsPerHr', '_MaxWaitTime', '_AvgHeadway']
    new_fields = []
    for time_window in time_window_value_table:
        for base_field in base_field_names:
            new_field = time_window[4] + base_field
            new_fields.append(new_field)
            arcpy.management.AddField(output_stop_file, new_field, "DOUBLE")

    # Get the stop info from the GTFS SQL file
    StopTable = BBB_SharedFunctions.GetStopsData()
    stop_dict = {stop[0]: stop for stop in StopTable}
    # Make a dictionary to track whether we have inserted all stops at least once into the output
    used_stops = {stop[0]: False for stop in StopTable}
    # Store stop geometries in dictionary so they can be inserted multiple times without recalculating
    stop_geoms = {stop[0]: BBB_SharedFunctions.MakeStopGeometry(stop[4], stop[5], output_coords)
                  for stop in StopTable}

    # Add the stops with stats to the feature class
    fields = [
        "SHAPE@", "stop_id", "stop_code", "stop_name", "stop_desc", "zone_id",
        "stop_url", "location_type", "parent_station", "route_id", "direction_id"
    ] + new_fields
    with arcpy.da.InsertCursor(output_stop_file, fields) as cur3:
        # Iterate over all unique stop, route_id, direction_id groups and insert values
        for key in sorted(final_stop_freq_dict.keys(),
                          key=lambda x: (x[0], x[1], x[2] if x[2] is not None else -1)):
            stop_id = key[0]
            used_stops[stop_id] = True
            route_id = key[1]
            direction_id = key[2]
            stop_data = stop_dict[stop_id]
            # Schema of StopTable
            ## 0 - stop_id
            ## 1 - stop_code
            ## 2 - stop_name
            ## 3 - stop_desc
            ## 4 - stop_lat
            ## 5 - stop_lon
            ## 6 - zone_id
            ## 7 - stop_url
            ## 8 - location_type
            ## 9 - parent_station
            row = [
                stop_geoms[stop_id],  # Geometry
                stop_data[0], stop_data[1], stop_data[2], stop_data[3],
                stop_data[6], stop_data[7], stop_data[8], stop_data[9],  # GTFS data
                route_id, direction_id  # route and direction IDs
            ]
            # Populate stats fields for each prefix
            for time_window in time_window_value_table:
                prefix = time_window[4]
                try:
                    vals = final_stop_freq_dict[key][prefix]
                except KeyError:
                    # This stop/route/direction group had no service for this time window
                    vals = [0, 0, None, None]
                row += vals
            # Insert the row
            cur3.insertRow(row)

        # Insert row for any remaining stops that were not used at all
        for stop_id in used_stops:
            if used_stops[stop_id]:
                # This one was already inserted
                continue
            stop_data = stop_dict[stop_id]
            row = [
                stop_geoms[stop_id],  # Geometry
                stop_data[0], stop_data[1], stop_data[2], stop_data[3],
                stop_data[6], stop_data[7], stop_data[8], stop_data[9],  # GTFS data
                None, None  # route and direction IDs - None because not used
            ]
            # Populate stats fields for each prefix
            for time_window in time_window_value_table:
                row += [0, 0, None, None]
            # Insert the row
            cur3.insertRow(row)

    # Close Connection
    conn.close()
    arcpy.AddMessage("Finished!")
    arcpy.AddMessage("Calculated trip counts, frequency, max wait time, and \
headway were written to an output stops file by route-direction pairs.")
```
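A hypothetical invocation, based only on the function signature and the time-window format documented in the code above; the paths are placeholders and assume the GTFS preprocessing step has already produced the SQL database:

```python
# Sketch only: placeholder paths; requires ArcGIS Pro's arcpy environment.
time_windows = [
    ["Monday", "07:00", "09:00", "Departures", "AMPeak"],
    ["Monday", "16:00", "18:00", "Departures", "PMPeak"],
]
runTool(r"C:\outputs\transit.gdb\StopsByRouteDir",   # output feature class
        r"C:\data\gtfs_preprocessed.sql",             # Step 1 SQL database
        time_windows,
        snap_to_nearest_5_minutes=True)
```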
blob_id: 467d231c19ad651966772e8f4086682ebadcde07
directory_id: edc1134436a79ca883a0d25f3c8dfffc4235c514
path: /pyro/poutine/broadcast_messenger.py
content_id: f12ec13f48ceeaf64c0179d806c90ed19231fac0
detected_licenses: [ "Apache-2.0" ]
license_type: permissive
repo_name: pyro-ppl/pyro
snapshot_id: 2283d8ca528fc090c724a3a6e0f344e505ebbf77
revision_id: 0e82cad30f75b892a07e6c9a5f9e24f2cb5d0d81
branch_name: refs/heads/dev
visit_date: 2023-08-18T00:35:28.014919
revision_date: 2023-08-06T21:01:36
committer_date: 2023-08-06T21:01:36
github_id: 94,506,832
star_events_count: 3,647
fork_events_count: 606
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T13:52:14
gha_created_at: 2017-06-16T05:03:47
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,528
extension: py
filename: broadcast_messenger.py
content:
```python
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0

from pyro.util import ignore_jit_warnings

from .messenger import Messenger


class BroadcastMessenger(Messenger):
    """
    Automatically broadcasts the batch shape of the stochastic function at a
    sample site when inside a single or nested plate context. The existing
    `batch_shape` must be broadcastable with the size of the
    :class:`~pyro.plate` contexts installed in the `cond_indep_stack`.

    Notice how `model_automatic_broadcast` below automates expanding of
    distribution batch shapes. This makes it easy to modularize a Pyro model
    as the sub-components are agnostic of the wrapping :class:`~pyro.plate`
    contexts.

    >>> def model_broadcast_by_hand():
    ...     with IndepMessenger("batch", 100, dim=-2):
    ...         with IndepMessenger("components", 3, dim=-1):
    ...             sample = pyro.sample("sample",
    ...                                  dist.Bernoulli(torch.ones(3) * 0.5)
    ...                                      .expand_by(100))
    ...             assert sample.shape == torch.Size((100, 3))
    ...             return sample

    >>> @poutine.broadcast
    ... def model_automatic_broadcast():
    ...     with IndepMessenger("batch", 100, dim=-2):
    ...         with IndepMessenger("components", 3, dim=-1):
    ...             sample = pyro.sample("sample",
    ...                                  dist.Bernoulli(torch.tensor(0.5)))
    ...             assert sample.shape == torch.Size((100, 3))
    ...             return sample
    """

    @staticmethod
    @ignore_jit_warnings(["Converting a tensor to a Python boolean"])
    def _pyro_sample(msg):
        """
        :param msg: current message at a trace site.
        """
        if msg["done"] or msg["type"] != "sample":
            return

        dist = msg["fn"]
        actual_batch_shape = getattr(dist, "batch_shape", None)
        if actual_batch_shape is not None:
            target_batch_shape = [
                None if size == 1 else size for size in actual_batch_shape
            ]
            for f in msg["cond_indep_stack"]:
                if f.dim is None or f.size == -1:
                    continue
                assert f.dim < 0
                target_batch_shape = [None] * (
                    -f.dim - len(target_batch_shape)
                ) + target_batch_shape
                if (
                    target_batch_shape[f.dim] is not None
                    and target_batch_shape[f.dim] != f.size
                ):
                    raise ValueError(
                        "Shape mismatch inside plate('{}') at site {} dim {}, {} vs {}".format(
                            f.name,
                            msg["name"],
                            f.dim,
                            f.size,
                            target_batch_shape[f.dim],
                        )
                    )
                target_batch_shape[f.dim] = f.size
            # Starting from the right, if expected size is None at an index,
            # set it to the actual size if it exists, else 1.
            for i in range(-len(target_batch_shape) + 1, 1):
                if target_batch_shape[i] is None:
                    target_batch_shape[i] = (
                        actual_batch_shape[i] if len(actual_batch_shape) >= -i else 1
                    )
            msg["fn"] = dist.expand(target_batch_shape)
            if msg["fn"].has_rsample != dist.has_rsample:
                msg["fn"].has_rsample = dist.has_rsample  # copy custom attribute
```
blob_id: 8df8a303cbf897effaf98f9cdf1b0b61498fe7a3
directory_id: fe0f171663b30275f084dd4f87580972cbf62b21
path: /tests/test_client.py
content_id: 4463afbc88c452cc9d557f737b7fb39f39a5c322
detected_licenses: [ "Apache-2.0" ]
license_type: permissive
repo_name: VirusTotal/vt-py
snapshot_id: 625237cef9ac3a2982424e3506a28e51ff0bc060
revision_id: 2376f96156a1359f0f7b8981268f5fa5438f0fba
branch_name: refs/heads/master
visit_date: 2023-09-04T00:54:41.380224
revision_date: 2023-08-23T12:36:42
committer_date: 2023-08-23T12:36:42
github_id: 190,611,801
star_events_count: 417
fork_events_count: 107
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-12T09:41:44
gha_created_at: 2019-06-06T16:11:24
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 12,922
extension: py
filename: test_client.py
content:
```python
# Copyright 2019 The vt-py authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Client tests."""

import datetime
import io
import json

import pytest

from vt import APIError
from vt import Client
from vt import Object


def new_client(httpserver):
  return Client(
      "dummy_api_key",
      host="http://" + httpserver.host + ":" + str(httpserver.port),
      timeout=500,
  )


def test_object_from_dict():
  obj = Object.from_dict({
      "type": "dummy_type",
      "id": "dummy_id",
      "attributes": {
          "attr1": "foo",
          "attr2": 1,
      },
      "relationships": {"foos": {"data": [{"type": "foo", "id": "foo_id"}]}},
  })

  assert obj.id == "dummy_id"
  assert obj.type == "dummy_type"
  assert obj.attr1 == "foo"
  assert obj.attr2 == 1
  assert obj.relationships["foos"]["data"][0]["id"] == "foo_id"

  with pytest.raises(ValueError, match=r"Expecting dictionary, got: int"):
    Object.from_dict(1)

  with pytest.raises(ValueError, match=r"Object type not found"):
    Object.from_dict({})

  with pytest.raises(ValueError, match=r"Object id not found"):
    Object.from_dict({"type": "dummy_type"})

  with pytest.raises(
      ValueError, match=r"Object attributes must be a dictionary"
  ):
    Object.from_dict({"type": "dummy_type", "id": "dummy_id", "attributes": 1})


def test_object_date_attrs():
  obj = Object("dummy_type")
  obj.foo_date = 0
  assert obj.foo_date == datetime.datetime(1970, 1, 1, 0, 0, 0)


def test_object_to_dict():
  obj = Object.from_dict({
      "type": "dummy_type",
      "id": "dummy_id",
      "attributes": {
          "attr1": "foo",
          "attr2": 1,
          "attr3": {"subattr1": "bar"},
          "attr4": {"subattr1": "baz"},
      },
  })
  obj.set_data("data_key", {"some": "value"})

  # No changes, attributes shouldn't appear in the dictionary.
  obj_dict = obj.to_dict(modified_attributes_only=True)
  assert not obj_dict["attributes"]
  # The new data field should appear in the dictionary.
  assert obj_dict["data_key"] == {"some": "value"}

  # attr1 set to its previous value, no changes yet.
  obj.attr1 = "foo"
  obj_dict = obj.to_dict(modified_attributes_only=True)
  assert not obj_dict["attributes"]

  # attr1 changed to 'bar', this should be the only attribute in the dictionary.
  obj.attr1 = "bar"
  obj_dict = obj.to_dict(modified_attributes_only=True)
  assert len(obj_dict["attributes"]) == 1
  assert obj_dict["attributes"]["attr1"] == "bar"

  obj.attr3["subattr1"] = "foo"
  obj_dict = obj.to_dict(modified_attributes_only=True)
  assert len(obj_dict["attributes"]) == 2
  assert obj_dict["attributes"]["attr1"] == "bar"
  assert obj_dict["attributes"]["attr3"] == {"subattr1": "foo"}

  del obj.attr4["subattr1"]
  obj_dict = obj.to_dict(modified_attributes_only=True)
  assert len(obj_dict["attributes"]) == 3
  assert obj_dict["attributes"]["attr1"] == "bar"
  assert obj_dict["attributes"]["attr3"] == {"subattr1": "foo"}
  assert obj_dict["attributes"]["attr4"] == {}


def test_get(httpserver):
  httpserver.expect_request(
      "/api/v3/foo", method="GET", headers={"X-Apikey": "dummy_api_key"}
  ).respond_with_json({"data": "dummy_data"})
  with new_client(httpserver) as client:
    response = client.get("/foo")
  assert response.status == 200


def test_get_data(httpserver):
  httpserver.expect_request(
      "/api/v3/foo", method="GET", headers={"X-Apikey": "dummy_api_key"}
  ).respond_with_json({"data": "dummy_data"})
  with new_client(httpserver) as client:
    data = client.get_data("/foo")
  assert data == "dummy_data"


def test_get_object(httpserver):
  httpserver.expect_request(
      "/api/v3/dummy_types/dummy_id",
      method="GET",
      headers={"X-Apikey": "dummy_api_key"},
  ).respond_with_json(
      {
          "data": {
              "id": "dummy_id",
              "type": "dummy_type",
              "attributes": {"foo": "foo", "bar": "bar"},
          }
      }
  )
  with new_client(httpserver) as client:
    obj = client.get_object("/dummy_types/dummy_id")
  assert obj.id == "dummy_id"
  assert obj.type == "dummy_type"
  assert obj.foo == "foo"
  assert obj.bar == "bar"
  assert obj.get("foo") == "foo"
  assert obj.get("bar") == "bar"
  assert obj.get("baz") is None


def test_patch_object(httpserver):
  obj = Object("dummy_type", "dummy_id", {"foo": 1, "bar": 2})
  obj.foo = 2
  httpserver.expect_request(
      "/api/v3/dummy_types/dummy_id",
      method="PATCH",
      headers={"X-Apikey": "dummy_api_key", "Content-Type": "application/json"},
      json={"data": obj.to_dict(modified_attributes_only=True)},
  ).respond_with_json(
      {
          "data": {
              "id": "dummy_id",
              "type": "dummy_type",
              "attributes": {
                  "foo": 2,
              },
          }
      }
  )
  with new_client(httpserver) as client:
    client.patch_object("/dummy_types/dummy_id", obj=obj)


def test_post_object(httpserver):
  obj = Object("dummy_type")
  obj.foo = "foo"
  httpserver.expect_request(
      "/api/v3/dummy_types",
      method="POST",
      headers={"X-Apikey": "dummy_api_key", "Content-Type": "application/json"},
      json={"data": obj.to_dict()},
  ).respond_with_json(
      {
          "data": {
              "id": "dummy_id",
              "type": "dummy_type",
              "attributes": {
                  "foo": "foo",
              },
          }
      }
  )
  with new_client(httpserver) as client:
    obj = client.post_object("/dummy_types", obj=obj)
  assert obj.id == "dummy_id"


def test_delete(httpserver):
  httpserver.expect_request(
      "/api/v3/foo", method="DELETE", headers={"X-Apikey": "dummy_api_key"}
  ).respond_with_json({"data": "dummy_data"})
  with new_client(httpserver) as client:
    response = client.delete("/foo")
  assert response.status == 200


def test_iterator(httpserver):
  httpserver.expect_request(
      "/api/v3/dummy_collection/foo",
      method="GET",
      headers={"X-Apikey": "dummy_api_key"},
  ).respond_with_json(
      {
          "data": [{
              "id": "dummy_id_1",
              "type": "dummy_type",
              "attributes": {"order": 0},
          }]
      }
  )
  with new_client(httpserver) as client:
    it = client.iterator("/dummy_collection/foo", limit=10)
    for i, _ in enumerate(it):
      assert 0 == i


def test_download_file(httpserver):
  httpserver.expect_request(
      "/api/v3/files/01020304050607080900a0b0c0d0e0f/download",
      method="GET",
      headers={"X-Apikey": "dummy_api_key"},
  ).respond_with_data("filecontent")

  with new_client(httpserver) as client:
    with io.BytesIO() as f:
      client.download_file("01020304050607080900a0b0c0d0e0f", f)
      f.seek(0)
      assert f.read() == b"filecontent"


def test_download_file_with_error(httpserver):
  httpserver.expect_request(
      "/api/v3/files/01020304050607080900a0b0c0ddead/download",
      method="GET",
      headers={"X-Apikey": "dummy_api_key"},
  ).respond_with_data(
      status=404,
      content_type="application/json",
      response_data=json.dumps(
          {"error": {"code": "NotFoundError", "message": "Resource not found."}}
      ),
  )

  with pytest.raises(APIError) as e_info:
    with new_client(httpserver) as client:
      with io.BytesIO() as f:
        client.download_file("01020304050607080900a0b0c0ddead", f)

  assert e_info.value.args[0] == "NotFoundError"
  assert e_info.value.args[1] == "Resource not found."


def test_download_zip_file(httpserver):
  httpserver.expect_ordered_request(
      "/api/v3/intelligence/zip_files",
      method="POST",
      headers={"X-Apikey": "dummy_api_key"},
      data=json.dumps({"data": {"hashes": ["h1", "h2"], "password": "pass"}}),
  ).respond_with_json(
      {
          "data": {
              "id": "1234",
              "type": "zip_file",
              "attributes": {"status": "starting"},
          }
      }
  )
  httpserver.expect_ordered_request(
      "/api/v3/intelligence/zip_files/1234",
      method="GET",
      headers={"x-apikey": "dummy_api_key"},
  ).respond_with_json(
      {
          "data": {
              "id": "1234",
              "type": "zip_file",
              "attributes": {"status": "creating"},
          }
      }
  )
  httpserver.expect_ordered_request(
      "/api/v3/intelligence/zip_files/1234",
      method="GET",
      headers={"x-apikey": "dummy_api_key"},
  ).respond_with_json(
      {
          "data": {
              "id": "1234",
              "type": "zip_file",
              "attributes": {"status": "finished"},
          }
      }
  )
  httpserver.expect_ordered_request(
      "/api/v3/intelligence/zip_files/1234/download",
      method="GET",
      headers={"x-apikey": "dummy_api_key"},
  ).respond_with_data("filecontent")

  with new_client(httpserver) as client:
    with io.BytesIO() as f:
      client.download_zip_files(["h1", "h2"], f, "pass", 1)
      f.seek(0)
      assert f.read() == b"filecontent"


def test_download_zip_file_error_creating_file(httpserver):
  httpserver.expect_ordered_request(
      "/api/v3/intelligence/zip_files",
      method="POST",
      headers={"X-Apikey": "dummy_api_key"},
      data=json.dumps({"data": {"hashes": ["h1", "h2"], "password": "pass"}}),
  ).respond_with_json(
      {
          "data": {
              "id": "1234",
              "type": "zip_file",
              "attributes": {"status": "starting"},
          }
      }
  )
  httpserver.expect_ordered_request(
      "/api/v3/intelligence/zip_files/1234",
      method="GET",
      headers={"x-apikey": "dummy_api_key"},
  ).respond_with_json(
      {
          "data": {
              "id": "1234",
              "type": "zip_file",
              "attributes": {"status": "creating"},
          }
      }
  )
  httpserver.expect_ordered_request(
      "/api/v3/intelligence/zip_files/1234",
      method="GET",
      headers={"x-apikey": "dummy_api_key"},
  ).respond_with_json(
      {
          "data": {
              "id": "1234",
              "type": "zip_file",
              "attributes": {"status": "timeout"},
          }
      }
  )

  with new_client(httpserver) as client:
    with io.BytesIO() as f:
      with pytest.raises(APIError) as e_info:
        client.download_zip_files(["h1", "h2"], f, "pass", 1)

  assert e_info.value.args[0] == "ServerError"
  assert e_info.value.args[1] == "Error when creating zip file: timeout"


def test_scan_file(httpserver):
  upload_url = (
      "http://" + httpserver.host + ":" + str(httpserver.port) + "/upload"
  )
  httpserver.expect_oneshot_request(
      "/api/v3/files/upload_url",
      method="GET",
      headers={"X-Apikey": "dummy_api_key"},
  ).respond_with_json({"data": upload_url})

  httpserver.expect_oneshot_request(
      "/upload", method="POST", headers={"X-Apikey": "dummy_api_key"}
  ).respond_with_json(
      {
          "data": {
              "id": "dummy_id",
              "type": "analysis",
              "attributes": {
                  "foo": "foo",
              },
          }
      }
  )

  with new_client(httpserver) as client:
    f = io.StringIO("dummy file")
    analysis = client.scan_file(f)
  assert analysis.type == "analysis"


def test_scan_file_valueerror(httpserver):
  """Tests an exception is raised when calling scan_file using invalid args."""
  with new_client(httpserver) as client:
    with pytest.raises(TypeError):
      client.scan_file("/Users/test/path/to/file.txt")


def test_scan_url(httpserver):
  httpserver.expect_request(
      "/api/v3/urls", method="POST", headers={"X-Apikey": "dummy_api_key"}
  ).respond_with_json(
      {
          "data": {
              "id": "dummy_id",
              "type": "analysis",
              "attributes": {
                  "foo": "foo",
              },
          }
      }
  )
  with new_client(httpserver) as client:
    analysis = client.scan_url("https://www.dummy.url")
  assert analysis.type == "analysis"


def test_user_headers(httpserver):
  user_headers = {"foo": "bar"}
  client = Client(
      "dummy_api_key",
      host="http://" + httpserver.host + ":" + str(httpserver.port),
      timeout=500,
      headers=user_headers,
  )
  headers = client._get_session().headers  # pylint: disable=protected-access
  assert "X-Apikey" in headers
  assert "Accept-Encoding" in headers
  assert "User-Agent" in headers
  assert "foo" in headers
```
blob_id: 10faa105589374df194c71e8d09d6eccac196a29
directory_id: 61004e474b7b2ad0071c16766f0f7874f04f9466
path: /examples/bq_benchmarks/tests/test_bucket_util.py
content_id: 7c94de1e0a48d976cc1e4485fc6e0b4cdc8a448b
detected_licenses: [ "Apache-2.0" ]
license_type: permissive
repo_name: GoogleCloudPlatform/professional-services
snapshot_id: eb79751efae765a8c691a745e520f44f51bd715c
revision_id: 0f51121b945bd74c7f667e74e8861fceda87565c
branch_name: refs/heads/main
visit_date: 2023-09-05T02:57:33.328973
revision_date: 2023-08-30T14:40:30
committer_date: 2023-08-30T14:40:30
github_id: 91,730,359
star_events_count: 2,626
fork_events_count: 1,381
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T20:13:42
gha_created_at: 2017-05-18T19:29:27
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,258
extension: py
filename: test_bucket_util.py
content:
```python
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from google.api_core import exceptions
from google.cloud import storage

from bq_benchmarks.generic_benchmark_tools import bucket_util


class TestBucketUtil(object):
    """Tests functionality of load_benchmark_tools.benchmark_result_util.

    Attributes:
        bucket_name(str): Name of the bucket used for testing.
        bucket(google.cloud.storage.bucket.Bucket): Bucket used for testing.
        blob1_name(str): Name of the first blob that will be uploaded to the
            bucket for testing.
        blob2_name(str): Name of the second blob that will be uploaded to the
            bucket for testing.
        test_file_parameters(dict): Dictionary containing each test file
            parameter and its possible values.
    """

    def setup(self):
        """Sets up resources for tests.
        """
        self.bucket_name = 'bq_benchmark_test_bucket'
        gcs_client = storage.Client()
        try:
            gcs_client.get_bucket(self.bucket_name).delete(force=True)
            self.bucket = gcs_client.create_bucket(self.bucket_name)
        except exceptions.NotFound:
            self.bucket = gcs_client.create_bucket(self.bucket_name)

        abs_path = os.path.abspath(os.path.dirname(__file__))
        file1 = os.path.join(
            abs_path, ('test_data/fileType=csv/compression=none/'
                       'numColumns=10/columnTypes=50_STRING_50_NUMERIC/numFiles=1/'
                       'tableSize=10MB/file1.csv'))
        self.blob1_name = file1.split('test_data/')[1]
        blob1 = self.bucket.blob(self.blob1_name)
        blob1.upload_from_filename(file1)

        file2 = os.path.join(abs_path,
                             ('test_data/fileType=json/compression=none/'
                              'numColumns=10/columnTypes=100_STRING/numFiles=1/'
                              'tableSize=10MB/file1.json'))
        self.blob2_name = file2.split('test_data/')[1]
        blob2 = self.bucket.blob(self.blob2_name)
        blob2.upload_from_filename(file2)

        self.test_file_parameters = {
            'fileType': ['csv', 'json'],
            'fileCompressionTypes': {
                'csv': ['none'],
                'json': ['none']
            },
            'numColumns': [10],
            'numFiles': [1, 100, 1000, 10000],
            'targetDataSizes': [.01],
            'stagingDataSizes': ['10MB'],
            'columnTypes': [
                '100_STRING',
                '50_STRING_50_NUMERIC',
            ],
        }

    def test_get_existing_paths(self, project_id):
        """Tests BucketUtil.get_existing_paths().

        Tests BucketUtil's ability to check the existence of each blob
        generated from path combinations of the parameters in a file_params
        dict and to return a set of existing blobs.

        Args:
            project_id(str): ID of the project that holds the test GCS bucket.

        Returns:
            True if test passes, else False.
        """
        if not project_id:
            raise Exception(
                'Test needs project_id to pass. '
                'Add --project_id={your project ID} to test command')

        self.bucket_util = bucket_util.BucketUtil(
            bucket_name=self.bucket_name,
            project_id=project_id,
            file_params=self.test_file_parameters)
        existing_paths = self.bucket_util.get_existing_paths(
            run_federated_query_benchmark=False)
        expected_paths = set([self.blob1_name, self.blob2_name])
        assert existing_paths == expected_paths

    def teardown(self):
        """Tears down resources created in setup().
        """
        self.bucket.delete(force=True)
```
blob_id: d9b2c2074390115c511862978b98b9fdfa8b12ba
directory_id: 64e5f76a15d0178e851d64573196a33043e68164
path: /test/py/ganeti.cli_unittest.py
content_id: fb866e53ad15765e5f3219b333b166f6ab02b9de
detected_licenses: [ "BSD-2-Clause" ]
license_type: permissive
repo_name: ganeti/ganeti
snapshot_id: 759aa20d2d3e15c816fa3ba5019d7d143b2d1294
revision_id: 456ea285a7583183c2c8e5bcffe9006ec8a9d658
branch_name: refs/heads/master
visit_date: 2023-07-24T21:23:49.389657
revision_date: 2023-05-25T15:37:44
committer_date: 2023-05-28T10:17:36
github_id: 25,163,509
star_events_count: 465
fork_events_count: 126
gha_license_id: BSD-2-Clause
gha_event_created_at: 2023-08-18T17:06:33
gha_created_at: 2014-10-13T15:03:51
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 57,465
extension: py
filename: ganeti.cli_unittest.py
content (truncated in this dump):
```python
#!/usr/bin/python3
#

# Copyright (C) 2008, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Script for unittesting the cli module"""

import copy
import testutils
import time
import unittest
import yaml

from io import StringIO

from ganeti import constants
from ganeti import cli
from ganeti import errors
from ganeti import utils
from ganeti import objects
from ganeti import qlang
from ganeti.errors import OpPrereqError, ParameterError


class TestParseTimespec(unittest.TestCase):
  """Testing case for ParseTimespec"""

  def testValidTimes(self):
    """Test valid timespecs"""
    test_data = [
      ("1s", 1),
      ("1", 1),
      ("1m", 60),
      ("1h", 60 * 60),
      ("1d", 60 * 60 * 24),
      ("1w", 60 * 60 * 24 * 7),
      ("4h", 4 * 60 * 60),
      ("61m", 61 * 60),
      ]
    for value, expected_result in test_data:
      self.assertEqual(cli.ParseTimespec(value), expected_result)

  def testInvalidTime(self):
    """Test invalid timespecs"""
    test_data = [
      "1y",
      "",
      "aaa",
      "s",
      ]
    for value in test_data:
      self.assertRaises(OpPrereqError, cli.ParseTimespec, value)


class TestToStream(unittest.TestCase):
  """Test the ToStream functions"""

  def testBasic(self):
    for data in ["foo", "foo %s", "foo %(test)s", "foo %s %s", ""]:
      buf = StringIO()
      cli._ToStream(buf, data)
      self.assertEqual(buf.getvalue(), data + "\n")

  def testParams(self):
    buf = StringIO()
    cli._ToStream(buf, "foo %s", 1)
    self.assertEqual(buf.getvalue(), "foo 1\n")
    buf = StringIO()
    cli._ToStream(buf, "foo %s", (15, 16))
    self.assertEqual(buf.getvalue(), "foo (15, 16)\n")
    buf = StringIO()
    cli._ToStream(buf, "foo %s %s", "a", "b")
    self.assertEqual(buf.getvalue(), "foo a b\n")


class TestGenerateTable(unittest.TestCase):
  HEADERS = dict([("f%s" % i, "Field%s" % i) for i in range(5)])

  FIELDS1 = ["f1", "f2"]
  DATA1 = [
    ["abc", 1234],
    ["foobar", 56],
    ["b", -14],
    ]

  def _test(self, headers, fields, separator, data, numfields, unitfields,
            units, expected):
    table = cli.GenerateTable(headers, fields, separator, data,
                              numfields=numfields, unitfields=unitfields,
                              units=units)
    self.assertEqual(table, expected)

  def testPlain(self):
    exp = [
      "Field1 Field2",
      "abc 1234",
      "foobar 56",
      "b -14",
      ]
    self._test(self.HEADERS, self.FIELDS1, None, self.DATA1,
               None, None, "m", exp)

  def testNoFields(self):
    self._test(self.HEADERS, [], None, [[], []],
               None, None, "m", ["", "", ""])
    self._test(None, [], None, [[], []],
               None, None, "m", ["", ""])

  def testSeparator(self):
    for sep in ["#", ":", ",", "^", "!", "%", "|", "###", "%%", "!!!", "||"]:
      exp = [
        "Field1%sField2" % sep,
        "abc%s1234" % sep,
        "foobar%s56" % sep,
        "b%s-14" % sep,
        ]
      self._test(self.HEADERS, self.FIELDS1, sep, self.DATA1,
                 None, None, "m", exp)

  def testNoHeader(self):
    exp = [
      "abc 1234",
      "foobar 56",
      "b -14",
      ]
    self._test(None, self.FIELDS1, None, self.DATA1,
               None, None, "m", exp)

  def testUnknownField(self):
    headers = {
      "f1": "Field1",
      }
    exp = [
      "Field1 UNKNOWN",
      "abc 1234",
      "foobar 56",
      "b -14",
      ]
    self._test(headers, ["f1", "UNKNOWN"], None, self.DATA1,
               None, None, "m", exp)

  def testNumfields(self):
    fields = ["f1", "f2", "f3"]
    data = [
      ["abc", 1234, 0],
      ["foobar", 56, 3],
      ["b", -14, "-"],
      ]
    exp = [
      "Field1 Field2 Field3",
      "abc 1234 0",
      "foobar 56 3",
      "b -14 -",
      ]
    self._test(self.HEADERS, fields, None, data,
               ["f2", "f3"], None, "m", exp)

  def testUnitfields(self):
    expnosep = [
      "Field1 Field2 Field3",
      "abc 1234 0M",
      "foobar 56 3M",
      "b -14 -",
      ]
    expsep = [
      "Field1:Field2:Field3",
      "abc:1234:0M",
      "foobar:56:3M",
      "b:-14:-",
      ]
    for sep, expected in [(None, expnosep), (":", expsep)]:
      fields = ["f1", "f2", "f3"]
      data = [
        ["abc", 1234, 0],
        ["foobar", 56, 3],
        ["b", -14, "-"],
        ]
      self._test(self.HEADERS, fields, sep, data,
                 ["f2", "f3"], ["f3"], "h", expected)

  def testUnusual(self):
    data = [
      ["%", "xyz"],
      ["%%", "abc"],
      ]
    exp = [
      "Field1 Field2",
      "% xyz",
      "%% abc",
      ]
    self._test(self.HEADERS, ["f1", "f2"], None, data,
               None, None, "m", exp)


class TestFormatQueryResult(unittest.TestCase):
  def test(self):
    fields = [
      objects.QueryFieldDefinition(name="name", title="Name",
                                   kind=constants.QFT_TEXT),
      objects.QueryFieldDefinition(name="size", title="Size",
                                   kind=constants.QFT_NUMBER),
      objects.QueryFieldDefinition(name="act", title="Active",
                                   kind=constants.QFT_BOOL),
      objects.QueryFieldDefinition(name="mem", title="Memory",
                                   kind=constants.QFT_UNIT),
      objects.QueryFieldDefinition(name="other", title="SomeList",
                                   kind=constants.QFT_OTHER),
      ]

    response = objects.QueryResponse(fields=fields, data=[
      [(constants.RS_NORMAL, "nodeA"), (constants.RS_NORMAL, 128),
       (constants.RS_NORMAL, False), (constants.RS_NORMAL, 1468006),
       (constants.RS_NORMAL, [])],
      [(constants.RS_NORMAL, "other"), (constants.RS_NORMAL, 512),
       (constants.RS_NORMAL, True), (constants.RS_NORMAL, 16),
       (constants.RS_NORMAL, [1, 2, 3])],
      [(constants.RS_NORMAL, "xyz"), (constants.RS_NORMAL, 1024),
       (constants.RS_NORMAL, True), (constants.RS_NORMAL, 4096),
       (constants.RS_NORMAL, [{}, {}])],
      ])

    self.assertEqual(cli.FormatQueryResult(response, unit="h", header=True),
      (cli.QR_NORMAL, [
      "Name Size Active Memory SomeList",
      "nodeA 128 N 1.4T []",
      "other 512 Y 16M [1, 2, 3]",
      "xyz 1024 Y 4.0G [{}, {}]",
      ]))

  def testTimestampAndUnit(self):
    fields = [
      objects.QueryFieldDefinition(name="name", title="Name",
                                   kind=constants.QFT_TEXT),
      objects.QueryFieldDefinition(name="size", title="Size",
                                   kind=constants.QFT_UNIT),
      objects.QueryFieldDefinition(name="mtime", title="ModTime",
                                   kind=constants.QFT_TIMESTAMP),
      ]

    response = objects.QueryResponse(fields=fields, data=[
      [(constants.RS_NORMAL, "a"), (constants.RS_NORMAL, 1024),
       (constants.RS_NORMAL, 0)],
      [(constants.RS_NORMAL, "b"), (constants.RS_NORMAL, 144996),
       (constants.RS_NORMAL, 1291746295)],
      ])

    self.assertEqual(cli.FormatQueryResult(response, unit="m", header=True),
      (cli.QR_NORMAL, [
      "Name Size ModTime",
      "a 1024 %s" % utils.FormatTime(0),
      "b 144996 %s" % utils.FormatTime(1291746295),
      ]))

  def testOverride(self):
    fields = [
      objects.QueryFieldDefinition(name="name", title="Name",
                                   kind=constants.QFT_TEXT),
      objects.QueryFieldDefinition(name="cust", title="Custom",
                                   kind=constants.QFT_OTHER),
      objects.QueryFieldDefinition(name="xt", title="XTime",
                                   kind=constants.QFT_TIMESTAMP),
      ]

    response = objects.QueryResponse(fields=fields, data=[
      [(constants.RS_NORMAL, "x"), (constants.RS_NORMAL, ["a", "b", "c"]),
       (constants.RS_NORMAL, 1234)],
      [(constants.RS_NORMAL, "y"), (constants.RS_NORMAL, range(10)),
       (constants.RS_NORMAL, 1291746295)],
      ])

    override = {
      "cust": (utils.CommaJoin, False),
      "xt": (hex, True),
      }

    self.assertEqual(cli.FormatQueryResult(response, unit="h", header=True,
                                           format_override=override),
      (cli.QR_NORMAL, [
      "Name Custom XTime",
      "x a, b, c 0x4d2",
      "y 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 0x4cfe7bf7",
      ]))

  def testSeparator(self):
    fields = [
      objects.QueryFieldDefinition(name="name", title="Name",
                                   kind=constants.QFT_TEXT),
      objects.QueryFieldDefinition(name="count", title="Count",
                                   kind=constants.QFT_NUMBER),
      objects.QueryFieldDefinition(name="desc", title="Description",
                                   kind=constants.QFT_TEXT),
      ]

    response = objects.QueryResponse(fields=fields, data=[
      [(constants.RS_NORMAL, "instance1.example.com"),
       (constants.RS_NORMAL, 21125), (constants.RS_NORMAL, "Hello World!")],
      [(constants.RS_NORMAL, "mail.other.net"),
       (constants.RS_NORMAL, -9000), (constants.RS_NORMAL, "a,b,c")],
      ])

    for sep in [":", "|", "#", "|||", "###", "@@@", "@#@"]:
      for header in [None, "Name%sCount%sDescription" % (sep, sep)]:
        exp = []
        if header:
          exp.append(header)
        exp.extend([
          "instance1.example.com%s21125%sHello World!" % (sep, sep),
          "mail.other.net%s-9000%sa,b,c" % (sep, sep),
          ])
        self.assertEqual(cli.FormatQueryResult(response, separator=sep,
                                               header=bool(header)),
          (cli.QR_NORMAL, exp))

  def testStatusWithUnknown(self):
    fields = [
      objects.QueryFieldDefinition(name="id", title="ID",
                                   kind=constants.QFT_NUMBER),
      objects.QueryFieldDefinition(name="unk", title="unk",
                                   kind=constants.QFT_UNKNOWN),
      objects.QueryFieldDefinition(name="unavail", title="Unavail",
                                   kind=constants.QFT_BOOL),
      objects.QueryFieldDefinition(name="nodata", title="NoData",
                                   kind=constants.QFT_TEXT),
      objects.QueryFieldDefinition(name="offline", title="OffLine",
                                   kind=constants.QFT_TEXT),
      ]

    response = objects.QueryResponse(fields=fields, data=[
      [(constants.RS_NORMAL, 1), (constants.RS_UNKNOWN, None),
       (constants.RS_NORMAL, False), (constants.RS_NORMAL, ""),
       (constants.RS_OFFLINE, None)],
      [(constants.RS_NORMAL, 2), (constants.RS_UNKNOWN, None),
       (constants.RS_NODATA, None), (constants.RS_NORMAL, "x"),
       (constants.RS_OFFLINE, None)],
      [(constants.RS_NORMAL, 3), (constants.RS_UNKNOWN, None),
       (constants.RS_NORMAL, False), (constants.RS_UNAVAIL, None),
       (constants.RS_OFFLINE, None)],
      ])

    self.assertEqual(cli.FormatQueryResult(response, header=True,
                                           separator="|", verbose=True),
      (cli.QR_UNKNOWN, [
      "ID|unk|Unavail|NoData|OffLine",
      "1|(unknown)|N||(offline)",
      "2|(unknown)|(nodata)|x|(offline)",
      "3|(unknown)|N|(unavail)|(offline)",
      ]))
    self.assertEqual(cli.FormatQueryResult(response, header=True,
                                           separator="|", verbose=False),
      (cli.QR_UNKNOWN, [
      "ID|unk|Unavail|NoData|OffLine",
      "1|??|N||*",
      "2|??|?|x|*",
      "3|??|N|-|*",
      ]))

  def testNoData(self):
    fields = [
      objects.QueryFieldDefinition(name="id", title="ID",
                                   kind=constants.QFT_NUMBER),
      objects.QueryFieldDefinition(name="name", title="Name",
                                   kind=constants.QFT_TEXT),
      ]

    response = objects.QueryResponse(fields=fields, data=[])

    self.assertEqual(cli.FormatQueryResult(response, header=True),
                     (cli.QR_NORMAL, ["ID Name"]))

  def testNoDataWithUnknown(self):
    fields = [
      objects.QueryFieldDefinition(name="id", title="ID",
                                   kind=constants.QFT_NUMBER),
      objects.QueryFieldDefinition(name="unk", title="unk",
                                   kind=constants.QFT_UNKNOWN),
      ]

    response = objects.QueryResponse(fields=fields, data=[])

    self.assertEqual(cli.FormatQueryResult(response, header=False),
                     (cli.QR_UNKNOWN, []))

  def testStatus(self):
    fields = [
      objects.QueryFieldDefinition(name="id", title="ID",
                                   kind=constants.QFT_NUMBER),
      objects.QueryFieldDefinition(name="unavail", title="Unavail",
                                   kind=constants.QFT_BOOL),
      objects.QueryFieldDefinition(name="nodata", title="NoData",
                                   kind=constants.QFT_TEXT),
      objects.QueryFieldDefinition(name="offline", title="OffLine",
                                   kind=constants.QFT_TEXT),
      ]

    response = objects.QueryResponse(fields=fields, data=[
      [(constants.RS_NORMAL, 1), (constants.RS_NORMAL, False),
       (constants.RS_NORMAL, ""), (constants.RS_OFFLINE, None)],
      [(constants.RS_NORMAL, 2), (constants.RS_NODATA, None),
       (constants.RS_NORMAL, "x"), (constants.RS_NORMAL, "abc")],
      [(constants.RS_NORMAL, 3), (constants.RS_NORMAL, False),
       (constants.RS_UNAVAIL, None), (constants.RS_OFFLINE, None)],
      ])

    self.assertEqual(cli.FormatQueryResult(response, header=False,
                                           separator="|", verbose=True),
      (cli.QR_INCOMPLETE, [
      "1|N||(offline)",
      "2|(nodata)|x|abc",
      "3|N|(unavail)|(offline)",
      ]))
    self.assertEqual(cli.FormatQueryResult(response, header=False,
                                           separator="|", verbose=False),
      (cli.QR_INCOMPLETE, [
      "1|N||*",
      "2|?|x|abc",
      "3|N|-|*",
      ]))

  def testInvalidFieldType(self):
    fields = [
      objects.QueryFieldDefinition(name="x", title="x",
                                   kind="#some#other#type"),
      ]

    response = objects.QueryResponse(fields=fields, data=[])

    self.assertRaises(NotImplementedError, cli.FormatQueryResult, response)

  def testInvalidFieldStatus(self):
    fields = [
      objects.QueryFieldDefinition(name="x", title="x",
                                   kind=constants.QFT_TEXT),
      ]

    response = objects.QueryResponse(fields=fields, data=[[(-1, None)]])
    self.assertRaises(NotImplementedError, cli.FormatQueryResult, response)

    response = objects.QueryResponse(fields=fields, data=[[(-1, "x")]])
    self.assertRaises(AssertionError, cli.FormatQueryResult, response)

  def testEmptyFieldTitle(self):
    fields = [
      objects.QueryFieldDefinition(name="x", title="",
                                   kind=constants.QFT_TEXT),
      ]

    response = objects.QueryResponse(fields=fields, data=[])
    self.assertRaises(AssertionError, cli.FormatQueryResult, response)


class _MockJobPollCb(cli.JobPollCbBase, cli.JobPollReportCbBase):
  def __init__(self, tc, job_id):
    self.tc = tc
    self.job_id = job_id
    self._wfjcr = []
    self._jobstatus = []
    self._expect_notchanged = False
    self._expect_log = []

  def CheckEmpty(self):
    self.tc.assertFalse(self._wfjcr)
    self.tc.assertFalse(self._jobstatus)
    self.tc.assertFalse(self._expect_notchanged)
    self.tc.assertFalse(self._expect_log)

  def AddWfjcResult(self, *args):
    self._wfjcr.append(args)

  def AddQueryJobsResult(self, *args):
    self._jobstatus.append(args)

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial,
                           timeout=constants.DEFAULT_WFJC_TIMEOUT):
    self.tc.assertEqual(job_id, self.job_id)
    self.tc.assertEqualValues(fields, ["status"])
    self.tc.assertFalse(self._expect_notchanged)
    self.tc.assertFalse(self._expect_log)

    (exp_prev_job_info, exp_prev_log_serial, result) = self._wfjcr.pop(0)
    self.tc.assertEqualValues(prev_job_info, exp_prev_job_info)
    self.tc.assertEqual(prev_log_serial, exp_prev_log_serial)

    if result == constants.JOB_NOTCHANGED:
      self._expect_notchanged = True
    elif result:
      (_, logmsgs) = result
      if logmsgs:
        self._expect_log.extend(logmsgs)

    return result

  def QueryJobs(self, job_ids, fields):
    self.tc.assertEqual(job_ids,
```
                         [self.job_id])
    self.tc.assertEqualValues(fields, ["status", "opstatus", "opresult"])
    self.tc.assertFalse(self._expect_notchanged)
    self.tc.assertFalse(self._expect_log)
    result = self._jobstatus.pop(0)
    self.tc.assertEqual(len(fields), len(result))
    return [result]

  def CancelJob(self, job_id):
    self.tc.assertEqual(job_id, self.job_id)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    self.tc.assertEqual(job_id, self.job_id)
    self.tc.assertEqualValues((serial, timestamp, log_type, log_msg),
                              self._expect_log.pop(0))

  def ReportNotChanged(self, job_id, status):
    self.tc.assertEqual(job_id, self.job_id)
    self.tc.assertTrue(self._expect_notchanged)
    self._expect_notchanged = False


class TestGenericPollJob(testutils.GanetiTestCase):
  def testSuccessWithLog(self):
    job_id = 29609
    cbs = _MockJobPollCb(self, job_id)

    cbs.AddWfjcResult(None, None, constants.JOB_NOTCHANGED)

    cbs.AddWfjcResult(None, None,
                      ((constants.JOB_STATUS_QUEUED, ), None))

    cbs.AddWfjcResult((constants.JOB_STATUS_QUEUED, ), None,
                      constants.JOB_NOTCHANGED)

    cbs.AddWfjcResult((constants.JOB_STATUS_QUEUED, ), None,
                      ((constants.JOB_STATUS_RUNNING, ),
                       [(1, utils.SplitTime(1273491611.0),
                         constants.ELOG_MESSAGE, "Step 1"),
                        (2, utils.SplitTime(1273491615.9),
                         constants.ELOG_MESSAGE, "Step 2"),
                        (3, utils.SplitTime(1273491625.02),
                         constants.ELOG_MESSAGE, "Step 3"),
                        (4, utils.SplitTime(1273491635.05),
                         constants.ELOG_MESSAGE, "Step 4"),
                        (37, utils.SplitTime(1273491645.0),
                         constants.ELOG_MESSAGE, "Step 5"),
                        (203, utils.SplitTime(1273491655.0),
                         constants.ELOG_MESSAGE, "Step 6")]))

    cbs.AddWfjcResult((constants.JOB_STATUS_RUNNING, ), 203,
                      ((constants.JOB_STATUS_RUNNING, ),
                       [(300, utils.SplitTime(1273491711.01),
                         constants.ELOG_MESSAGE, "Step X"),
                        (302, utils.SplitTime(1273491815.8),
                         constants.ELOG_MESSAGE, "Step Y"),
                        (303, utils.SplitTime(1273491925.32),
                         constants.ELOG_MESSAGE, "Step Z")]))

    cbs.AddWfjcResult((constants.JOB_STATUS_RUNNING, ), 303,
                      ((constants.JOB_STATUS_SUCCESS, ), None))

    cbs.AddQueryJobsResult(constants.JOB_STATUS_SUCCESS,
                           [constants.OP_STATUS_SUCCESS,
                            constants.OP_STATUS_SUCCESS],
                           ["Hello World", "Foo man bar"])

    self.assertEqual(["Hello World", "Foo man bar"],
                     cli.GenericPollJob(job_id, cbs, cbs))
    cbs.CheckEmpty()

  def testJobLost(self):
    job_id = 13746
    cbs = _MockJobPollCb(self, job_id)

    cbs.AddWfjcResult(None, None, constants.JOB_NOTCHANGED)
    cbs.AddWfjcResult(None, None, None)

    self.assertRaises(errors.JobLost, cli.GenericPollJob, job_id, cbs, cbs)
    cbs.CheckEmpty()

  def testError(self):
    job_id = 31088
    cbs = _MockJobPollCb(self, job_id)

    cbs.AddWfjcResult(None, None, constants.JOB_NOTCHANGED)
    cbs.AddWfjcResult(None, None, ((constants.JOB_STATUS_ERROR, ), None))

    cbs.AddQueryJobsResult(constants.JOB_STATUS_ERROR,
                           [constants.OP_STATUS_SUCCESS,
                            constants.OP_STATUS_ERROR],
                           ["Hello World", "Error code 123"])

    self.assertRaises(errors.OpExecError, cli.GenericPollJob,
                      job_id, cbs, cbs)
    cbs.CheckEmpty()

  def testError2(self):
    job_id = 22235
    cbs = _MockJobPollCb(self, job_id)

    cbs.AddWfjcResult(None, None, ((constants.JOB_STATUS_ERROR, ), None))
    encexc = errors.EncodeException(errors.LockError("problem"))
    cbs.AddQueryJobsResult(constants.JOB_STATUS_ERROR,
                           [constants.OP_STATUS_ERROR], [encexc])

    self.assertRaises(errors.LockError, cli.GenericPollJob,
                      job_id, cbs, cbs)
    cbs.CheckEmpty()

  def testWeirdError(self):
    job_id = 28847
    cbs = _MockJobPollCb(self, job_id)

    cbs.AddWfjcResult(None, None, ((constants.JOB_STATUS_ERROR, ), None))
    cbs.AddQueryJobsResult(constants.JOB_STATUS_ERROR,
                           [constants.OP_STATUS_RUNNING,
                            constants.OP_STATUS_RUNNING],
[None, None]) self.assertRaises(errors.OpExecError, cli.GenericPollJob, job_id, cbs, cbs) cbs.CheckEmpty() def testCancel(self): job_id = 4275 cbs = _MockJobPollCb(self, job_id) cbs.AddWfjcResult(None, None, constants.JOB_NOTCHANGED) cbs.AddWfjcResult(None, None, ((constants.JOB_STATUS_CANCELING, ), None)) cbs.AddQueryJobsResult(constants.JOB_STATUS_CANCELING, [constants.OP_STATUS_CANCELING, constants.OP_STATUS_CANCELING], [None, None]) self.assertRaises(errors.JobCanceled, cli.GenericPollJob, job_id, cbs, cbs) cbs.CheckEmpty() def testNegativeUpdateFreqParameter(self): job_id = 12345 cbs = _MockJobPollCb(self, job_id) self.assertRaises(errors.ParameterError, cli.GenericPollJob, job_id, cbs, cbs, update_freq=-30) def testZeroUpdateFreqParameter(self): job_id = 12345 cbs = _MockJobPollCb(self, job_id) self.assertRaises(errors.ParameterError, cli.GenericPollJob, job_id, cbs, cbs, update_freq=0) def testShouldCancel(self): job_id = 12345 cbs = _MockJobPollCb(self, job_id) cbs.AddWfjcResult(None, None, constants.JOB_NOTCHANGED) self.assertRaises(errors.JobCanceled, cli.GenericPollJob, job_id, cbs, cbs, cancel_fn=(lambda: True)) def testIgnoreCancel(self): job_id = 12345 cbs = _MockJobPollCb(self, job_id) cbs.AddWfjcResult(None, None, ((constants.JOB_STATUS_SUCCESS, ), None)) cbs.AddQueryJobsResult(constants.JOB_STATUS_SUCCESS, [constants.OP_STATUS_SUCCESS, constants.OP_STATUS_SUCCESS], ["Hello World", "Foo man bar"]) self.assertEqual(["Hello World", "Foo man bar"], cli.GenericPollJob( job_id, cbs, cbs, cancel_fn=(lambda: False))) cbs.CheckEmpty() class TestFormatLogMessage(unittest.TestCase): def test(self): self.assertEqual(cli.FormatLogMessage(constants.ELOG_MESSAGE, "Hello World"), "Hello World") self.assertRaises(TypeError, cli.FormatLogMessage, constants.ELOG_MESSAGE, [1, 2, 3]) self.assertTrue(cli.FormatLogMessage("some other type", (1, 2, 3))) class TestParseFields(unittest.TestCase): def test(self): self.assertEqual(cli.ParseFields(None, []), []) self.assertEqual(cli.ParseFields("name,foo,hello", []), ["name", "foo", "hello"]) self.assertEqual(cli.ParseFields(None, ["def", "ault", "fields", "here"]), ["def", "ault", "fields", "here"]) self.assertEqual(cli.ParseFields("name,foo", ["def", "ault"]), ["name", "foo"]) self.assertEqual(cli.ParseFields("+name,foo", ["def", "ault"]), ["def", "ault", "name", "foo"]) class TestParseNicOption(unittest.TestCase): def test(self): self.assertEqual(cli.ParseNicOption([("0", { "link": "eth0", })]), [{ "link": "eth0", }]) self.assertEqual(cli.ParseNicOption([("5", { "ip": "192.0.2.7", })]), [{}, {}, {}, {}, {}, { "ip": "192.0.2.7", }]) def testErrors(self): for i in [None, "", "abc", "zero", "Hello World", "\0", []]: self.assertRaises(errors.OpPrereqError, cli.ParseNicOption, [(i, { "link": "eth0", })]) self.assertRaises(errors.OpPrereqError, cli.ParseNicOption, [("0", i)]) self.assertRaises(errors.TypeEnforcementError, cli.ParseNicOption, [(0, { True: False, })]) self.assertRaises(errors.TypeEnforcementError, cli.ParseNicOption, [(3, { "mode": [], })]) class TestFormatResultError(unittest.TestCase): def testNormal(self): for verbose in [False, True]: self.assertRaises(AssertionError, cli.FormatResultError, constants.RS_NORMAL, verbose) def testUnknown(self): for verbose in [False, True]: self.assertRaises(NotImplementedError, cli.FormatResultError, "#some!other!status#", verbose) def test(self): for status in constants.RS_ALL: if status == constants.RS_NORMAL: continue self.assertNotEqual(cli.FormatResultError(status, False), 
cli.FormatResultError(status, True)) result = cli.FormatResultError(status, True) self.assertTrue(result.startswith("(")) self.assertTrue(result.endswith(")")) class TestGetOnlineNodes(unittest.TestCase): class _FakeClient: def __init__(self): self._query = [] def AddQueryResult(self, *args): self._query.append(args) def CountPending(self): return len(self._query) def Query(self, res, fields, qfilter): if res != constants.QR_NODE: raise Exception("Querying wrong resource") (exp_fields, check_filter, result) = self._query.pop(0) if exp_fields != fields: raise Exception("Expected fields %s, got %s" % (exp_fields, fields)) if not (qfilter is None or check_filter(qfilter)): raise Exception("Filter doesn't match expectations") return objects.QueryResponse(fields=None, data=result) def testEmpty(self): cl = self._FakeClient() cl.AddQueryResult(["name", "offline", "sip"], None, []) self.assertEqual(cli.GetOnlineNodes(None, cl=cl), []) self.assertEqual(cl.CountPending(), 0) def testNoSpecialFilter(self): cl = self._FakeClient() cl.AddQueryResult(["name", "offline", "sip"], None, [ [(constants.RS_NORMAL, "master.example.com"), (constants.RS_NORMAL, False), (constants.RS_NORMAL, "192.0.2.1")], [(constants.RS_NORMAL, "node2.example.com"), (constants.RS_NORMAL, False), (constants.RS_NORMAL, "192.0.2.2")], ]) self.assertEqual(cli.GetOnlineNodes(None, cl=cl), ["master.example.com", "node2.example.com"]) self.assertEqual(cl.CountPending(), 0) def testNoMaster(self): cl = self._FakeClient() def _CheckFilter(qfilter): self.assertEqual(qfilter, [qlang.OP_NOT, [qlang.OP_TRUE, "master"]]) return True cl.AddQueryResult(["name", "offline", "sip"], _CheckFilter, [ [(constants.RS_NORMAL, "node2.example.com"), (constants.RS_NORMAL, False), (constants.RS_NORMAL, "192.0.2.2")], ]) self.assertEqual(cli.GetOnlineNodes(None, cl=cl, filter_master=True), ["node2.example.com"]) self.assertEqual(cl.CountPending(), 0) def testSecondaryIpAddress(self): cl = self._FakeClient() cl.AddQueryResult(["name", "offline", "sip"], None, [ [(constants.RS_NORMAL, "master.example.com"), (constants.RS_NORMAL, False), (constants.RS_NORMAL, "192.0.2.1")], [(constants.RS_NORMAL, "node2.example.com"), (constants.RS_NORMAL, False), (constants.RS_NORMAL, "192.0.2.2")], ]) self.assertEqual(cli.GetOnlineNodes(None, cl=cl, secondary_ips=True), ["192.0.2.1", "192.0.2.2"]) self.assertEqual(cl.CountPending(), 0) def testNoMasterFilterNodeName(self): cl = self._FakeClient() def _CheckFilter(qfilter): self.assertEqual(qfilter, [qlang.OP_AND, [qlang.OP_OR] + [[qlang.OP_EQUAL, "name", name] for name in ["node2", "node3"]], [qlang.OP_NOT, [qlang.OP_TRUE, "master"]]]) return True cl.AddQueryResult(["name", "offline", "sip"], _CheckFilter, [ [(constants.RS_NORMAL, "node2.example.com"), (constants.RS_NORMAL, False), (constants.RS_NORMAL, "192.0.2.12")], [(constants.RS_NORMAL, "node3.example.com"), (constants.RS_NORMAL, False), (constants.RS_NORMAL, "192.0.2.13")], ]) self.assertEqual(cli.GetOnlineNodes(["node2", "node3"], cl=cl, secondary_ips=True, filter_master=True), ["192.0.2.12", "192.0.2.13"]) self.assertEqual(cl.CountPending(), 0) def testOfflineNodes(self): cl = self._FakeClient() cl.AddQueryResult(["name", "offline", "sip"], None, [ [(constants.RS_NORMAL, "master.example.com"), (constants.RS_NORMAL, False), (constants.RS_NORMAL, "192.0.2.1")], [(constants.RS_NORMAL, "node2.example.com"), (constants.RS_NORMAL, True), (constants.RS_NORMAL, "192.0.2.2")], [(constants.RS_NORMAL, "node3.example.com"), (constants.RS_NORMAL, True), (constants.RS_NORMAL, 
"192.0.2.3")], ]) self.assertEqual(cli.GetOnlineNodes(None, cl=cl, nowarn=True), ["master.example.com"]) self.assertEqual(cl.CountPending(), 0) def testNodeGroup(self): cl = self._FakeClient() def _CheckFilter(qfilter): self.assertEqual(qfilter, [qlang.OP_OR, [qlang.OP_EQUAL, "group", "foobar"], [qlang.OP_EQUAL, "group.uuid", "foobar"]]) return True cl.AddQueryResult(["name", "offline", "sip"], _CheckFilter, [ [(constants.RS_NORMAL, "master.example.com"), (constants.RS_NORMAL, False), (constants.RS_NORMAL, "192.0.2.1")], [(constants.RS_NORMAL, "node3.example.com"), (constants.RS_NORMAL, False), (constants.RS_NORMAL, "192.0.2.3")], ]) self.assertEqual(cli.GetOnlineNodes(None, cl=cl, nodegroup="foobar"), ["master.example.com", "node3.example.com"]) self.assertEqual(cl.CountPending(), 0) class TestFormatTimestamp(unittest.TestCase): def testGood(self): self.assertEqual(cli.FormatTimestamp((0, 1)), time.strftime("%F %T", time.localtime(0)) + ".000001") self.assertEqual(cli.FormatTimestamp((1332944009, 17376)), (time.strftime("%F %T", time.localtime(1332944009)) + ".017376")) def testWrong(self): for i in [0, [], {}, "", [1]]: self.assertEqual(cli.FormatTimestamp(i), "?") class TestFormatUsage(unittest.TestCase): def test(self): binary = "gnt-unittest" commands = { "cmdA": (NotImplemented, NotImplemented, NotImplemented, NotImplemented, "description of A"), "bbb": (NotImplemented, NotImplemented, NotImplemented, NotImplemented, "Hello World," * 10), "longname": (NotImplemented, NotImplemented, NotImplemented, NotImplemented, "Another description"), } self.assertEqual(list(cli._FormatUsage(binary, commands)), [ "Usage: gnt-unittest {command} [options...] [argument...]", "gnt-unittest <command> --help to see details, or man gnt-unittest", "", "Commands:", (" bbb - Hello World,Hello World,Hello World,Hello World,Hello" " World,Hello"), " World,Hello World,Hello World,Hello World,Hello World,", " cmdA - description of A", " longname - Another description", "", ]) class TestParseArgs(unittest.TestCase): def testNoArguments(self): for argv in [[], ["gnt-unittest"]]: try: cli._ParseArgs("gnt-unittest", argv, {}, {}, set()) except cli._ShowUsage as err: self.assertTrue(err.exit_error) else: self.fail("Did not raise exception") def testVersion(self): for argv in [["test", "--version"], ["test", "--version", "somethingelse"]]: try: cli._ParseArgs("test", argv, {}, {}, set()) except cli._ShowVersion: pass else: self.fail("Did not raise exception") def testHelp(self): for argv in [["test", "--help"], ["test", "--help", "somethingelse"]]: try: cli._ParseArgs("test", argv, {}, {}, set()) except cli._ShowUsage as err: self.assertFalse(err.exit_error) else: self.fail("Did not raise exception") def testUnknownCommandOrAlias(self): for argv in [["test", "list"], ["test", "somethingelse", "--help"]]: try: cli._ParseArgs("test", argv, {}, {}, set()) except cli._ShowUsage as err: self.assertTrue(err.exit_error) else: self.fail("Did not raise exception") def testInvalidAliasList(self): cmd = { "list": NotImplemented, "foo": NotImplemented, } aliases = { "list": NotImplemented, "foo": NotImplemented, } assert sorted(cmd.keys()) == sorted(aliases.keys()) self.assertRaises(AssertionError, cli._ParseArgs, "test", ["test", "list"], cmd, aliases, set()) def testAliasForNonExistantCommand(self): cmd = {} aliases = { "list": NotImplemented, } self.assertRaises(errors.ProgrammerError, cli._ParseArgs, "test", ["test", "list"], cmd, aliases, set()) class TestQftNames(unittest.TestCase): def testComplete(self): 
self.assertEqual(frozenset(cli._QFT_NAMES), constants.QFT_ALL) def testUnique(self): lcnames = [s.lower() for s in cli._QFT_NAMES.values()] self.assertFalse(utils.FindDuplicates(lcnames)) def testUppercase(self): for name in cli._QFT_NAMES.values(): self.assertEqual(name[0], name[0].upper()) class TestFieldDescValues(unittest.TestCase): def testKnownKind(self): fdef = objects.QueryFieldDefinition(name="aname", title="Atitle", kind=constants.QFT_TEXT, doc="aaa doc aaa") self.assertEqual(cli._FieldDescValues(fdef), ["aname", "Text", "Atitle", "aaa doc aaa"]) def testUnknownKind(self): kind = "#foo#" self.assertFalse(kind in constants.QFT_ALL) self.assertFalse(kind in cli._QFT_NAMES) fdef = objects.QueryFieldDefinition(name="zname", title="Ztitle", kind=kind, doc="zzz doc zzz") self.assertEqual(cli._FieldDescValues(fdef), ["zname", kind, "Ztitle", "zzz doc zzz"]) class TestSerializeGenericInfo(unittest.TestCase): """Test case for cli._SerializeGenericInfo""" def _RunTest(self, data, expected): buf = StringIO() cli._SerializeGenericInfo(buf, data, 0) self.assertEqual(buf.getvalue(), expected) def testSimple(self): test_samples = [ ("abc", "abc\n"), ([], "\n"), ({}, "\n"), (["1", "2", "3"], "- 1\n- 2\n- 3\n"), ([("z", "26")], "z: 26\n"), ({"z": "26"}, "z: 26\n"), ([("z", "26"), ("a", "1")], "z: 26\na: 1\n"), ({"z": "26", "a": "1"}, "a: 1\nz: 26\n"), ] for (data, expected) in test_samples: self._RunTest(data, expected) def testLists(self): adict = { "aa": "11", "bb": "22", "cc": "33", } adict_exp = ("- aa: 11\n" " bb: 22\n" " cc: 33\n") anobj = [ ("zz", "11"), ("ww", "33"), ("xx", "22"), ] anobj_exp = ("- zz: 11\n" " ww: 33\n" " xx: 22\n") alist = ["aa", "cc", "bb"] alist_exp = ("- - aa\n" " - cc\n" " - bb\n") test_samples = [ (adict, adict_exp), (anobj, anobj_exp), (alist, alist_exp), ] for (base_data, base_expected) in test_samples: for k in range(1, 4): data = k * [base_data] expected = k * base_expected self._RunTest(data, expected) def testDictionaries(self): data = [ ("aaa", ["x", "y"]), ("bbb", { "w": "1", "z": "2", }), ("ccc", [ ("xyz", "123"), ("efg", "456"), ]), ] expected = ("aaa: \n" " - x\n" " - y\n" "bbb: \n" " w: 1\n" " z: 2\n" "ccc: \n" " xyz: 123\n" " efg: 456\n") self._RunTest(data, expected) self._RunTest(dict(data), expected) class TestFormatPolicyInfo(unittest.TestCase): """Test case for cli.FormatPolicyInfo. These tests rely on cli._SerializeGenericInfo (tested elsewhere). 
""" def setUp(self): # Policies are big, and we want to see the difference in case of an error self.maxDiff = None def _RenameDictItem(self, parsed, old, new): self.assertTrue(old in parsed) self.assertTrue(new not in parsed) parsed[new] = parsed[old] del parsed[old] def _TranslateParsedNames(self, parsed): for (pretty, raw) in [ ("bounds specs", constants.ISPECS_MINMAX), ("allowed disk templates", constants.IPOLICY_DTS) ]: self._RenameDictItem(parsed, pretty, raw) for minmax in parsed[constants.ISPECS_MINMAX]: for key in set(minmax.keys()): keyparts = key.split("/", 1) if len(keyparts) > 1: self._RenameDictItem(minmax, key, keyparts[0]) self.assertTrue(constants.IPOLICY_DTS in parsed) parsed[constants.IPOLICY_DTS] = yaml.load("[%s]" % parsed[constants.IPOLICY_DTS], Loader=yaml.SafeLoader) @staticmethod def _PrintAndParsePolicy(custom, effective, iscluster): formatted = cli.FormatPolicyInfo(custom, effective, iscluster) buf = StringIO() cli._SerializeGenericInfo(buf, formatted, 0) return yaml.load(buf.getvalue(), Loader=yaml.SafeLoader) def _PrintAndCheckParsed(self, policy): parsed = self._PrintAndParsePolicy(policy, NotImplemented, True) self._TranslateParsedNames(parsed) self.assertEqual(parsed, policy) def _CompareClusterGroupItems(self, cluster, group, skip=None): if isinstance(group, dict): self.assertTrue(isinstance(cluster, dict)) if skip is None: skip = frozenset() self.assertEqual(frozenset(cluster).difference(skip), frozenset(group)) for key in group: self._CompareClusterGroupItems(cluster[key], group[key]) elif isinstance(group, list): self.assertTrue(isinstance(cluster, list)) self.assertEqual(len(cluster), len(group)) for (cval, gval) in zip(cluster, group): self._CompareClusterGroupItems(cval, gval) else: self.assertTrue(isinstance(group, str)) self.assertEqual("default (%s)" % cluster, group) def _TestClusterVsGroup(self, policy): cluster = self._PrintAndParsePolicy(policy, NotImplemented, True) group = self._PrintAndParsePolicy({}, policy, False) self._CompareClusterGroupItems(cluster, group, ["std"]) def testWithDefaults(self): self._PrintAndCheckParsed(constants.IPOLICY_DEFAULTS) self._TestClusterVsGroup(constants.IPOLICY_DEFAULTS) class TestCreateIPolicyFromOpts(unittest.TestCase): """Test case for cli.CreateIPolicyFromOpts.""" def setUp(self): # Policies are big, and we want to see the difference in case of an error self.maxDiff = None def _RecursiveCheckMergedDicts(self, default_pol, diff_pol, merged_pol, merge_minmax=False): self.assertTrue(type(default_pol) is dict) self.assertTrue(type(diff_pol) is dict) self.assertTrue(type(merged_pol) is dict) self.assertEqual(frozenset(default_pol), frozenset(merged_pol)) for (key, val) in merged_pol.items(): if key in diff_pol: if type(val) is dict: self._RecursiveCheckMergedDicts(default_pol[key], diff_pol[key], val) elif (merge_minmax and key == "minmax" and type(val) is list and len(val) == 1): self.assertEqual(len(default_pol[key]), 1) self.assertEqual(len(diff_pol[key]), 1) self._RecursiveCheckMergedDicts(default_pol[key][0], diff_pol[key][0], val[0]) else: self.assertEqual(val, diff_pol[key]) else: self.assertEqual(val, default_pol[key]) def testClusterPolicy(self): pol0 = cli.CreateIPolicyFromOpts( ispecs_mem_size={}, ispecs_cpu_count={}, ispecs_disk_count={}, ispecs_disk_size={}, ispecs_nic_count={}, ipolicy_disk_templates=None, ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=None, fill_all=True ) self.assertEqual(pol0, constants.IPOLICY_DEFAULTS) exp_pol1 = { constants.ISPECS_MINMAX: [ { constants.ISPECS_MIN: { 
constants.ISPEC_CPU_COUNT: 2, constants.ISPEC_DISK_COUNT: 1, }, constants.ISPECS_MAX: { constants.ISPEC_MEM_SIZE: 12*1024, constants.ISPEC_DISK_COUNT: 2, }, }, ], constants.ISPECS_STD: { constants.ISPEC_CPU_COUNT: 2, constants.ISPEC_DISK_COUNT: 2, }, constants.IPOLICY_VCPU_RATIO: 3.1, } pol1 = cli.CreateIPolicyFromOpts( ispecs_mem_size={"max": "12g"}, ispecs_cpu_count={"min": 2, "std": 2}, ispecs_disk_count={"min": 1, "max": 2, "std": 2}, ispecs_disk_size={}, ispecs_nic_count={}, ipolicy_disk_templates=None, ipolicy_vcpu_ratio=3.1, ipolicy_spindle_ratio=None, fill_all=True ) self._RecursiveCheckMergedDicts(constants.IPOLICY_DEFAULTS, exp_pol1, pol1, merge_minmax=True) exp_pol2 = { constants.ISPECS_MINMAX: [ { constants.ISPECS_MIN: { constants.ISPEC_DISK_SIZE: 512, constants.ISPEC_NIC_COUNT: 2, }, constants.ISPECS_MAX: { constants.ISPEC_NIC_COUNT: 3, }, }, ], constants.ISPECS_STD: { constants.ISPEC_CPU_COUNT: 2, constants.ISPEC_NIC_COUNT: 3, }, constants.IPOLICY_SPINDLE_RATIO: 1.3, constants.IPOLICY_DTS: ["templates"], } pol2 = cli.CreateIPolicyFromOpts( ispecs_mem_size={}, ispecs_cpu_count={"std": 2}, ispecs_disk_count={}, ispecs_disk_size={"min": "0.5g"}, ispecs_nic_count={"min": 2, "max": 3, "std": 3}, ipolicy_disk_templates=["templates"], ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=1.3, fill_all=True ) self._RecursiveCheckMergedDicts(constants.IPOLICY_DEFAULTS, exp_pol2, pol2, merge_minmax=True) for fill_all in [False, True]: exp_pol3 = { constants.ISPECS_STD: { constants.ISPEC_CPU_COUNT: 2, constants.ISPEC_NIC_COUNT: 3, }, } pol3 = cli.CreateIPolicyFromOpts( std_ispecs={ constants.ISPEC_CPU_COUNT: "2", constants.ISPEC_NIC_COUNT: "3", }, ipolicy_disk_templates=None, ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=None, fill_all=fill_all ) if fill_all: self._RecursiveCheckMergedDicts(constants.IPOLICY_DEFAULTS, exp_pol3, pol3, merge_minmax=True) else: self.assertEqual(pol3, exp_pol3) def testPartialPolicy(self): exp_pol0 = objects.MakeEmptyIPolicy() pol0 = cli.CreateIPolicyFromOpts( minmax_ispecs=None, std_ispecs=None, ipolicy_disk_templates=None, ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=None, fill_all=False ) self.assertEqual(pol0, exp_pol0) exp_pol1 = { constants.IPOLICY_VCPU_RATIO: 3.1, } pol1 = cli.CreateIPolicyFromOpts( minmax_ispecs=None, std_ispecs=None, ipolicy_disk_templates=None, ipolicy_vcpu_ratio=3.1, ipolicy_spindle_ratio=None, fill_all=False ) self.assertEqual(pol1, exp_pol1) exp_pol2 = { constants.IPOLICY_SPINDLE_RATIO: 1.3, constants.IPOLICY_DTS: ["templates"], } pol2 = cli.CreateIPolicyFromOpts( minmax_ispecs=None, std_ispecs=None, ipolicy_disk_templates=["templates"], ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=1.3, fill_all=False ) self.assertEqual(pol2, exp_pol2) def _TestInvalidISpecs(self, minmax_ispecs, std_ispecs, fail=True): for fill_all in [False, True]: if fail: self.assertRaises((errors.OpPrereqError, errors.UnitParseError, errors.TypeEnforcementError), cli.CreateIPolicyFromOpts, minmax_ispecs=minmax_ispecs, std_ispecs=std_ispecs, fill_all=fill_all) else: cli.CreateIPolicyFromOpts(minmax_ispecs=minmax_ispecs, std_ispecs=std_ispecs, fill_all=fill_all) def testInvalidPolicies(self): self.assertRaises(AssertionError, cli.CreateIPolicyFromOpts, std_ispecs={constants.ISPEC_MEM_SIZE: 1024}, ipolicy_disk_templates=None, ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=None, group_ipolicy=True) self.assertRaises(errors.OpPrereqError, cli.CreateIPolicyFromOpts, ispecs_mem_size={"wrong": "x"}, ispecs_cpu_count={}, ispecs_disk_count={}, ispecs_disk_size={}, 
ispecs_nic_count={}, ipolicy_disk_templates=None, ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=None, fill_all=True) self.assertRaises(errors.TypeEnforcementError, cli.CreateIPolicyFromOpts, ispecs_mem_size={}, ispecs_cpu_count={"min": "default"}, ispecs_disk_count={}, ispecs_disk_size={}, ispecs_nic_count={}, ipolicy_disk_templates=None, ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=None, fill_all=True) good_mmspecs = [ constants.ISPECS_MINMAX_DEFAULTS, constants.ISPECS_MINMAX_DEFAULTS, ] self._TestInvalidISpecs(good_mmspecs, None, fail=False) broken_mmspecs = copy.deepcopy(good_mmspecs) for minmaxpair in broken_mmspecs: for key in constants.ISPECS_MINMAX_KEYS: for par in constants.ISPECS_PARAMETERS: old = minmaxpair[key][par] del minmaxpair[key][par] self._TestInvalidISpecs(broken_mmspecs, None) minmaxpair[key][par] = "invalid" self._TestInvalidISpecs(broken_mmspecs, None) minmaxpair[key][par] = old minmaxpair[key]["invalid_key"] = None self._TestInvalidISpecs(broken_mmspecs, None) del minmaxpair[key]["invalid_key"] minmaxpair["invalid_key"] = None self._TestInvalidISpecs(broken_mmspecs, None) del minmaxpair["invalid_key"] assert broken_mmspecs == good_mmspecs good_stdspecs = constants.IPOLICY_DEFAULTS[constants.ISPECS_STD] self._TestInvalidISpecs(None, good_stdspecs, fail=False) broken_stdspecs = copy.deepcopy(good_stdspecs) for par in constants.ISPECS_PARAMETERS: old = broken_stdspecs[par] broken_stdspecs[par] = "invalid" self._TestInvalidISpecs(None, broken_stdspecs) broken_stdspecs[par] = old broken_stdspecs["invalid_key"] = None self._TestInvalidISpecs(None, broken_stdspecs) del broken_stdspecs["invalid_key"] assert broken_stdspecs == good_stdspecs def testAllowedValues(self): allowedv = "blah" exp_pol1 = { constants.ISPECS_MINMAX: allowedv, constants.IPOLICY_DTS: allowedv, constants.IPOLICY_VCPU_RATIO: allowedv, constants.IPOLICY_SPINDLE_RATIO: allowedv, } pol1 = cli.CreateIPolicyFromOpts(minmax_ispecs=[{allowedv: {}}], std_ispecs=None, ipolicy_disk_templates=allowedv, ipolicy_vcpu_ratio=allowedv, ipolicy_spindle_ratio=allowedv, allowed_values=[allowedv]) self.assertEqual(pol1, exp_pol1) @staticmethod def _ConvertSpecToStrings(spec): ret = {} for (par, val) in spec.items(): ret[par] = str(val) return ret def _CheckNewStyleSpecsCall(self, exp_ipolicy, minmax_ispecs, std_ispecs, group_ipolicy, fill_all): ipolicy = cli.CreateIPolicyFromOpts(minmax_ispecs=minmax_ispecs, std_ispecs=std_ispecs, group_ipolicy=group_ipolicy, fill_all=fill_all) self.assertEqual(ipolicy, exp_ipolicy) def _TestFullISpecsInner(self, skel_exp_ipol, exp_minmax, exp_std, group_ipolicy, fill_all): exp_ipol = skel_exp_ipol.copy() if exp_minmax is not None: minmax_ispecs = [] for exp_mm_pair in exp_minmax: mmpair = {} for (key, spec) in exp_mm_pair.items(): mmpair[key] = self._ConvertSpecToStrings(spec) minmax_ispecs.append(mmpair) exp_ipol[constants.ISPECS_MINMAX] = exp_minmax else: minmax_ispecs = None if exp_std is not None: std_ispecs = self._ConvertSpecToStrings(exp_std) exp_ipol[constants.ISPECS_STD] = exp_std else: std_ispecs = None self._CheckNewStyleSpecsCall(exp_ipol, minmax_ispecs, std_ispecs, group_ipolicy, fill_all) if minmax_ispecs: for mmpair in minmax_ispecs: for (key, spec) in mmpair.items(): for par in [constants.ISPEC_MEM_SIZE, constants.ISPEC_DISK_SIZE]: if par in spec: spec[par] += "m" self._CheckNewStyleSpecsCall(exp_ipol, minmax_ispecs, std_ispecs, group_ipolicy, fill_all) if std_ispecs: for par in [constants.ISPEC_MEM_SIZE, constants.ISPEC_DISK_SIZE]: if par in std_ispecs: std_ispecs[par] 
+= "m" self._CheckNewStyleSpecsCall(exp_ipol, minmax_ispecs, std_ispecs, group_ipolicy, fill_all) def testFullISpecs(self): exp_minmax1 = [ { constants.ISPECS_MIN: { constants.ISPEC_MEM_SIZE: 512, constants.ISPEC_CPU_COUNT: 2, constants.ISPEC_DISK_COUNT: 2, constants.ISPEC_DISK_SIZE: 512, constants.ISPEC_NIC_COUNT: 2, constants.ISPEC_SPINDLE_USE: 2, }, constants.ISPECS_MAX: { constants.ISPEC_MEM_SIZE: 768*1024, constants.ISPEC_CPU_COUNT: 7, constants.ISPEC_DISK_COUNT: 6, constants.ISPEC_DISK_SIZE: 2048*1024, constants.ISPEC_NIC_COUNT: 3, constants.ISPEC_SPINDLE_USE: 3, }, }, ] exp_minmax2 = [ { constants.ISPECS_MIN: { constants.ISPEC_MEM_SIZE: 512, constants.ISPEC_CPU_COUNT: 2, constants.ISPEC_DISK_COUNT: 2, constants.ISPEC_DISK_SIZE: 512, constants.ISPEC_NIC_COUNT: 2, constants.ISPEC_SPINDLE_USE: 2, }, constants.ISPECS_MAX: { constants.ISPEC_MEM_SIZE: 768*1024, constants.ISPEC_CPU_COUNT: 7, constants.ISPEC_DISK_COUNT: 6, constants.ISPEC_DISK_SIZE: 2048*1024, constants.ISPEC_NIC_COUNT: 3, constants.ISPEC_SPINDLE_USE: 3, }, }, { constants.ISPECS_MIN: { constants.ISPEC_MEM_SIZE: 1024*1024, constants.ISPEC_CPU_COUNT: 3, constants.ISPEC_DISK_COUNT: 3, constants.ISPEC_DISK_SIZE: 256, constants.ISPEC_NIC_COUNT: 4, constants.ISPEC_SPINDLE_USE: 5, }, constants.ISPECS_MAX: { constants.ISPEC_MEM_SIZE: 2048*1024, constants.ISPEC_CPU_COUNT: 5, constants.ISPEC_DISK_COUNT: 5, constants.ISPEC_DISK_SIZE: 1024*1024, constants.ISPEC_NIC_COUNT: 5, constants.ISPEC_SPINDLE_USE: 7, }, }, ] exp_std1 = { constants.ISPEC_MEM_SIZE: 768*1024, constants.ISPEC_CPU_COUNT: 7, constants.ISPEC_DISK_COUNT: 6, constants.ISPEC_DISK_SIZE: 2048*1024, constants.ISPEC_NIC_COUNT: 3, constants.ISPEC_SPINDLE_USE: 1, } for fill_all in [False, True]: if fill_all: skel_ipolicy = constants.IPOLICY_DEFAULTS else: skel_ipolicy = {} self._TestFullISpecsInner(skel_ipolicy, None, exp_std1, False, fill_all) for exp_minmax in [exp_minmax1, exp_minmax2]: self._TestFullISpecsInner(skel_ipolicy, exp_minmax, exp_std1, False, fill_all) self._TestFullISpecsInner(skel_ipolicy, exp_minmax, None, False, fill_all) class TestPrintIPolicyCommand(unittest.TestCase): """Test case for cli.PrintIPolicyCommand""" _SPECS1 = { "par1": 42, "par2": "xyz", } _SPECS1_STR = "par1=42,par2=xyz" _SPECS2 = { "param": 10, "another_param": 101, } _SPECS2_STR = "another_param=101,param=10" _SPECS3 = { "par1": 1024, "param": "abc", } _SPECS3_STR = "par1=1024,param=abc" def _CheckPrintIPolicyCommand(self, ipolicy, isgroup, expected): buf = StringIO() cli.PrintIPolicyCommand(buf, ipolicy, isgroup) self.assertEqual(buf.getvalue(), expected) def testIgnoreStdForGroup(self): self._CheckPrintIPolicyCommand({"std": self._SPECS1}, True, "") def testIgnoreEmpty(self): policies = [ {}, {"std": {}}, {"minmax": []}, {"minmax": [{}]}, {"minmax": [{ "min": {}, "max": {}, }]}, {"minmax": [{ "min": self._SPECS1, "max": {}, }]}, ] for pol in policies: self._CheckPrintIPolicyCommand(pol, False, "") def testFullPolicies(self): cases = [ ({"std": self._SPECS1}, " %s %s" % (cli.IPOLICY_STD_SPECS_STR, self._SPECS1_STR)), ({"minmax": [{ "min": self._SPECS1, "max": self._SPECS2, }]}, " %s min:%s/max:%s" % (cli.IPOLICY_BOUNDS_SPECS_STR, self._SPECS1_STR, self._SPECS2_STR)), ({"minmax": [ { "min": self._SPECS1, "max": self._SPECS2, }, { "min": self._SPECS2, "max": self._SPECS3, }, ]}, " %s min:%s/max:%s//min:%s/max:%s" % (cli.IPOLICY_BOUNDS_SPECS_STR, self._SPECS1_STR, self._SPECS2_STR, self._SPECS2_STR, self._SPECS3_STR)), ] for (pol, exp) in cases: self._CheckPrintIPolicyCommand(pol, False, exp) 
if __name__ == "__main__": testutils.GanetiTestProgram()
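
# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original test module.
# It illustrates the table-formatting API that TestGenerateTable exercises
# above, and assumes it runs inside a Ganeti checkout where `ganeti.cli` is
# importable (the same assumption the tests themselves make).
def _demo_generate_table():
  headers = {"name": "Name", "mem": "Memory"}
  data = [["node1", 1024], ["node2", 8192]]
  # Columns listed in numfields are right-aligned; with units="m" numeric
  # values are printed as-is rather than converted to human-readable units.
  return cli.GenerateTable(headers, ["name", "mem"], None, data,
                           numfields=["mem"], unitfields=None, units="m")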
9d86a480233066eefa6e5831ec7fd77c674c5ea3
7401a59db9b626486fce50f77dc9ae846bb131ce
/setup.py
f29ba5c1f0c992853c851b6d76280bb391164615
[ "BSD-3-Clause" ]
permissive
dicompyler/dicompyler-core
1891193896ae8536b2b74ce1402dbeca627081de
e6dd12d6e138e2c39a544f9d7adf6d124c979144
refs/heads/master
2023-08-07T23:14:35.428123
2023-06-13T01:21:38
2023-06-13T01:21:38
51,550,203
129
75
NOASSERTION
2023-08-02T01:42:07
2016-02-11T22:00:12
Python
UTF-8
Python
false
false
2,180
py
setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __init__.py
"""setup.py for dicompyler-core."""
# Copyright (c) 2009-2018 Aditya Panchal
# This file is part of dicompyler-core, released under a BSD license.
#    See the file license.txt included with this distribution, also
#    available at https://github.com/dicompyler/dicompyler-core/

from setuptools import setup

with open('README.rst') as readme_file:
    readme = readme_file.read()

test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name='dicompyler-core',
    version='0.5.7',
    description="A library of core radiation therapy modules for DICOM / " +
                "DICOM RT used by dicompyler",
    long_description=readme,
    author="Aditya Panchal",
    author_email='apanchal@bastula.org',
    url='https://github.com/dicompyler/dicompyler-core',
    packages=[
        'dicompylercore',
    ],
    package_dir={'dicompylercore': 'dicompylercore'},
    include_package_data=True,
    install_requires=[
        "numpy>=1.2",
        "six>=1.5",
        "pydicom>=0.9.9",
        "matplotlib>=1.3.0"
    ],
    extras_require={
        'image': ["pillow>=1.0"],
        'dvhinterpolation': ["scikit-image"],
        'volume': ["shapely>=1.6"],
        'doseinterpolation': ["scipy"]
    },
    license="BSD License",
    zip_safe=False,
    keywords=[
        'dicompyler-core',
        'dicompylercore',
        'dicompyler'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Healthcare Industry',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Scientific/Engineering :: Medical Science Apps.',
        'Topic :: Scientific/Engineering :: Physics'
    ],
    test_suite='tests',
    tests_require=test_requirements
)
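
# ---------------------------------------------------------------------------
# Editor's note (not part of the original setup.py): the extras_require
# mapping above defines optional dependency groups that are selected at
# install time with pip's standard "extras" syntax; the extra names are
# exactly the dictionary keys. For example:
#
#   pip install dicompyler-core                        # core dependencies only
#   pip install "dicompyler-core[image]"               # also pulls in pillow
#   pip install "dicompyler-core[volume,doseinterpolation]"
#
# This is plain setuptools/pip behavior, nothing specific to this package.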
0d5175d1efcde2a730054a77260ece7adfc33e1b
812045c3ec6587827aeb18bde666237dfffc21ae
/tf_quant_finance/datetime/schedules.py
d805e42b50b1dae44167ba953a59d7f8f40152b8
[ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause" ]
permissive
google/tf-quant-finance
2062082c85e8679b71e69bbeb579fe338c1b0288
0d3a2193c0f2d320b65e602cf01d7a617da484df
refs/heads/master
2023-08-31T01:58:15.415811
2023-08-15T07:37:46
2023-08-15T07:38:22
198,669,252
4,165
557
Apache-2.0
2023-08-04T19:25:55
2019-07-24T16:09:50
Python
UTF-8
Python
false
false
18,652
py
schedules.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for creating schedules."""

import tensorflow.compat.v2 as tf

from tf_quant_finance.datetime import constants
from tf_quant_finance.datetime import date_tensor

_MIN_DAYS_IN_PERIOD = {
    constants.PeriodType.DAY: 1,
    constants.PeriodType.WEEK: 7,
    constants.PeriodType.MONTH: 28,
    constants.PeriodType.YEAR: 365
}


class PeriodicSchedule:
  """Defines an array of dates specified by a regular schedule."""

  def __init__(self,
               *,
               start_date,
               end_date,
               tenor,
               holiday_calendar=None,
               roll_convention=constants.BusinessDayConvention.NONE,
               backward=False,
               end_of_month=False):
    """Initializes the schedule.

    Initializes a schedule with a given tenor, date range and holiday calendar.

    A schedule is an increasing sequence of dates at a regular interval subject
    to holiday adjustments.

    The rules for schedule generation (accessed via the `dates()` method) are
    as follows.

    (a) If `backward=False`, take `start_date` and add `tenor` multiplied by
    0, 1, 2, etc. until the resulting date is greater than `end_date`.

    (b) If `backward=True`, take `end_date` and subtract `tenor` multiplied by
    0, 1, 2, etc. until the resulting date is smaller than `start_date`.
    Ensure that the result is in ascending order.

    (c) Both `start_date` and `end_date` are included, even if the distance
    between them is not an integer number of tenor periods.

    (d) If `holiday_calendar` is specified, roll all the dates according to
    `roll_convention`. The rolling includes `start_date` and `end_date` when
    they are part of the resulting schedule. Thus if `start_date` or
    `end_date` fall on holidays, they will change and may go out of the
    [`start_date`, `end_date`] interval.

    Note that `tenor = PeriodType.DAY` is treated as an actual day, not as a
    business day. So a schedule with `tenor = days(7)` is the same as one with
    `tenor = week()`.

    The `dates()` method can create multiple schedules simultaneously.
    The start and end dates may have any (compatible) shape.
    The `DateTensor` returned by `dates()` has the shape
    `start_date.shape + (n,)`, where `n` is the maximum length of schedules in
    the batch. If schedules have different lengths, the extra elements will be
    padded with extra `end_date` elements at the end, if `backward=False`, and
    with extra `start_date` elements in the beginning, if `backward=True`. In
    all cases each schedule in the batch is monotonic.

    The following examples demonstrate the batch and non-batch usage.
    #### Example Usage (Non-batch)

    ```python
    start_date = datetime.dates_from_tuples([(2020, 1, 18)])
    end_date = datetime.dates_from_tuples([(2021, 3, 25)])
    tenor = datetime.months(3)
    backward = False
    holiday_calendar = datetime.HolidayCalendar(start_year=2020, end_year=2021)
    roll_convention = datetime.BusinessDayConvention.FOLLOWING
    schedule = datetime.PeriodicSchedule(
        start_date=start_date,
        end_date=end_date,
        tenor=tenor,
        holiday_calendar=holiday_calendar,
        roll_convention=roll_convention,
        backward=backward).dates()
    # schedule is a DateTensor of
    # [[(2020, 1, 18), (2020, 4, 20), (2020, 7, 20), (2020, 10, 19),
    #   (2021, 1, 18), (2021, 3, 25)]] for backward = False and
    # [[(2020, 1, 18), (2020, 3, 25), (2020, 6, 25), (2020, 9, 25),
    #   (2020, 12, 25), (2021, 3, 25)]] for backward = True.
    ```

    The following example demonstrates this batching property.

    #### Example Usage (Batch)

    ```python
    start_date = datetime.dates_from_tuples([(2020, 1, 15), (2020, 4, 15)])
    end_date = datetime.dates_from_tuples([(2021, 3, 31), (2021, 1, 1)])
    tenor = datetime.months([4, 3])
    schedule = datetime.PeriodicSchedule(
        start_date=start_date,
        end_date=end_date,
        tenor=tenor,
        holiday_calendar=datetime.HolidayCalendar(start_year=2020,
                                                  end_year=2021),
        roll_convention=datetime.BusinessDayConvention.FOLLOWING,
        backward=False).dates()
    # Returns DateTensor of
    # [[(2020, 1, 15), (2020, 5, 15), (2020, 9, 15), (2021, 1, 15),
    #   (2021, 3, 31)],
    #  [(2020, 4, 15), (2020, 7, 15), (2020, 10, 15), (2021, 1, 1),
    #   (2021, 1, 1)]].
    ```

    Args:
      start_date: `DateTensor`. Defines the lower boundary of the schedule.
        If `backward=True` must be broadcastable to `end_date`, otherwise has
        arbitrary shape.
      end_date: `DateTensor`. Defines the upper boundary of the schedule. If
        `backward=False` must be broadcastable to `start_date`, otherwise has
        arbitrary shape.
      tenor: `PeriodTensor`. Defines the frequency of the schedule. Must be
        broadcastable to `start_date` if `backward=False`, and to `end_date`
        if `backward=True`.
      holiday_calendar: `dates.HolidayCalendar`. If `None`, the dates in the
        schedule will not be rolled to business days.
      roll_convention: BusinessDayConvention. Defines how dates in the
        schedule should be rolled to business days if they fall on holidays.
        Ignored if `holiday_calendar = None`.
        Default value: BusinessDayConvention.NONE (i.e. no rolling).
      backward: Python `bool`. Whether to build the schedule from the
        `start_date` moving forwards or from the `end_date` and moving
        backwards.
      end_of_month: Python `bool`. If `True`, shifts all dates in the schedule
        to the ends of the corresponding months, if `start_date` or `end_date`
        (depending on `backward`) is at the end of a month. The shift is
        applied before applying `roll_convention`. In the batched case, only
        those schedules in a batch whose corresponding `start_date` (or
        `end_date`) is at the end of a month will be shifted.
    """
    if end_of_month and tenor.period_type() not in [constants.PeriodType.MONTH,
                                                    constants.PeriodType.YEAR]:
      raise ValueError(
          "end_of_month may only be used with tenors of PeriodType.MONTH or "
          "PeriodType.YEAR"
      )
    self._start_date = start_date
    self._end_date = end_date
    self._tenor = tenor
    self._holiday_calendar = holiday_calendar
    self._roll_convention = roll_convention
    self._backward = backward
    self._end_of_month = end_of_month

  def dates(self):
    """Returns the dates as computed from the schedule as a DateTensor.

    Constructs the date schedule from the supplied data. For more details see
    the initializer docstring.
    Returns:
      `DateTensor` of rank one more than `start_date` or `end_date`
      (depending on `backward`), representing schedules for each element of
      the input.
    """
    return _gen_periodic_schedule(
        self._start_date,
        self._end_date,
        self._tenor,
        holiday_calendar=self._holiday_calendar,
        roll_convention=self._roll_convention,
        backward=self._backward,
        end_of_month=self._end_of_month)

  @property
  def start_date(self):
    return self._start_date

  @property
  def end_date(self):
    return self._end_date

  @property
  def tenor(self):
    return self._tenor

  @property
  def holiday_calendar(self):
    return self._holiday_calendar

  @property
  def roll_convention(self):
    return self._roll_convention

  @property
  def generate_backwards(self):
    """Returns whether the schedule is generated from the end date."""
    return self._backward

  @property
  def end_of_month(self):
    return self._end_of_month


class BusinessDaySchedule:
  """Generates schedules containing every business day in a period."""

  def __init__(self, *, start_date, end_date, holiday_calendar,
               backward=False):
    """Initializes the schedule.

    Initializes a schedule with a given date range and holiday calendar.

    The schedule includes all business days between and including `start_date`
    and `end_date`.

    Can create multiple schedules simultaneously. The start and end dates may
    have any (compatible) shape. The `DateTensor` returned by `dates()` has
    the shape `start_date.shape + (n,)`, where `n` is the maximum length of
    schedules in the batch. If schedules have different lengths, the extra
    elements will be padded with extra `end_date` elements at the end, if
    `backward=False`, and with extra `start_date` elements in the beginning,
    if `backward=True`. In all cases each schedule in the batch is monotonic.

    #### Example Usage (Non-batch)

    ```python
    start_date = datetime.dates_from_tuples([(2020, 3, 19)])
    end_date = datetime.dates_from_tuples([(2020, 3, 25)])
    holiday_calendar = datetime.HolidayCalendar(start_year=2020, end_year=2021)
    schedule = datetime.BusinessDaySchedule(
        start_date=start_date,
        end_date=end_date,
        holiday_calendar=holiday_calendar,
        backward=False).dates()
    # schedule is a DateTensor of
    # [[(2020, 3, 19), (2020, 3, 20), (2020, 3, 23), (2020, 3, 24),
    #   (2020, 3, 25)]] regardless of `backward`.
    ```

    #### Example Usage (Batch)

    ```python
    start_date = datetime.dates_from_tuples([(2020, 3, 19), (2020, 3, 13)])
    end_date = datetime.dates_from_tuples([(2020, 3, 25), (2020, 3, 17)])
    schedule = datetime.BusinessDaySchedule(
        start_date=start_date,
        end_date=end_date,
        holiday_calendar=datetime.HolidayCalendar(start_year=2020,
                                                  end_year=2021),
        backward=False).dates()
    # Returns DateTensor of
    # [[(2020, 3, 19), (2020, 3, 20), (2020, 3, 23), (2020, 3, 24),
    #   (2020, 3, 25)],
    #  [(2020, 3, 13), (2020, 3, 16), (2020, 3, 17), (2020, 3, 17),
    #   (2020, 3, 17)]], if `backward` is False, and
    # [[(2020, 3, 19), (2020, 3, 20), (2020, 3, 23), (2020, 3, 24),
    #   (2020, 3, 25)],
    #  [(2020, 3, 13), (2020, 3, 13), (2020, 3, 13), (2020, 3, 16),
    #   (2020, 3, 17)]], if `backward` is True.
    ```

    Args:
      start_date: `dates.DateTensor`. Defines the lower boundary of the
        schedule. If `backward=True` must be broadcastable to `end_date`,
        otherwise has arbitrary shape.
      end_date: `dates.DateTensor`. Defines the upper boundary of the
        schedule. If `backward=False` must be broadcastable to `start_date`,
        otherwise has arbitrary shape.
      holiday_calendar: `dates.HolidayCalendar` that defines which days will
        be included.
      backward: Python `bool`. Defines the way padding is applied in case of
        batching.
        If schedules in a batch have different lengths, the extra elements
        will be padded with extra `end_date` elements at the end, if
        `backward=False`, and with extra `start_date` elements in the
        beginning, if `backward=True`.
    """
    self._start_date = start_date
    self._end_date = end_date
    self._holiday_calendar = holiday_calendar
    self._backward = backward

  def dates(self):
    """Returns the dates as computed from the schedule as a DateTensor.

    Constructs the date schedule from the supplied data. For more details see
    the initializer docstring.

    Returns:
      `DateTensor` of rank one more than `start_date` or `end_date`
      (depending on `backward`), representing schedules for each element of
      the input.
    """
    return _gen_business_days(self._start_date, self._end_date,
                              self._holiday_calendar, self._backward)

  @property
  def holiday_calendar(self):
    return self._holiday_calendar

  @property
  def start_date(self):
    return self._start_date

  @property
  def end_date(self):
    return self._end_date

  @property
  def generate_backwards(self):
    return self._backward


def _gen_periodic_schedule(start_date,
                           end_date,
                           tenor,
                           holiday_calendar=None,
                           roll_convention=constants.BusinessDayConvention.NONE,
                           backward=False,
                           end_of_month=False):
  """Generates a periodic schedule, see PeriodicSchedule."""
  # Validate inputs.
  control_deps = [
      tf.debugging.assert_greater_equal(
          end_date.ordinal(),
          start_date.ordinal(),
          message="End date must be >= start date."),
      tf.debugging.assert_positive(
          tenor.quantity(), message="Tenor quantity must be positive.")
  ]

  with tf.compat.v1.control_dependencies(control_deps):
    # Reshape the input Tensors.
    if backward:
      start_date = start_date.broadcast_to(tf.shape(end_date.ordinal()))
      tenor = tenor.broadcast_to(tf.shape(end_date.ordinal()))
    else:
      end_date = end_date.broadcast_to(tf.shape(start_date.ordinal()))
      tenor = tenor.broadcast_to(tf.shape(start_date.ordinal()))
    start_date = start_date.expand_dims(axis=-1)
    end_date = end_date.expand_dims(axis=-1)
    tenor = tenor.expand_dims(axis=-1)

    # Figure out the upper bound of the schedule length.
    min_days_in_period = _MIN_DAYS_IN_PERIOD[tenor.period_type()]
    days_between = end_date.ordinal() - start_date.ordinal() + 1
    schedule_len_upper_bound = tf.cast(
        tf.math.ceil(tf.math.reduce_max(
            days_between / (tenor.quantity() * min_days_in_period))),
        dtype=tf.int32)

    # Add the periods.
    if backward:
      # Subtract tenor * n, where n = n_max, ..., 2, 1, 0.
      tenors_expanded = tenor * tf.range(schedule_len_upper_bound - 1, -1, -1,
                                         dtype=tf.int32)
      schedules = end_date - tenors_expanded
      # Prepend start_date to ensure we always include it.
      schedules = date_tensor.DateTensor.concat((start_date, schedules),
                                                axis=-1)
      in_bounds = schedules.ordinal() >= start_date.ordinal()

      # Pad with start_date.
      schedules = date_tensor.DateTensor.where(in_bounds, schedules,
                                               start_date)

      # Find how much we overestimated max schedule length and trim the
      # extras.
      not_start_date = tf.math.not_equal(schedules.ordinal(),
                                         start_date.ordinal())
      max_schedule_len_error = (
          tf.math.reduce_min(tf.where(not_start_date)[..., -1]) - 1)
      schedules = schedules[..., max_schedule_len_error:]
    else:
      # Add tenor * n, where n = 0, 1, 2, ..., n_max.
      tenors_expanded = tenor * tf.range(schedule_len_upper_bound,
                                         dtype=tf.int32)
      schedules = start_date + tenors_expanded
      # Append end_date to ensure we always include it.
      schedules = date_tensor.DateTensor.concat((schedules, end_date),
                                                axis=-1)
      in_bounds = schedules.ordinal() <= end_date.ordinal()

      # Pad with end_date.
      schedules = date_tensor.DateTensor.where(in_bounds, schedules, end_date)

      # Find the actual schedule length and trim the extras.
      not_end_date = tf.math.not_equal(schedules.ordinal(),
                                       end_date.ordinal())
      max_schedule_len = tf.math.reduce_max(tf.where(not_end_date)[..., -1]) + 2
      schedules = schedules[..., :max_schedule_len]

    # Move to the end of month where necessary.
    if end_of_month:
      where_cond = (end_date if backward else start_date).is_end_of_month()
      schedules = date_tensor.DateTensor.where(where_cond,
                                               schedules.to_end_of_month(),
                                               schedules)

    # Roll to business days.
    if holiday_calendar is not None:
      schedules = holiday_calendar.roll_to_business_day(schedules,
                                                        roll_convention)

    return schedules


def _gen_business_days(start_date, end_date, holiday_calendar, backward=False):
  """Generates business days between given dates, see BusinessDaySchedule."""
  # Handle the case when start_date or end_date fall on holidays.
  start_date = holiday_calendar.roll_to_business_day(
      start_date, roll_convention=constants.BusinessDayConvention.FOLLOWING)
  end_date = holiday_calendar.roll_to_business_day(
      end_date, roll_convention=constants.BusinessDayConvention.PRECEDING)

  # Validate inputs.
  control_deps = [
      tf.debugging.assert_greater_equal(
          end_date.ordinal(),
          start_date.ordinal(),
          message="End date must be >= start date."),
  ]

  with tf.compat.v1.control_dependencies(control_deps):
    # Reshape the input Tensors.
    if backward:
      start_date = start_date.broadcast_to(tf.shape(end_date.ordinal()))
    else:
      end_date = end_date.broadcast_to(tf.shape(start_date.ordinal()))
    start_date = start_date.expand_dims(axis=-1)
    end_date = end_date.expand_dims(axis=-1)

    # Find the longest schedule in the batch.
    max_len = tf.math.abs(tf.math.reduce_max(
        holiday_calendar.business_days_between(start_date, end_date))) + 1

    if backward:
      # Subtract n days, where n = max_len-1, ..., 2, 1, 0.
      days = tf.range(-max_len + 1, 1, dtype=tf.int32)
      schedules = holiday_calendar.add_business_days(end_date, days)
      in_bounds = schedules.ordinal() >= start_date.ordinal()

      # Pad with start_date.
      schedules = date_tensor.DateTensor.where(in_bounds, schedules,
                                               start_date)
    else:
      # Add n days, where n = 0, 1, 2, ..., max_len-1.
      days = tf.range(max_len, dtype=tf.int32)
      schedules = holiday_calendar.add_business_days(start_date, days)
      in_bounds = schedules.ordinal() <= end_date.ordinal()

      # Pad with end_date.
      schedules = date_tensor.DateTensor.where(in_bounds, schedules, end_date)

    return schedules
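
# -----------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original module. It
# assumes the package namespace `tf_quant_finance.datetime` re-exports
# `dates_from_tuples` and `months` alongside this module's classes, as the
# docstring examples above suggest.
def _demo_periodic_schedule():
  from tf_quant_finance import datetime as dateslib  # assumed import path
  schedule = PeriodicSchedule(
      start_date=dateslib.dates_from_tuples([(2020, 1, 15)]),
      end_date=dateslib.dates_from_tuples([(2020, 12, 31)]),
      tenor=dateslib.months(3),
      backward=False)
  # With no holiday calendar the dates are not rolled, so this yields the
  # quarterly dates 2020-01-15, 04-15, 07-15, 10-15 plus the end date
  # 2020-12-31 itself, per rule (c) in the PeriodicSchedule docstring.
  return schedule.dates()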
669ad9a41b939b68bcdfd9f931a700671358d5ca
6cd1a085c8113030f14a8c992ecd2b45db70c4e2
/scripts/create_prime_factors_database.py
00c96f10e82f781848f1817c95d4cd85a5af9302
[ "MIT" ]
permissive
mhostetter/galois
a72e6339ac3bba2fda31e5c9f1228ebc85d5ebe7
a140a468fa1f7619f94ad2551f9c14e684ee3a34
refs/heads/master
2023-07-27T17:02:04.925608
2023-05-09T19:39:50
2023-05-09T22:37:26
312,901,841
186
25
MIT
2023-09-10T14:56:32
2020-11-14T21:05:20
Python
UTF-8
Python
false
false
22,092
py
create_prime_factors_database.py
""" A script to create a database of prime factorizations of p^n +/- 1 using the Cunningham Project's tables. https://homes.cerias.purdue.edu/~ssw/cun/ """ from __future__ import annotations import os import re import sqlite3 from pathlib import Path import requests import galois def main(): """ The main routine to create a database of prime factorizations of b^n +/- 1. """ primes = create_prime_lut() composites = create_composite_lut() database_file = Path(__file__).parent.parent / "src" / "galois" / "_databases" / "prime_factors.db" conn, cursor = create_database(database_file) add_main_tables(conn, cursor, primes, composites) for base in [2, 3, 5, 6, 7, 10, 11, 12]: create_even_negative_offset_table(conn, cursor, base) conn.close() def create_prime_lut() -> dict[tuple[str, str], int]: """ Creates a dictionary of special prime numbers. The key is (label, name). For example, ("2,471+", "P21"). """ primes = process_appendix_a() extra_primes = process_other_primes() # There are duplicate entries in these tables and they contain different primes! So we're saving both # and figuring out which one is correct at runtime. for key, value in extra_primes.items(): if key in primes: primes[key] = [primes[key], value] else: primes[key] = value return primes def process_appendix_a() -> dict[tuple[str, str], int]: """ Process Appendix A that contains large primes, e.g. P52. """ url = "https://homes.cerias.purdue.edu/~ssw/cun/third/appa901" text = requests.get(url).text # Make multi-line entries single-line text = re.sub("\n ", "", text) primes = {} for line in text.splitlines()[1:]: if line.startswith(" There"): continue bits = int(line[0:4].strip()) label = line[4:14].strip() assert line[14:19] == "P " prime_str = line[19:].replace(" ", "") assert len(prime_str) == bits primes[(label, f"P{bits}")] = int(prime_str) return primes def process_other_primes() -> dict[tuple[str, str], int]: """ Process all primes in the main tables (except those already in Appendix A). """ text = requests.get("https://homes.cerias.purdue.edu/~ssw/cun/third/mainprimes").text primes = {} for line in text.splitlines()[1:]: bits = int(line[0:4].strip()) label = line[4:18].strip() prime_str = line[18:].replace(" ", "") assert len(prime_str) == bits primes[(label, f"P{bits}")] = int(prime_str) return primes def create_composite_lut() -> dict[tuple[str, str], int]: """ Creates a dictionary of special composite numbers. The key is (label, name). For example, ("2,471+", "C152"). """ return process_appendix_c() def process_appendix_c() -> dict[tuple[str, str], int]: """ Process Appendix C that contains large composites, e.g. C152. """ text = requests.get("https://homes.cerias.purdue.edu/~ssw/cun/third/appc901").text # Make multi-line entries single-line text = re.sub("\n ", "", text) last_key = None composites = {} for line in text.splitlines()[1:]: bits = int(line[0:4].strip()) label = line[4:16].replace(" ", "") partial_composite_str = line[16:].replace(" ", "") if label != "": key = (label, f"C{bits}") composites[key] = int(partial_composite_str) last_key = key else: assert last_key is not None assert last_key in composites composites[last_key] = int(str(composites[last_key]) + partial_composite_str) # Test that the stitching worked for key, value in composites.items(): bits = int(key[1][1:]) assert len(str(value)) == bits return composites def create_database(file: Path) -> tuple[sqlite3.Connection, sqlite3.Cursor]: """ Deletes the old database, makes a new one, and returns the database connection. 
""" if file.exists(): os.remove(file) conn = sqlite3.connect(file) conn.row_factory = sqlite3.Row cursor = conn.cursor() create_table(conn, cursor) return conn, cursor def create_table(conn: sqlite3.Connection, cursor: sqlite3.Cursor): """ Creates an empty 'factorizations' table. """ cursor.execute( """ CREATE TABLE factorizations ( base INTEGER NOT NULL, exponent INTEGER NOT NULL, offset INTEGER NOT NULL, value TEXT NOT NULL, factors TEXT NOT NULL, multiplicities TEXT NOT NULL, composite TEXT NOT NULL, PRIMARY KEY (value) ) """ ) conn.commit() def add_main_tables(conn: sqlite3.Connection, cursor: sqlite3.Cursor, primes: dict, composites: dict): """ Adds the main factorization tables of the Cunningham Book to the database. """ text = requests.get("https://homes.cerias.purdue.edu/~ssw/cun/third/pmain901").text for lines in text.split(" Table ")[1:]: header, data = lines.split("n Prime Factors", 2) data = clean_up_data(data) pattern = r"(?P<name>[a-zA-Z0-9()+-]*)\s+Factorizations of (?P<base>\d+)\^n(?P<offset>[+-]\d+), (?P<extra>.*)" match = re.search(pattern, header) assert match is not None, f"Could not parse header: {header}" table = { "name": match.group("name"), "base": int(match.group("base")), "offset": int(match.group("offset")), "extra": match.group("extra"), "primes": primes, "composites": composites, } rows = {} for line in data.splitlines(): if line in ["", " -------------------------------------------------------------------------------"]: continue if "^" in line: # These lines contain the relationship equations at the end of a table continue rows, row = add_to_rows(table, rows, line) base = table["base"] offset = table["offset"] exponent = row["n"] value = row["value"] factors = list(row["factors"].keys()) multiplicities = list(row["factors"].values()) composite = row["composite"] add_to_database(cursor, base, exponent, offset, value, factors, multiplicities, composite) conn.commit() def clean_up_data(text: str) -> str: """ Preprocess the text file to make it easier to further parse. """ # Eliminate new lines that start with '.' since they indicate a continuation of the previous line text = re.sub(r"\.\n\s+", "", text) # Eliminate new lines that start with \ since they break up a long integer text = re.sub(r"\\\s+", "", text) # Eliminate spaces between '.' and the next prime text = re.sub(r"\.\s+", ".", text) # Eliminate new lines that also indent the next line since they indicate a continuation of the previous line text = re.sub(r"\n ", "", text) # Convert new lines with L and M onto the same line. We will then use the '\tL' and '\tM' to split off those sections. text = re.sub(r"\n L", "\tL", text) text = re.sub(r"\n M", "\tM", text) return text def add_to_rows(table: dict, rows: dict, line: str) -> tuple[dict, dict]: # Strip off the exponent 'n' first. It is always right-aligned and 5 characters long. n = int(line[0:5].strip()) line = line[5:] if line[0] == "L": # This only happens in the 2LM table. 
Make the line look like: L.M \tL<l_section>\tM<m_section> line = "L.M \t" + line if "L.M" in line: assert "\tL" in line and "\tM" in line, "Line should have both L and M sections" line, l_section = line.split("\tL", 1) l_section, m_section = l_section.split("\tM", 1) # Process the L and M sections first rows, _ = add_section_data(table, rows, n, "L", l_section.strip()) rows, _ = add_section_data(table, rows, n, "M", m_section.strip()) rows, row = add_section_data(table, rows, n, "", line.strip()) return rows, row def add_section_data(table: dict, rows: dict, n: int, letter: str, section: str) -> tuple[dict, dict]: """ Processes a factorization section, e.g. "(3L,21M,57M) 7928131.1262555546640315313.P92". The contents in the () indicate other factored values from the table that divide this value. After the () are the new factors for this value. """ if letter in ["L", "M"]: key = f"{n}{letter}" else: key = n value = table["base"] ** n + table["offset"] row = { "value": value, "n": n, "factors": {}, # Key is the factor, value is the multiplicity "composite": value, # The remaining composite number after factoring } divisors_str, factors_str = parse_section(section) row = parse_divisors_string(rows, row, divisors_str) row = parse_factors_string(table, rows, row, factors_str, letter) # Assign this row of the LUT to the table dictionary rows[key] = row return rows, row def parse_section(section: str) -> tuple[str, str]: """ Parses the divisors and factors sections. The divisors section are in () and the factors section is after. """ if "(" in section: assert section.count("(") == 1 and section.count(")") == 1, "There should be exactly one pair of parentheses" _, divisors_str = section.split("(", 1) divisors_str, factors_str = divisors_str.split(")", 1) factors_str = factors_str.strip() return divisors_str, factors_str return "", section.strip() def parse_divisors_string(rows: dict, row: dict, string: str) -> dict: """ Processes the other values that divide this value. It checks each factor of the divisor value and adds it with its multiplicity to the current row. """ for divisor in string.split(","): if divisor == "": continue try: key = int(divisor) except ValueError: key = divisor for factor in rows[key]["factors"]: row = add_factor(row, factor) return row def parse_factors_string(table: dict, rows: dict, row: dict, string: str, letter: str) -> dict: """ Processes the new factor of this value and adds it with its multiplicity to the current row. """ # Remove extra spaces to aid in parsing string = re.sub(r" ", "", string) # Remove the '*' since those indicate repeated factors string = re.sub(r"\*", "", string) # Remove extra '.' that separate factors string = re.sub(r"\.+", ".", string) for factor in string.split("."): if "L" in factor or "M" in factor: # Must be an L or M sub-factor from the current table if factor in ["L", "M"]: key = f"{row['n']}{factor}" else: key = factor for factor in rows[key]["factors"]: row = add_factor(row, factor) elif "P" in factor: # A special prime label = prime_composite_label(table, row["n"], letter) key = (label, factor) factor = table["primes"][key] if isinstance(factor, int): row = add_factor(row, factor) else: # There are multiple entries for this key. We need to try each one. for f in factor: if row["composite"] % f == 0: row = add_factor(row, f) elif "C" in factor: # A special composite. Verify that it divides the remaining value. 
label = prime_composite_label(table, row["n"], letter) key = (label, factor) assert ( row["composite"] % table["composites"][key] == 0 ), f"{row['composite']} is not divisible by {table['composites'][key]}" else: # Must be a regular integer factor = int(factor) row = add_factor(row, factor) return row def prime_composite_label(table: dict, n: int, letter: str) -> str: """ Given the current table, the exponent 'n', and the letter 'L' or 'M', this function determines the label given to the special prime or composite. """ if letter in ["L", "M"]: return f"{table['base']},{n}{letter}" if table["offset"] == 1: return f"{table['base']},{n}+" if table["offset"] == -1: return f"{table['base']},{n}-" raise RuntimeError(f"Could not get label for table: {table}") def add_factor(row: dict, factor: int) -> dict: """ Adds the given prime factor to the row and finds its multiplicity. This function reduces the remaining value 'composite' accordingly. """ while row["composite"] % factor == 0: row["composite"] //= factor if factor not in row["factors"]: row["factors"][factor] = 0 row["factors"][factor] += 1 return row def add_to_database( cursor: sqlite3.Cursor, base: int, exponent: int, offset: int, value: int, factors: list[int], multiplicities: list[int], composite: int, ): """ Add the given factorization to the database. This function verifies the validity of the factorization before writing to the database. """ if len(factors) > 0: # Sort the factors and multiplicities by ascending factor factors, multiplicities = zip(*sorted(zip(factors, multiplicities), key=lambda pair: pair[0])) test_factorization(base, exponent, offset, value, factors, multiplicities, composite) factors_str = ",".join([str(f) for f in factors]) multiplicities_str = ",".join([str(m) for m in multiplicities]) print(f"Adding to database: {base}^{exponent} + {offset}") # print("----------------------------------------------------------------------------------") # print("Adding to database:") # print(f" value: {base}^{exponent} + {offset} = {value}") # print(f" factors: {factors_str}") # print(f" multiplicities: {multiplicities_str}") # print(f" composite: {composite}") # NOTE: Ignore duplicates, which rarely occur. For example, 3^1 + 1 = 4 = 5^1 - 1. cursor.execute( """ INSERT OR IGNORE INTO factorizations (base, exponent, offset, value, factors, multiplicities, composite) VALUES (?, ?, ?, ?, ?, ?, ?) """, (base, exponent, offset, str(value), factors_str, multiplicities_str, str(composite)), ) def test_factorization( base: int, exponent: int, offset: int, value: int, factors: list[int], multiplicities: list[int], composite: int ): """ Tests that all the factorization parameters are consistent. """ assert base**exponent + offset == value, f"{base}^{exponent} + {offset} != {value}" assert not galois.is_prime(composite), f"{composite} is prime" product = composite for factor, multiplicity in zip(factors, multiplicities): product *= factor**multiplicity assert product == value, f"{product} != {value}" def create_even_negative_offset_table(conn: sqlite3.Connection, cursor: sqlite3.Cursor, base: int): """ Creates database entries for base^(2k) - 1 = A * B, A = (base^k - 1), B = (base^k + 1). 
""" if base == 2: seed_two_even_negative_offset_tables(conn, cursor) k_fail = 1201 elif base == 3: seed_three_even_negative_offset_tables(conn, cursor) k_fail = 541 elif base == 5: seed_five_even_negative_offset_tables(conn, cursor) k_fail = 376 elif base == 6: seed_six_even_negative_offset_tables(conn, cursor) k_fail = 331 elif base == 7: seed_seven_even_negative_offset_tables(conn, cursor) k_fail = 301 elif base == 10: seed_ten_even_negative_offset_tables(conn, cursor) k_fail = 331 elif base == 11: seed_eleven_even_negative_offset_tables(conn, cursor) k_fail = 241 elif base == 12: seed_twelve_even_negative_offset_tables(conn, cursor) k_fail = 241 else: raise ValueError(f"Invalid base: {base}") k = 2 while True: A_value = base**k - 1 B_value = base**k + 1 row = select_two_factorizations_from_database(cursor, A_value, B_value) if row is None: assert ( k == k_fail ), f"The {base}^(2k) - 1 table generation failed at k = {k}, but should have failed at k = {k_fail}" break exponent = 2 * k offset = -1 value = base**exponent + offset factors, multiplicities = merge_factors(row) composite = int(row["A_composite"]) * int(row["B_composite"]) add_to_database(cursor, base, exponent, offset, value, factors, multiplicities, composite) conn.commit() # Need to commit after each row because it is used in the next iteration k += 1 def seed_two_even_negative_offset_tables(conn: sqlite3.Connection, cursor: sqlite3.Cursor): """ Manually adds the 2^2 - 1 factorization to the database. """ base = 2 exponent = 2 offset = -1 value = base**exponent + offset factors = [3] multiplicities = [1] composite = 1 add_to_database(cursor, base, exponent, offset, value, factors, multiplicities, composite) conn.commit() def seed_three_even_negative_offset_tables(conn: sqlite3.Connection, cursor: sqlite3.Cursor): """ Manually adds the 3^2 - 1 factorization to the database. """ base = 3 exponent = 2 offset = -1 value = base**exponent + offset factors = [2] multiplicities = [3] composite = 1 add_to_database(cursor, base, exponent, offset, value, factors, multiplicities, composite) conn.commit() def seed_five_even_negative_offset_tables(conn: sqlite3.Connection, cursor: sqlite3.Cursor): """ Manually adds the 5^2 - 1 factorization to the database. """ base = 5 exponent = 2 offset = -1 value = base**exponent + offset factors = [2, 3] multiplicities = [3, 1] composite = 1 add_to_database(cursor, base, exponent, offset, value, factors, multiplicities, composite) conn.commit() def seed_six_even_negative_offset_tables(conn: sqlite3.Connection, cursor: sqlite3.Cursor): """ Manually adds the 6^2 - 1 factorization to the database. """ base = 6 exponent = 2 offset = -1 value = base**exponent + offset factors = [5, 7] multiplicities = [1, 1] composite = 1 add_to_database(cursor, base, exponent, offset, value, factors, multiplicities, composite) conn.commit() def seed_seven_even_negative_offset_tables(conn: sqlite3.Connection, cursor: sqlite3.Cursor): """ Manually adds the 7^2 - 1 factorization to the database. """ base = 7 exponent = 2 offset = -1 value = base**exponent + offset factors = [2, 3] multiplicities = [4, 1] composite = 1 add_to_database(cursor, base, exponent, offset, value, factors, multiplicities, composite) conn.commit() def seed_ten_even_negative_offset_tables(conn: sqlite3.Connection, cursor: sqlite3.Cursor): """ Manually adds the 10^2 - 1 factorization to the database. 
""" base = 10 exponent = 2 offset = -1 value = base**exponent + offset factors = [3, 11] multiplicities = [2, 1] composite = 1 add_to_database(cursor, base, exponent, offset, value, factors, multiplicities, composite) conn.commit() def seed_eleven_even_negative_offset_tables(conn: sqlite3.Connection, cursor: sqlite3.Cursor): """ Manually adds the 11^2 - 1 factorization to the database. """ base = 11 exponent = 2 offset = -1 value = base**exponent + offset factors = [2, 3, 5] multiplicities = [3, 1, 1] composite = 1 add_to_database(cursor, base, exponent, offset, value, factors, multiplicities, composite) conn.commit() def seed_twelve_even_negative_offset_tables(conn: sqlite3.Connection, cursor: sqlite3.Cursor): """ Manually adds the 12^2 - 1 factorization to the database. """ base = 12 exponent = 2 offset = -1 value = base**exponent + offset factors = [11, 13] multiplicities = [1, 1] composite = 1 add_to_database(cursor, base, exponent, offset, value, factors, multiplicities, composite) conn.commit() def select_two_factorizations_from_database(cursor: sqlite3.Cursor, A_value: int, B_value: int): """ Selects two factorizations from the database. """ cursor.execute( """ SELECT A.factors AS A_factors, A.multiplicities AS A_multiplicities, A.composite AS A_composite, B.factors AS B_factors, B.multiplicities AS B_multiplicities, B.composite AS B_composite FROM factorizations A, factorizations B WHERE ( A.value == ? AND B.value == ? ) """, (str(A_value), str(B_value)), ) return cursor.fetchone() def merge_factors(row: sqlite3.Row) -> tuple[list[int], list[int]]: """ Combines the factors and multiplicities from two factorizations. """ factors = [] multiplicities = [] if row["A_factors"] != "": factors += [int(f) for f in row["A_factors"].split(",")] if row["A_multiplicities"] != "": multiplicities = [int(m) for m in row["A_multiplicities"].split(",")] if row["B_factors"] != "": factors += [int(f) for f in row["B_factors"].split(",")] if row["B_multiplicities"] != "": multiplicities += [int(m) for m in row["B_multiplicities"].split(",")] return factors, multiplicities if __name__ == "__main__": main()
6adc43fed98e9c03a782c1503443f7bff91280e2
eb9f655206c43c12b497c667ba56a0d358b6bc3a
/plugins/hg4idea/testData/bin/mercurial/templatefilters.py
964b6bfe5acc211dea55023dba73448f01d01c4d
[ "Apache-2.0" ]
permissive
JetBrains/intellij-community
2ed226e200ecc17c037dcddd4a006de56cd43941
05dbd4575d01a213f3f4d69aa4968473f2536142
refs/heads/master
2023-09-03T17:06:37.560889
2023-09-03T11:51:00
2023-09-03T12:12:27
2,489,216
16,288
6,635
Apache-2.0
2023-09-12T07:41:58
2011-09-30T13:33:05
null
UTF-8
Python
false
false
15,476
py
templatefilters.py
# templatefilters.py - common template expansion filters # # Copyright 2005-2008 Olivia Mackall <olivia@selenic.com> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from __future__ import absolute_import import os import re import time from .i18n import _ from .node import hex from . import ( encoding, error, pycompat, registrar, smartset, templateutil, url, util, ) from .utils import ( cborutil, dateutil, stringutil, ) urlerr = util.urlerr urlreq = util.urlreq # filters are callables like: # fn(obj) # with: # obj - object to be filtered (text, date, list and so on) filters = {} templatefilter = registrar.templatefilter(filters) @templatefilter(b'addbreaks', intype=bytes) def addbreaks(text): """Any text. Add an XHTML "<br />" tag before the end of every line except the last. """ return text.replace(b'\n', b'<br/>\n') agescales = [ (b"year", 3600 * 24 * 365, b'Y'), (b"month", 3600 * 24 * 30, b'M'), (b"week", 3600 * 24 * 7, b'W'), (b"day", 3600 * 24, b'd'), (b"hour", 3600, b'h'), (b"minute", 60, b'm'), (b"second", 1, b's'), ] @templatefilter(b'age', intype=templateutil.date) def age(date, abbrev=False): """Date. Returns a human-readable date/time difference between the given date/time and the current date/time. """ def plural(t, c): if c == 1: return t return t + b"s" def fmt(t, c, a): if abbrev: return b"%d%s" % (c, a) return b"%d %s" % (c, plural(t, c)) now = time.time() then = date[0] future = False if then > now: future = True delta = max(1, int(then - now)) if delta > agescales[0][1] * 30: return b'in the distant future' else: delta = max(1, int(now - then)) if delta > agescales[0][1] * 2: return dateutil.shortdate(date) for t, s, a in agescales: n = delta // s if n >= 2 or s == 1: if future: return b'%s from now' % fmt(t, n, a) return b'%s ago' % fmt(t, n, a) @templatefilter(b'basename', intype=bytes) def basename(path): """Any text. Treats the text as a path, and returns the last component of the path after splitting by the path separator. For example, "foo/bar/baz" becomes "baz" and "foo/bar//" becomes "". """ return os.path.basename(path) def _tocborencodable(obj): if isinstance(obj, smartset.abstractsmartset): return list(obj) return obj @templatefilter(b'cbor') def cbor(obj): """Any object. Serializes the object to CBOR bytes.""" # cborutil is stricter about type than json() filter obj = pycompat.rapply(_tocborencodable, obj) return b''.join(cborutil.streamencode(obj)) @templatefilter(b'commondir') def commondir(filelist): """List of text. Treats each list item as file name with / as path separator and returns the longest common directory prefix shared by all list items. Returns the empty string if no common prefix exists. The list items are not normalized, i.e. "foo/../bar" is handled as file "bar" in the directory "foo/..". Leading slashes are ignored. For example, ["foo/bar/baz", "foo/baz/bar"] becomes "foo" and ["foo/bar", "baz"] becomes "". """ def common(a, b): if len(a) > len(b): a = b[: len(a)] elif len(b) > len(a): b = b[: len(a)] if a == b: return a for i in pycompat.xrange(len(a)): if a[i] != b[i]: return a[:i] return a try: if not filelist: return b"" dirlist = [f.lstrip(b'/').split(b'/')[:-1] for f in filelist] if len(dirlist) == 1: return b'/'.join(dirlist[0]) a = min(dirlist) b = max(dirlist) # The common prefix of a and b is shared with all # elements of the list since Python sorts lexicographical # and [1, x] after [1]. 
return b'/'.join(common(a, b)) except TypeError: raise error.ParseError(_(b'argument is not a list of text')) @templatefilter(b'count') def count(i): """List or text. Returns the length as an integer.""" try: return len(i) except TypeError: raise error.ParseError(_(b'not countable')) @templatefilter(b'dirname', intype=bytes) def dirname(path): """Any text. Treats the text as a path, and strips the last component of the path after splitting by the path separator. """ return os.path.dirname(path) @templatefilter(b'domain', intype=bytes) def domain(author): """Any text. Finds the first string that looks like an email address, and extracts just the domain component. Example: ``User <user@example.com>`` becomes ``example.com``. """ f = author.find(b'@') if f == -1: return b'' author = author[f + 1 :] f = author.find(b'>') if f >= 0: author = author[:f] return author @templatefilter(b'email', intype=bytes) def email(text): """Any text. Extracts the first string that looks like an email address. Example: ``User <user@example.com>`` becomes ``user@example.com``. """ return stringutil.email(text) @templatefilter(b'escape', intype=bytes) def escape(text): """Any text. Replaces the special XML/XHTML characters "&", "<" and ">" with XML entities, and filters out NUL characters. """ return url.escape(text.replace(b'\0', b''), True) para_re = None space_re = None def fill(text, width, initindent=b'', hangindent=b''): '''fill many paragraphs with optional indentation.''' global para_re, space_re if para_re is None: para_re = re.compile(b'(\n\n|\n\\s*[-*]\\s*)', re.M) space_re = re.compile(br' +') def findparas(): start = 0 while True: m = para_re.search(text, start) if not m: uctext = encoding.unifromlocal(text[start:]) w = len(uctext) while w > 0 and uctext[w - 1].isspace(): w -= 1 yield ( encoding.unitolocal(uctext[:w]), encoding.unitolocal(uctext[w:]), ) break yield text[start : m.start(0)], m.group(1) start = m.end(1) return b"".join( [ stringutil.wrap( space_re.sub(b' ', stringutil.wrap(para, width)), width, initindent, hangindent, ) + rest for para, rest in findparas() ] ) @templatefilter(b'fill68', intype=bytes) def fill68(text): """Any text. Wraps the text to fit in 68 columns.""" return fill(text, 68) @templatefilter(b'fill76', intype=bytes) def fill76(text): """Any text. Wraps the text to fit in 76 columns.""" return fill(text, 76) @templatefilter(b'firstline', intype=bytes) def firstline(text): """Any text. Returns the first line of text.""" try: return text.splitlines(True)[0].rstrip(b'\r\n') except IndexError: return b'' @templatefilter(b'hex', intype=bytes) def hexfilter(text): """Any text. Convert a binary Mercurial node identifier into its long hexadecimal representation. """ return hex(text) @templatefilter(b'hgdate', intype=templateutil.date) def hgdate(text): """Date. Returns the date as a pair of numbers: "1157407993 25200" (Unix timestamp, timezone offset). """ return b"%d %d" % text @templatefilter(b'isodate', intype=templateutil.date) def isodate(text): """Date. Returns the date in ISO 8601 format: "2009-08-18 13:00 +0200". """ return dateutil.datestr(text, b'%Y-%m-%d %H:%M %1%2') @templatefilter(b'isodatesec', intype=templateutil.date) def isodatesec(text): """Date. Returns the date in ISO 8601 format, including seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date filter. 
""" return dateutil.datestr(text, b'%Y-%m-%d %H:%M:%S %1%2') def indent(text, prefix, firstline=b''): '''indent each non-empty line of text after first with prefix.''' lines = text.splitlines() num_lines = len(lines) endswithnewline = text[-1:] == b'\n' def indenter(): for i in pycompat.xrange(num_lines): l = lines[i] if l.strip(): yield prefix if i else firstline yield l if i < num_lines - 1 or endswithnewline: yield b'\n' return b"".join(indenter()) @templatefilter(b'json') def json(obj, paranoid=True): """Any object. Serializes the object to a JSON formatted text.""" if obj is None: return b'null' elif obj is False: return b'false' elif obj is True: return b'true' elif isinstance(obj, (int, pycompat.long, float)): return pycompat.bytestr(obj) elif isinstance(obj, bytes): return b'"%s"' % encoding.jsonescape(obj, paranoid=paranoid) elif isinstance(obj, type(u'')): raise error.ProgrammingError( b'Mercurial only does output with bytes: %r' % obj ) elif util.safehasattr(obj, b'keys'): out = [ b'"%s": %s' % (encoding.jsonescape(k, paranoid=paranoid), json(v, paranoid)) for k, v in sorted(pycompat.iteritems(obj)) ] return b'{' + b', '.join(out) + b'}' elif util.safehasattr(obj, b'__iter__'): out = [json(i, paranoid) for i in obj] return b'[' + b', '.join(out) + b']' raise error.ProgrammingError(b'cannot encode %r' % obj) @templatefilter(b'lower', intype=bytes) def lower(text): """Any text. Converts the text to lowercase.""" return encoding.lower(text) @templatefilter(b'nonempty', intype=bytes) def nonempty(text): """Any text. Returns '(none)' if the string is empty.""" return text or b"(none)" @templatefilter(b'obfuscate', intype=bytes) def obfuscate(text): """Any text. Returns the input text rendered as a sequence of XML entities. """ text = pycompat.unicode( text, pycompat.sysstr(encoding.encoding), r'replace' ) return b''.join([b'&#%d;' % ord(c) for c in text]) @templatefilter(b'permissions', intype=bytes) def permissions(flags): if b"l" in flags: return b"lrwxrwxrwx" if b"x" in flags: return b"-rwxr-xr-x" return b"-rw-r--r--" @templatefilter(b'person', intype=bytes) def person(author): """Any text. Returns the name before an email address, interpreting it as per RFC 5322. """ return stringutil.person(author) @templatefilter(b'revescape', intype=bytes) def revescape(text): """Any text. Escapes all "special" characters, except @. Forward slashes are escaped twice to prevent web servers from prematurely unescaping them. For example, "@foo bar/baz" becomes "@foo%20bar%252Fbaz". """ return urlreq.quote(text, safe=b'/@').replace(b'/', b'%252F') @templatefilter(b'rfc3339date', intype=templateutil.date) def rfc3339date(text): """Date. Returns a date using the Internet date format specified in RFC 3339: "2009-08-18T13:00:13+02:00". """ return dateutil.datestr(text, b"%Y-%m-%dT%H:%M:%S%1:%2") @templatefilter(b'rfc822date', intype=templateutil.date) def rfc822date(text): """Date. Returns a date using the same format used in email headers: "Tue, 18 Aug 2009 13:00:13 +0200". """ return dateutil.datestr(text, b"%a, %d %b %Y %H:%M:%S %1%2") @templatefilter(b'short', intype=bytes) def short(text): """Changeset hash. Returns the short form of a changeset hash, i.e. a 12 hexadecimal digit string. """ return text[:12] @templatefilter(b'shortbisect', intype=bytes) def shortbisect(label): """Any text. Treats `label` as a bisection status, and returns a single-character representing the status (G: good, B: bad, S: skipped, U: untested, I: ignored). 
Returns single space if `text` is not a valid bisection status. """ if label: return label[0:1].upper() return b' ' @templatefilter(b'shortdate', intype=templateutil.date) def shortdate(text): """Date. Returns a date like "2006-09-18".""" return dateutil.shortdate(text) @templatefilter(b'slashpath', intype=bytes) def slashpath(path): """Any text. Replaces the native path separator with slash.""" return util.pconvert(path) @templatefilter(b'splitlines', intype=bytes) def splitlines(text): """Any text. Split text into a list of lines.""" return templateutil.hybridlist(text.splitlines(), name=b'line') @templatefilter(b'stringescape', intype=bytes) def stringescape(text): return stringutil.escapestr(text) @templatefilter(b'stringify', intype=bytes) def stringify(thing): """Any type. Turns the value into text by converting values into text and concatenating them. """ return thing # coerced by the intype @templatefilter(b'stripdir', intype=bytes) def stripdir(text): """Treat the text as path and strip a directory level, if possible. For example, "foo" and "foo/bar" becomes "foo". """ dir = os.path.dirname(text) if dir == b"": return os.path.basename(text) else: return dir @templatefilter(b'tabindent', intype=bytes) def tabindent(text): """Any text. Returns the text, with every non-empty line except the first starting with a tab character. """ return indent(text, b'\t') @templatefilter(b'upper', intype=bytes) def upper(text): """Any text. Converts the text to uppercase.""" return encoding.upper(text) @templatefilter(b'urlescape', intype=bytes) def urlescape(text): """Any text. Escapes all "special" characters. For example, "foo bar" becomes "foo%20bar". """ return urlreq.quote(text) @templatefilter(b'user', intype=bytes) def userfilter(text): """Any text. Returns a short representation of a user name or email address.""" return stringutil.shortuser(text) @templatefilter(b'emailuser', intype=bytes) def emailuser(text): """Any text. Returns the user portion of an email address.""" return stringutil.emailuser(text) @templatefilter(b'utf8', intype=bytes) def utf8(text): """Any text. Converts from the local character encoding to UTF-8.""" return encoding.fromlocal(text) @templatefilter(b'xmlescape', intype=bytes) def xmlescape(text): text = ( text.replace(b'&', b'&amp;') .replace(b'<', b'&lt;') .replace(b'>', b'&gt;') .replace(b'"', b'&quot;') .replace(b"'", b'&#39;') ) # &apos; invalid in HTML return re.sub(b'[\x00-\x08\x0B\x0C\x0E-\x1F]', b' ', text) def websub(text, websubtable): """:websub: Any text. Only applies to hgweb. Applies the regular expression replacements defined in the websub section. """ if websubtable: for regexp, format in websubtable: text = regexp.sub(format, text) return text def loadfilter(ui, extname, registrarobj): """Load template filter from specified registrarobj""" for name, func in pycompat.iteritems(registrarobj._table): filters[name] = func # tell hggettext to extract docstrings from these functions: i18nfunctions = filters.values()
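
# A standalone sketch (plain str instead of Mercurial's bytes) of the bucketing
# rule used by the 'age' filter above: walk the scales from largest to smallest
# and report the first one whose count is at least 2 (seconds always match).
AGESCALES = [("year", 3600 * 24 * 365), ("month", 3600 * 24 * 30),
             ("week", 3600 * 24 * 7), ("day", 3600 * 24),
             ("hour", 3600), ("minute", 60), ("second", 1)]

def human_age(delta_seconds):
    delta = max(1, int(delta_seconds))
    for name, seconds in AGESCALES:
        n = delta // seconds
        if n >= 2 or seconds == 1:
            return "%d %s%s ago" % (n, name, "s" if n != 1 else "")

print(human_age(90000))  # '25 hours ago'
print(human_age(45))     # '45 seconds ago'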
da18cf4f34d0af241fde3ca00bbcd305d1faf789
d29d1dce52e7dd89059b7255cf50ac31eebb75ef
/tests/test_PD013.py
0016def51df29784bc0290bb0c170738113ac4d7
[ "MIT" ]
permissive
deppen8/pandas-vet
4a4230ba61b2e7a39d86c0f2e2865daf6bf5f8d9
ded2485a6ead29f503c8ea63f6aa8e41487f0ad5
refs/heads/main
2023-08-17T05:59:01.534456
2023-08-11T15:07:51
2023-08-11T15:07:51
172,439,221
169
23
MIT
2023-08-11T15:07:52
2019-02-25T05:19:51
Python
UTF-8
Python
false
false
842
py
test_PD013.py
""" Test to check functionality for use of the `.melt()` data frame method in preference to `.stack()` method. """ import ast from pandas_vet import PD013, VetPlugin def test_PD013_pass(): """ Test that using .melt() explicitly does not result in an error. """ statement = """table = df.melt( id_vars='airline', value_vars=['ATL', 'DEN', 'DFW'], value_name='airline delay' ) """ tree = ast.parse(statement) actual = list(VetPlugin(tree).run()) expected = [] assert actual == expected def test_PD013_fail_stack(): """ Test that using .stack() results in an error. """ statement = "table = df.stack(level=-1, dropna=True)" tree = ast.parse(statement) actual = list(VetPlugin(tree).run()) expected = [PD013(1, 8)] assert actual == expected
b2f577d9ee5fc9edd40b626852cd95d355906203
94c1805df5a09c39159d502f420d19ad54b567fc
/runtime/deps/gyp/test/mac/xcode-env-order/test.gyp
8f975f7d6b17feabd001f0c5f5ee55846f0f7124
[ "Apache-2.0", "LicenseRef-scancode-public-domain", "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause" ]
permissive
tmikov/jscomp
9805a5a4d06520549c57380f0df4a1c0aa0dab56
83828441cb38ec96603a6a60be06977d4852940a
refs/heads/develop
2021-01-19T02:56:35.102659
2016-04-12T06:19:30
2016-04-12T06:19:30
36,981,674
237
13
Apache-2.0
2018-10-14T09:48:12
2015-06-06T13:49:26
C
UTF-8
Python
false
false
5,079
gyp
test.gyp
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'targets': [ { 'target_name': 'test_app', 'product_name': 'Test', 'type': 'executable', 'mac_bundle': 1, 'sources': [ 'main.c', 'file.ext1', 'file.ext2', 'file.ext3', ], # Env vars in copies. 'copies': [ { 'destination': '<(PRODUCT_DIR)/${PRODUCT_NAME}-copy-brace', 'files': [ 'main.c', ], # ${SOURCE_ROOT} doesn't work with xcode }, { 'destination': '<(PRODUCT_DIR)/$(PRODUCT_NAME)-copy-paren', 'files': [ '$(SOURCE_ROOT)/main.c', ], }, { 'destination': '<(PRODUCT_DIR)/$PRODUCT_NAME-copy-bare', 'files': [ 'main.c', ], # $SOURCE_ROOT doesn't work with xcode }, ], # Env vars in actions. The $FOO's are here to test that env vars that # aren't defined are handled in some way that doesn't break the build. 'actions': [ { 'action_name': 'Action copy braces ${PRODUCT_NAME} ${FOO}', 'description': 'Action copy braces ${PRODUCT_NAME} ${FOO}', 'inputs': [ '${SOURCE_ROOT}/main.c' ], # Referencing ${PRODUCT_NAME} in action outputs doesn't work with # the Xcode generator (PRODUCT_NAME expands to "Test Support"). 'outputs': [ '<(PRODUCT_DIR)/action-copy-brace.txt' ], 'action': [ 'cp', '${SOURCE_ROOT}/main.c', '<(PRODUCT_DIR)/action-copy-brace.txt' ], }, { 'action_name': 'Action copy parens $(PRODUCT_NAME) $(FOO)', 'description': 'Action copy parens $(PRODUCT_NAME) $(FOO)', 'inputs': [ '$(SOURCE_ROOT)/main.c' ], # Referencing $(PRODUCT_NAME) in action outputs doesn't work with # the Xcode generator (PRODUCT_NAME expands to "Test Support"). 'outputs': [ '<(PRODUCT_DIR)/action-copy-paren.txt' ], 'action': [ 'cp', '$(SOURCE_ROOT)/main.c', '<(PRODUCT_DIR)/action-copy-paren.txt' ], }, { 'action_name': 'Action copy bare $PRODUCT_NAME $FOO', 'description': 'Action copy bare $PRODUCT_NAME $FOO', 'inputs': [ '$SOURCE_ROOT/main.c' ], # Referencing $PRODUCT_NAME in action outputs doesn't work with # the Xcode generator (PRODUCT_NAME expands to "Test Support"). 'outputs': [ '<(PRODUCT_DIR)/action-copy-bare.txt' ], 'action': [ 'cp', '$SOURCE_ROOT/main.c', '<(PRODUCT_DIR)/action-copy-bare.txt' ], }, ], # Env vars in xcode_settings. 'xcode_settings': { 'INFOPLIST_FILE': 'Info.plist', 'STRING_KEY': '/Source/Project', 'BRACE_DEPENDENT_KEY2': '${STRING_KEY}/${PRODUCT_NAME}', 'BRACE_DEPENDENT_KEY1': 'D:${BRACE_DEPENDENT_KEY2}', 'BRACE_DEPENDENT_KEY3': '${PRODUCT_TYPE}:${BRACE_DEPENDENT_KEY1}', 'PAREN_DEPENDENT_KEY2': '$(STRING_KEY)/$(PRODUCT_NAME)', 'PAREN_DEPENDENT_KEY1': 'D:$(PAREN_DEPENDENT_KEY2)', 'PAREN_DEPENDENT_KEY3': '$(PRODUCT_TYPE):$(PAREN_DEPENDENT_KEY1)', 'BARE_DEPENDENT_KEY2': '$STRING_KEY/$PRODUCT_NAME', 'BARE_DEPENDENT_KEY1': 'D:$BARE_DEPENDENT_KEY2', 'BARE_DEPENDENT_KEY3': '$PRODUCT_TYPE:$BARE_DEPENDENT_KEY1', 'MIXED_DEPENDENT_KEY': '${STRING_KEY}:$(PRODUCT_NAME):$MACH_O_TYPE', }, # Env vars in rules. The $FOO's are here to test that env vars that # aren't defined are handled in some way that doesn't break the build. 
'rules': [ { 'rule_name': 'brace_rule', 'message': 'Rule braces ${PRODUCT_NAME} ${FOO} <(RULE_INPUT_NAME)', 'extension': 'ext1', 'inputs': [ '${SOURCE_ROOT}/main.c' ], 'outputs': [ '<(PRODUCT_DIR)/rule-copy-brace.txt' ], 'action': [ 'cp', '${SOURCE_ROOT}/main.c', '<(PRODUCT_DIR)/rule-copy-brace.txt' ], }, { 'rule_name': 'paren_rule', 'message': 'Rule parens $(PRODUCT_NAME) $(FOO) <(RULE_INPUT_NAME)', 'extension': 'ext2', 'inputs': [ '$(SOURCE_ROOT)/main.c' ], 'outputs': [ '<(PRODUCT_DIR)/rule-copy-paren.txt' ], 'action': [ 'cp', '$(SOURCE_ROOT)/main.c', '<(PRODUCT_DIR)/rule-copy-paren.txt' ], }, # TODO: Fails in xcode. Looks like a bug in the xcode generator though # (which uses makefiles for rules, and thinks $PRODUCT_NAME is # $(P)RODUCT_NAME). #{ # 'rule_name': 'bare_rule', # 'message': 'Rule copy bare $PRODUCT_NAME $FOO', # 'extension': 'ext3', # 'inputs': [ '$SOURCE_ROOT/main.c' ], # 'outputs': [ '<(PRODUCT_DIR)/rule-copy-bare.txt' ], # 'action': [ 'cp', '$SOURCE_ROOT/main.c', # '<(PRODUCT_DIR)/rule-copy-bare.txt' ], #}, ], }, ], }
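
# A plain-Python sketch (separate from the .gyp file above) of the three Xcode
# variable spellings the target exercises: ${VAR}, $(VAR) and bare $VAR. The
# substitution below is illustrative only; real expansion is performed by
# gyp/Xcode, and undefined variables are shown here expanding to the empty
# string, mirroring the $FOO cases above.
import re

def expand(value, env):
    def repl(match):
        name = match.group(1) or match.group(2) or match.group(3)
        return env.get(name, "")  # undefined vars become ""
    return re.sub(r"\$\{(\w+)\}|\$\((\w+)\)|\$(\w+)", repl, value)

env = {"PRODUCT_NAME": "Test", "STRING_KEY": "/Source/Project"}
print(expand("D:${STRING_KEY}/$(PRODUCT_NAME)-$PRODUCT_NAME ${FOO}", env))
# D:/Source/Project/Test-Test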
b22ef848759470652a9d1a4f0da05c0f70fec135
3a6a211ea0d32405497fbd6486c490bb147e25f9
/third_party/google-endpoints/Crypto/SelfTest/Cipher/common.py
8bebed9cfbe3ee4325c3200de26fe5a88b844642
[ "Apache-2.0", "BSD-3-Clause" ]
permissive
catapult-project/catapult
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
53102de187a48ac2cfc241fef54dcbc29c453a8e
refs/heads/main
2021-05-25T07:37:22.832505
2021-05-24T08:01:49
2021-05-25T06:07:38
33,947,548
2,032
742
BSD-3-Clause
2022-08-26T16:01:18
2015-04-14T17:49:05
HTML
UTF-8
Python
false
false
16,599
py
common.py
# -*- coding: utf-8 -*- # # SelfTest/Hash/common.py: Common code for Crypto.SelfTest.Hash # # Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net> # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== """Self-testing for PyCrypto hash modules""" __revision__ = "$Id$" import sys import unittest from binascii import a2b_hex, b2a_hex from Crypto.Util.py3compat import * # For compatibility with Python 2.1 and Python 2.2 if sys.hexversion < 0x02030000: # Python 2.1 doesn't have a dict() function # Python 2.2 dict() function raises TypeError if you do dict(MD5='blah') def dict(**kwargs): return kwargs.copy() else: dict = dict class _NoDefault: pass # sentinel object def _extract(d, k, default=_NoDefault): """Get an item from a dictionary, and remove it from the dictionary.""" try: retval = d[k] except KeyError: if default is _NoDefault: raise return default del d[k] return retval # Generic cipher test case class CipherSelfTest(unittest.TestCase): def __init__(self, module, params): unittest.TestCase.__init__(self) self.module = module # Extract the parameters params = params.copy() self.description = _extract(params, 'description') self.key = b(_extract(params, 'key')) self.plaintext = b(_extract(params, 'plaintext')) self.ciphertext = b(_extract(params, 'ciphertext')) self.module_name = _extract(params, 'module_name', None) mode = _extract(params, 'mode', None) self.mode_name = str(mode) if mode is not None: # Block cipher self.mode = getattr(self.module, "MODE_" + mode) self.iv = _extract(params, 'iv', None) if self.iv is not None: self.iv = b(self.iv) # Only relevant for OPENPGP mode self.encrypted_iv = _extract(params, 'encrypted_iv', None) if self.encrypted_iv is not None: self.encrypted_iv = b(self.encrypted_iv) else: # Stream cipher self.mode = None self.iv = None self.extra_params = params def shortDescription(self): return self.description def _new(self, do_decryption=0): params = self.extra_params.copy() # Handle CTR mode parameters. 
By default, we use Counter.new(self.module.block_size) if hasattr(self.module, "MODE_CTR") and self.mode == self.module.MODE_CTR: from Crypto.Util import Counter ctr_class = _extract(params, 'ctr_class', Counter.new) ctr_params = _extract(params, 'ctr_params', {}).copy() if ctr_params.has_key('prefix'): ctr_params['prefix'] = a2b_hex(b(ctr_params['prefix'])) if ctr_params.has_key('suffix'): ctr_params['suffix'] = a2b_hex(b(ctr_params['suffix'])) if not ctr_params.has_key('nbits'): ctr_params['nbits'] = 8*(self.module.block_size - len(ctr_params.get('prefix', '')) - len(ctr_params.get('suffix', ''))) params['counter'] = ctr_class(**ctr_params) if self.mode is None: # Stream cipher return self.module.new(a2b_hex(self.key), **params) elif self.iv is None: # Block cipher without iv return self.module.new(a2b_hex(self.key), self.mode, **params) else: # Block cipher with iv if do_decryption and self.mode == self.module.MODE_OPENPGP: # In PGP mode, the IV to feed for decryption is the *encrypted* one return self.module.new(a2b_hex(self.key), self.mode, a2b_hex(self.encrypted_iv), **params) else: return self.module.new(a2b_hex(self.key), self.mode, a2b_hex(self.iv), **params) def runTest(self): plaintext = a2b_hex(self.plaintext) ciphertext = a2b_hex(self.ciphertext) ct1 = b2a_hex(self._new().encrypt(plaintext)) pt1 = b2a_hex(self._new(1).decrypt(ciphertext)) ct2 = b2a_hex(self._new().encrypt(plaintext)) pt2 = b2a_hex(self._new(1).decrypt(ciphertext)) if hasattr(self.module, "MODE_OPENPGP") and self.mode == self.module.MODE_OPENPGP: # In PGP mode, data returned by the first encrypt() # is prefixed with the encrypted IV. # Here we check it and then remove it from the ciphertexts. eilen = len(self.encrypted_iv) self.assertEqual(self.encrypted_iv, ct1[:eilen]) self.assertEqual(self.encrypted_iv, ct2[:eilen]) ct1 = ct1[eilen:] ct2 = ct2[eilen:] self.assertEqual(self.ciphertext, ct1) # encrypt self.assertEqual(self.ciphertext, ct2) # encrypt (second time) self.assertEqual(self.plaintext, pt1) # decrypt self.assertEqual(self.plaintext, pt2) # decrypt (second time) class CipherStreamingSelfTest(CipherSelfTest): def shortDescription(self): desc = self.module_name if self.mode is not None: desc += " in %s mode" % (self.mode_name,) return "%s should behave like a stream cipher" % (desc,) def runTest(self): plaintext = a2b_hex(self.plaintext) ciphertext = a2b_hex(self.ciphertext) # The cipher should work like a stream cipher # Test counter mode encryption, 3 bytes at a time ct3 = [] cipher = self._new() for i in range(0, len(plaintext), 3): ct3.append(cipher.encrypt(plaintext[i:i+3])) ct3 = b2a_hex(b("").join(ct3)) self.assertEqual(self.ciphertext, ct3) # encryption (3 bytes at a time) # Test counter mode decryption, 3 bytes at a time pt3 = [] cipher = self._new() for i in range(0, len(ciphertext), 3): pt3.append(cipher.encrypt(ciphertext[i:i+3])) # PY3K: This is meant to be text, do not change to bytes (data) pt3 = b2a_hex(b("").join(pt3)) self.assertEqual(self.plaintext, pt3) # decryption (3 bytes at a time) class CTRSegfaultTest(unittest.TestCase): def __init__(self, module, params): unittest.TestCase.__init__(self) self.module = module self.key = b(params['key']) self.module_name = params.get('module_name', None) def shortDescription(self): return """Regression test: %s.new(key, %s.MODE_CTR) should raise TypeError, not segfault""" % (self.module_name, self.module_name) def runTest(self): self.assertRaises(TypeError, self.module.new, a2b_hex(self.key), self.module.MODE_CTR) class 
CTRWraparoundTest(unittest.TestCase): def __init__(self, module, params): unittest.TestCase.__init__(self) self.module = module self.key = b(params['key']) self.module_name = params.get('module_name', None) def shortDescription(self): return """Regression test: %s with MODE_CTR should raise OverflowError on wraparound when shortcut used""" % (self.module_name,) def runTest(self): from Crypto.Util import Counter for disable_shortcut in (0, 1): # (False, True) Test CTR-mode shortcut and PyObject_CallObject code paths for little_endian in (0, 1): # (False, True) Test both endiannesses ctr = Counter.new(8*self.module.block_size, initial_value=2L**(8*self.module.block_size)-1, little_endian=little_endian, disable_shortcut=disable_shortcut) cipher = self.module.new(a2b_hex(self.key), self.module.MODE_CTR, counter=ctr) block = b("\x00") * self.module.block_size cipher.encrypt(block) self.assertRaises(OverflowError, cipher.encrypt, block) class CFBSegmentSizeTest(unittest.TestCase): def __init__(self, module, params): unittest.TestCase.__init__(self) self.module = module self.key = b(params['key']) self.description = params['description'] def shortDescription(self): return self.description def runTest(self): """Regression test: m.new(key, m.MODE_CFB, segment_size=N) should require segment_size to be a multiple of 8 bits""" for i in range(1, 8): self.assertRaises(ValueError, self.module.new, a2b_hex(self.key), self.module.MODE_CFB, segment_size=i) self.module.new(a2b_hex(self.key), self.module.MODE_CFB, "\0"*self.module.block_size, segment_size=8) # should succeed class RoundtripTest(unittest.TestCase): def __init__(self, module, params): from Crypto import Random unittest.TestCase.__init__(self) self.module = module self.iv = Random.get_random_bytes(module.block_size) self.key = b(params['key']) self.plaintext = 100 * b(params['plaintext']) self.module_name = params.get('module_name', None) def shortDescription(self): return """%s .decrypt() output of .encrypt() should not be garbled""" % (self.module_name,) def runTest(self): for mode in (self.module.MODE_ECB, self.module.MODE_CBC, self.module.MODE_CFB, self.module.MODE_OFB, self.module.MODE_OPENPGP): encryption_cipher = self.module.new(a2b_hex(self.key), mode, self.iv) ciphertext = encryption_cipher.encrypt(self.plaintext) if mode != self.module.MODE_OPENPGP: decryption_cipher = self.module.new(a2b_hex(self.key), mode, self.iv) else: eiv = ciphertext[:self.module.block_size+2] ciphertext = ciphertext[self.module.block_size+2:] decryption_cipher = self.module.new(a2b_hex(self.key), mode, eiv) decrypted_plaintext = decryption_cipher.decrypt(ciphertext) self.assertEqual(self.plaintext, decrypted_plaintext) class PGPTest(unittest.TestCase): def __init__(self, module, params): unittest.TestCase.__init__(self) self.module = module self.key = b(params['key']) def shortDescription(self): return "MODE_PGP was implemented incorrectly and insecurely. It's completely banished now." 
def runTest(self): self.assertRaises(ValueError, self.module.new, a2b_hex(self.key), self.module.MODE_PGP) class IVLengthTest(unittest.TestCase): def __init__(self, module, params): unittest.TestCase.__init__(self) self.module = module self.key = b(params['key']) def shortDescription(self): return "Check that all modes except MODE_ECB and MODE_CTR require an IV of the proper length" def runTest(self): self.assertRaises(ValueError, self.module.new, a2b_hex(self.key), self.module.MODE_CBC, "") self.assertRaises(ValueError, self.module.new, a2b_hex(self.key), self.module.MODE_CFB, "") self.assertRaises(ValueError, self.module.new, a2b_hex(self.key), self.module.MODE_OFB, "") self.assertRaises(ValueError, self.module.new, a2b_hex(self.key), self.module.MODE_OPENPGP, "") self.module.new(a2b_hex(self.key), self.module.MODE_ECB, "") self.module.new(a2b_hex(self.key), self.module.MODE_CTR, "", counter=self._dummy_counter) def _dummy_counter(self): return "\0" * self.module.block_size def make_block_tests(module, module_name, test_data): tests = [] extra_tests_added = 0 for i in range(len(test_data)): row = test_data[i] # Build the "params" dictionary params = {'mode': 'ECB'} if len(row) == 3: (params['plaintext'], params['ciphertext'], params['key']) = row elif len(row) == 4: (params['plaintext'], params['ciphertext'], params['key'], params['description']) = row elif len(row) == 5: (params['plaintext'], params['ciphertext'], params['key'], params['description'], extra_params) = row params.update(extra_params) else: raise AssertionError("Unsupported tuple size %d" % (len(row),)) # Build the display-name for the test p2 = params.copy() p_key = _extract(p2, 'key') p_plaintext = _extract(p2, 'plaintext') p_ciphertext = _extract(p2, 'ciphertext') p_description = _extract(p2, 'description', None) p_mode = p2.get('mode', 'ECB') if p_mode == 'ECB': _extract(p2, 'mode', 'ECB') if p_description is not None: description = p_description elif p_mode == 'ECB' and not p2: description = "p=%s, k=%s" % (p_plaintext, p_key) else: description = "p=%s, k=%s, %r" % (p_plaintext, p_key, p2) name = "%s #%d: %s" % (module_name, i+1, description) params['description'] = name params['module_name'] = module_name # Add extra test(s) to the test suite before the current test if not extra_tests_added: tests += [ CTRSegfaultTest(module, params), CTRWraparoundTest(module, params), CFBSegmentSizeTest(module, params), RoundtripTest(module, params), PGPTest(module, params), IVLengthTest(module, params), ] extra_tests_added = 1 # Add the current test to the test suite tests.append(CipherSelfTest(module, params)) # When using CTR mode, test that the interface behaves like a stream cipher if p_mode == 'CTR': tests.append(CipherStreamingSelfTest(module, params)) # When using CTR mode, test the non-shortcut code path. 
if p_mode == 'CTR' and not params.has_key('ctr_class'): params2 = params.copy() params2['description'] += " (shortcut disabled)" ctr_params2 = params.get('ctr_params', {}).copy() params2['ctr_params'] = ctr_params2 if not params2['ctr_params'].has_key('disable_shortcut'): params2['ctr_params']['disable_shortcut'] = 1 tests.append(CipherSelfTest(module, params2)) return tests def make_stream_tests(module, module_name, test_data): tests = [] for i in range(len(test_data)): row = test_data[i] # Build the "params" dictionary params = {} if len(row) == 3: (params['plaintext'], params['ciphertext'], params['key']) = row elif len(row) == 4: (params['plaintext'], params['ciphertext'], params['key'], params['description']) = row elif len(row) == 5: (params['plaintext'], params['ciphertext'], params['key'], params['description'], extra_params) = row params.update(extra_params) else: raise AssertionError("Unsupported tuple size %d" % (len(row),)) # Build the display-name for the test p2 = params.copy() p_key = _extract(p2, 'key') p_plaintext = _extract(p2, 'plaintext') p_ciphertext = _extract(p2, 'ciphertext') p_description = _extract(p2, 'description', None) if p_description is not None: description = p_description elif not p2: description = "p=%s, k=%s" % (p_plaintext, p_key) else: description = "p=%s, k=%s, %r" % (p_plaintext, p_key, p2) name = "%s #%d: %s" % (module_name, i+1, description) params['description'] = name params['module_name'] = module_name # Add the test to the test suite tests.append(CipherSelfTest(module, params)) tests.append(CipherStreamingSelfTest(module, params)) return tests # vim:set ts=4 sw=4 sts=4 expandtab:
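
# A hypothetical sketch of how a cipher module's self-test would drive the
# helpers above, in the style of PyCrypto 2.x's test_AES.py. The vector is the
# FIPS-197 AES-128 example; the AES import assumes PyCrypto is installed.
import unittest
from Crypto.Cipher import AES

aes_test_data = [
    # (plaintext, ciphertext, key) as hex strings; mode defaults to ECB
    ('00112233445566778899aabbccddeeff',
     '69c4e0d86a7b0430d8cdb78070b4c55a',
     '000102030405060708090a0b0c0d0e0f'),
]

suite = unittest.TestSuite(make_block_tests(AES, "AES", aes_test_data))
unittest.TextTestRunner().run(suite)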
85b1c04ff651b4a9c58958e4976532f309d8c923
e9ee565cfff9e6b2a1ea6f73368f4a8948274795
/tests/test_io/test_spia.py
2e3e234a034a358deeebc369d3c97f9e5cb6d514
[ "MIT" ]
permissive
pybel/pybel
7e79530b454e23ae48486a5c0e3207744b7fa139
ed66f013a77f9cbc513892b0dad1025b8f68bb46
refs/heads/master
2022-08-26T18:41:25.724850
2022-02-11T12:22:35
2022-02-11T12:22:35
68,376,693
133
40
MIT
2022-02-11T12:11:24
2016-09-16T12:09:49
Python
UTF-8
Python
false
false
10,645
py
test_spia.py
# -*- coding: utf-8 -*-

"""This module contains tests for the SPIA exporter."""

import unittest

from pandas import DataFrame

from pybel.dsl import activity, composite_abundance, pmod, protein, rna
from pybel.examples.sialic_acid_example import (
    cd33,
    citation,
    evidence_1,
    shp1,
    shp2,
    sialic_acid_cd33_complex,
    sialic_acid_graph,
    trem2,
)
from pybel.io.spia import (
    build_spia_matrices,
    get_matrix_index,
    to_spia_dfs,
    update_spia_matrices,
)


class TestSpia(unittest.TestCase):
    """Test SPIA Exporter."""

    def setUp(self):
        self.sialic_acid_graph = sialic_acid_graph.copy()

    def test_build_matrix(self):
        """Test building an empty matrix."""
        node_names = get_matrix_index(self.sialic_acid_graph)
        matrix_dict = build_spia_matrices(node_names)
        nodes = {"PTPN11", "TREM2", "PTPN6", "TYROBP", "CD33", "SYK"}
        self.assertEqual(set(matrix_dict["activation"].columns), nodes)
        self.assertEqual(set(matrix_dict["repression"].index), nodes)

    def test_update_matrix_inhibition_ubiquination(self):
        """Test updating the matrix with an inhibition ubiquitination."""
        sub = protein(namespace="HGNC", name="A", identifier="1")
        obj = protein(namespace="HGNC", name="B", identifier="2", variants=[pmod("Ub")])
        index = {"A", "B"}
        test_dict = {}
        test_matrix = DataFrame(0, index=index, columns=index)
        # Initialize matrix correctly
        self.assertEqual(test_matrix.values.all(), 0)
        test_dict["inhibition_ubiquination"] = test_matrix
        update_spia_matrices(test_dict, sub, obj, {"relation": "decreases"})
        self.assertEqual(test_dict["inhibition_ubiquination"]["A"]["B"], 1)
        self.assertEqual(test_dict["inhibition_ubiquination"]["A"]["A"], 0)
        self.assertEqual(test_dict["inhibition_ubiquination"]["B"]["A"], 0)
        self.assertEqual(test_dict["inhibition_ubiquination"]["B"]["B"], 0)

    def test_update_matrix_activation_ubiquination(self):
        """Test updating the matrix with an activation ubiquitination."""
        sub = protein(namespace="HGNC", name="A", identifier="1")
        obj = protein(namespace="HGNC", name="B", identifier="2", variants=[pmod("Ub")])
        index = {"A", "B"}
        test_dict = {}
        test_matrix = DataFrame(0, index=index, columns=index)
        test_dict["activation_ubiquination"] = test_matrix
        update_spia_matrices(test_dict, sub, obj, {"relation": "increases"})
        self.assertEqual(test_dict["activation_ubiquination"]["A"]["B"], 1)
        self.assertEqual(test_dict["activation_ubiquination"]["A"]["A"], 0)
        self.assertEqual(test_dict["activation_ubiquination"]["B"]["A"], 0)
        self.assertEqual(test_dict["activation_ubiquination"]["B"]["B"], 0)

    def test_update_matrix_inhibition_phosphorylation(self):
        """Test updating the matrix with an inhibition phosphorylation."""
        sub = protein(namespace="HGNC", name="A", identifier="1")
        obj = protein(namespace="HGNC", name="B", identifier="2", variants=[pmod("Ph")])
        index = {"A", "B"}
        test_dict = {}
        test_matrix = DataFrame(0, index=index, columns=index)
        test_dict["inhibition_phosphorylation"] = test_matrix
        update_spia_matrices(test_dict, sub, obj, {"relation": "decreases"})
        self.assertEqual(test_dict["inhibition_phosphorylation"]["A"]["B"], 1)
        self.assertEqual(test_dict["inhibition_phosphorylation"]["A"]["A"], 0)
        self.assertEqual(test_dict["inhibition_phosphorylation"]["B"]["A"], 0)
        self.assertEqual(test_dict["inhibition_phosphorylation"]["B"]["B"], 0)

    def test_update_matrix_activation_phosphorylation(self):
        """Test updating the matrix with an activation phosphorylation."""
        sub = protein(namespace="HGNC", name="A", identifier="1")
        obj = protein(namespace="HGNC", name="B", identifier="2", variants=[pmod("Ph")])
        index = {"A", "B"}
        test_dict = {}
        test_matrix = DataFrame(0, index=index, columns=index)
        test_dict["activation_phosphorylation"] = test_matrix
        update_spia_matrices(test_dict, sub, obj, {"relation": "increases"})
        self.assertEqual(test_dict["activation_phosphorylation"]["A"]["B"], 1)
        self.assertEqual(test_dict["activation_phosphorylation"]["A"]["A"], 0)
        self.assertEqual(test_dict["activation_phosphorylation"]["B"]["A"], 0)
        self.assertEqual(test_dict["activation_phosphorylation"]["B"]["B"], 0)

    def test_update_matrix_expression(self):
        """Test updating the matrix with RNA expression."""
        sub = protein(namespace="HGNC", name="A", identifier="1")
        obj = rna(namespace="HGNC", name="B", identifier="2")
        index = {"A", "B"}
        test_dict = {}
        test_matrix = DataFrame(0, index=index, columns=index)
        test_dict["expression"] = test_matrix
        update_spia_matrices(test_dict, sub, obj, {"relation": "increases"})
        self.assertEqual(test_dict["expression"]["A"]["B"], 1)
        self.assertEqual(test_dict["expression"]["A"]["A"], 0)
        self.assertEqual(test_dict["expression"]["B"]["A"], 0)
        self.assertEqual(test_dict["expression"]["B"]["B"], 0)

    def test_update_matrix_repression(self):
        """Test updating the matrix with RNA repression."""
        sub = protein(namespace="HGNC", name="A", identifier="1")
        obj = rna(namespace="HGNC", name="B", identifier="2")
        index = {"A", "B"}
        test_dict = {}
        test_matrix = DataFrame(0, index=index, columns=index)
        test_dict["repression"] = test_matrix
        update_spia_matrices(test_dict, sub, obj, {"relation": "decreases"})
        self.assertEqual(test_dict["repression"]["A"]["B"], 1)
        self.assertEqual(test_dict["repression"]["A"]["A"], 0)
        self.assertEqual(test_dict["repression"]["B"]["A"], 0)
        self.assertEqual(test_dict["repression"]["B"]["B"], 0)

    def test_update_matrix_activation(self):
        """Test updating the matrix with activation."""
        sub = protein(namespace="HGNC", name="A", identifier="1")
        obj = protein(namespace="HGNC", name="B", identifier="2")
        index = {"A", "B"}
        test_dict = {}
        test_matrix = DataFrame(0, index=index, columns=index)
        test_dict["activation"] = test_matrix
        update_spia_matrices(test_dict, sub, obj, {"relation": "increases"})
        self.assertEqual(test_dict["activation"]["A"]["B"], 1)
        self.assertEqual(test_dict["activation"]["A"]["A"], 0)
        self.assertEqual(test_dict["activation"]["B"]["A"], 0)
        self.assertEqual(test_dict["activation"]["B"]["B"], 0)

    def test_update_matrix_inhibition(self):
        """Test updating the matrix with inhibition."""
        sub = protein(namespace="HGNC", name="A", identifier="1")
        obj = protein(namespace="HGNC", name="B", identifier="2")
        index = {"A", "B"}
        test_dict = {}
        test_matrix = DataFrame(0, index=index, columns=index)
        test_dict["inhibition"] = test_matrix
        update_spia_matrices(test_dict, sub, obj, {"relation": "decreases"})
        self.assertEqual(test_dict["inhibition"]["A"]["B"], 1)
        self.assertEqual(test_dict["inhibition"]["A"]["A"], 0)
        self.assertEqual(test_dict["inhibition"]["B"]["A"], 0)
        self.assertEqual(test_dict["inhibition"]["B"]["B"], 0)

    def test_update_matrix_association(self):
        """Test updating the matrix with association."""
        sub = protein(namespace="HGNC", name="A", identifier="1")
        obj = protein(namespace="HGNC", name="B", identifier="2")
        index = {"A", "B"}
        test_dict = {}
        test_matrix = DataFrame(0, index=index, columns=index)
        test_dict["binding_association"] = test_matrix
        update_spia_matrices(test_dict, sub, obj, {"relation": "association"})
        self.assertEqual(test_dict["binding_association"]["A"]["B"], 1)
        self.assertEqual(test_dict["binding_association"]["A"]["A"], 0)
        self.assertEqual(test_dict["binding_association"]["B"]["A"], 0)
        self.assertEqual(test_dict["binding_association"]["B"]["B"], 0)

    def test_update_matrix_pmods(self):
        """Test updating the matrix with multiple protein modifications."""
        sub = protein(namespace="HGNC", name="A", identifier="1")
        obj = protein(
            namespace="HGNC",
            name="B",
            identifier="2",
            variants=[pmod("Ub"), pmod("Ph")],
        )
        index = {"A", "B"}
        test_dict = {}
        test_matrix = DataFrame(0, index=index, columns=index)
        test_dict["activation_ubiquination"] = test_matrix
        test_dict["activation_phosphorylation"] = test_matrix
        update_spia_matrices(test_dict, sub, obj, {"relation": "increases"})
        self.assertEqual(test_dict["activation_ubiquination"]["A"]["B"], 1)
        self.assertEqual(test_dict["activation_ubiquination"]["A"]["A"], 0)
        self.assertEqual(test_dict["activation_ubiquination"]["B"]["A"], 0)
        self.assertEqual(test_dict["activation_ubiquination"]["B"]["B"], 0)
        self.assertEqual(test_dict["activation_phosphorylation"]["A"]["B"], 1)
        self.assertEqual(test_dict["activation_phosphorylation"]["A"]["A"], 0)
        self.assertEqual(test_dict["activation_phosphorylation"]["B"]["A"], 0)
        self.assertEqual(test_dict["activation_phosphorylation"]["B"]["B"], 0)

    def test_spia_matrix_complexes(self):
        """Test handling of complexes."""
        self.sialic_acid_graph.add_increases(
            sialic_acid_cd33_complex,
            trem2,
            citation=citation,
            annotations={"Species": "9606", "Confidence": "High"},
            evidence=evidence_1,
            target_modifier=activity(),
        )
        spia_dfs = to_spia_dfs(self.sialic_acid_graph)
        self.assertEqual(spia_dfs["activation"][cd33.name][trem2.name], 1)

    def test_spia_matrix_composites(self):
        """Test handling of composites."""
        shp = composite_abundance([shp1, shp2])
        self.sialic_acid_graph.add_increases(
            shp,
            trem2,
            citation=citation,
            annotations={"Species": "9606", "Confidence": "High"},
            evidence=evidence_1,
            target_modifier=activity(),
        )
        spia_dfs = to_spia_dfs(self.sialic_acid_graph)
        self.assertEqual(spia_dfs["activation"][shp1.name][trem2.name], 1)
        self.assertEqual(spia_dfs["activation"][shp2.name][trem2.name], 1)
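A minimal usage sketch of the exporter these tests cover: `to_spia_dfs` returns a dict mapping relation names to pandas DataFrames, so dumping each matrix to CSV is a one-liner. The CSV export step and output paths are illustrative choices, not part of the test module.

# Hedged sketch: run the SPIA exporter on the example graph and write each
# relation matrix out for inspection.
from pybel.examples.sialic_acid_example import sialic_acid_graph
from pybel.io.spia import to_spia_dfs

def export_spia_csvs(out_dir="."):
    matrices = to_spia_dfs(sialic_acid_graph)  # dict: relation name -> DataFrame
    for relation, df in matrices.items():
        df.to_csv(f"{out_dir}/{relation}.csv")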
479da4f3046538ca7e6572e7b64c54f8462aa20e
15f0514701a78e12750f68ba09d68095172493ee
/Python3/78.py
a73509b3f0e5d98a8c703de3ed693039c1471711
[ "MIT" ]
permissive
strengthen/LeetCode
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
3ffa6dcbeb787a6128641402081a4ff70093bb61
refs/heads/master
2022-12-04T21:35:17.872212
2022-11-30T06:23:24
2022-11-30T06:23:24
155,958,163
936
365
MIT
2021-11-15T04:02:45
2018-11-03T06:47:38
null
UTF-8
Python
false
false
1,421
py
78.py
__________________________________________________________________________________________________
sample 24 ms submission
from typing import List


class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        res = [[]]

        def dfs(index, path):
            if index == len(nums):
                return
            currPath = path.copy()
            currPath.append(nums[index])
            res.append(currPath)
            for i in range(index + 1, len(nums)):
                dfs(i, currPath)

        for i in range(len(nums)):
            dfs(i, [])
        return res
__________________________________________________________________________________________________
sample 13012 kb submission
from typing import List


class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        n = 2**len(nums)
        outputs = []
        for i in range(0, n):
            output = []
            number = i
            for j in range(0, len(nums)):
                condition = number % 2
                if condition:
                    output.append(nums[j])
                number = number >> 1
            outputs.append(output)
        return outputs
__________________________________________________________________________________________________
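Both submissions enumerate the power set of nums: the first by depth-first extension of a growing prefix, the second by treating each integer in [0, 2^n) as a bit mask over the input. A quick cross-check sketch against an itertools-based reference follows; the reference function is mine, not part of either submission.

# Sketch: verify a submission against an itertools power-set reference.
from itertools import combinations

def subsets_reference(nums):
    return [list(c) for r in range(len(nums) + 1) for c in combinations(nums, r)]

assert sorted(map(sorted, Solution().subsets([1, 2, 3]))) == \
    sorted(map(sorted, subsets_reference([1, 2, 3])))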
2a7b919bc955d80783160e7690bc04c2d06557ed
5e739e329243ad4d7ea29b66387be52c923ead85
/pypeln/task/api/to_stage.py
4d727d83b030207d16b647c4280b235008adb2fc
[ "MIT" ]
permissive
cgarciae/pypeln
ba51746d36b4bec7ab97ab98ebe80b56c35ab98a
c91ec0d080f5b56191cdbe32a2d08a566552bc68
refs/heads/master
2023-08-05T02:26:18.485566
2023-03-22T14:52:27
2023-03-22T14:52:27
147,001,703
1,434
101
MIT
2023-07-20T16:18:21
2018-09-01T13:43:31
Python
UTF-8
Python
false
false
484
py
to_stage.py
import typing as tp

from pypeln.utils import A

from ..stage import Stage
from .from_iterable import from_iterable


def to_stage(
    obj: tp.Union[Stage[A], tp.Iterable[A], tp.AsyncIterable[A]], maxsize: int
) -> Stage[A]:
    if isinstance(obj, Stage):
        return obj
    elif isinstance(obj, tp.Iterable) or isinstance(obj, tp.AsyncIterable):
        return from_iterable(obj, maxsize=maxsize)
    else:
        raise ValueError(f"Object {obj} is not a Stage or iterable")
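This helper is what lets pypeln's task API accept either an existing Stage or a plain (async) iterable in the same argument position. A sketch of the caller-facing effect, assuming a standard pypeln install; `pl.task.map` performs this normalization internally, and the element order of a task stage is not guaranteed.

# Sketch: a plain iterable is normalized to a Stage, an existing Stage
# passes through untouched.
import pypeln as pl

stage = pl.task.map(lambda x: x * 2, range(5), workers=2)  # iterable -> Stage
print(list(stage))  # e.g. [0, 2, 4, 6, 8], order may vary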
671019e910392bcee89e5436f8b93454e010bd78
a29b8d6ae6642ef80d04ae99d721b703de06db69
/maro/cli/grass/lib/services/utils/params.py
8a106e023802e12db772e81cf462a8cb4621527f
[ "LicenseRef-scancode-generic-cla", "MIT" ]
permissive
microsoft/maro
6aab1a4e86fddabf7f242f0d1020d985a5f7a5f3
b3c6a589ad9036b03221e776a6929b2bc1eb4680
refs/heads/master
2023-08-24T16:52:38.250279
2023-05-15T04:31:58
2023-05-15T04:31:58
230,389,247
764
158
MIT
2023-07-25T20:59:06
2019-12-27T06:48:27
Python
UTF-8
Python
false
false
619
py
params.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import os


class NodeStatus:
    PENDING = "Pending"
    RUNNING = "Running"
    STOPPED = "Stopped"


class ContainerStatus:
    RUNNING = "running"
    EXITED = "exited"


class JobStatus:
    PENDING = "pending"
    RUNNING = "running"
    KILLED = "killed"
    FINISH = "finish"
    FAILED = "failed"


class UserRole:
    ADMIN = "admin"


class Paths:
    MARO_SHARED = "~/.maro-shared"
    ABS_MARO_SHARED = os.path.expanduser(path=MARO_SHARED)

    MARO_LOCAL = "~/.maro-local"
    ABS_MARO_LOCAL = os.path.expanduser(path=MARO_LOCAL)
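These classes are plain namespaces of string constants, so status logic elsewhere in the services reduces to simple comparisons. A small sketch; the `is_terminal` helper below is a hypothetical example of mine, not part of the module.

# Sketch: string-constant namespaces make status checks trivial comparisons.
def is_terminal(job_status: str) -> bool:
    return job_status in {JobStatus.KILLED, JobStatus.FINISH, JobStatus.FAILED}

assert is_terminal(JobStatus.FAILED) and not is_terminal(JobStatus.RUNNING)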
5ffddc9b2db956e5c10ed70dd509ec35f0f21c0b
8ca19f1a31070738b376c0370c4bebf6b7efcb43
/office365/outlook/mail/automatic_replies_setting.py
887e028fa47a6b8966ed671c73dc9c1d15037152
[ "MIT" ]
permissive
vgrem/Office365-REST-Python-Client
2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3
cbd245d1af8d69e013c469cfc2a9851f51c91417
refs/heads/master
2023-09-02T14:20:40.109462
2023-08-31T19:14:05
2023-08-31T19:14:05
51,305,798
1,006
326
MIT
2023-08-28T05:38:02
2016-02-08T15:24:51
Python
UTF-8
Python
false
false
1,252
py
automatic_replies_setting.py
from office365.runtime.client_value import ClientValue


class AutomaticRepliesSetting(ClientValue):
    """
    Configuration settings to automatically notify the sender of an incoming email with a message from the
    signed-in user. For example, an automatic reply to notify that the signed-in user is unavailable to
    respond to emails.
    """

    def __init__(self, external_audience=None, external_reply_message=None, internal_reply_message=None):
        """
        :param str external_audience: The set of audience external to the signed-in user's organization who
            will receive the ExternalReplyMessage, if Status is AlwaysEnabled or Scheduled. The possible
            values are: none, contactsOnly, all.
        :param str external_reply_message: The automatic reply to send to the specified external audience,
            if Status is AlwaysEnabled or Scheduled.
        :param str internal_reply_message: The automatic reply to send to the audience internal to the
            signed-in user's organization, if Status is AlwaysEnabled or Scheduled.
        """
        self.externalAudience = external_audience
        self.externalReplyMessage = external_reply_message
        self.internalReplyMessage = internal_reply_message
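A construction sketch using only what the class above defines; attaching the value to a live mailbox goes through the library's mailbox-settings API, which is out of scope here.

# Sketch: build the value object; the message strings are made up.
setting = AutomaticRepliesSetting(
    external_audience="all",
    external_reply_message="I am out of office.",
    internal_reply_message="Back on Monday.",
)
print(setting.externalAudience, setting.internalReplyMessage)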
79c19e0fb26f88368eb8975f0c8a1c116315991e
66f7d157f51897d08c45700f0a60ef6e4ddcaa34
/tests/test_filters.py
12cfdbc62a9877202b79736f1836385af36cb0e3
[ "MIT" ]
permissive
FKLC/AnyAPI
1d3eee97e507088ee754d80b6658cce7eca465d5
be6f23ad2d4affb8574da0082d1a9375fe11f9ed
refs/heads/master
2021-11-24T08:49:09.481422
2019-05-25T15:14:36
2019-05-25T15:14:36
166,031,038
131
9
null
null
null
null
UTF-8
Python
false
false
681
py
test_filters.py
from anyapi import AnyAPI
import pytest


@pytest.fixture
def httpbin():
    return AnyAPI("http://httpbin.org")


def add_header(kwargs):
    kwargs["headers"]["Test-Header"] = "Test-Value"


def test_filter_request(httpbin):
    """Test filter_request by setting a header (testing against headers only is enough)."""
    httpbin._filter_request.append(add_header)
    assert httpbin.anything.GET().json()["headers"]["Test-Header"] == "Test-Value"


def test_filter_response(httpbin):
    """Test filter_response by converting responses to JSON automatically."""
    httpbin._filter_response.append(lambda _, response: response.json())
    assert isinstance(httpbin.anything.GET(), dict)
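The same filter hook the tests exercise can inject authentication on every outgoing request. A sketch under the same conventions as `add_header` above (filters mutate the request kwargs in place); the header name and token are placeholders.

# Sketch: a request filter that adds an auth header to every call.
api = AnyAPI("http://httpbin.org")
api._filter_request.append(
    lambda kwargs: kwargs["headers"].update({"Authorization": "Bearer <token>"})
)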
2826fd718ff5d55f4946a8af5cf7dd4f82812e63
aa5c1a530f95d629e686ac9124caf1a49a9f23e9
/build_tools/github_actions/runner/config/health_server/health_server.py
626bb20b48803aa5df2310925290b1ab6b5774d6
[ "Apache-2.0", "LLVM-exception", "LicenseRef-scancode-unknown-license-reference" ]
permissive
openxla/iree
eacf5b239559e1d3b40c38039ac4c26315b523f7
13ef677e556d0a1d154e45b052fe016256057f65
refs/heads/main
2023-09-06T01:19:49.598662
2023-09-04T07:01:30
2023-09-04T07:01:30
208,145,128
387
110
Apache-2.0
2023-09-14T20:48:00
2019-09-12T20:57:39
C++
UTF-8
Python
false
false
3,326
py
health_server.py
#!/usr/bin/env python3

# Copyright 2022 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

"""A really basic health check HTTP server.

All it does is serve a 200 to every request, basically only confirming its
existence. This can later be extended with more functionality.

Note that http.server is not in general fit for production, but our very
limited usage of BaseHTTPRequestHandler here, not serving any files and not
parsing or making use of request input, does not present any security
concerns. Don't add those sorts of things. Additionally, this operates inside
a firewall that blocks all but a few IPs and even those are internal to the
network.
"""

import argparse
import glob
import http.server
import subprocess
from http.client import INTERNAL_SERVER_ERROR, NOT_FOUND, OK
from typing import Optional

RUNNER_SERVICE_NAME = "gh-runner"
CHECK_SERVICE_CMD = ["systemctl", "is-active", RUNNER_SERVICE_NAME]
CHECK_SERVICE_TIMEOUT = 10
RUNNER_WORK_LOG_PATTERN = "/runner-root/actions-runner/_diag/Worker_*"


class HealthCheckHandler(http.server.BaseHTTPRequestHandler):
    def send_success(self, *, msg: Optional[str] = None, body: Optional[str] = None):
        self.send_response(OK)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        if body is not None:
            self.wfile.write(bytes(body, encoding="utf-8"))

    def do_GET(self):
        try:
            subprocess.run(
                CHECK_SERVICE_CMD,
                check=True,
                text=True,
                stdout=subprocess.PIPE,
                timeout=CHECK_SERVICE_TIMEOUT,
            )
        except subprocess.TimeoutExpired as e:
            msg = f"'{' '.join(e.cmd)}' timed out: {e.stdout}"
            return self.send_error(INTERNAL_SERVER_ERROR, msg)
        except subprocess.CalledProcessError as e:
            return self.send_error(
                NOT_FOUND,
                f"Runner service not found: '{' '.join(e.cmd)}' returned"
                f" '{e.stdout.strip()}' (exit code {e.returncode})",
            )

        # The runner writes a log file for each job it runs. In our case it only
        # runs one, so we glob for anything matching that pattern. Yes that is an
        # absolutely ludicrous way to get the runner's status. GitHub should really
        # implement a proper health check so we don't have to hack around like this.
        if glob.glob(RUNNER_WORK_LOG_PATTERN):
            return self.send_success(body="active")
        return self.send_success(body="idle")


def main(args: argparse.Namespace):
    webServer = http.server.HTTPServer(("", args.port), HealthCheckHandler)
    print(f"Server started on port {args.port}. Ctrl+C to stop.")

    try:
        webServer.serve_forever()
    except KeyboardInterrupt:
        # Don't print an exception on interrupt. Add a newline to handle
        # printing of "^C"
        print()

    webServer.server_close()
    print("Server stopped.")


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=8080)
    return parser.parse_args()


if __name__ == "__main__":
    main(parse_args())
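A probe sketch from the client side: the body is "active" or "idle" depending on which `send_success` branch fires above, and the port matches the script's default. Localhost access is an assumption; in deployment the server sits behind the firewall described in the module docstring.

# Sketch: query the health endpoint and print status code plus body.
import urllib.request

with urllib.request.urlopen("http://localhost:8080") as resp:
    print(resp.status, resp.read().decode("utf-8"))  # e.g. "200 idle"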
9a8c764a0989de3e82f589146a288659c2f66641
c1ff870879152fba2b54eddfb7591ec322eb3061
/plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/deps/openssl/openssl-cl_asm.gypi
614c0cf316c46222c2b4838698c0f0136ee64d02
[ "ISC", "LicenseRef-scancode-public-domain", "BSD-2-Clause", "Artistic-2.0", "NAIST-2003", "MIT", "BSD-3-Clause", "Zlib", "NTP", "LicenseRef-scancode-openssl", "ICU", "LicenseRef-scancode-unicode", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-free-unknown" ]
permissive
MTASZTAKI/ApertusVR
1a9809fb7af81c3cd7fb732ed481ebe4ce66fefa
424ec5515ae08780542f33cc4841a8f9a96337b3
refs/heads/0.9
2022-12-11T20:03:42.926813
2019-10-11T09:29:45
2019-10-11T09:29:45
73,708,854
188
55
MIT
2022-12-11T08:53:21
2016-11-14T13:48:00
C++
UTF-8
Python
false
false
2,430
gypi
openssl-cl_asm.gypi
{
  'conditions': [
    ['target_arch=="ppc" and OS=="aix"', {
      'includes': ['config/archs/aix-gcc/asm/openssl-cl.gypi'],
    }, 'target_arch=="ppc" and OS=="linux"', {
      'includes': ['config/archs/linux-ppc/asm/openssl-cl.gypi'],
    }, 'target_arch=="ppc64" and OS=="aix"', {
      'includes': ['config/archs/aix64-gcc/asm/openssl-cl.gypi'],
    }, 'target_arch=="ppc64" and OS=="linux" and node_byteorder =="little"', {
      'includes': ['config/archs/linux-ppc64le/asm/openssl-cl.gypi'],
    }, 'target_arch=="ppc64" and OS=="linux"', {
      'includes': ['config/archs/linux-ppc64/asm/openssl-cl.gypi'],
    }, 'target_arch=="s390" and OS=="linux"', {
      'includes': ['config/archs/linux32-s390x/asm/openssl-cl.gypi'],
    }, 'target_arch=="s390x" and OS=="linux"', {
      'includes': ['config/archs/linux64-s390x/asm/openssl-cl.gypi'],
    }, 'target_arch=="arm" and OS=="linux"', {
      'includes': ['config/archs/linux-armv4/asm/openssl-cl.gypi'],
    }, 'target_arch=="arm64" and OS=="linux"', {
      'includes': ['config/archs/linux-aarch64/asm/openssl-cl.gypi'],
    }, 'target_arch=="ia32" and OS=="linux"', {
      'includes': ['config/archs/linux-elf/asm/openssl-cl.gypi'],
    }, 'target_arch=="ia32" and OS=="mac"', {
      'includes': ['config/archs/darwin-i386-cc/asm/openssl-cl.gypi'],
    }, 'target_arch=="ia32" and OS=="solaris"', {
      'includes': ['config/archs/solaris-x86-gcc/asm/openssl-cl.gypi'],
    }, 'target_arch=="ia32" and OS=="win"', {
      'includes': ['config/archs/VC-WIN32/asm/openssl-cl.gypi'],
    }, 'target_arch=="ia32"', {
      # noasm linux-elf for other ia32 platforms
      'includes': ['config/archs/linux-elf/asm/openssl-cl.gypi'],
    }, 'target_arch=="x64" and OS=="freebsd"', {
      'includes': ['config/archs/BSD-x86_64/asm/openssl-cl.gypi'],
    }, 'target_arch=="x64" and OS=="mac"', {
      'includes': ['config/archs/darwin64-x86_64-cc/asm/openssl-cl.gypi'],
    }, 'target_arch=="x64" and OS=="solaris"', {
      'includes': ['config/archs/solaris64-x86_64-gcc/asm/openssl-cl.gypi'],
    }, 'target_arch=="x64" and OS=="win"', {
      'includes': ['config/archs/VC-WIN64A/asm/openssl-cl.gypi'],
    }, 'target_arch=="x64" and OS=="linux"', {
      'includes': ['config/archs/linux-x86_64/asm/openssl-cl.gypi'],
    }, {
      # Other architectures don't use assembly
      'includes': ['config/archs/linux-x86_64/asm/openssl-cl.gypi'],
    }],
  ],
}
4b6b0a196180c978fba991e186fbb2d16e50235c
551990e68feda34d2a9173b05cc3a7259f4e8c9a
/projects/spie2022_radial_subsampling/plot_zoomed.py
1bcb7fcce9f6414d583d605bd49e55618cd855d3
[ "Apache-2.0" ]
permissive
NKI-AI/direct
a5c1ca0cb75d709b62e94ff76aba361e188d2d59
2a4c29342bc52a404aae097bc2654fb4323e1ac8
refs/heads/main
2023-08-03T11:37:52.941124
2023-06-28T14:11:56
2023-06-28T14:11:56
269,966,010
151
35
Apache-2.0
2023-06-28T14:11:58
2020-06-06T11:53:07
Python
UTF-8
Python
false
false
2,696
py
plot_zoomed.py
import matplotlib.patches as patches
from mpl_toolkits.axes_grid1.inset_locator import mark_inset, zoomed_inset_axes


def zoom_in_rectangle(img, ax, zoom, rectangle_xy, rectangle_width, rectangle_height, **kwargs):
    """
    Parameters
    ----------
    img: array-like
        The image data.
    ax: Axes
        Axes to place the inset axes.
    zoom: float
        Scaling factor of the data axes. zoom > 1 will enlarge the coordinates (i.e., "zoomed in"),
        while zoom < 1 will shrink the coordinates (i.e., "zoomed out").
    rectangle_xy: (float or int, float or int)
        The anchor point of the rectangle to be zoomed.
    rectangle_width: float or int
        Width of the rectangle to be zoomed.
    rectangle_height: float or int
        Height of the rectangle to be zoomed.

    Other Parameters
    ----------------
    cmap: str or Colormap, default 'gray'
        The Colormap instance or registered colormap name used to map scalar data to colors.
    zoomed_inset_loc: int or str, default: 'upper right'
        Location to place the inset axes.
    zoomed_inset_lw: float or None, default 1
        Zoomed inset axes linewidth.
    zoomed_inset_col: float or None, default black
        Zoomed inset axes color.
    mark_inset_loc1: int or str, default is 1
        First location to place line connecting box and inset axes.
    mark_inset_loc2: int or str, default is 3
        Second location to place line connecting box and inset axes.
    mark_inset_lw: float or None, default None
        Linewidth of lines connecting box and inset axes.
    mark_inset_ec: color or None
        Color of lines connecting box and inset axes.
    """
    axins = zoomed_inset_axes(ax, zoom, loc=kwargs.get("zoomed_inset_loc", 1))

    rect = patches.Rectangle(xy=rectangle_xy, width=rectangle_width, height=rectangle_height)
    x1, x2 = rect.get_x(), rect.get_x() + rect.get_width()
    y1, y2 = rect.get_y(), rect.get_y() + rect.get_height()
    axins.set_xlim(x1, x2)
    axins.set_ylim(y1, y2)

    mark_inset(
        ax,
        axins,
        loc1=kwargs.get("mark_inset_loc1", 1),
        loc2=kwargs.get("mark_inset_loc2", 3),
        lw=kwargs.get("mark_inset_lw", None),
        ec=kwargs.get("mark_inset_ec", "1.0"),
    )

    axins.imshow(
        img,
        cmap=kwargs.get("cmap", "gray"),
        origin="lower",
        vmin=kwargs.get("vmin", None),
        vmax=kwargs.get("vmax", None),
    )

    for axis in ["top", "bottom", "left", "right"]:
        axins.spines[axis].set_linewidth(kwargs.get("zoomed_inset_lw", 1))
        axins.spines[axis].set_color(kwargs.get("zoomed_inset_col", "k"))

    axins.set_xticklabels([])
    axins.set_yticklabels([])
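A hedged usage sketch: only `zoom_in_rectangle`'s signature is taken from the module above; the random image and rectangle coordinates are illustrative.

# Sketch: zoom into a 25x25 patch of a random image.
import matplotlib.pyplot as plt
import numpy as np

img = np.random.rand(100, 100)
fig, ax = plt.subplots()
ax.imshow(img, cmap="gray", origin="lower")
zoom_in_rectangle(img, ax, zoom=2, rectangle_xy=(20, 30),
                  rectangle_width=25, rectangle_height=25)
plt.show()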
aa8748c5d03ab8698e678794e4f1d5a40d8a84e1
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
/alipay/aop/api/domain/JFExportInputFieldModel.py
9cad32c655b90ba0a91844f80a31f2d9cb30dacc
[ "Apache-2.0" ]
permissive
alipay/alipay-sdk-python-all
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
1fad300587c9e7e099747305ba9077d4cd7afde9
refs/heads/master
2023-08-27T21:35:01.778771
2023-08-23T07:12:26
2023-08-23T07:12:26
133,338,689
247
70
Apache-2.0
2023-04-25T04:54:02
2018-05-14T09:40:54
Python
UTF-8
Python
false
false
6,083
py
JFExportInputFieldModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ValidationRule import ValidationRule


class JFExportInputFieldModel(object):

    def __init__(self):
        self._data_type = None
        self._default_value = None
        self._field_name = None
        self._field_tips = None
        self._field_title = None
        self._field_type = None
        self._gmt_modified = None
        self._mode_type = None
        self._priority = None
        self._regexp_rules = None

    @property
    def data_type(self):
        return self._data_type

    @data_type.setter
    def data_type(self, value):
        self._data_type = value

    @property
    def default_value(self):
        return self._default_value

    @default_value.setter
    def default_value(self, value):
        self._default_value = value

    @property
    def field_name(self):
        return self._field_name

    @field_name.setter
    def field_name(self, value):
        self._field_name = value

    @property
    def field_tips(self):
        return self._field_tips

    @field_tips.setter
    def field_tips(self, value):
        self._field_tips = value

    @property
    def field_title(self):
        return self._field_title

    @field_title.setter
    def field_title(self, value):
        self._field_title = value

    @property
    def field_type(self):
        return self._field_type

    @field_type.setter
    def field_type(self, value):
        self._field_type = value

    @property
    def gmt_modified(self):
        return self._gmt_modified

    @gmt_modified.setter
    def gmt_modified(self, value):
        self._gmt_modified = value

    @property
    def mode_type(self):
        return self._mode_type

    @mode_type.setter
    def mode_type(self, value):
        self._mode_type = value

    @property
    def priority(self):
        return self._priority

    @priority.setter
    def priority(self, value):
        self._priority = value

    @property
    def regexp_rules(self):
        return self._regexp_rules

    @regexp_rules.setter
    def regexp_rules(self, value):
        if isinstance(value, list):
            self._regexp_rules = list()
            for i in value:
                if isinstance(i, ValidationRule):
                    self._regexp_rules.append(i)
                else:
                    self._regexp_rules.append(ValidationRule.from_alipay_dict(i))

    def to_alipay_dict(self):
        params = dict()
        if self.data_type:
            if hasattr(self.data_type, 'to_alipay_dict'):
                params['data_type'] = self.data_type.to_alipay_dict()
            else:
                params['data_type'] = self.data_type
        if self.default_value:
            if hasattr(self.default_value, 'to_alipay_dict'):
                params['default_value'] = self.default_value.to_alipay_dict()
            else:
                params['default_value'] = self.default_value
        if self.field_name:
            if hasattr(self.field_name, 'to_alipay_dict'):
                params['field_name'] = self.field_name.to_alipay_dict()
            else:
                params['field_name'] = self.field_name
        if self.field_tips:
            if hasattr(self.field_tips, 'to_alipay_dict'):
                params['field_tips'] = self.field_tips.to_alipay_dict()
            else:
                params['field_tips'] = self.field_tips
        if self.field_title:
            if hasattr(self.field_title, 'to_alipay_dict'):
                params['field_title'] = self.field_title.to_alipay_dict()
            else:
                params['field_title'] = self.field_title
        if self.field_type:
            if hasattr(self.field_type, 'to_alipay_dict'):
                params['field_type'] = self.field_type.to_alipay_dict()
            else:
                params['field_type'] = self.field_type
        if self.gmt_modified:
            if hasattr(self.gmt_modified, 'to_alipay_dict'):
                params['gmt_modified'] = self.gmt_modified.to_alipay_dict()
            else:
                params['gmt_modified'] = self.gmt_modified
        if self.mode_type:
            if hasattr(self.mode_type, 'to_alipay_dict'):
                params['mode_type'] = self.mode_type.to_alipay_dict()
            else:
                params['mode_type'] = self.mode_type
        if self.priority:
            if hasattr(self.priority, 'to_alipay_dict'):
                params['priority'] = self.priority.to_alipay_dict()
            else:
                params['priority'] = self.priority
        if self.regexp_rules:
            if isinstance(self.regexp_rules, list):
                for i in range(0, len(self.regexp_rules)):
                    element = self.regexp_rules[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.regexp_rules[i] = element.to_alipay_dict()
            if hasattr(self.regexp_rules, 'to_alipay_dict'):
                params['regexp_rules'] = self.regexp_rules.to_alipay_dict()
            else:
                params['regexp_rules'] = self.regexp_rules
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = JFExportInputFieldModel()
        if 'data_type' in d:
            o.data_type = d['data_type']
        if 'default_value' in d:
            o.default_value = d['default_value']
        if 'field_name' in d:
            o.field_name = d['field_name']
        if 'field_tips' in d:
            o.field_tips = d['field_tips']
        if 'field_title' in d:
            o.field_title = d['field_title']
        if 'field_type' in d:
            o.field_type = d['field_type']
        if 'gmt_modified' in d:
            o.gmt_modified = d['gmt_modified']
        if 'mode_type' in d:
            o.mode_type = d['mode_type']
        if 'priority' in d:
            o.priority = d['priority']
        if 'regexp_rules' in d:
            o.regexp_rules = d['regexp_rules']
        return o
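The model round-trips through plain dicts, which is how the SDK moves values across the wire. A sketch using only the two methods defined above; the field values are made up.

# Sketch: dict -> model -> dict round trip.
model = JFExportInputFieldModel.from_alipay_dict(
    {"field_name": "amount", "field_title": "Amount", "priority": 1}
)
print(model.to_alipay_dict())  # {'field_name': 'amount', ...}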
dfde71f5800aa2ca5753ced75d6a6b33245f1cc8
bb33e6be8316f35decbb2b81badf2b6dcf7df515
/source/res/scripts/client/gui/scaleform/daapi/view/lobby/cybersport/cybersportintroview.py
88cde8b07e469c5e6132f2821eb9ede172e516d1
[]
no_license
StranikS-Scan/WorldOfTanks-Decompiled
999c9567de38c32c760ab72c21c00ea7bc20990c
d2fe9c195825ececc728e87a02983908b7ea9199
refs/heads/1.18
2023-08-25T17:39:27.718097
2022-09-22T06:49:44
2022-09-22T06:49:44
148,696,315
103
39
null
2022-09-14T17:50:03
2018-09-13T20:49:11
Python
UTF-8
Python
false
false
7,791
py
cybersportintroview.py
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/cyberSport/CyberSportIntroView.py
from gui.Scaleform.framework.managers.loaders import SFViewLoadParams
from gui.shared.gui_items import GUI_ITEM_TYPE
from gui.shared.items_cache import CACHE_SYNC_REASON
from helpers import dependency
from helpers.i18n import makeString as _ms
from account_helpers.AccountSettings import SELECTED_INTRO_VEHICLES_FIELD
from gui.ClientUpdateManager import g_clientUpdateManager
from gui.shared import events
from gui.shared.gui_items.Vehicle import VEHICLE_CLASS_NAME as _VCN
from gui.shared.events import CSVehicleSelectEvent
from gui.shared.event_bus import EVENT_BUS_SCOPE
from gui.shared.formatters import text_styles
from gui.Scaleform.daapi.view.lobby.rally.vo_converters import makeIntroVehicleVO
from gui.Scaleform.daapi.view.meta.CyberSportIntroMeta import CyberSportIntroMeta
from gui.Scaleform.locale.TOOLTIPS import TOOLTIPS
from gui.Scaleform.locale.CYBERSPORT import CYBERSPORT
from gui.Scaleform.genConsts.CYBER_SPORT_ALIASES import CYBER_SPORT_ALIASES
from nation_change.nation_change_helpers import iterVehTypeCDsInNationGroup
from nation_change_helpers.client_nation_change_helper import getValidVehicleCDForNationChange
from skeletons.gui.shared import IItemsCache

_ACCEPTED_VEH_TYPES = (_VCN.LIGHT_TANK, _VCN.MEDIUM_TANK, _VCN.HEAVY_TANK)


class _IntroViewVO(object):

    def __init__(self):
        self.__data = {
            'teamDescriptionText': '',
            'isTeamDescriptionTooltip': False,
            'teamDescriptionTooltip': '',
            'createBtnLabel': '',
            'createBtnTooltip': '',
            'isCreateBtnEnabled': False,
            'isCreateBtnVisible': False,
            'isCanCreateBattle': False,
            'isCanJoinBattle': False,
            'isNeedAddPlayers': False,
            'isHaveTeamToShow': False,
        }

    def getData(self):
        return self.__data

    def showCreateButton(self, label, tooltip, enabled=True):
        self.__data['isCreateBtnVisible'] = True
        self.__data['isCreateBtnEnabled'] = enabled
        self.__data['createBtnLabel'] = label
        self.__data['createBtnTooltip'] = tooltip

    def moveToTheUnitByCreateButton(self):
        self.__data['isCanCreateBattle'] = self.__data['isCanJoinBattle'] = True

    def needAddPlayers(self):
        self.__data['isNeedAddPlayers'] = True

    def fillDefault(self):
        self.showCreateButton(_ms(CYBERSPORT.INTROVIEW_RIGHTBLOCK_BTNLABEL), '', enabled=True)


class CyberSportIntroView(CyberSportIntroMeta):
    itemsCache = dependency.descriptor(IItemsCache)

    def showSelectorPopup(self):
        rosterSettings = self.prbEntity.getRosterSettings()
        self._currentVehCD = -1
        self.fireEvent(events.LoadViewEvent(
            SFViewLoadParams(CYBER_SPORT_ALIASES.VEHICLE_SELECTOR_POPUP_PY),
            ctx={
                'isMultiSelect': False,
                'infoText': CYBERSPORT.WINDOW_VEHICLESELECTOR_INFO_INTRO,
                'titleText': CYBERSPORT.WINDOW_VEHICLESELECTOR_TITLE,
                'selectButton': CYBERSPORT.WINDOW_VEHICLESELECTOR_BUTTONS_SELECT,
                'cancelButton': CYBERSPORT.WINDOW_VEHICLESELECTOR_BUTTONS_CANCEL,
                'compatibleOnlyLabel': CYBERSPORT.WINDOW_VEHICLESELECTOR_FILTERS_MATCHES,
                'componentsOffset': 45,
                'selectedVehicles': self.__getSelectedVehicles(),
                'section': 'cs_intro_view_vehicle',
                'levelsRange': rosterSettings.getLevelsRange(),
                'vehicleTypes': _ACCEPTED_VEH_TYPES,
            }), scope=EVENT_BUS_SCOPE.LOBBY)

    def _populate(self):
        super(CyberSportIntroView, self)._populate()
        self.addListener(CSVehicleSelectEvent.VEHICLE_SELECTED, self.__updateSelectedVehicles)
        self.itemsCache.onSyncCompleted += self.__onCacheResync
        data = {
            'titleLblText': text_styles.promoTitle(CYBERSPORT.WINDOW_INTRO_TITLE),
            'descrLblText': text_styles.main(CYBERSPORT.WINDOW_INTRO_DESCRIPTION),
            'listRoomTitleLblText': text_styles.promoSubTitle(CYBERSPORT.WINDOW_INTRO_SEARCH_TITLE),
            'listRoomDescrLblText': text_styles.main(CYBERSPORT.WINDOW_INTRO_SEARCH_DESCRIPTION),
            'listRoomBtnLabel': _ms(CYBERSPORT.WINDOW_INTRO_SEARCH_BTN),
            'autoTitleLblText': text_styles.middleTitle(CYBERSPORT.WINDOW_INTRO_AUTO_TITLE),
            'autoDescrLblText': text_styles.main(CYBERSPORT.WINDOW_INTRO_AUTO_DESCRIPTION),
            'vehicleBtnTitleTfText': text_styles.standard(CYBERSPORT.BUTTON_CHOOSEVEHICLES_SELECTED),
            'rightBlockHeader': text_styles.promoSubTitle(CYBERSPORT.INTROVIEW_RIGHTBLOCK_HEADER),
            'rightBlockDescr': text_styles.main(CYBERSPORT.INTROVIEW_RIGHTBLOCK_DESCR),
            'rightBlockBtnLbl': _ms(CYBERSPORT.INTROVIEW_RIGHTBLOCK_BTNLABEL),
        }
        self.as_setTextsS(data)
        self.__checkSelectedVehicles()
        self.__updateAutoSearchVehicle(self.__getSelectedVehicles())

    def _dispose(self):
        self.removeListener(CSVehicleSelectEvent.VEHICLE_SELECTED, self.__updateSelectedVehicles)
        self.itemsCache.onSyncCompleted -= self.__onCacheResync
        g_clientUpdateManager.removeObjectCallbacks(self)
        super(CyberSportIntroView, self)._dispose()

    def __checkSelectedVehicles(self):
        vehsIntCD = self.__getSelectedVehicles()
        if vehsIntCD:
            vehIntCD = vehsIntCD[0]
            vehicle = self.itemsCache.items.getItemByCD(vehIntCD)
            if not vehicle.activeInNationGroup:
                vehIntCD = getValidVehicleCDForNationChange(vehIntCD)
                self.prbEntity.setSelectedVehicles(SELECTED_INTRO_VEHICLES_FIELD, [vehIntCD])

    def __onCacheResync(self, reason, diff):
        if reason != CACHE_SYNC_REASON.CLIENT_UPDATE:
            return
        else:
            if diff is not None and GUI_ITEM_TYPE.VEHICLE in diff:
                vehDiff = diff[GUI_ITEM_TYPE.VEHICLE]
                for vehIntCD in vehDiff:
                    vehicle = self.itemsCache.items.getItemByCD(vehIntCD)
                    if not vehicle.activeInNationGroup and self._currentVehCD == vehIntCD:
                        itemCD = iterVehTypeCDsInNationGroup(vehicle.intCD).next()
                        self.__setSelectedVehicle(itemCD)
            return

    def __updateSelectedVehicles(self, event):
        if event.ctx:
            vehIntCD = int(event.ctx[0])
            self.__setSelectedVehicle(vehIntCD)

    def __setSelectedVehicle(self, vehIntCD):
        self.prbEntity.setSelectedVehicles(SELECTED_INTRO_VEHICLES_FIELD, [vehIntCD])
        self.__updateAutoSearchVehicle([vehIntCD])

    def __updateAutoSearchVehicle(self, vehsIntCD):
        if vehsIntCD:
            vehIntCD = vehsIntCD[0]
            vehicle = self.itemsCache.items.getItemByCD(vehIntCD)
            levelsRange = self.prbEntity.getRosterSettings().getLevelsRange()
            if vehicle.level not in levelsRange:
                isReadyVehicle = False
                warnTooltip = TOOLTIPS.CYBERSPORT_INTRO_SELECTEDVEHICLEWARN_INCOMPATIBLELEVEL
            elif vehicle.type not in _ACCEPTED_VEH_TYPES:
                isReadyVehicle = False
                warnTooltip = TOOLTIPS.CYBERSPORT_INTRO_SELECTEDVEHICLEWARN_INCOMPATIBLETYPE
            elif vehicle.isOnlyForEpicBattles:
                isReadyVehicle = False
                warnTooltip = TOOLTIPS.CYBERSPORT_UNIT_FIGHTBTN_EVENTVEHICLEWRONGMODE
            else:
                warnTooltip, isReadyVehicle = '', vehicle.isReadyToPrebattle()
            self._currentVehCD = vehIntCD
            self.as_setSelectedVehicleS(makeIntroVehicleVO(vehicle, isReadyVehicle, warnTooltip))
        else:
            self.as_setNoVehiclesS(TOOLTIPS.CYBERSPORT_NOVEHICLESINHANGAR)

    def __getSelectedVehicles(self):
        return self.prbEntity.getSelectedVehicles(SELECTED_INTRO_VEHICLES_FIELD)
6d244b2b6bd6efb7533b800737abe6293db299e6
9587c0df0e5b36e2a9cdad9a2666885efe812620
/test/test_geom.py
16380a9b36dd5ccfc17441bb23c319c5642416fd
[ "LicenseRef-scancode-unknown-license-reference", "BSD-2-Clause" ]
permissive
osmcode/pyosmium
c4d1b743824ea3bda8f3248d3e39a60b22c2940e
0459fb4126f51d215daca03620286c191676c0a1
refs/heads/master
2023-07-20T13:17:04.726937
2023-07-15T18:27:03
2023-07-15T18:27:03
24,344,280
277
63
BSD-2-Clause
2023-05-24T21:26:17
2014-09-22T20:25:42
Python
UTF-8
Python
false
false
4,646
py
test_geom.py
# SPDX-License-Identifier: BSD
#
# This file is part of Pyosmium.
#
# Copyright (C) 2022 Sarah Hoffmann.
import json

import pytest

import osmium as o
import osmium.geom

wkbfab = o.geom.WKBFactory()


@pytest.fixture
def node_geom(test_data):
    def _run(factory, data='n1 x-23.3 y28.0'):
        geoms = []

        def _mk_point(node):
            geoms.append(factory.create_point(node))

        handler = o.make_simple_handler(node=_mk_point)
        handler.apply_file(test_data(data))

        assert len(geoms) == 1
        return geoms[0]

    return _run


def test_wkb_create_node(node_geom):
    wkb = node_geom(o.geom.WKBFactory())
    if wkb.startswith('01'):
        assert wkb.startswith('0101000000')
    else:
        assert wkb.startswith('00')


def test_wkt_create_node(node_geom):
    wkt = node_geom(o.geom.WKTFactory())
    assert wkt.startswith('POINT(')


def test_geojson_create_node(node_geom):
    geom = node_geom(o.geom.GeoJSONFactory())
    geom = json.loads(geom)
    assert geom['type'] == 'Point'


@pytest.fixture
def way_geom(test_data):
    def _run(factory):
        opl = test_data(['n1 x0 y0', 'n2 x1 y0', 'n3 x0 y1', 'w1 Nn1,n2,n3'])
        geoms = []

        def _mk_way(w):
            geoms.append(factory.create_linestring(w))
            geoms.append(factory.create_linestring(w, direction=o.geom.direction.BACKWARD))
            geoms.append(factory.create_linestring(w, use_nodes=o.geom.use_nodes.ALL))

        handler = o.make_simple_handler(way=_mk_way)
        handler.apply_file(opl, locations=True)

        assert len(geoms) == 3
        return geoms

    return _run


def test_wkb_create_way(way_geom):
    wkbs = way_geom(o.geom.WKBFactory())
    for wkb in wkbs:
        if wkb.startswith('01'):
            assert wkb.startswith('0102000000030'), "wkb: " + wkb
        else:
            assert wkb.startswith('00')


def test_wkt_create_way(way_geom):
    wkts = way_geom(o.geom.WKTFactory())
    assert all(wkt.startswith('LINESTRING(') for wkt in wkts)


def test_geojson_create_way(way_geom):
    geoms = way_geom(o.geom.GeoJSONFactory())
    assert all(json.loads(geom)['type'] == 'LineString' for geom in geoms)


@pytest.fixture
def area_geom(test_data):
    def _run(factory):
        opl = test_data(['n1 x0 y0', 'n2 x1 y0', 'n3 x0 y1', 'w23 Nn1,n2,n3,n1 Tarea=yes'])
        geoms = []

        def _mk_area(a):
            geoms.append(factory.create_multipolygon(a))

        handler = o.make_simple_handler(area=_mk_area)
        handler.apply_file(opl, locations=True)

        assert len(geoms) == 1
        return geoms[0]

    return _run


def test_wkb_create_poly(area_geom):
    wkb = area_geom(o.geom.WKBFactory())
    if wkb.startswith('01'):
        assert wkb.startswith('010600000001'), "wkb: " + wkb
    else:
        assert wkb.startswith('00')


def test_wkt_create_poly(area_geom):
    wkt = area_geom(o.geom.WKTFactory())
    assert wkt.startswith('MULTIPOLYGON(')


def test_geojson_create_poly(area_geom):
    geom = area_geom(o.geom.GeoJSONFactory())
    geom = json.loads(geom)
    assert geom['type'] == 'MultiPolygon'


def test_lonlat_to_mercator():
    c = o.geom.lonlat_to_mercator(o.geom.Coordinates(3.4, -7.3))
    assert c.x == pytest.approx(378486.2686971)
    assert c.y == pytest.approx(-814839.8325696)


def test_mercator_lonlat():
    c = o.geom.mercator_to_lonlat(o.geom.Coordinates(0.03, 10.2))
    assert c.x == pytest.approx(0.00000026, rel=1e-1)
    assert c.y == pytest.approx(0.00009162, rel=1e-1)


def test_coordinate_from_location():
    c = o.geom.Coordinates(o.osm.Location(10.0, -3.0))
    assert c.x == pytest.approx(10.0)
    assert c.y == pytest.approx(-3.0)


def test_haversine():
    data = ['n1 x0 y0', 'n2 x1 y0', 'n3 x0 y1', 'w1 Nn1,n2,n3']
    results = []

    def call_haversine(w):
        results.append(o.geom.haversine_distance(w.nodes))

    handler = o.make_simple_handler(way=call_haversine)
    handler.apply_buffer('\n'.join(data).encode('utf-8'), 'opl', locations=True)

    assert 1 == len(results)
    assert 268520 == pytest.approx(results[0])


def test_haversine_invalid_object():
    data = ['n1 x0 y0', 'n2 x1 y0', 'n3 x0 y1', 'w1 Nn1,n2,n3']
    results = []

    def call_haversine(w):
        results.append(w.nodes)

    handler = o.make_simple_handler(way=call_haversine)
    handler.apply_buffer('\n'.join(data).encode('utf-8'), 'opl', locations=True)

    assert results
    with pytest.raises(RuntimeError, match="removed OSM object"):
        o.geom.haversine_distance(results[0])
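A sketch of the same factory API outside the test harness, assuming only the pyosmium calls exercised above (SimpleHandler, apply_buffer with OPL input, WKTFactory.create_point); the node coordinates are made up.

# Sketch: build a WKT point directly from an OPL-encoded node.
import osmium

class PointHandler(osmium.SimpleHandler):
    def __init__(self):
        super().__init__()
        self.fab = osmium.geom.WKTFactory()

    def node(self, n):
        print(self.fab.create_point(n))  # e.g. POINT(2.5 13)

PointHandler().apply_buffer(b"n1 x2.5 y13.0", "opl")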
d9c38757b2bde606fbe58b89957da7d658a5fe0b
82b05fc158acbb10263a9e2415caf31ed4ea1ff4
/graphbrain/parsers/nlp.py
2c24f40b22641bffffffb3ffb0588790fddf6128
[ "MIT" ]
permissive
graphbrain/graphbrain
e655de5c9f7d755b7a34649a461762d7def501ff
8cb019eeea4bfba036f66ca742f1b4c3fc2c9c6a
refs/heads/master
2023-09-04T04:07:04.985162
2023-07-19T12:41:20
2023-07-19T12:41:20
51,751,006
534
60
MIT
2023-03-10T21:32:47
2016-02-15T11:25:11
Python
UTF-8
Python
false
false
1,254
py
nlp.py
from collections import OrderedDict

from asciitree import LeftAligned
from termcolor import colored


def with_color(text, color, colors=True):
    if colors:
        return colored(text, color)
    else:
        return text


def token2str(token, colors=False):
    word = with_color(token.lower_.strip(), 'cyan', colors=colors)
    lemma = token.lemma_.strip()
    tag = with_color(token.tag_, 'green', colors=colors)
    dep = with_color(token.dep_, 'yellow', colors=colors)
    named_entity = token.ent_type_
    if named_entity != '':
        named_entity = with_color('{{{}}}'.format(named_entity), 'magenta', colors=colors)
    return '{}/{}/{} ({}) {}'.format(word, lemma, tag, dep, named_entity)


def _token2label_tree(token, prefix='*', colors=True):
    children = [_token2label_tree(leaf, '<') for leaf in token.lefts] + \
               [_token2label_tree(leaf, '>') for leaf in token.rights]
    label = '{} {}'.format(with_color(prefix, 'red', colors=colors), token2str(token))
    return label, OrderedDict(children)


def print_tree(token, colors=True):
    label, children = _token2label_tree(token, colors=colors)
    tr = LeftAligned()
    print(tr({label: children}))
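`print_tree` expects a spaCy token, typically a sentence root, and renders its dependency subtree as ASCII art. A hedged sketch; loading 'en_core_web_sm' assumes that model is installed.

# Sketch: render the dependency tree of a parsed sentence.
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Graphbrain parses sentences into hypergraphs.")
for sent in doc.sents:
    print_tree(sent.root)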
18c6a236495811516575c4b8df4ffaefc1cb10d8
5234f553b85839546b50c0575e84c2e451c8adcf
/src/robot_upstart/job.py
895525cb86c6902cfe028abd3f8afdb64d90b6df
[ "BSD-3-Clause" ]
permissive
clearpathrobotics/robot_upstart
bb766eec7f26415bb0781ad9b290d40f5709e03c
987376f5c49864e9e250e5bb5b88ae13e0ed6973
refs/heads/noetic-devel
2023-07-24T20:27:05.055264
2022-02-16T13:47:22
2022-02-16T13:47:22
12,384,688
196
103
BSD-3-Clause
2023-07-14T14:33:09
2013-08-26T17:01:19
Python
UTF-8
Python
false
false
9,757
py
job.py
# Software License Agreement (BSD)
#
# @author    Mike Purvis <mpurvis@clearpathrobotics.com>
# @copyright (c) 2015, Clearpath Robotics, Inc., All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the
#    following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
#    following disclaimer in the documentation and/or other materials provided with the distribution.
#  * Neither the name of Clearpath Robotics nor the names of its contributors may be used to endorse or
#    promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

"""
This file defines the Job class, which is the primary code API to robot_upstart.
"""

import getpass
import os
import json
import subprocess

from glob import glob as glob_files

from catkin.find_in_workspaces import find_in_workspaces

from . import providers


class Job(object):
    """ Represents a ROS configuration to launch on machine startup. """

    def __init__(self, name="ros", interface=None, user=None, workspace_setup=None,
                 rosdistro=None, master_uri=None, log_path=None, systemd_after=None):
        """Construct a new Job definition.

        :param name: Name of job to create. Defaults to "ros", but you might
            prefer to use the name of your platform.
        :type name: str
        :param interface: Network interface to bring ROS up with. If specified,
            the job will come up with that network interface, and ROS_IP will be
            set to that interface's IP address. If unspecified, the job will
            come up on system startup, and ROS_HOSTNAME will be set to the
            system's hostname.
        :type interface: str
        :param user: Unprivileged user to launch the job as. Defaults to the
            user creating the job.
        :type user: str
        :param workspace_setup: Location of the workspace setup file to source
            for the job's ROS context. Defaults to the current workspace.
        :type workspace_setup: str
        :param rosdistro: rosdistro to use for the /etc/ros/DISTRO path.
            Defaults to $ROS_DISTRO from the current environment.
        :type rosdistro: str
        :param master_uri: For systems with multiple computers, you may want
            this job to launch with ROS_MASTER_URI pointing to another machine.
        :type master_uri: str
        :param log_path: The location to set ROS_LOG_DIR to. If changed from
            the default of using /tmp, it is the user's responsibility to
            manage log rotation.
        :type log_path: str
        """
        self.name = name
        self.interface = interface

        # Fall back on current user as the user to run ROS as.
        self.user = user or getpass.getuser()

        # Fall back on current workspace setup file if not explicitly specified.
        self.workspace_setup = workspace_setup or \
            os.environ['CMAKE_PREFIX_PATH'].split(':')[0] + '/setup.bash'

        # Fall back on current distro if not otherwise specified.
        self.rosdistro = rosdistro or os.environ['ROS_DISTRO']

        self.master_uri = master_uri or "http://127.0.0.1:11311"

        self.log_path = log_path or "/tmp"

        # Override this to false if you want to bypass generating the
        # upstart conf file.
        self.generate_system_files = True

        # Override this to True if you want to create symbolic link for
        # job launch files instead of copying them.
        self.symlink = False

        # Override this to True if you want the --wait flag passed to roslaunch.
        # This will be desired if the nodes spawned by this job are intended to
        # connect to an existing master.
        self.roslaunch_wait = False

        # Set the string of the "After=" section
        # of the generated Systemd service file
        self.systemd_after = systemd_after or "network.target"

        # Set of files to be installed for the job. This is only launchers
        # and other user-specified configs--- nothing related to the system
        # startup job itself. List of strs.
        self.files = []

    def add(self, package=None, filename=None, glob=None):
        """ Add launch or other configuration files to Job.

        Files may be specified using relative, absolute, or package-relative
        paths. Files may also be specified using shell globs.

        :param package: Optionally specify a package to search for the file
            or glob relative-to.
        :type package: str
        :param filename: Name of a file to add to the job. Relative to the
            package path, if specified.
        :type filename: str
        :param glob: Shell glob of files to add to the job. Relative to the
            package path, if specified.
        :type glob: str
        """
        if package:
            search_paths = reversed(find_in_workspaces(project=package))
        else:
            search_paths = ('.', )

        if glob and filename:
            raise RuntimeError("You must specify only an exact filename or a glob, not both.")

        # See: https://docs.python.org/2/library/os.html#os.getlogin
        if filename:
            for path in search_paths:
                candidate = os.path.join(path, filename)
                if os.path.isfile(candidate):
                    self.files.append(candidate)

        if glob:
            for path in search_paths:
                self.files.extend(glob_files(os.path.join(path, glob)))

    def install(self, root="/", sudo="/usr/bin/sudo", Provider=None):
        """ Install the job definition to the system.

        :param root: Override the root to install to, useful for testing.
        :type root: str
        :param sudo: Override which sudo is used, useful for testing or for
            making it use gksudo instead.
        :type sudo: str
        :param provider: Override to use your own generator function for the
            system file preparation.
        :type provider: Provider
        """
        # This is a recipe of files and their contents which is pickled up and
        # passed to a sudo process so that it can create the actual files,
        # without needing a ROS workspace or any other environmental setup.
        if Provider is None:
            Provider = providers.detect_provider()
        p = Provider(root, self)
        installation_files = p.generate_install()

        print("Preparing to install files to the following paths:")
        for filename in sorted(installation_files.keys()):
            print("  %s" % filename)

        self._call_mutate(sudo, installation_files)

        p.post_install()

    def uninstall(self, root="/", sudo="/usr/bin/sudo", Provider=None):
        """ Uninstall the job definition from the system.

        :param root: Override the root to uninstall from, useful for testing.
        :type root: str
        :param sudo: Override which sudo is used, useful for testing or for
            making it use gksudo instead.
        :type sudo: str
        :param provider: Override to use your own generator function for the
            system file preparation.
        :type provider: Provider
        """
        if Provider is None:
            Provider = providers.detect_provider()
        p = Provider(root, self)
        installation_files = p.generate_uninstall()

        if len(installation_files) == 0:
            print("Job not found, nothing to remove.")
        else:
            print("Preparing to remove the following paths:")
            for filename in sorted(installation_files.keys()):
                print("  %s" % filename)

            self._call_mutate(sudo, installation_files)

    def _call_mutate(self, sudo, installation_files):
        try:
            # Installed script location
            mutate_files_exec = find_in_workspaces(
                project="robot_upstart", path="mutate_files", first_match_only=True)[0]
        except IndexError:
            # Devel script location
            mutate_files_exec = find_in_workspaces(
                project="robot_upstart", path="scripts/mutate_files", first_match_only=True)[0]

        # If sudo is specified, then the user will be prompted at this point.
        cmd = [mutate_files_exec]
        if sudo:
            cmd.insert(0, sudo)
        print("Now calling: %s" % ' '.join(cmd))

        # changed to use json, as pickle gives 0-bytes error
        p = subprocess.Popen(cmd + [json.dumps(installation_files)])
        p.communicate()

        if p.returncode == 0:
            print("Filesystem operation succeeded.")
        else:
            print("Error encountered; filesystem operation aborted.")
        return p.returncode
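The intended flow, following the constructor and method docstrings above: construct a Job, attach launch files, then install. A hedged sketch; the package and launch-file names are placeholders.

# Sketch: register a launch file to start on boot.
import robot_upstart

j = robot_upstart.Job(name="myrobot", user="administrator")
j.add(package="myrobot_bringup", filename="launch/base.launch")
j.install()  # prompts for sudo and writes the system startup files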
cbb6cdd46a3fb28bdf4847bc36dde8fde09620bc
15f0514701a78e12750f68ba09d68095172493ee
/Python3/479.py
94cf168e998958b68d5af4fea151144080bdc38b
[ "MIT" ]
permissive
strengthen/LeetCode
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
3ffa6dcbeb787a6128641402081a4ff70093bb61
refs/heads/master
2022-12-04T21:35:17.872212
2022-11-30T06:23:24
2022-11-30T06:23:24
155,958,163
936
365
MIT
2021-11-15T04:02:45
2018-11-03T06:47:38
null
UTF-8
Python
false
false
1,665
py
479.py
__________________________________________________________________________________________________
sample 36 ms submission
import math


class Solution:
    def largestPalindrome(self, n: int) -> int:
        if n == 1:
            return 9
        k = 1
        base = 10**n
        while True:
            upper = base - 2 * k
            lower = int(str(upper)[::-1])
            if k**2 - lower >= 0:
                sqrt_root = int(math.sqrt(k**2 - lower))
                if sqrt_root * sqrt_root == k**2 - lower:
                    i = (k + sqrt_root) % 1337
                    j = (2 * k - i) % 1337
                    lower_part = (i * j) % 1337
                    base %= 1337
                    upper_part = (base - (i + j)) % 1337
                    return ((upper_part * base) % 1337 + lower_part) % 1337
            k += 1
__________________________________________________________________________________________________
sample 13028 kb submission
class Solution:
    def largestPalindrome(self, n):
        """
        :type n: int
        :rtype: int
        """
        if n == 1:
            return 9
        for z in range(2, 2 * (9 * 10**n) - 1):
            left = 10**n - z
            right = int(str(left)[::-1])
            root_1, root_2 = 0, 0
            if z**2 - 4 * right < 0:
                continue
            else:
                root_1 = 1 / 2 * (z + (z**2 - 4 * right)**0.5)
                root_2 = 1 / 2 * (z - (z**2 - 4 * right)**0.5)
            if root_1.is_integer() or root_2.is_integer():
                return (10**n * left + right) % 1337
__________________________________________________________________________________________________
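Both submissions build the candidate palindrome as upper half plus reversed upper half and test whether it factors into two n-digit numbers via the quadratic's discriminant. A brute-force reference for sanity-checking small n; the reference function below is mine, not part of either submission, and is only practical for small n.

# Sketch: brute-force reference, cross-checked against the last Solution above.
def largest_palindrome_naive(n: int) -> int:
    lo, hi = 10 ** (n - 1), 10 ** n
    best = max(
        x * y
        for x in range(lo, hi)
        for y in range(x, hi)
        if str(x * y) == str(x * y)[::-1]
    )
    return best % 1337

assert largest_palindrome_naive(2) == Solution().largestPalindrome(2)  # both 987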
bcebd41ac32649b3da38c93ef264bf3472c0b6d7
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
/tools/mo/openvino/tools/mo/front/tf/SwitchMergeOptimization.py
9a5fb162d20b1c6a4956a7bcfb2d359578718cba
[ "Apache-2.0" ]
permissive
openvinotoolkit/openvino
38ea745a247887a4e14580dbc9fc68005e2149f9
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
refs/heads/master
2023-08-18T03:47:44.572979
2023-08-17T21:24:59
2023-08-17T21:24:59
153,097,643
3,953
1,492
Apache-2.0
2023-09-14T21:42:24
2018-10-15T10:54:40
C++
UTF-8
Python
false
false
4,651
py
SwitchMergeOptimization.py
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from openvino.tools.mo.ops.select import Select
from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph
from openvino.tools.mo.graph.graph import Graph


class SwitchMergeOptimization(FrontReplacementSubgraph):
    """
    Optimization for the case when a combination of Switches shares one common condition and can therefore be
    expressed as a single Select node.

    This transformation would match too many instances for models with many BatchNorm layers that share the
    same training/inference flag input, so it is implemented as a simple graph traversal instead of the
    regular pattern-based approach.

    The following pattern is checked:
        nodes=[('Merge', dict(kind='op', op='Merge')),
               ('Switch_2_input', dict(kind='data')),
               ('Switch_2', dict(kind='op', op='Switch')),
               ('Switch_2_data', dict(kind='data')),
               ('op', dict(kind='op')),
               ('op_data', dict(kind='data')),
               ('Switch', dict(kind='op', op='Switch')),
               ('Switch_data', dict(kind='data')),
               ('Switch_1', dict(kind='op', op='Switch')),
               ('Switch_1_data', dict(kind='data')),
               ('cond_data', dict(kind='data')),
               ('identity', dict(kind='op', op='Identity')),
               ('identity_data', dict(kind='data')),
               ],
        edges=[('Switch_2_input', 'Switch_2', {'in': 0}),
               ('Switch_2', 'Switch_2_data', {'out': 1}),
               ('Switch_2_data', 'Merge'),
               ('cond_data', 'Switch_2', {'in': 1}),
               ('cond_data', 'Switch_1', {'in': 1}),
               ('cond_data', 'Switch', {'in': 1}),
               ('Switch_1', 'Switch_1_data', {'out': 0}),
               ('Switch', 'Switch_data', {'out': 0}),
               ('Switch_1_data', 'op', {'in': 1}),
               ('Switch_data', 'op', {'in': 0}),
               ('op', 'op_data'),
               ('op_data', 'identity'),
               ('identity', 'identity_data'),
               ('identity_data', 'Merge'),
               ],
    """
    enabled = True

    def find_and_replace_pattern(self, graph: Graph):
        for merge in graph.get_op_nodes(op='Merge'):
            for merge_switch_in_port in range(2):
                if merge.in_port(merge_switch_in_port).disconnected() or \
                        merge.in_port(merge_switch_in_port).get_source().node.op != 'Switch':
                    continue
                switch_2 = merge.in_port(merge_switch_in_port).get_source().node

                if merge.in_port(1 - merge_switch_in_port).disconnected() or \
                        merge.in_port(1 - merge_switch_in_port).get_source().node.op != 'Identity':
                    continue
                false_value_port = merge.in_port(1 - merge_switch_in_port).get_source()

                true_value_port = switch_2.in_port(0).get_source()
                op = false_value_port.node.in_port(0).get_source().node

                if op.in_port(0).disconnected() or op.in_port(0).get_source().node.op != 'Switch':
                    continue
                switch = op.in_port(0).get_source().node

                if op.in_port(1).disconnected() or op.in_port(1).get_source().node.op != 'Switch':
                    continue
                switch_1 = op.in_port(1).get_source().node

                if switch.in_port(1).get_source() == switch_1.in_port(1).get_source() and \
                        switch.in_port(1).get_source() == switch_2.in_port(1).get_source():
                    select = Select(graph, dict(name=merge.soft_get('name') + '/Select/',
                                                format='tf')).create_node()
                    select.in_port(0).connect(switch.in_port(1).get_source())
                    select.in_port(1).connect(true_value_port)
                    select.in_port(2).connect(false_value_port)

                    merge.out_port(0).get_connection().set_source(select.out_port(0))

                    assert 1 in op.in_ports() and 0 in op.in_ports()

                    op.in_port(0).disconnect()
                    op.in_port(1).disconnect()
                    switch.in_port(0).get_connection().set_destination(op.in_port(0))
                    switch_1.in_port(0).get_connection().set_destination(op.in_port(1))

                    graph.remove_nodes_from(nodes=[switch_1.id, switch.id, switch_2.id, merge.id])
                    # need to exit from the inner for loop because the Merge op has been removed
                    break
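A toy illustration of the equivalence this pass exploits, in pure Python with no OpenVINO: the switch/merge/select helpers below are simplified stand-ins for the TF semantics, with port 0 carrying the false branch and port 1 the true branch.

# Sketch: Merge over two Switches on one condition behaves like one Select.
def switch(value, cond):
    return (None, value) if cond else (value, None)  # (output_false, output_true)

def merge(a, b):
    return a if a is not None else b  # forwards whichever input is live

def select(cond, if_true, if_false):
    return if_true if cond else if_false

for cond in (True, False):
    assert merge(switch("t", cond)[1], switch("f", cond)[0]) == select(cond, "t", "f")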
4572d37f4186659b534e4fdf97eaf77d4e108acb
6a7005ca7e418a18cbfeec296129873aef6446a4
/examples/DecryptLoginExamples/crawlers/neteaseclickplaylist/__init__.py
d5dcf4c278bbe7b71c2ff777c65478e1d6ab0819
[ "Apache-2.0" ]
permissive
CharlesPikachu/DecryptLogin
f0646d37e8604fb9c41dc74c17c0ea48cb5066ec
bb4228c0535ffd7060b7816cbd1da51ba8d95ab8
refs/heads/master
2023-05-22T15:21:59.038844
2022-08-29T08:59:05
2022-08-29T08:59:05
172,416,496
2,871
809
Apache-2.0
2022-10-06T14:58:49
2019-02-25T01:57:20
Python
UTF-8
Python
false
false
71
py
__init__.py
'''initialize'''
from .neteaseclickplaylist import NeteaseClickPlaylist
6787924cb69830635a205d8f9658b12ba2a1e1ec
7343ece3b82ac87a594865c4074623b45b0297b4
/tests/handlers/test_room_member.py
3e28117e2c0f058fe8920c65125caef1e7f5f2cb
[ "Apache-2.0" ]
permissive
matrix-org/synapse
a00111f83310783b78e2996557f8bbae4d9fb229
d35bed8369514fe727b4fe1afb68f48cc8b2655a
refs/heads/develop
2023-09-05T05:24:20.808942
2023-09-04T16:14:09
2023-09-04T16:14:09
22,844,864
12,215
2,869
Apache-2.0
2023-09-14T15:20:48
2014-08-11T15:51:42
Python
UTF-8
Python
false
false
16,212
py
test_room_member.py
from unittest.mock import AsyncMock, patch

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
import synapse.rest.client.login
import synapse.rest.client.room
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import LimitExceededError, SynapseError
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.events import FrozenEventV3
from synapse.federation.federation_client import SendJoinResult
from synapse.server import HomeServer
from synapse.types import UserID, create_requester
from synapse.util import Clock

from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.server import make_request
from tests.unittest import (
    FederatingHomeserverTestCase,
    HomeserverTestCase,
    override_config,
)


class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase):
    servlets = [
        synapse.rest.admin.register_servlets,
        synapse.rest.client.login.register_servlets,
        synapse.rest.client.room.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.handler = hs.get_room_member_handler()

        # Create three users.
        self.alice = self.register_user("alice", "pass")
        self.alice_token = self.login("alice", "pass")
        self.bob = self.register_user("bob", "pass")
        self.bob_token = self.login("bob", "pass")
        self.chris = self.register_user("chris", "pass")
        self.chris_token = self.login("chris", "pass")

        # Create a room on this homeserver. Note that this counts as a join: it
        # contributes to the rate limiter's count of actions
        self.room_id = self.helper.create_room_as(self.alice, tok=self.alice_token)
        self.intially_unjoined_room_id = f"!example:{self.OTHER_SERVER_NAME}"

    @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}})
    def test_local_user_local_joins_contribute_to_limit_and_are_limited(self) -> None:
        # The rate limiter has accumulated one token from Alice's join after the create
        # event.
        # Try joining the room as Bob.
        self.get_success(
            self.handler.update_membership(
                requester=create_requester(self.bob),
                target=UserID.from_string(self.bob),
                room_id=self.room_id,
                action=Membership.JOIN,
            )
        )

        # The rate limiter bucket is full. A second join should be denied.
        self.get_failure(
            self.handler.update_membership(
                requester=create_requester(self.chris),
                target=UserID.from_string(self.chris),
                room_id=self.room_id,
                action=Membership.JOIN,
            ),
            LimitExceededError,
        )

    @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}})
    def test_local_user_profile_edits_dont_contribute_to_limit(self) -> None:
        # The rate limiter has accumulated one token from Alice's join after the create
        # event. Alice should still be able to change her displayname.
        self.get_success(
            self.handler.update_membership(
                requester=create_requester(self.alice),
                target=UserID.from_string(self.alice),
                room_id=self.room_id,
                action=Membership.JOIN,
                content={"displayname": "Alice Cooper"},
            )
        )

        # Still room in the limiter bucket. Chris's join should be accepted.
        self.get_success(
            self.handler.update_membership(
                requester=create_requester(self.chris),
                target=UserID.from_string(self.chris),
                room_id=self.room_id,
                action=Membership.JOIN,
            )
        )

    @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 1}})
    def test_remote_joins_contribute_to_rate_limit(self) -> None:
        # Join once, to fill the rate limiter bucket.
        #
        # To do this we have to mock the responses from the remote homeserver.
        # We also patch out a bunch of event checks on our end. All we're really
        # trying to check here is that remote joins will bump the rate limiter when
        # they are persisted.
        create_event_source = {
            "auth_events": [],
            "content": {
                "creator": f"@creator:{self.OTHER_SERVER_NAME}",
                "room_version": self.hs.config.server.default_room_version.identifier,
            },
            "depth": 0,
            "origin_server_ts": 0,
            "prev_events": [],
            "room_id": self.intially_unjoined_room_id,
            "sender": f"@creator:{self.OTHER_SERVER_NAME}",
            "state_key": "",
            "type": EventTypes.Create,
        }
        self.add_hashes_and_signatures_from_other_server(
            create_event_source,
            self.hs.config.server.default_room_version,
        )
        create_event = FrozenEventV3(
            create_event_source,
            self.hs.config.server.default_room_version,
            {},
            None,
        )

        join_event_source = {
            "auth_events": [create_event.event_id],
            "content": {"membership": "join"},
            "depth": 1,
            "origin_server_ts": 100,
            "prev_events": [create_event.event_id],
            "sender": self.bob,
            "state_key": self.bob,
            "room_id": self.intially_unjoined_room_id,
            "type": EventTypes.Member,
        }
        add_hashes_and_signatures(
            self.hs.config.server.default_room_version,
            join_event_source,
            self.hs.hostname,
            self.hs.signing_key,
        )
        join_event = FrozenEventV3(
            join_event_source,
            self.hs.config.server.default_room_version,
            {},
            None,
        )

        mock_make_membership_event = AsyncMock(
            return_value=(
                self.OTHER_SERVER_NAME,
                join_event,
                self.hs.config.server.default_room_version,
            )
        )
        mock_send_join = AsyncMock(
            return_value=SendJoinResult(
                join_event,
                self.OTHER_SERVER_NAME,
                state=[create_event],
                auth_chain=[create_event],
                partial_state=False,
                servers_in_room=frozenset(),
            )
        )

        with patch.object(
            self.handler.federation_handler.federation_client,
            "make_membership_event",
            mock_make_membership_event,
        ), patch.object(
            self.handler.federation_handler.federation_client,
            "send_join",
            mock_send_join,
        ), patch(
            "synapse.event_auth._is_membership_change_allowed",
            return_value=None,
        ), patch(
            "synapse.handlers.federation_event.check_state_dependent_auth_rules",
            return_value=None,
        ):
            self.get_success(
                self.handler.update_membership(
                    requester=create_requester(self.bob),
                    target=UserID.from_string(self.bob),
                    room_id=self.intially_unjoined_room_id,
                    action=Membership.JOIN,
                    remote_room_hosts=[self.OTHER_SERVER_NAME],
                )
            )

            # Try to join as Chris. Should get denied.
            self.get_failure(
                self.handler.update_membership(
                    requester=create_requester(self.chris),
                    target=UserID.from_string(self.chris),
                    room_id=self.intially_unjoined_room_id,
                    action=Membership.JOIN,
                    remote_room_hosts=[self.OTHER_SERVER_NAME],
                ),
                LimitExceededError,
            )

    # TODO: test that remote joins to a room are rate limited.
    #   Could do this by setting the burst count to 1, then:
    #   - remote-joining a room
    #   - immediately leaving
    #   - trying to remote-join again.


class TestReplicatedJoinsLimitedByPerRoomRateLimiter(BaseMultiWorkerStreamTestCase):
    servlets = [
        synapse.rest.admin.register_servlets,
        synapse.rest.client.login.register_servlets,
        synapse.rest.client.room.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.handler = hs.get_room_member_handler()

        # Create three users.
        self.alice = self.register_user("alice", "pass")
        self.alice_token = self.login("alice", "pass")
        self.bob = self.register_user("bob", "pass")
        self.bob_token = self.login("bob", "pass")
        self.chris = self.register_user("chris", "pass")
        self.chris_token = self.login("chris", "pass")

        # Create a room on this homeserver. Note that this counts as a join: it
        # contributes to the rate limiter's count of actions
        self.room_id = self.helper.create_room_as(self.alice, tok=self.alice_token)
        self.intially_unjoined_room_id = "!example:otherhs"

    @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}})
    def test_local_users_joining_on_another_worker_contribute_to_rate_limit(
        self,
    ) -> None:
        # The rate limiter has accumulated one token from Alice's join after the create
        # event.
        self.replicate()

        # Spawn another worker and have bob join via it.
        worker_app = self.make_worker_hs(
            "synapse.app.generic_worker", extra_config={"worker_name": "other worker"}
        )
        worker_site = self._hs_to_site[worker_app]
        channel = make_request(
            self.reactor,
            worker_site,
            "POST",
            f"/_matrix/client/v3/rooms/{self.room_id}/join",
            access_token=self.bob_token,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # wait for join to arrive over replication
        self.replicate()

        # Try to join as Chris on the worker. Should get denied because Alice
        # and Bob have both joined the room.
        self.get_failure(
            worker_app.get_room_member_handler().update_membership(
                requester=create_requester(self.chris),
                target=UserID.from_string(self.chris),
                room_id=self.room_id,
                action=Membership.JOIN,
            ),
            LimitExceededError,
        )

        # Try to join as Chris on the original worker. Should get denied because Alice
        # and Bob have both joined the room.
        self.get_failure(
            self.handler.update_membership(
                requester=create_requester(self.chris),
                target=UserID.from_string(self.chris),
                room_id=self.room_id,
                action=Membership.JOIN,
            ),
            LimitExceededError,
        )


class RoomMemberMasterHandlerTestCase(HomeserverTestCase):
    servlets = [
        synapse.rest.admin.register_servlets,
        synapse.rest.client.login.register_servlets,
        synapse.rest.client.room.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.handler = hs.get_room_member_handler()
        self.store = hs.get_datastores().main

        # Create two users.
        self.alice = self.register_user("alice", "pass")
        self.alice_ID = UserID.from_string(self.alice)
        self.alice_token = self.login("alice", "pass")
        self.bob = self.register_user("bob", "pass")
        self.bob_ID = UserID.from_string(self.bob)
        self.bob_token = self.login("bob", "pass")

        # Create a room on this homeserver.
        self.room_id = self.helper.create_room_as(self.alice, tok=self.alice_token)

    def test_leave_and_forget(self) -> None:
        """Tests that forgetting a room succeeds. The test is performed with two
        users, as forgetting by the last user, respectively after all users have
        left the room, is a special edge case."""
        self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)

        # alice is not the last room member that leaves and forgets the room
        self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
        self.get_success(self.handler.forget(self.alice_ID, self.room_id))
        self.assertTrue(
            self.get_success(self.store.did_forget(self.alice, self.room_id))
        )

        # the server has not forgotten the room
        self.assertFalse(
            self.get_success(self.store.is_locally_forgotten_room(self.room_id))
        )

    def test_leave_and_unforget(self) -> None:
        """Tests if rejoining a room unforgets the room, so that it shows up in
        sync again."""
        self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)

        # alice is not the last room member that leaves and forgets the room
        self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
        self.get_success(self.handler.forget(self.alice_ID, self.room_id))
        self.assertTrue(
            self.get_success(self.store.did_forget(self.alice, self.room_id))
        )

        self.helper.join(self.room_id, user=self.alice, tok=self.alice_token)
        self.assertFalse(
            self.get_success(self.store.did_forget(self.alice, self.room_id))
        )

        # the server has not forgotten the room
        self.assertFalse(
            self.get_success(self.store.is_locally_forgotten_room(self.room_id))
        )

    @override_config({"forget_rooms_on_leave": True})
    def test_leave_and_auto_forget(self) -> None:
        """Tests the `forget_rooms_on_leave` config option."""
        self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)

        # alice is not the last room member that leaves and forgets the room
        self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
        self.assertTrue(
            self.get_success(self.store.did_forget(self.alice, self.room_id))
        )

    def test_leave_and_forget_last_user(self) -> None:
        """Tests that forgetting a room succeeds when the last user has left
        the room."""
        # alice is the last room member that leaves and forgets the room
        self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
        self.get_success(self.handler.forget(self.alice_ID, self.room_id))
        self.assertTrue(
            self.get_success(self.store.did_forget(self.alice, self.room_id))
        )

        # the server has forgotten the room
        self.assertTrue(
            self.get_success(self.store.is_locally_forgotten_room(self.room_id))
        )

    def test_forget_when_not_left(self) -> None:
        """Tests that a user cannot forget a room that they have not left."""
        self.get_failure(self.handler.forget(self.alice_ID, self.room_id), SynapseError)

    def test_rejoin_forgotten_by_user(self) -> None:
        """Test that a user that has forgotten a room can do a re-join.

        The room was not forgotten from the local server. One local user is
        still a member of the room."""
        self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
        self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)

        self.get_success(self.handler.forget(self.alice_ID, self.room_id))
        self.assertTrue(
            self.get_success(self.store.did_forget(self.alice, self.room_id))
        )

        # the server has not forgotten the room
        self.assertFalse(
            self.get_success(self.store.is_locally_forgotten_room(self.room_id))
        )

        self.helper.join(self.room_id, user=self.alice, tok=self.alice_token)
        # TODO: A join to a room does not invalidate the forgotten cache
        # see https://github.com/matrix-org/synapse/issues/13262
        self.store.did_forget.invalidate_all()
        self.assertFalse(
            self.get_success(self.store.did_forget(self.alice, self.room_id))
        )
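The `rc_joins_per_room` settings exercised above follow token-bucket semantics: `burst_count` caps how many join actions a room can absorb at once, and `per_second` is the refill rate. A minimal self-contained sketch of that behaviour; the `TokenBucket` class and its `allow` method are illustrative assumptions, not Synapse's actual `Ratelimiter` API:

import time


class TokenBucket:
    """Illustrative token bucket: `burst_count` is the bucket size and
    `per_second` the refill rate, mirroring rc_joins_per_room semantics."""

    def __init__(self, per_second: float, burst_count: int) -> None:
        self.per_second = per_second
        self.burst_count = burst_count
        self.tokens = float(burst_count)
        self.last = time.monotonic()

    def allow(self) -> bool:
        now = time.monotonic()
        # Refill according to elapsed time, capped at the burst size.
        self.tokens = min(
            self.burst_count, self.tokens + (now - self.last) * self.per_second
        )
        self.last = now
        if self.tokens >= 1.0:
            self.tokens -= 1.0
            return True
        return False


# With per_second=0 and burst_count=2 (as in the tests above), exactly two
# joins are allowed and the third is rejected, since the bucket never refills.
bucket = TokenBucket(per_second=0, burst_count=2)
assert [bucket.allow() for _ in range(3)] == [True, True, False]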
5d75add79bd39c185638c64158da758bc22458e3
27b86f422246a78704e0e84983b2630533a47db6
/src/ezdxf/render/leader.py
0bc9a9a8e756319de1dae5a63882e5e9dcff0404
[ "MIT" ]
permissive
mozman/ezdxf
7512decd600896960660f0f580cab815bf0d7a51
ba6ab0264dcb6833173042a37b1b5ae878d75113
refs/heads/master
2023-09-01T11:55:13.462105
2023-08-15T11:50:05
2023-08-15T12:00:04
79,697,117
750
194
MIT
2023-09-14T09:40:41
2017-01-22T05:55:55
Python
UTF-8
Python
false
false
4,602
py
leader.py
# Copyright (c) 2020-2023, Manfred Moitzi
# License: MIT License
from __future__ import annotations
from typing import TYPE_CHECKING, Iterator, cast

from ezdxf import ARROWS
from ezdxf.entities import factory
from ezdxf.lldxf.const import BYBLOCK
from ezdxf.math import Vec3, fit_points_to_cad_cv

if TYPE_CHECKING:
    from ezdxf.entities import DXFGraphic, Leader, Insert, Spline, Dimension, Line


def virtual_entities(leader: Leader) -> Iterator[DXFGraphic]:
    # Source: https://atlight.github.io/formats/dxf-leader.html
    # GDAL: DXF LEADER implementation:
    # https://github.com/OSGeo/gdal/blob/master/gdal/ogr/ogrsf_frmts/dxf/ogrdxf_leader.cpp
    # LEADER DXF Reference:
    # http://help.autodesk.com/view/OARX/2018/ENU/?guid=GUID-396B2369-F89F-47D7-8223-8B7FB794F9F3
    assert leader.dxftype() == "LEADER"
    vertices = Vec3.list(leader.vertices)  # WCS
    if len(vertices) < 2:
        # Such LEADER entities should be removed by the auditor if loaded, or
        # ignored at export time (and logged) if created by an ezdxf user.
        raise ValueError("More than 1 vertex required.")
    dxf = leader.dxf
    doc = leader.doc

    # Some default values depend on the measurement system:
    # 0/1 = imperial/metric
    if doc:
        measurement = doc.header.get("$MEASUREMENT", 0)
    else:
        measurement = 0

    # Set default styling attribute values:
    dimtad = 1
    dimgap = 0.625 if measurement else 0.0625
    dimscale = 1.0
    dimclrd = dxf.color
    dimltype = dxf.linetype
    dimlwd = dxf.lineweight
    override = None

    if doc:  # get styling attributes from associated DIMSTYLE and/or XDATA override
        override = leader.override()
        dimtad = override.get("dimtad", dimtad)
        dimgap = override.get("dimgap", dimgap)
        dimscale = override.get("dimscale", dimscale)
        if dimscale == 0.0:  # special but unknown meaning
            dimscale = 1.0
        dimclrd = override.get("dimclrd", dimclrd)
        dimltype = override.get("dimltype", dimltype)
        dimlwd = override.get("dimlwd", dimlwd)

    text_width = dxf.text_width
    hook_line_vector = Vec3(dxf.horizontal_direction)
    has_text_annotation = dxf.annotation_type == 0

    if has_text_annotation and dxf.has_hookline:
        if dxf.hookline_direction == 1:
            hook_line_vector = -hook_line_vector
        if dimtad != 0 and text_width > 0:
            hook_line = hook_line_vector * (dimgap * dimscale + text_width)
            vertices.append(vertices[-1] + hook_line)

    dxfattribs = leader.graphic_properties()
    dxfattribs["color"] = dimclrd
    dxfattribs["linetype"] = dimltype
    dxfattribs["lineweight"] = dimlwd
    if dxfattribs.get("color") == BYBLOCK:
        dxfattribs["color"] = dxf.block_color

    if dxf.path_type == 1:  # Spline
        start_tangent = vertices[1] - vertices[0]
        end_tangent = vertices[-1] - vertices[-2]
        bspline = fit_points_to_cad_cv(vertices, tangents=[start_tangent, end_tangent])
        spline = cast("Spline", factory.new("SPLINE", doc=doc))
        spline.apply_construction_tool(bspline)
        yield spline
    else:
        attribs = dict(dxfattribs)
        prev = vertices[0]
        for vertex in vertices[1:]:
            attribs["start"] = prev
            attribs["end"] = vertex
            yield cast(
                "Line",
                factory.new(dxftype="LINE", dxfattribs=attribs, doc=doc),
            )
            prev = vertex

    if dxf.has_arrowhead and override:
        arrow_name = override.get("dimldrblk", "")
        if arrow_name is None:
            return
        size = override.get("dimasz", 2.5 if measurement else 0.1875) * dimscale
        rotation = (vertices[0] - vertices[1]).angle_deg
        if doc and arrow_name in doc.blocks:
            dxfattribs.update(
                {
                    "name": arrow_name,
                    "insert": vertices[0],
                    "rotation": rotation,
                    "xscale": size,
                    "yscale": size,
                    "zscale": size,
                }
            )
            # create a virtual block reference
            insert = cast(
                "Insert", factory.new("INSERT", dxfattribs=dxfattribs, doc=doc)
            )
            yield from insert.virtual_entities()
        else:  # render standard arrows
            yield from ARROWS.virtual_entities(
                name=arrow_name,
                insert=vertices[0],
                size=size,
                rotation=rotation,
                dxfattribs=dxfattribs,
            )
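A short usage sketch for `virtual_entities()`: build a LEADER with a straight-line path and inspect the primitives it explodes into. This assumes a recent ezdxf where `add_leader()` is available; `setup=True` is used so default styles (including the 'EZDXF' dimstyle) exist.

import ezdxf
from ezdxf.render.leader import virtual_entities

doc = ezdxf.new("R2010", setup=True)  # setup=True creates default dimstyles
msp = doc.modelspace()

# A LEADER with a two-segment path; the default annotation type is
# "no annotation", so only the path (and possibly an arrowhead) is rendered.
leader = msp.add_leader(vertices=[(0, 0), (2, 2), (5, 2)])

# Explode the LEADER into simple DXF primitives (LINE entities here, since
# the default path type is straight line segments).
for entity in virtual_entities(leader):
    print(entity.dxftype())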
076fac2ff991e35b02b24a1367b1df91d96e56cc
dfa29402bb0add9560c76c75d72f1e3adbbb6662
/tests/test_an.py
7102c30766bce59cbce09aaa120f2bb5a1e5b841
[ "MIT" ]
permissive
jaraco/inflect
ed833aef3883739b230c7183575454c1e3776bd7
1a2ac489613145e26032b8831d55ba3188893208
refs/heads/main
2023-08-31T22:23:19.830839
2023-08-21T00:44:58
2023-08-21T00:44:58
730,520
397
56
MIT
2023-08-31T16:59:08
2010-06-20T13:43:13
Python
UTF-8
Python
false
false
1,018
py
test_an.py
import inflect


def test_an():
    p = inflect.engine()
    assert p.an("cat") == "a cat"
    assert p.an("ant") == "an ant"
    assert p.an("a") == "an a"
    assert p.an("b") == "a b"
    assert p.an("honest cat") == "an honest cat"
    assert p.an("dishonest cat") == "a dishonest cat"
    assert p.an("Honolulu sunset") == "a Honolulu sunset"
    assert p.an("mpeg") == "an mpeg"
    assert p.an("onetime holiday") == "a onetime holiday"
    assert p.an("Ugandan person") == "a Ugandan person"
    assert p.an("Ukrainian person") == "a Ukrainian person"
    assert p.an("Unabomber") == "a Unabomber"
    assert p.an("unanimous decision") == "a unanimous decision"
    assert p.an("US farmer") == "a US farmer"
    assert p.an("wild PIKACHU appeared") == "a wild PIKACHU appeared"


def test_an_abbreviation():
    p = inflect.engine()
    assert p.an("YAML code block") == "a YAML code block"
    assert p.an("Core ML function") == "a Core ML function"
    assert p.an("JSON code block") == "a JSON code block"
783cd9e7e56b287f2221530e408256220fc28334
cb9d0e383eecb4dc8a1234b35300b84bcd958210
/backend/lambdas/tasks/orchestrate_ecs_service_scaling.py
b2e986b552423f5cea8a8da3affe6c4f479497d3
[ "Apache-2.0" ]
permissive
awslabs/amazon-s3-find-and-forget
e7a699ec333ba4120bb22f61a3ec0bd4e7a7f443
283429bd01db843fb8e207122729a97169ee752f
refs/heads/master
2023-08-02T16:45:39.658385
2023-08-02T12:51:34
2023-08-02T12:51:34
238,810,568
212
32
Apache-2.0
2023-08-02T12:51:35
2020-02-07T00:07:08
Python
UTF-8
Python
false
false
486
py
orchestrate_ecs_service_scaling.py
""" Task to orchestrate scaling for a ECS Service """ import boto3 from decorators import with_logging ecs = boto3.client("ecs") @with_logging def handler(event, context): cluster = event["Cluster"] max_tasks = event["DeletionTasksMaxNumber"] queue_size = event["QueueSize"] service = event["DeleteService"] desired_count = min(queue_size, max_tasks) ecs.update_service(cluster=cluster, service=service, desiredCount=desired_count) return desired_count
8b5e88010d0a64c346e2496dd0352af3bd45ae55
7944d899365f7bc849f367b72f4cc285e041826c
/submitit/test_documentation.py
07b57a4be520ebad904d14cf372d5c35b44245ca
[ "MIT" ]
permissive
facebookincubator/submitit
39edc49ad3c97883dd22cb7fe625d2db55f105d7
109f7c7f098de6775397e2b84826d59101a29a9f
refs/heads/main
2023-08-22T05:39:19.476497
2023-08-16T14:00:41
2023-08-16T14:00:41
258,441,818
947
111
MIT
2023-04-05T15:24:09
2020-04-24T07:41:09
Python
UTF-8
Python
false
false
2,895
py
test_documentation.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import typing as tp
from pathlib import Path

import submitit


class MarkdownLink:
    """Handle to a markdown link, for easy existence test and printing
    (external links are not tested)
    """

    regex = re.compile(r"\[(?P<name>.+?)\]\((?P<link>\S+?)\)")

    def __init__(self, root: Path, file: Path, name: str, link: str) -> None:
        self.root = root
        self.file = file
        self.name = name
        self.link = link

    def exists(self) -> bool:
        if self.link.startswith("http"):
            # We don't check external urls.
            return True
        link = self.link.split("#")[0]
        if not link:
            return False
        fullpath = self.root / self.file.parent / link
        return fullpath.exists()

    def __repr__(self) -> str:
        return f"[{self.name}]({self.link}) in file {self.file}"


def _get_root() -> Path:
    root = Path(__file__).parent.parent.absolute()
    assert (root / "pyproject.toml").exists(), f"Wrong root folder: {root}"
    return root


def _get_markdown_files(root: Path) -> tp.List[Path]:
    return [
        md
        for pattern in ("*.md", "submitit/**/*.md", "docs/**/*.md")
        for md in root.glob(pattern)
    ]


def _get_all_markdown_links(root: Path, files: tp.List[Path]) -> tp.List[MarkdownLink]:
    """Returns a list of all existing markdown links"""
    pattern = MarkdownLink.regex
    links = []
    for file in files:
        for match in pattern.finditer(file.read_text()):
            links.append(
                MarkdownLink(root, file, match.group("name"), match.group("link"))
            )
    return links


def test_assert_markdown_links_not_broken() -> None:
    root = _get_root()
    files = _get_markdown_files(root)
    assert len(files) > 3
    links = _get_all_markdown_links(root, files)
    assert len(links) > 5, "There should be several hyperlinks!"
    broken_links = [l for l in links if not l.exists()]
    assert not broken_links


def _replace_relative_links(regex: tp.Match[str]) -> str:
    """Converts relative links into links to master
    so that links on Pypi long description are correct
    """
    string: str = regex.group()
    link = regex.group("link")
    name = regex.group("name")
    version = submitit.__version__
    if not link.startswith("http") and Path(link).exists():
        github_url = f"github.com/facebookincubator/submitit/blob/{version}"
        string = f"[{name}](https://{github_url}/{link})"
    return string


def expand_links():
    readme = _get_root() / "README.md"
    assert readme.exists()
    desc = readme.read_text(encoding="utf-8")
    desc = re.sub(MarkdownLink.regex, _replace_relative_links, desc)
    readme.write_text(desc)


if __name__ == "__main__":
    expand_links()
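The `MarkdownLink.regex` pattern drives everything above. A standalone sketch of what it captures; the pattern is copied verbatim from the class, and the sample text is made up:

import re

# Same pattern as MarkdownLink.regex above.
pattern = re.compile(r"\[(?P<name>.+?)\]\((?P<link>\S+?)\)")

sample = "See the [docs](docs/index.md) and the [tracker](https://example.com/issues)."
for match in pattern.finditer(sample):
    print(match.group("name"), "->", match.group("link"))
# docs -> docs/index.md
# tracker -> https://example.com/issues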
4b1de5b0c509a8c1bea36cba96c0a05485988cb4
5d0433d5cfa29e3e9c49286197337ad166359df0
/icebergShaded/generate_iceberg_jars.py
551638aca90ee80c57a4ec1a0f8823930681ef21
[ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
delta-io/delta
dbc3fb5a119e5131be742e5df262e0a66ce28099
512562dbd43b03bd3debcebd059ed2a36577d140
refs/heads/master
2023-09-01T03:52:45.060128
2023-08-31T16:54:44
2023-08-31T16:54:44
182,849,188
6,505
1,518
Apache-2.0
2023-09-14T21:23:39
2019-04-22T18:56:51
HTML
UTF-8
Python
false
false
7,381
py
generate_iceberg_jars.py
#!/usr/bin/env python3

#
# Copyright (2021) The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse
import os
import glob
import subprocess
import shlex
import shutil
from os import path

iceberg_lib_dir_name = "lib"
iceberg_src_dir_name = "iceberg_src"
iceberg_patches_dir_name = "iceberg_src_patches"

iceberg_src_commit_hash = "ede085d0f7529f24acd0c81dd0a43f7bb969b763"
iceberg_src_branch = "master"  # only this branch will be downloaded

# Relative to iceberg_src directory.
# We use * because after applying the patches, a random git hash will be appended to each jar
# name. Thus, for all usages below, we must search for these jar files using `glob.glob(pattern)`.
iceberg_src_compiled_jar_rel_glob_patterns = [
    "bundled-guava/build/libs/iceberg-bundled-guava-*.jar",
    "common/build/libs/iceberg-common-*.jar",
    "api/build/libs/iceberg-api-*.jar",
    "core/build/libs/iceberg-core-*.jar",
    "parquet/build/libs/iceberg-parquet-*.jar",
]

iceberg_root_dir = path.abspath(path.dirname(__file__))
iceberg_src_dir = path.join(iceberg_root_dir, iceberg_src_dir_name)
iceberg_patches_dir = path.join(iceberg_root_dir, iceberg_patches_dir_name)
iceberg_lib_dir = path.join(iceberg_root_dir, iceberg_lib_dir_name)


def iceberg_jars_exists():
    for compiled_jar_rel_glob_pattern in iceberg_src_compiled_jar_rel_glob_patterns:
        jar_file_name_pattern = path.basename(path.normpath(compiled_jar_rel_glob_pattern))
        lib_jar_abs_pattern = path.join(iceberg_lib_dir, jar_file_name_pattern)
        results = glob.glob(lib_jar_abs_pattern)
        if len(results) > 1:
            raise Exception("More jars than expected: " + str(results))
        if len(results) == 0:
            return False
    return True


def set_git_config_if_empty(config_key, default_value):
    curr_val = None
    try:
        (_, curr_val, _) = run_cmd("git config --get user.%s" % config_key)
        curr_val = curr_val.decode("utf-8")
    except:
        print("Error getting user.%s" % config_key)
    if not curr_val:
        run_cmd("git config user.%s \"%s\"" % (config_key, default_value))


def prepare_iceberg_source():
    with WorkingDirectory(iceberg_root_dir):
        print(">>> Cloning Iceberg repo")
        shutil.rmtree(iceberg_src_dir_name, ignore_errors=True)
        set_git_config_if_empty("email", "<>")
        set_git_config_if_empty("name", "Anonymous")

        # We just want the shallowest, smallest iceberg clone. We will check out the commit later.
        run_cmd("git clone --depth 1 --branch %s https://github.com/apache/iceberg.git %s" %
                (iceberg_src_branch, iceberg_src_dir_name))

    with WorkingDirectory(iceberg_src_dir):
        run_cmd("git config user.email \"<>\"")
        run_cmd("git config user.name \"Anonymous\"")

        # Fetch just the single commit (shallow)
        run_cmd("git fetch origin %s --depth 1" % iceberg_src_commit_hash)
        run_cmd("git checkout %s" % iceberg_src_commit_hash)

        print(">>> Applying patch files")
        patch_files = glob.glob(path.join(iceberg_patches_dir, "*.patch"))
        patch_files.sort()

        for patch_file in patch_files:
            print(">>> Applying '%s'" % patch_file)
            run_cmd("git apply %s" % patch_file)
            run_cmd("git add .")
            run_cmd("git commit -a -m 'applied %s'" % path.basename(patch_file))


def generate_iceberg_jars():
    print(">>> Compiling JARs")
    with WorkingDirectory(iceberg_src_dir):
        # disable style checks (can fail with patches) and tests
        build_args = "-x spotlessCheck -x checkstyleMain -x test -x integrationTest"
        run_cmd("./gradlew :iceberg-core:build %s" % build_args)
        run_cmd("./gradlew :iceberg-parquet:build %s" % build_args)

    print(">>> Copying JARs to lib directory")
    shutil.rmtree(iceberg_lib_dir, ignore_errors=True)
    os.mkdir(iceberg_lib_dir)

    # For each relative pattern p ...
    for compiled_jar_rel_glob_pattern in iceberg_src_compiled_jar_rel_glob_patterns:
        # Get the absolute pattern
        compiled_jar_abs_pattern = path.join(iceberg_src_dir, compiled_jar_rel_glob_pattern)
        # Search for all glob results
        results = glob.glob(compiled_jar_abs_pattern)
        # Compiled jars will include tests, sources, javadocs; exclude them
        results = list(filter(
            lambda result: all(x not in result for x in ["test", "source", "javadoc"]),
            results))
        if len(results) == 0:
            raise Exception("Could not find the jar: " + compiled_jar_rel_glob_pattern)
        if len(results) > 1:
            raise Exception("More jars created than expected: " + str(results))

        # Copy the one jar result into the <iceberg root>/lib directory
        compiled_jar_abs_path = results[0]
        compiled_jar_name = path.basename(path.normpath(compiled_jar_abs_path))
        lib_jar_abs_path = path.join(iceberg_lib_dir, compiled_jar_name)
        shutil.copyfile(compiled_jar_abs_path, lib_jar_abs_path)

    if not iceberg_jars_exists():
        raise Exception("JAR copying failed")


def run_cmd(cmd, throw_on_error=True, env=None, stream_output=False, **kwargs):
    if isinstance(cmd, str):
        cmd = shlex.split(cmd)
    cmd_env = os.environ.copy()
    if env:
        cmd_env.update(env)

    if stream_output:
        child = subprocess.Popen(cmd, env=cmd_env, **kwargs)
        exit_code = child.wait()
        if throw_on_error and exit_code != 0:
            raise Exception("Non-zero exitcode: %s" % (exit_code))
        print("----\n")
        return exit_code
    else:
        child = subprocess.Popen(
            cmd,
            env=cmd_env,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            **kwargs)
        (stdout, stderr) = child.communicate()
        exit_code = child.wait()
        if throw_on_error and exit_code != 0:
            raise Exception(
                "Non-zero exitcode: %s\n\nSTDOUT:\n%s\n\nSTDERR:%s" %
                (exit_code, stdout, stderr))
        return (exit_code, stdout, stderr)


# pylint: disable=too-few-public-methods
class WorkingDirectory(object):
    def __init__(self, working_directory):
        self.working_directory = working_directory
        self.old_workdir = os.getcwd()

    def __enter__(self):
        os.chdir(self.working_directory)

    def __exit__(self, tpe, value, traceback):
        os.chdir(self.old_workdir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--force",
        required=False,
        default=False,
        action="store_true",
        help="Force the generation even if already generated, useful for testing.")
    args = parser.parse_args()

    if args.force or not iceberg_jars_exists():
        prepare_iceberg_source()
        generate_iceberg_jars()
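The `run_cmd`/`WorkingDirectory` pair above is a reusable pattern: run a subprocess from a temporarily changed working directory, restoring the old one even on error. A minimal standalone sketch of the same idea using `contextlib`; the names here are illustrative, not part of the script:

import os
import subprocess
from contextlib import contextmanager


@contextmanager
def working_directory(path):
    """Temporarily chdir into `path`, restoring the old cwd on exit."""
    old = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(old)


# Example: capture `git status` output from another directory.
with working_directory("/tmp"):
    result = subprocess.run(
        ["git", "status"], capture_output=True, text=True, check=False
    )
    print(result.returncode, result.stdout[:80])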
8b264eb2ee7018b9c763f29379eee94c87f8c2a3
5da5473ff3026165a47f98744bac82903cf008e0
/packages/google-cloud-deploy/google/cloud/deploy_v1/services/cloud_deploy/transports/grpc.py
ddfb1966a84a1a56d14d573602a7e36abd2ab974
[ "Apache-2.0" ]
permissive
googleapis/google-cloud-python
ed61a5f03a476ab6053870f4da7bc5534e25558b
93c4e63408c65129422f65217325f4e7d41f7edf
refs/heads/main
2023-09-04T09:09:07.852632
2023-08-31T22:49:26
2023-08-31T22:49:26
16,316,451
2,792
917
Apache-2.0
2023-09-14T21:45:18
2014-01-28T15:51:47
Python
UTF-8
Python
false
false
49,492
py
grpc.py
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

from google.api_core import gapic_v1, grpc_helpers, operations_v1
import google.auth  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore
import grpc  # type: ignore

from google.cloud.deploy_v1.types import cloud_deploy

from .base import DEFAULT_CLIENT_INFO, CloudDeployTransport


class CloudDeployGrpcTransport(CloudDeployTransport):
    """gRPC backend transport for CloudDeploy.

    CloudDeploy service creates and manages Continuous Delivery
    operations on Google Cloud Platform via Skaffold
    (https://skaffold.dev).

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    _stubs: Dict[str, Callable]

    def __init__(
        self,
        *,
        host: str = "clouddeploy.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[grpc.Channel] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument
                is ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} self._operations_client: Optional[operations_v1.OperationsClient] = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) # The base transport sets the host, credentials and scopes super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, api_audience=api_audience, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, # use the credentials which are saved credentials=self._credentials, # Set ``credentials_file`` to ``None`` here as # the credentials that we saved earlier should be used. credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Wrap messages. 
This must be done after self._grpc_channel exists self._prep_wrapped_messages(client_info) @classmethod def create_channel( cls, host: str = "clouddeploy.googleapis.com", credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> grpc.Channel: """Create and return a gRPC channel object. Args: host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is mutually exclusive with credentials. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. quota_project_id (Optional[str]): An optional project to use for billing and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. Raises: google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service.""" return self._grpc_channel @property def operations_client(self) -> operations_v1.OperationsClient: """Create the client designed to process long-running operations. This property caches on the instance; repeated calls return the same client. """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsClient(self.grpc_channel) # Return the client from cache. return self._operations_client @property def list_delivery_pipelines( self, ) -> Callable[ [cloud_deploy.ListDeliveryPipelinesRequest], cloud_deploy.ListDeliveryPipelinesResponse, ]: r"""Return a callable for the list delivery pipelines method over gRPC. Lists DeliveryPipelines in a given project and location. Returns: Callable[[~.ListDeliveryPipelinesRequest], ~.ListDeliveryPipelinesResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_delivery_pipelines" not in self._stubs: self._stubs["list_delivery_pipelines"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/ListDeliveryPipelines", request_serializer=cloud_deploy.ListDeliveryPipelinesRequest.serialize, response_deserializer=cloud_deploy.ListDeliveryPipelinesResponse.deserialize, ) return self._stubs["list_delivery_pipelines"] @property def get_delivery_pipeline( self, ) -> Callable[ [cloud_deploy.GetDeliveryPipelineRequest], cloud_deploy.DeliveryPipeline ]: r"""Return a callable for the get delivery pipeline method over gRPC. Gets details of a single DeliveryPipeline. 
Returns: Callable[[~.GetDeliveryPipelineRequest], ~.DeliveryPipeline]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_delivery_pipeline" not in self._stubs: self._stubs["get_delivery_pipeline"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/GetDeliveryPipeline", request_serializer=cloud_deploy.GetDeliveryPipelineRequest.serialize, response_deserializer=cloud_deploy.DeliveryPipeline.deserialize, ) return self._stubs["get_delivery_pipeline"] @property def create_delivery_pipeline( self, ) -> Callable[ [cloud_deploy.CreateDeliveryPipelineRequest], operations_pb2.Operation ]: r"""Return a callable for the create delivery pipeline method over gRPC. Creates a new DeliveryPipeline in a given project and location. Returns: Callable[[~.CreateDeliveryPipelineRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_delivery_pipeline" not in self._stubs: self._stubs["create_delivery_pipeline"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/CreateDeliveryPipeline", request_serializer=cloud_deploy.CreateDeliveryPipelineRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_delivery_pipeline"] @property def update_delivery_pipeline( self, ) -> Callable[ [cloud_deploy.UpdateDeliveryPipelineRequest], operations_pb2.Operation ]: r"""Return a callable for the update delivery pipeline method over gRPC. Updates the parameters of a single DeliveryPipeline. Returns: Callable[[~.UpdateDeliveryPipelineRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_delivery_pipeline" not in self._stubs: self._stubs["update_delivery_pipeline"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/UpdateDeliveryPipeline", request_serializer=cloud_deploy.UpdateDeliveryPipelineRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["update_delivery_pipeline"] @property def delete_delivery_pipeline( self, ) -> Callable[ [cloud_deploy.DeleteDeliveryPipelineRequest], operations_pb2.Operation ]: r"""Return a callable for the delete delivery pipeline method over gRPC. Deletes a single DeliveryPipeline. Returns: Callable[[~.DeleteDeliveryPipelineRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_delivery_pipeline" not in self._stubs: self._stubs["delete_delivery_pipeline"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/DeleteDeliveryPipeline", request_serializer=cloud_deploy.DeleteDeliveryPipelineRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["delete_delivery_pipeline"] @property def list_targets( self, ) -> Callable[[cloud_deploy.ListTargetsRequest], cloud_deploy.ListTargetsResponse]: r"""Return a callable for the list targets method over gRPC. Lists Targets in a given project and location. Returns: Callable[[~.ListTargetsRequest], ~.ListTargetsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_targets" not in self._stubs: self._stubs["list_targets"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/ListTargets", request_serializer=cloud_deploy.ListTargetsRequest.serialize, response_deserializer=cloud_deploy.ListTargetsResponse.deserialize, ) return self._stubs["list_targets"] @property def get_target( self, ) -> Callable[[cloud_deploy.GetTargetRequest], cloud_deploy.Target]: r"""Return a callable for the get target method over gRPC. Gets details of a single Target. Returns: Callable[[~.GetTargetRequest], ~.Target]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_target" not in self._stubs: self._stubs["get_target"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/GetTarget", request_serializer=cloud_deploy.GetTargetRequest.serialize, response_deserializer=cloud_deploy.Target.deserialize, ) return self._stubs["get_target"] @property def create_target( self, ) -> Callable[[cloud_deploy.CreateTargetRequest], operations_pb2.Operation]: r"""Return a callable for the create target method over gRPC. Creates a new Target in a given project and location. Returns: Callable[[~.CreateTargetRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_target" not in self._stubs: self._stubs["create_target"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/CreateTarget", request_serializer=cloud_deploy.CreateTargetRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_target"] @property def update_target( self, ) -> Callable[[cloud_deploy.UpdateTargetRequest], operations_pb2.Operation]: r"""Return a callable for the update target method over gRPC. Updates the parameters of a single Target. Returns: Callable[[~.UpdateTargetRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_target" not in self._stubs: self._stubs["update_target"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/UpdateTarget", request_serializer=cloud_deploy.UpdateTargetRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["update_target"] @property def delete_target( self, ) -> Callable[[cloud_deploy.DeleteTargetRequest], operations_pb2.Operation]: r"""Return a callable for the delete target method over gRPC. Deletes a single Target. Returns: Callable[[~.DeleteTargetRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_target" not in self._stubs: self._stubs["delete_target"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/DeleteTarget", request_serializer=cloud_deploy.DeleteTargetRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["delete_target"] @property def list_releases( self, ) -> Callable[ [cloud_deploy.ListReleasesRequest], cloud_deploy.ListReleasesResponse ]: r"""Return a callable for the list releases method over gRPC. Lists Releases in a given project and location. Returns: Callable[[~.ListReleasesRequest], ~.ListReleasesResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_releases" not in self._stubs: self._stubs["list_releases"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/ListReleases", request_serializer=cloud_deploy.ListReleasesRequest.serialize, response_deserializer=cloud_deploy.ListReleasesResponse.deserialize, ) return self._stubs["list_releases"] @property def get_release( self, ) -> Callable[[cloud_deploy.GetReleaseRequest], cloud_deploy.Release]: r"""Return a callable for the get release method over gRPC. Gets details of a single Release. Returns: Callable[[~.GetReleaseRequest], ~.Release]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_release" not in self._stubs: self._stubs["get_release"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/GetRelease", request_serializer=cloud_deploy.GetReleaseRequest.serialize, response_deserializer=cloud_deploy.Release.deserialize, ) return self._stubs["get_release"] @property def create_release( self, ) -> Callable[[cloud_deploy.CreateReleaseRequest], operations_pb2.Operation]: r"""Return a callable for the create release method over gRPC. Creates a new Release in a given project and location. Returns: Callable[[~.CreateReleaseRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_release" not in self._stubs: self._stubs["create_release"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/CreateRelease", request_serializer=cloud_deploy.CreateReleaseRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_release"] @property def abandon_release( self, ) -> Callable[ [cloud_deploy.AbandonReleaseRequest], cloud_deploy.AbandonReleaseResponse ]: r"""Return a callable for the abandon release method over gRPC. Abandons a Release in the Delivery Pipeline. Returns: Callable[[~.AbandonReleaseRequest], ~.AbandonReleaseResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "abandon_release" not in self._stubs: self._stubs["abandon_release"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/AbandonRelease", request_serializer=cloud_deploy.AbandonReleaseRequest.serialize, response_deserializer=cloud_deploy.AbandonReleaseResponse.deserialize, ) return self._stubs["abandon_release"] @property def approve_rollout( self, ) -> Callable[ [cloud_deploy.ApproveRolloutRequest], cloud_deploy.ApproveRolloutResponse ]: r"""Return a callable for the approve rollout method over gRPC. Approves a Rollout. Returns: Callable[[~.ApproveRolloutRequest], ~.ApproveRolloutResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "approve_rollout" not in self._stubs: self._stubs["approve_rollout"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/ApproveRollout", request_serializer=cloud_deploy.ApproveRolloutRequest.serialize, response_deserializer=cloud_deploy.ApproveRolloutResponse.deserialize, ) return self._stubs["approve_rollout"] @property def advance_rollout( self, ) -> Callable[ [cloud_deploy.AdvanceRolloutRequest], cloud_deploy.AdvanceRolloutResponse ]: r"""Return a callable for the advance rollout method over gRPC. Advances a Rollout in a given project and location. Returns: Callable[[~.AdvanceRolloutRequest], ~.AdvanceRolloutResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "advance_rollout" not in self._stubs: self._stubs["advance_rollout"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/AdvanceRollout", request_serializer=cloud_deploy.AdvanceRolloutRequest.serialize, response_deserializer=cloud_deploy.AdvanceRolloutResponse.deserialize, ) return self._stubs["advance_rollout"] @property def cancel_rollout( self, ) -> Callable[ [cloud_deploy.CancelRolloutRequest], cloud_deploy.CancelRolloutResponse ]: r"""Return a callable for the cancel rollout method over gRPC. Cancels a Rollout in a given project and location. Returns: Callable[[~.CancelRolloutRequest], ~.CancelRolloutResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. 
# gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "cancel_rollout" not in self._stubs: self._stubs["cancel_rollout"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/CancelRollout", request_serializer=cloud_deploy.CancelRolloutRequest.serialize, response_deserializer=cloud_deploy.CancelRolloutResponse.deserialize, ) return self._stubs["cancel_rollout"] @property def list_rollouts( self, ) -> Callable[ [cloud_deploy.ListRolloutsRequest], cloud_deploy.ListRolloutsResponse ]: r"""Return a callable for the list rollouts method over gRPC. Lists Rollouts in a given project and location. Returns: Callable[[~.ListRolloutsRequest], ~.ListRolloutsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_rollouts" not in self._stubs: self._stubs["list_rollouts"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/ListRollouts", request_serializer=cloud_deploy.ListRolloutsRequest.serialize, response_deserializer=cloud_deploy.ListRolloutsResponse.deserialize, ) return self._stubs["list_rollouts"] @property def get_rollout( self, ) -> Callable[[cloud_deploy.GetRolloutRequest], cloud_deploy.Rollout]: r"""Return a callable for the get rollout method over gRPC. Gets details of a single Rollout. Returns: Callable[[~.GetRolloutRequest], ~.Rollout]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_rollout" not in self._stubs: self._stubs["get_rollout"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/GetRollout", request_serializer=cloud_deploy.GetRolloutRequest.serialize, response_deserializer=cloud_deploy.Rollout.deserialize, ) return self._stubs["get_rollout"] @property def create_rollout( self, ) -> Callable[[cloud_deploy.CreateRolloutRequest], operations_pb2.Operation]: r"""Return a callable for the create rollout method over gRPC. Creates a new Rollout in a given project and location. Returns: Callable[[~.CreateRolloutRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_rollout" not in self._stubs: self._stubs["create_rollout"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/CreateRollout", request_serializer=cloud_deploy.CreateRolloutRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_rollout"] @property def ignore_job( self, ) -> Callable[[cloud_deploy.IgnoreJobRequest], cloud_deploy.IgnoreJobResponse]: r"""Return a callable for the ignore job method over gRPC. Ignores the specified Job in a Rollout. Returns: Callable[[~.IgnoreJobRequest], ~.IgnoreJobResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "ignore_job" not in self._stubs: self._stubs["ignore_job"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/IgnoreJob", request_serializer=cloud_deploy.IgnoreJobRequest.serialize, response_deserializer=cloud_deploy.IgnoreJobResponse.deserialize, ) return self._stubs["ignore_job"] @property def retry_job( self, ) -> Callable[[cloud_deploy.RetryJobRequest], cloud_deploy.RetryJobResponse]: r"""Return a callable for the retry job method over gRPC. Retries the specified Job in a Rollout. Returns: Callable[[~.RetryJobRequest], ~.RetryJobResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "retry_job" not in self._stubs: self._stubs["retry_job"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/RetryJob", request_serializer=cloud_deploy.RetryJobRequest.serialize, response_deserializer=cloud_deploy.RetryJobResponse.deserialize, ) return self._stubs["retry_job"] @property def list_job_runs( self, ) -> Callable[[cloud_deploy.ListJobRunsRequest], cloud_deploy.ListJobRunsResponse]: r"""Return a callable for the list job runs method over gRPC. Lists JobRuns in a given project and location. Returns: Callable[[~.ListJobRunsRequest], ~.ListJobRunsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_job_runs" not in self._stubs: self._stubs["list_job_runs"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/ListJobRuns", request_serializer=cloud_deploy.ListJobRunsRequest.serialize, response_deserializer=cloud_deploy.ListJobRunsResponse.deserialize, ) return self._stubs["list_job_runs"] @property def get_job_run( self, ) -> Callable[[cloud_deploy.GetJobRunRequest], cloud_deploy.JobRun]: r"""Return a callable for the get job run method over gRPC. Gets details of a single JobRun. Returns: Callable[[~.GetJobRunRequest], ~.JobRun]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_job_run" not in self._stubs: self._stubs["get_job_run"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/GetJobRun", request_serializer=cloud_deploy.GetJobRunRequest.serialize, response_deserializer=cloud_deploy.JobRun.deserialize, ) return self._stubs["get_job_run"] @property def terminate_job_run( self, ) -> Callable[ [cloud_deploy.TerminateJobRunRequest], cloud_deploy.TerminateJobRunResponse ]: r"""Return a callable for the terminate job run method over gRPC. Terminates a Job Run in a given project and location. Returns: Callable[[~.TerminateJobRunRequest], ~.TerminateJobRunResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "terminate_job_run" not in self._stubs: self._stubs["terminate_job_run"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/TerminateJobRun", request_serializer=cloud_deploy.TerminateJobRunRequest.serialize, response_deserializer=cloud_deploy.TerminateJobRunResponse.deserialize, ) return self._stubs["terminate_job_run"] @property def get_config( self, ) -> Callable[[cloud_deploy.GetConfigRequest], cloud_deploy.Config]: r"""Return a callable for the get config method over gRPC. Gets the configuration for a location. Returns: Callable[[~.GetConfigRequest], ~.Config]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_config" not in self._stubs: self._stubs["get_config"] = self.grpc_channel.unary_unary( "/google.cloud.deploy.v1.CloudDeploy/GetConfig", request_serializer=cloud_deploy.GetConfigRequest.serialize, response_deserializer=cloud_deploy.Config.deserialize, ) return self._stubs["get_config"] def close(self): self.grpc_channel.close() @property def delete_operation( self, ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: r"""Return a callable for the delete_operation method over gRPC.""" # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_operation" not in self._stubs: self._stubs["delete_operation"] = self.grpc_channel.unary_unary( "/google.longrunning.Operations/DeleteOperation", request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, response_deserializer=None, ) return self._stubs["delete_operation"] @property def cancel_operation( self, ) -> Callable[[operations_pb2.CancelOperationRequest], None]: r"""Return a callable for the cancel_operation method over gRPC.""" # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "cancel_operation" not in self._stubs: self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( "/google.longrunning.Operations/CancelOperation", request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, response_deserializer=None, ) return self._stubs["cancel_operation"] @property def get_operation( self, ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: r"""Return a callable for the get_operation method over gRPC.""" # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_operation" not in self._stubs: self._stubs["get_operation"] = self.grpc_channel.unary_unary( "/google.longrunning.Operations/GetOperation", request_serializer=operations_pb2.GetOperationRequest.SerializeToString, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["get_operation"] @property def list_operations( self, ) -> Callable[ [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse ]: r"""Return a callable for the list_operations method over gRPC.""" # Generate a "stub function" on-the-fly which will actually make # the request. 
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_operations" not in self._stubs:
            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/ListOperations",
                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
            )
        return self._stubs["list_operations"]

    @property
    def list_locations(
        self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse
    ]:
        r"""Return a callable for the list locations method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_locations" not in self._stubs:
            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
                "/google.cloud.location.Locations/ListLocations",
                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
            )
        return self._stubs["list_locations"]

    @property
    def get_location(
        self,
    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
        r"""Return a callable for the get location method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_location" not in self._stubs:
            self._stubs["get_location"] = self.grpc_channel.unary_unary(
                "/google.cloud.location.Locations/GetLocation",
                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
                response_deserializer=locations_pb2.Location.FromString,
            )
        return self._stubs["get_location"]

    @property
    def set_iam_policy(
        self,
    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
        r"""Return a callable for the set iam policy method over gRPC.

        Sets the IAM access control policy on the specified function.
        Replaces any existing policy.

        Returns:
            Callable[[~.SetIamPolicyRequest],
                    ~.Policy]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "set_iam_policy" not in self._stubs:
            self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
                "/google.iam.v1.IAMPolicy/SetIamPolicy",
                request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
                response_deserializer=policy_pb2.Policy.FromString,
            )
        return self._stubs["set_iam_policy"]

    @property
    def get_iam_policy(
        self,
    ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
        r"""Return a callable for the get iam policy method over gRPC.

        Gets the IAM access control policy for a function.
        Returns an empty policy if the function exists and does
        not have a policy set.

        Returns:
            Callable[[~.GetIamPolicyRequest],
                    ~.Policy]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
if "get_iam_policy" not in self._stubs: self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( "/google.iam.v1.IAMPolicy/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, ) return self._stubs["get_iam_policy"] @property def test_iam_permissions( self, ) -> Callable[ [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse, ]: r"""Return a callable for the test iam permissions method over gRPC. Tests the specified permissions against the IAM access control policy for a function. If the function does not exist, this will return an empty set of permissions, not a NOT_FOUND error. Returns: Callable[[~.TestIamPermissionsRequest], ~.TestIamPermissionsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( "/google.iam.v1.IAMPolicy/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, ) return self._stubs["test_iam_permissions"] @property def kind(self) -> str: return "grpc" __all__ = ("CloudDeployGrpcTransport",)
832d6a6360ef3e10e300c769b64f199cfb10d77c
fdbb74a95924e2677466614f6ab6e2bb13b2a95a
/third_party/python/Tools/scripts/fixdiv.py
1213a4e397603fad502b9d15b6d00cf97864c928
[ "Python-2.0", "ISC", "GPL-1.0-or-later", "LicenseRef-scancode-python-cwi", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-other-copyleft" ]
permissive
jart/cosmopolitan
fb11b5658939023977060a7c6c71a74093d9cb44
0d748ad58e1063dd1f8560f18a0c75293b9415b7
refs/heads/master
2023-09-06T09:17:29.303607
2023-09-02T03:49:13
2023-09-02T03:50:18
272,457,606
11,887
435
ISC
2023-09-14T17:47:58
2020-06-15T14:16:13
C
UTF-8
Python
false
false
13,882
py
fixdiv.py
#! /usr/bin/env python3 """fixdiv - tool to fix division operators. To use this tool, first run `python -Qwarnall yourscript.py 2>warnings'. This runs the script `yourscript.py' while writing warning messages about all uses of the classic division operator to the file `warnings'. The warnings look like this: <file>:<line>: DeprecationWarning: classic <type> division The warnings are written to stderr, so you must use `2>' for the I/O redirect. I know of no way to redirect stderr on Windows in a DOS box, so you will have to modify the script to set sys.stderr to some kind of log file if you want to do this on Windows. The warnings are not limited to the script; modules imported by the script may also trigger warnings. In fact a useful technique is to write a test script specifically intended to exercise all code in a particular module or set of modules. Then run `python fixdiv.py warnings'. This first reads the warnings, looking for classic division warnings, and sorts them by file name and line number. Then, for each file that received at least one warning, it parses the file and tries to match the warnings up to the division operators found in the source code. If it is successful, it writes its findings to stdout, preceded by a line of dashes and a line of the form: Index: <file> If the only findings found are suggestions to change a / operator into a // operator, the output is acceptable input for the Unix 'patch' program. Here are the possible messages on stdout (N stands for a line number): - A plain-diff-style change ('NcN', a line marked by '<', a line containing '---', and a line marked by '>'): A / operator was found that should be changed to //. This is the recommendation when only int and/or long arguments were seen. - 'True division / operator at line N' and a line marked by '=': A / operator was found that can remain unchanged. This is the recommendation when only float and/or complex arguments were seen. - 'Ambiguous / operator (..., ...) at line N', line marked by '?': A / operator was found for which int or long as well as float or complex arguments were seen. This is highly unlikely; if it occurs, you may have to restructure the code to keep the classic semantics, or maybe you don't care about the classic semantics. - 'No conclusive evidence on line N', line marked by '*': A / operator was found for which no warnings were seen. This could be code that was never executed, or code that was only executed with user-defined objects as arguments. You will have to investigate further. Note that // can be overloaded separately from /, using __floordiv__. True division can also be separately overloaded, using __truediv__. Classic division should be the same as either of those. (XXX should I add a warning for division on user-defined objects, to disambiguate this case from code that was never executed?) - 'Phantom ... warnings for line N', line marked by '*': A warning was seen for a line not containing a / operator. The most likely cause is a warning about code executed by 'exec' or eval() (see note below), or an indirect invocation of the / operator, for example via the div() function in the operator module. It could also be caused by a change to the file between the time the test script was run to collect warnings and the time fixdiv was run. - 'More than one / operator in line N'; or 'More than one / operator per statement in lines N-N': The scanner found more than one / operator on a single line, or in a statement split across multiple lines. 
Because the warnings framework doesn't (and can't) show the offset
within the line, and the code generator doesn't always give the
correct line number for operations in a multi-line statement, we
can't be sure whether all operators in the statement were executed.
To be on the safe side, by default a warning is issued about this
case.  In practice, these cases are usually safe, and the -m option
suppresses these warnings.

- 'Can't find the / operator in line N', line marked by '*': This
  really shouldn't happen.  It means that the tokenize module reported
  a '/' operator but the line it returns didn't contain a '/'
  character at the indicated position.

- 'Bad warning for line N: XYZ', line marked by '*': This really
  shouldn't happen.  It means that a 'classic XYZ division' warning
  was read with XYZ being something other than 'int', 'long',
  'float', or 'complex'.

Notes:

- The augmented assignment operator /= is handled the same way as the
  / operator.

- This tool never looks at the // operator; no warnings are ever
  generated for use of this operator.

- This tool never looks at the / operator when a future division
  statement is in effect; no warnings are generated in this case, and
  because the tool only looks at files for which at least one classic
  division warning was seen, it will never look at files containing a
  future division statement.

- Warnings may be issued for code not read from a file, but executed
  using the exec() or eval() functions.  These may have <string> in
  the filename position, in which case the fixdiv script will attempt
  and fail to open a file named '<string>' and issue a warning about
  this failure; or these may be reported as 'Phantom' warnings (see
  above).  You're on your own to deal with these.  You could make all
  recommended changes and add a future division statement to all
  affected files, and then re-run the test script; it should not issue
  any warnings.  If there are any, and you have a hard time tracking
  down where they are generated, you can use the -Werror option to
  force an error instead of a first warning, generating a traceback.

- The tool should be run from the same directory as that from which
  the original script was run, otherwise it won't be able to open
  files given by relative pathnames.
""" import sys import getopt import re import tokenize multi_ok = 0 def main(): try: opts, args = getopt.getopt(sys.argv[1:], "hm") except getopt.error as msg: usage(msg) return 2 for o, a in opts: if o == "-h": print(__doc__) return if o == "-m": global multi_ok multi_ok = 1 if not args: usage("at least one file argument is required") return 2 if args[1:]: sys.stderr.write("%s: extra file arguments ignored\n", sys.argv[0]) warnings = readwarnings(args[0]) if warnings is None: return 1 files = list(warnings.keys()) if not files: print("No classic division warnings read from", args[0]) return files.sort() exit = None for filename in files: x = process(filename, warnings[filename]) exit = exit or x return exit def usage(msg): sys.stderr.write("%s: %s\n" % (sys.argv[0], msg)) sys.stderr.write("Usage: %s [-m] warnings\n" % sys.argv[0]) sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0]) PATTERN = (r"^(.+?):(\d+): DeprecationWarning: " r"classic (int|long|float|complex) division$") def readwarnings(warningsfile): prog = re.compile(PATTERN) try: f = open(warningsfile) except IOError as msg: sys.stderr.write("can't open: %s\n" % msg) return warnings = {} while 1: line = f.readline() if not line: break m = prog.match(line) if not m: if line.find("division") >= 0: sys.stderr.write("Warning: ignored input " + line) continue filename, lineno, what = m.groups() list = warnings.get(filename) if list is None: warnings[filename] = list = [] list.append((int(lineno), sys.intern(what))) f.close() return warnings def process(filename, list): print("-"*70) assert list # if this fails, readwarnings() is broken try: fp = open(filename) except IOError as msg: sys.stderr.write("can't open: %s\n" % msg) return 1 print("Index:", filename) f = FileContext(fp) list.sort() index = 0 # list[:index] has been processed, list[index:] is still to do g = tokenize.generate_tokens(f.readline) while 1: startlineno, endlineno, slashes = lineinfo = scanline(g) if startlineno is None: break assert startlineno <= endlineno is not None orphans = [] while index < len(list) and list[index][0] < startlineno: orphans.append(list[index]) index += 1 if orphans: reportphantomwarnings(orphans, f) warnings = [] while index < len(list) and list[index][0] <= endlineno: warnings.append(list[index]) index += 1 if not slashes and not warnings: pass elif slashes and not warnings: report(slashes, "No conclusive evidence") elif warnings and not slashes: reportphantomwarnings(warnings, f) else: if len(slashes) > 1: if not multi_ok: rows = [] lastrow = None for (row, col), line in slashes: if row == lastrow: continue rows.append(row) lastrow = row assert rows if len(rows) == 1: print("*** More than one / operator in line", rows[0]) else: print("*** More than one / operator per statement", end=' ') print("in lines %d-%d" % (rows[0], rows[-1])) intlong = [] floatcomplex = [] bad = [] for lineno, what in warnings: if what in ("int", "long"): intlong.append(what) elif what in ("float", "complex"): floatcomplex.append(what) else: bad.append(what) lastrow = None for (row, col), line in slashes: if row == lastrow: continue lastrow = row line = chop(line) if line[col:col+1] != "/": print("*** Can't find the / operator in line %d:" % row) print("*", line) continue if bad: print("*** Bad warning for line %d:" % row, bad) print("*", line) elif intlong and not floatcomplex: print("%dc%d" % (row, row)) print("<", line) print("---") print(">", line[:col] + "/" + line[col:]) elif floatcomplex and not intlong: print("True division / operator at 
line %d:" % row) print("=", line) elif intlong and floatcomplex: print("*** Ambiguous / operator (%s, %s) at line %d:" % ( "|".join(intlong), "|".join(floatcomplex), row)) print("?", line) fp.close() def reportphantomwarnings(warnings, f): blocks = [] lastrow = None lastblock = None for row, what in warnings: if row != lastrow: lastblock = [row] blocks.append(lastblock) lastblock.append(what) for block in blocks: row = block[0] whats = "/".join(block[1:]) print("*** Phantom %s warnings for line %d:" % (whats, row)) f.report(row, mark="*") def report(slashes, message): lastrow = None for (row, col), line in slashes: if row != lastrow: print("*** %s on line %d:" % (message, row)) print("*", chop(line)) lastrow = row class FileContext: def __init__(self, fp, window=5, lineno=1): self.fp = fp self.window = 5 self.lineno = 1 self.eoflookahead = 0 self.lookahead = [] self.buffer = [] def fill(self): while len(self.lookahead) < self.window and not self.eoflookahead: line = self.fp.readline() if not line: self.eoflookahead = 1 break self.lookahead.append(line) def readline(self): self.fill() if not self.lookahead: return "" line = self.lookahead.pop(0) self.buffer.append(line) self.lineno += 1 return line def __getitem__(self, index): self.fill() bufstart = self.lineno - len(self.buffer) lookend = self.lineno + len(self.lookahead) if bufstart <= index < self.lineno: return self.buffer[index - bufstart] if self.lineno <= index < lookend: return self.lookahead[index - self.lineno] raise KeyError def report(self, first, last=None, mark="*"): if last is None: last = first for i in range(first, last+1): try: line = self[first] except KeyError: line = "<missing line>" print(mark, chop(line)) def scanline(g): slashes = [] startlineno = None endlineno = None for type, token, start, end, line in g: endlineno = end[0] if startlineno is None: startlineno = endlineno if token in ("/", "/="): slashes.append((start, line)) if type == tokenize.NEWLINE: break return startlineno, endlineno, slashes def chop(line): if line.endswith("\n"): return line[:-1] else: return line if __name__ == "__main__": sys.exit(main())
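The heart of fixdiv's matching step is scanline()'s use of the tokenize module to find / and /= operators with exact row/column positions. A self-contained sketch of that idea on an in-memory source string (the sample source is made up):

import io
import tokenize

SOURCE = "x = a / b\ny = c // d\nz = e / f\nw /= 2\n"

for tok in tokenize.generate_tokens(io.StringIO(SOURCE).readline):
    if tok.type == tokenize.OP and tok.string in ("/", "/="):
        row, col = tok.start
        print("classic-division candidate at line %d, column %d" % (row, col))
# The // on line 2 is tokenized as a single "//" OP token, so it never matches,
# which is exactly why fixdiv can safely ignore floor division.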
f270f4b5a1026b97e72a1d8e915706ba1838c636
4f1ab9e0d6f030c04f49bb40c6989345c024c5a8
/saas/urls/views/subscriber/billing/payment.py
60d62744c351d60ad2d547c947708d21a28b6549
[ "BSD-2-Clause" ]
permissive
djaodjin/djaodjin-saas
2e883b2afe548e6f6abccb6f74d884679e1e0b15
029bfcd9d4b04478950b829aeb0a92f5fd31776e
refs/heads/master
2023-09-06T05:45:27.940652
2023-09-05T13:21:32
2023-09-05T13:21:32
9,898,720
503
153
NOASSERTION
2023-09-08T10:55:07
2013-05-06T22:43:32
Python
UTF-8
Python
false
false
2,811
py
payment.py
# Copyright (c) 2022, DjaoDjin inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ URLs updating processing information and inserting transactions through POST requests. """ from ..... import settings from .....compat import path from .....views.billing import (CartPeriodsView, CartSeatsView, CardUpdateView, CartView, BalanceView, CheckoutView) urlpatterns = [ path('billing/<slug:%s>/checkout/' % settings.PROFILE_URL_KWARG, CheckoutView.as_view(), name='saas_checkout'), path('billing/<slug:%s>/cart-seats/' % settings.PROFILE_URL_KWARG, CartSeatsView.as_view(), name='saas_cart_seats'), path('billing/<slug:%s>/cart-periods/' % settings.PROFILE_URL_KWARG, CartPeriodsView.as_view(), name='saas_cart_periods'), path('billing/<slug:%s>/cart/' % settings.PROFILE_URL_KWARG, CartView.as_view(), name='saas_organization_cart'), path('billing/<slug:%s>/card/' % settings.PROFILE_URL_KWARG, CardUpdateView.as_view(), name='saas_update_card'), # Implementation Note: <subscribed_plan> (not <plan>) such that # the required_manager decorator does not raise a PermissionDenied # for a plan <organization> is subscribed to. path('billing/<slug:%s>/balance/<slug:subscribed_plan>/' % settings.PROFILE_URL_KWARG, BalanceView.as_view(), name='saas_subscription_balance'), path('billing/<slug:%s>/balance/' % settings.PROFILE_URL_KWARG, BalanceView.as_view(), name='saas_organization_balance'), ]
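Each of these path() entries can be resolved back to a URL by name with reverse(). A minimal, self-contained sketch of that round trip; the view function, the settings bootstrap, and the slug value are stand-ins for illustration, not djaodjin-saas code:

import django
from django.conf import settings
from django.http import HttpResponse
from django.urls import path, reverse

settings.configure(ROOT_URLCONF=__name__, DEBUG=True)
django.setup()

def checkout(request, profile):  # placeholder for CheckoutView.as_view()
    return HttpResponse("checkout for %s" % profile)

urlpatterns = [
    path("billing/<slug:profile>/checkout/", checkout, name="saas_checkout"),
]

print(reverse("saas_checkout", kwargs={"profile": "acme"}))  # /billing/acme/checkout/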
8a2997f4bf5c2b376144e88ce97d6f5110bafd5a
9d7a9cad236d6fc766dc3f0db8a74a824a2073bf
/python/interpret_community/widget/_internal/__init__.py
0d3338327b6752801d006694620be5997d35231c
[ "MIT", "LicenseRef-scancode-free-unknown" ]
permissive
interpretml/interpret-community
fdf03ea65791990aab27d3c6c06b7a5bc44ed801
00922df124204420402e13ec8f1b4ca9781e42f1
refs/heads/main
2023-08-17T11:16:22.596730
2023-08-15T17:16:28
2023-08-15T17:16:28
210,882,381
403
83
MIT
2023-09-11T13:20:21
2019-09-25T15:43:56
Python
UTF-8
Python
false
false
282
py
__init__.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- """Init file for azureml-contrib-explain-model/azureml/contrib/explain/model/visualize/_internal."""
91e64e2ab845c611da56dddcf106c28b5edeb1bf
d4239425234eacb647c4cc4f2f4c8537b618fca0
/onadata/apps/logger/migrations/0019_auto_20160307_0256.py
09c2af5b19a4cc6dbc89941a3d4ea3ca515d26f5
[ "BSD-2-Clause", "BSD-3-Clause" ]
permissive
onaio/onadata
58762d6a606870bd13d43fd27fdaa61720a745c2
e5bdec91cb47179172b515bbcb91701262ff3377
refs/heads/main
2023-09-04T03:12:43.388668
2023-08-24T07:27:08
2023-08-24T07:27:08
12,888,897
177
149
NOASSERTION
2023-09-13T14:19:05
2013-09-17T07:25:01
Python
UTF-8
Python
false
false
982
py
0019_auto_20160307_0256.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ("logger", "0018_auto_20160301_0330"), ] operations = [ migrations.AddField( model_name="widget", name="metadata", field=models.JSONField(default={}, blank=True), preserve_default=True, ), migrations.AlterField( model_name="instance", name="uuid", field=models.CharField(default="", max_length=249), preserve_default=True, ), migrations.AlterField( model_name="instance", name="xform", field=models.ForeignKey( related_name="instances", to="logger.XForm", null=True, on_delete=models.CASCADE, ), preserve_default=True, ), ]
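For comparison, newer Django code typically passes a callable default to JSONField so that rows never share one mutable dict; a sketch of the same AddField written that way (the app label and dependency are placeholders, not onadata's):

from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [("myapp", "0001_initial")]  # placeholder dependency

    operations = [
        migrations.AddField(
            model_name="widget",
            name="metadata",
            field=models.JSONField(default=dict, blank=True),  # callable default
        ),
    ]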
77af56dece82c1626d9b4f7772cc84895e607601
b71a6e7050b0a4368007350d91ee078288a7318c
/examples/mastermind.py
cc7d8aada542a437be76ca9134cecc19172fbe41
[ "Apache-2.0" ]
permissive
jarvisteach/appJar
2dfd0da6cb85ea3535379ed000efd97fb42fe4f8
0b59ce041da2197dcff3410e20f298676f1f7266
refs/heads/appJar
2023-08-29T09:42:01.812005
2019-09-28T18:34:06
2019-09-28T18:34:06
39,996,518
696
103
NOASSERTION
2023-02-20T01:01:16
2015-07-31T08:59:20
Python
UTF-8
Python
false
false
3,581
py
mastermind.py
# -*- coding: utf-8 -*-
import sys
sys.path.append('../')

import random
from appJar import gui

COLOURS = ['black', 'red', 'orange', 'yellow', 'green', 'blue', 'purple', 'pink', 'white']

HELP_MESSAGE = """Mastermind is a code-breaking game, in which the codebreaker (you) is trying to guess a randomly generated pattern.
Each guess is made by choosing four different colours on the decoding board.
The codemaker (the computer) will provide feedback by colouring the four pegs to the right of the decoding board.
A red peg indicates a guess that is correct in both colour and position, a white peg indicates a correct guess, in the wrong position."""

ERROR_MESSAGE = """Duplicates aren't allowed.
Please ensure you have selected four different colours."""

currentRound = 1
codePegs = [0, 0, 0, 0]
bestScore = 999

pattern = [] # stores the pegs to guess
while len(pattern) < 4:
    col = random.choice(COLOURS[1:])
    if col not in pattern:
        pattern.append(col)

def change(peg): # called when pressing a peg label
    if int(peg[1:]) == currentRound:
        peg = int(peg[0])
        codePegs[peg] = (codePegs[peg] + 1) % 9
        app.setLabelBg(str(peg)+str(currentRound), COLOURS[codePegs[peg]])

def guess(btn): # called when pressing the guess button
    global currentRound, bestScore, codePegs
    guess = [COLOURS[codePegs[0]], COLOURS[codePegs[1]], COLOURS[codePegs[2]], COLOURS[codePegs[3]]]

    if guess[0] in guess[1:] or guess[1] in guess[2:] or guess[2] in guess[3:]:
        app.errorBox("Error", ERROR_MESSAGE)
    else:
        app.disableButton('GO'+str(currentRound))
        if guess == pattern:
            for res in range(4):
                app.setLabelBg("p"+str(res)+str(currentRound), 'red')
            app.infoBox("Winner", "You guessed in " + str(currentRound) + " rounds.")
            if currentRound < bestScore:
                bestScore = currentRound
            currentRound = 1
            codePegs = [0, 0, 0, 0]
            app.removeAllWidgets()
        else:
            done = []
            for i in range(4):
                if guess[i] == pattern[i]:
                    done.append("red")
                elif guess[i] in pattern:
                    done.append("white")
            done.sort()
            for i in range(len(done)):
                app.setLabelBg("p"+str(i)+str(currentRound), done[i])
            currentRound += 1
            newRow()

def getHelp():
    app.popUp("Help", HELP_MESSAGE)

def newRow():
    if currentRound == 1:
        score = str(bestScore) if bestScore < 999 else "???"
        app.label("score", "Best score: " + score, fg="yellow", font={'weight':'bold', 'size':16})
        app.link("help", getHelp, pos=(0,0), sticky="e", font={'size':9})

    with app.labelFrame('Round ' + str(currentRound), row=currentRound, sticky='news', padding=(2,2)):
        for i in range(4): # add the four labels for player choices
            app.label(str(i)+str(currentRound), '', bg=COLOURS[codePegs[i]], pos=(currentRound, i), submit=change, width=6)

        app.addNamedButton('GO', 'GO'+str(currentRound), guess, row=currentRound, column=4)

        with app.frame('feedback'+str(currentRound), row=currentRound, column=5, sticky='news', padding=(2,2)):
            for x in range(2): # add a grid of four labels for feedback
                for y in range(2):
                    app.label('p' + str(x*2+y) + str(currentRound), '', bg='black', pos=(x, y), width=3)

with gui('Mastermind', bg='grey', fg='white', sticky="new", stretch="none") as app:
    app.location = ("c", 50)
    newRow()
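The duplicate test in guess() chains three membership checks; the same condition can be expressed more directly with a set, as in this tiny sketch:

guess = ["red", "blue", "red", "green"]
has_duplicates = len(set(guess)) < len(guess)  # True: 'red' appears twice
print(has_duplicates)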
13fd799b74b6bdcf008c2470a22c87a7f8e24968
4ddf82eeb31d46fb67802a4375390eb42a8f23b8
/tests/multi_bluetooth/ble_characteristic.py
b5dfefc840dd6ffe685e3accdb5d56aef2dc59c3
[ "MIT" ]
permissive
pulkin/micropython
1437a507b9e90c8824e80c3553e6209d89e64565
c274c947c611f510fd2b1c4ef6cbd9f4283794fc
refs/heads/master
2023-03-08T02:35:28.208819
2022-04-19T12:38:47
2022-04-19T12:38:47
167,732,676
103
36
MIT
2023-02-25T03:02:36
2019-01-26T19:57:59
C
UTF-8
Python
false
false
5,475
py
ble_characteristic.py
# Test characteristic read/write/notify from both GATTS and GATTC. from micropython import const import time, machine, bluetooth TIMEOUT_MS = 5000 _IRQ_CENTRAL_CONNECT = const(1 << 0) _IRQ_CENTRAL_DISCONNECT = const(1 << 1) _IRQ_GATTS_WRITE = const(1 << 2) _IRQ_PERIPHERAL_CONNECT = const(1 << 6) _IRQ_PERIPHERAL_DISCONNECT = const(1 << 7) _IRQ_GATTC_CHARACTERISTIC_RESULT = const(1 << 9) _IRQ_GATTC_READ_RESULT = const(1 << 11) _IRQ_GATTC_WRITE_STATUS = const(1 << 12) _IRQ_GATTC_NOTIFY = const(1 << 13) SERVICE_UUID = bluetooth.UUID("A5A5A5A5-FFFF-9999-1111-5A5A5A5A5A5A") CHAR_UUID = bluetooth.UUID("00000000-1111-2222-3333-444444444444") CHAR = ( CHAR_UUID, bluetooth.FLAG_READ | bluetooth.FLAG_WRITE | bluetooth.FLAG_NOTIFY, ) SERVICE = ( SERVICE_UUID, (CHAR,), ) SERVICES = (SERVICE,) last_event = None last_data = None value_handle = 0 def irq(event, data): global last_event, last_data, value_handle last_event = event last_data = data if event == _IRQ_CENTRAL_CONNECT: print("_IRQ_CENTRAL_CONNECT") elif event == _IRQ_CENTRAL_DISCONNECT: print("_IRQ_CENTRAL_DISCONNECT") elif event == _IRQ_GATTS_WRITE: print("_IRQ_GATTS_WRITE", ble.gatts_read(data[-1])) elif event == _IRQ_PERIPHERAL_CONNECT: print("_IRQ_PERIPHERAL_CONNECT") elif event == _IRQ_PERIPHERAL_DISCONNECT: print("_IRQ_PERIPHERAL_DISCONNECT") elif event == _IRQ_GATTC_CHARACTERISTIC_RESULT: # conn_handle, def_handle, value_handle, properties, uuid = data if data[-1] == CHAR_UUID: print("_IRQ_GATTC_CHARACTERISTIC_RESULT", data[-1]) value_handle = data[2] elif event == _IRQ_GATTC_READ_RESULT: print("_IRQ_GATTC_READ_RESULT", data[-1]) elif event == _IRQ_GATTC_WRITE_STATUS: print("_IRQ_GATTC_WRITE_STATUS", data[-1]) elif event == _IRQ_GATTC_NOTIFY: print("_IRQ_GATTC_NOTIFY", data[-1]) def wait_for_event(event, timeout_ms): t0 = time.ticks_ms() while time.ticks_diff(time.ticks_ms(), t0) < timeout_ms: if isinstance(event, int): if last_event == event: break elif event(): break machine.idle() # Acting in peripheral role. def instance0(): multitest.globals(BDADDR=ble.config("mac")) ((char_handle,),) = ble.gatts_register_services(SERVICES) print("gap_advertise") ble.gap_advertise(20_000, b"\x02\x01\x06\x04\xffMPY") multitest.next() try: # Write initial characteristic value. ble.gatts_write(char_handle, "periph0") # Wait for central to connect to us. wait_for_event(_IRQ_CENTRAL_CONNECT, TIMEOUT_MS) if last_event != _IRQ_CENTRAL_CONNECT: return conn_handle, _, _ = last_data # Wait for a write to the characteristic from the central. wait_for_event(_IRQ_GATTS_WRITE, TIMEOUT_MS) # Wait a bit, then write the characteristic and notify it. time.sleep_ms(1000) ble.gatts_write(char_handle, "periph1") ble.gatts_notify(conn_handle, char_handle) # Wait for a write to the characteristic from the central. wait_for_event(_IRQ_GATTS_WRITE, TIMEOUT_MS) # Wait a bit, then notify a new value on the characteristic. time.sleep_ms(1000) ble.gatts_notify(conn_handle, char_handle, "periph2") # Wait for the central to disconnect. wait_for_event(_IRQ_CENTRAL_DISCONNECT, TIMEOUT_MS) finally: ble.active(0) # Acting in central role. def instance1(): multitest.next() try: # Connect to peripheral and then disconnect. print("gap_connect") ble.gap_connect(0, BDADDR) wait_for_event(_IRQ_PERIPHERAL_CONNECT, TIMEOUT_MS) if last_event != _IRQ_PERIPHERAL_CONNECT: return conn_handle, _, _ = last_data # Discover characteristics. 
ble.gattc_discover_characteristics(conn_handle, 1, 65535) wait_for_event(lambda: value_handle, TIMEOUT_MS) # Issue read of characteristic, should get initial value. print("gattc_read") ble.gattc_read(conn_handle, value_handle) wait_for_event(_IRQ_GATTC_READ_RESULT, TIMEOUT_MS) # Write to the characteristic, and ask for a response. print("gattc_write") ble.gattc_write(conn_handle, value_handle, "central0", 1) wait_for_event(_IRQ_GATTC_WRITE_STATUS, TIMEOUT_MS) # Wait for a notify, then read new value. wait_for_event(_IRQ_GATTC_NOTIFY, TIMEOUT_MS) print("gattc_read") ble.gattc_read(conn_handle, value_handle) wait_for_event(_IRQ_GATTC_READ_RESULT, TIMEOUT_MS) # Write to the characteristic, and ask for a response. print("gattc_write") ble.gattc_write(conn_handle, value_handle, "central1", 1) wait_for_event(_IRQ_GATTC_WRITE_STATUS, TIMEOUT_MS) # Wait for a notify (should have new data), then read old value (should be unchanged). wait_for_event(_IRQ_GATTC_NOTIFY, TIMEOUT_MS) print("gattc_read") ble.gattc_read(conn_handle, value_handle) wait_for_event(_IRQ_GATTC_READ_RESULT, TIMEOUT_MS) # Disconnect from peripheral. print("gap_disconnect:", ble.gap_disconnect(conn_handle)) wait_for_event(_IRQ_PERIPHERAL_DISCONNECT, TIMEOUT_MS) finally: ble.active(0) ble = bluetooth.BLE() ble.active(1) ble.irq(irq)
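wait_for_event() above is a busy-wait with a deadline built on MicroPython's wrap-around-safe tick helpers. Distilled into a reusable sketch (MicroPython-only, since CPython's time module has no ticks_ms/ticks_diff):

import time

def wait_for(predicate, timeout_ms):
    # Poll until the predicate holds or the deadline passes; ticks_diff
    # stays correct even when the millisecond counter wraps around.
    t0 = time.ticks_ms()
    while time.ticks_diff(time.ticks_ms(), t0) < timeout_ms:
        if predicate():
            return True
    return False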
e0be5ae3fcbbd630788d40123c82ac6f94f75828
b8bbdfc593b6d816e67a344f720f90ec05236778
/airflow/providers/amazon/aws/operators/lambda_function.py
5d7e980bb53aca76960a4b0a2283adb7ea13c591
[ "Apache-2.0", "BSD-3-Clause", "MIT" ]
permissive
apache/airflow
ed78db0a8bab7e096990e143926e52f518e288ab
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
refs/heads/main
2023-09-01T08:37:34.556097
2023-09-01T06:49:05
2023-09-01T06:49:05
33,884,891
22,756
11,558
Apache-2.0
2023-09-14T20:12:36
2015-04-13T18:04:58
Python
UTF-8
Python
false
false
9,619
py
lambda_function.py
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import annotations import json from datetime import timedelta from functools import cached_property from typing import TYPE_CHECKING, Any, Sequence from airflow import AirflowException from airflow.configuration import conf from airflow.models import BaseOperator from airflow.providers.amazon.aws.hooks.lambda_function import LambdaHook from airflow.providers.amazon.aws.triggers.lambda_function import LambdaCreateFunctionCompleteTrigger if TYPE_CHECKING: from airflow.utils.context import Context class LambdaCreateFunctionOperator(BaseOperator): """ Creates an AWS Lambda function. More information regarding parameters of this operator can be found here https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.create_function .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:LambdaCreateFunctionOperator` :param function_name: The name of the AWS Lambda function, version, or alias. :param runtime: The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. :param role: The Amazon Resource Name (ARN) of the function's execution role. :param handler: The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. :param code: The code for the function. :param description: A description of the function. :param timeout: The amount of time (in seconds) that Lambda allows a function to run before stopping it. :param config: Optional dictionary for arbitrary parameters to the boto API create_lambda call. :param wait_for_completion: If True, the operator will wait until the function is active. :param waiter_max_attempts: Maximum number of attempts to poll the creation. :param waiter_delay: Number of seconds between polling the state of the creation. :param deferrable: If True, the operator will wait asynchronously for the creation to complete. This implies waiting for creation complete. This mode requires aiobotocore module to be installed. 
(default: False, but can be overridden in config file by setting default_deferrable to True) :param aws_conn_id: The AWS connection ID to use """ template_fields: Sequence[str] = ( "function_name", "runtime", "role", "handler", "code", "config", ) ui_color = "#ff7300" def __init__( self, *, function_name: str, runtime: str | None = None, role: str, handler: str | None = None, code: dict, description: str | None = None, timeout: int | None = None, config: dict = {}, wait_for_completion: bool = False, waiter_max_attempts: int = 60, waiter_delay: int = 15, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), aws_conn_id: str = "aws_default", **kwargs, ): super().__init__(**kwargs) self.function_name = function_name self.runtime = runtime self.role = role self.handler = handler self.code = code self.description = description self.timeout = timeout self.config = config self.wait_for_completion = wait_for_completion self.waiter_delay = waiter_delay self.waiter_max_attempts = waiter_max_attempts self.deferrable = deferrable self.aws_conn_id = aws_conn_id @cached_property def hook(self) -> LambdaHook: return LambdaHook(aws_conn_id=self.aws_conn_id) def execute(self, context: Context): self.log.info("Creating AWS Lambda function: %s", self.function_name) response = self.hook.create_lambda( function_name=self.function_name, runtime=self.runtime, role=self.role, handler=self.handler, code=self.code, description=self.description, timeout=self.timeout, **self.config, ) self.log.info("Lambda response: %r", response) if self.deferrable: self.defer( trigger=LambdaCreateFunctionCompleteTrigger( function_name=self.function_name, function_arn=response["FunctionArn"], waiter_delay=self.waiter_delay, waiter_max_attempts=self.waiter_max_attempts, aws_conn_id=self.aws_conn_id, ), method_name="execute_complete", timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay), ) if self.wait_for_completion: self.log.info("Wait for Lambda function to be active") waiter = self.hook.conn.get_waiter("function_active_v2") waiter.wait( FunctionName=self.function_name, ) return response.get("FunctionArn") def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str: if not event or event["status"] != "success": raise AirflowException(f"Trigger error: event is {event}") self.log.info("Lambda function created successfully") return event["function_arn"] class LambdaInvokeFunctionOperator(BaseOperator): """ Invokes an AWS Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. To invoke a function asynchronously, set `invocation_type` to `Event`. For more details, review the boto3 Lambda invoke docs. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:LambdaInvokeFunctionOperator` :param function_name: The name of the AWS Lambda function, version, or alias. :param log_type: Set to Tail to include the execution log in the response. Otherwise, set to "None". :param qualifier: Specify a version or alias to invoke a published version of the function. 
:param invocation_type: AWS Lambda invocation type (RequestResponse, Event, DryRun) :param client_context: Data about the invoking client to pass to the function in the context object :param payload: JSON provided as input to the Lambda function :param aws_conn_id: The AWS connection ID to use """ template_fields: Sequence[str] = ("function_name", "payload", "qualifier", "invocation_type") ui_color = "#ff7300" def __init__( self, *, function_name: str, log_type: str | None = None, qualifier: str | None = None, invocation_type: str | None = None, client_context: str | None = None, payload: bytes | str | None = None, aws_conn_id: str = "aws_default", **kwargs, ): super().__init__(**kwargs) self.function_name = function_name self.payload = payload self.log_type = log_type self.qualifier = qualifier self.invocation_type = invocation_type self.client_context = client_context self.aws_conn_id = aws_conn_id @cached_property def hook(self) -> LambdaHook: return LambdaHook(aws_conn_id=self.aws_conn_id) def execute(self, context: Context): """ Invoke the target AWS Lambda function from Airflow. :return: The response payload from the function, or an error object. """ success_status_codes = [200, 202, 204] self.log.info("Invoking AWS Lambda function: %s with payload: %s", self.function_name, self.payload) response = self.hook.invoke_lambda( function_name=self.function_name, invocation_type=self.invocation_type, log_type=self.log_type, client_context=self.client_context, payload=self.payload, qualifier=self.qualifier, ) self.log.info("Lambda response metadata: %r", response.get("ResponseMetadata")) if response.get("StatusCode") not in success_status_codes: raise ValueError("Lambda function did not execute", json.dumps(response.get("ResponseMetadata"))) payload_stream = response.get("Payload") payload = payload_stream.read().decode() if "FunctionError" in response: raise ValueError( "Lambda function execution resulted in error", {"ResponseMetadata": response.get("ResponseMetadata"), "Payload": payload}, ) self.log.info("Lambda function invocation succeeded: %r", response.get("ResponseMetadata")) return payload
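A hedged usage sketch for LambdaInvokeFunctionOperator inside a DAG; the DAG id, function name, and payload below are placeholders, not values taken from this module:

from datetime import datetime

from airflow import DAG
from airflow.providers.amazon.aws.operators.lambda_function import (
    LambdaInvokeFunctionOperator,
)

with DAG(
    dag_id="invoke_lambda_example",  # placeholder
    start_date=datetime(2023, 1, 1),
    schedule=None,
    catchup=False,
) as dag:
    invoke = LambdaInvokeFunctionOperator(
        task_id="invoke_fn",
        function_name="my-function",        # placeholder Lambda name
        invocation_type="RequestResponse",  # block until the payload returns
        payload='{"key": "value"}',
    )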
54394d38bd4b1b110bbf5768d0791d6dce977eac
e39cfcb65565524225fa304fb06ebf5ad3513306
/python-sdk/pachyderm_sdk/api/debug/extension.py
c25c46e09c802407f2aaddc060c0e34daca866da
[ "Apache-2.0", "LicenseRef-scancode-generic-cla" ]
permissive
pachyderm/pachyderm
7b5cbd89c6f3efa5d7bfb3eeed412f5ea1026d04
125f95010125f2df34bafb3ac804c966299c0f98
refs/heads/master
2023-08-31T11:56:47.995626
2023-08-30T22:50:50
2023-08-30T22:50:50
23,653,453
5,718
712
Apache-2.0
2023-09-14T19:37:37
2014-09-04T07:50:02
Go
UTF-8
Python
false
false
1,609
py
extension.py
"""Handwritten classes/methods that augment the existing Debug API.""" from datetime import timedelta from typing import Iterator, List, Optional, TYPE_CHECKING from . import DebugStub from . import DumpChunk, System if TYPE_CHECKING: from ..pps import Pipeline class ApiStub(DebugStub): # noinspection PyMethodOverriding def dump( self, *, system: "System" = None, pipelines: Optional[List["Pipeline"]] = None, input_repos: bool = False, timeout: timedelta = 0, ) -> Iterator["DumpChunk"]: """Collect a standard set of debugging information using the DumpV2 API rather than the now deprecated Dump API. This method is intended to be used in tandem with the `debug.get_dump_v2_template` endpoint. However, if no system or pipelines are specified then this call will automatically be performed for the user. If no system or pipelines are specified, then debug information for all systems and pipelines will be returned. """ if system is None and not pipelines: template = self.get_dump_v2_template() return self.dump_v2( system=template.request.system, pipelines=template.request.pipelines, input_repos=input_repos or template.request.input_repos, timeout=timeout or template.request.timeout, ) return self.dump_v2( system=system, pipelines=pipelines, input_repos=input_repos, timeout=timeout, )
51a195390da4011a3073d7a8591c475e61166b88
069c2295076c482afadfe6351da5ae02be8e18e6
/tests/syndication_tests/feeds.py
a35dc29e20900bf550a7097e04b8d696b2e9fd06
[ "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause", "GPL-1.0-or-later", "Python-2.0.1", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-other-permissive", "Python-2.0" ]
permissive
django/django
5eb557f57053631cd4f566f451e43197309dbeeb
c74a6fad5475495756a5bdb18b2cab2b68d429bc
refs/heads/main
2023-09-01T03:43:44.033530
2023-08-31T08:27:32
2023-08-31T08:27:32
4,164,482
73,530
38,187
BSD-3-Clause
2023-09-14T20:03:48
2012-04-28T02:47:18
Python
UTF-8
Python
false
false
7,949
py
feeds.py
from functools import wraps from django.contrib.syndication import views from django.utils import feedgenerator from django.utils.timezone import get_fixed_timezone from .models import Article, Entry def wraps_decorator(f): @wraps(f) def wrapper(*args, **kwargs): value = f(*args, **kwargs) return f"{value} -- decorated by @wraps." return wrapper def common_decorator(f): def wrapper(*args, **kwargs): value = f(*args, **kwargs) return f"{value} -- common decorated." return wrapper class TestRss2Feed(views.Feed): title = "My blog" description = "A more thorough description of my blog." link = "/blog/" feed_guid = "/foo/bar/1234" author_name = "Sally Smith" author_email = "test@example.com" author_link = "http://www.example.com/" categories = ("python", "django") feed_copyright = "Copyright (c) 2007, Sally Smith" ttl = 600 def items(self): return Entry.objects.all() def item_description(self, item): return "Overridden description: %s" % item def item_pubdate(self, item): return item.published def item_updateddate(self, item): return item.updated def item_comments(self, item): return "%scomments" % item.get_absolute_url() item_author_name = "Sally Smith" item_author_email = "test@example.com" item_author_link = "http://www.example.com/" item_categories = ("python", "testing") item_copyright = "Copyright (c) 2007, Sally Smith" class TestRss2FeedWithCallableObject(TestRss2Feed): class TimeToLive: def __call__(self): return 700 ttl = TimeToLive() class TestRss2FeedWithDecoratedMethod(TestRss2Feed): class TimeToLive: @wraps_decorator def __call__(self): return 800 @staticmethod @wraps_decorator def feed_copyright(): return "Copyright (c) 2022, John Doe" ttl = TimeToLive() @staticmethod def categories(): return ("javascript", "vue") @wraps_decorator def title(self): return "Overridden title" @wraps_decorator def item_title(self, item): return f"Overridden item title: {item.title}" @wraps_decorator def description(self, obj): return "Overridden description" @wraps_decorator def item_description(self): return "Overridden item description" class TestRss2FeedWithWrongDecoratedMethod(TestRss2Feed): @common_decorator def item_description(self, item): return f"Overridden item description: {item.title}" class TestRss2FeedWithGuidIsPermaLinkTrue(TestRss2Feed): def item_guid_is_permalink(self, item): return True class TestRss2FeedWithGuidIsPermaLinkFalse(TestRss2Feed): def item_guid(self, item): return str(item.pk) def item_guid_is_permalink(self, item): return False class TestRss091Feed(TestRss2Feed): feed_type = feedgenerator.RssUserland091Feed class TestNoPubdateFeed(views.Feed): title = "Test feed" link = "/feed/" def items(self): return Entry.objects.all() class TestAtomFeed(TestRss2Feed): feed_type = feedgenerator.Atom1Feed subtitle = TestRss2Feed.description class TestLatestFeed(TestRss2Feed): """ A feed where the latest entry date is an `updated` element. """ feed_type = feedgenerator.Atom1Feed subtitle = TestRss2Feed.description def items(self): return Entry.objects.exclude(title="My last entry") class ArticlesFeed(TestRss2Feed): """ A feed to test no link being defined. Articles have no get_absolute_url() method, and item_link() is not defined. """ def items(self): return Article.objects.all() class TestSingleEnclosureRSSFeed(TestRss2Feed): """ A feed to test that RSS feeds work with a single enclosure. 
""" def item_enclosure_url(self, item): return "http://example.com" def item_enclosure_size(self, item): return 0 def item_mime_type(self, item): return "image/png" class TestMultipleEnclosureRSSFeed(TestRss2Feed): """ A feed to test that RSS feeds raise an exception with multiple enclosures. """ def item_enclosures(self, item): return [ feedgenerator.Enclosure("http://example.com/hello.png", 0, "image/png"), feedgenerator.Enclosure("http://example.com/goodbye.png", 0, "image/png"), ] class TemplateFeed(TestRss2Feed): """ A feed to test defining item titles and descriptions with templates. """ title_template = "syndication/title.html" description_template = "syndication/description.html" # Defining a template overrides any item_title definition def item_title(self): return "Not in a template" class TemplateContextFeed(TestRss2Feed): """ A feed to test custom context data in templates for title or description. """ title_template = "syndication/title_context.html" description_template = "syndication/description_context.html" def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context["foo"] = "bar" return context class TestLanguageFeed(TestRss2Feed): language = "de" class TestGetObjectFeed(TestRss2Feed): def get_object(self, request, entry_id): return Entry.objects.get(pk=entry_id) def items(self, obj): return Article.objects.filter(entry=obj) def item_link(self, item): return "%sarticle/%s/" % (item.entry.get_absolute_url(), item.pk) def item_comments(self, item): return "%scomments" % self.item_link(item) def item_description(self, item): return "Article description: %s" % item.title def item_title(self, item): return "Title: %s" % item.title class NaiveDatesFeed(TestAtomFeed): """ A feed with naive (non-timezone-aware) dates. """ def item_pubdate(self, item): return item.published class TZAwareDatesFeed(TestAtomFeed): """ A feed with timezone-aware dates. """ def item_pubdate(self, item): # Provide a weird offset so that the test can know it's getting this # specific offset and not accidentally getting on from # settings.TIME_ZONE. return item.published.replace(tzinfo=get_fixed_timezone(42)) class TestFeedUrlFeed(TestAtomFeed): feed_url = "http://example.com/customfeedurl/" class MyCustomAtom1Feed(feedgenerator.Atom1Feed): """ Test of a custom feed generator class. """ def root_attributes(self): attrs = super().root_attributes() attrs["django"] = "rocks" return attrs def add_root_elements(self, handler): super().add_root_elements(handler) handler.addQuickElement("spam", "eggs") def item_attributes(self, item): attrs = super().item_attributes(item) attrs["bacon"] = "yum" return attrs def add_item_elements(self, handler, item): super().add_item_elements(handler, item) handler.addQuickElement("ministry", "silly walks") class TestCustomFeed(TestAtomFeed): feed_type = MyCustomAtom1Feed class TestSingleEnclosureAtomFeed(TestAtomFeed): """ A feed to test that Atom feeds work with a single enclosure. """ def item_enclosure_url(self, item): return "http://example.com" def item_enclosure_size(self, item): return 0 def item_mime_type(self, item): return "image/png" class TestMultipleEnclosureAtomFeed(TestAtomFeed): """ A feed to test that Atom feeds work with multiple enclosures. """ def item_enclosures(self, item): return [ feedgenerator.Enclosure("http://example.com/hello.png", "0", "image/png"), feedgenerator.Enclosure("http://example.com/goodbye.png", "0", "image/png"), ]
82b7de4af83edbacc73f5f858ff534838a180c24
64f31364d8defec1f8bfa1e16f02be43d050f275
/twilio/rest/supersim/v1/fleet.py
0017ff4e9228e5eec771fe7abea34111830ae728
[ "MIT" ]
permissive
twilio/twilio-python
198e841186b977d043f84b56abf83f62bd468563
9991fab510b8980b6e3580a7a96c1db504d90c38
refs/heads/main
2023-08-31T09:53:14.427790
2023-08-24T07:32:59
2023-08-24T07:32:59
307,447
1,621
749
MIT
2023-09-09T00:25:23
2009-09-15T05:33:57
Python
UTF-8
Python
false
false
35,221
py
fleet.py
r""" This code was generated by ___ _ _ _ _ _ _ ____ ____ ____ _ ____ ____ _ _ ____ ____ ____ ___ __ __ | | | | | | | | | __ | | |__| | __ | __ |___ |\ | |___ |__/ |__| | | | |__/ | |_|_| | |___ | |__| |__| | | | |__] |___ | \| |___ | \ | | | |__| | \ Twilio - Supersim This is the public Twilio REST API. NOTE: This class is auto generated by OpenAPI Generator. https://openapi-generator.tech Do not edit the class manually. """ from datetime import datetime from typing import Any, Dict, List, Optional, Union, Iterator, AsyncIterator from twilio.base import deserialize, values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.version import Version from twilio.base.page import Page class FleetInstance(InstanceResource): class DataMetering(object): PAYG = "payg" """ :ivar account_sid: The SID of the [Account](https://www.twilio.com/docs/iam/api/account) that created the Fleet resource. :ivar sid: The unique string that we created to identify the Fleet resource. :ivar unique_name: An application-defined string that uniquely identifies the resource. It can be used in place of the resource's `sid` in the URL to address the resource. :ivar date_created: The date and time in GMT when the resource was created specified in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format. :ivar date_updated: The date and time in GMT when the resource was last updated specified in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format. :ivar url: The absolute URL of the Fleet resource. :ivar data_enabled: Defines whether SIMs in the Fleet are capable of using 2G/3G/4G/LTE/CAT-M data connectivity. Defaults to `true`. :ivar data_limit: The total data usage (download and upload combined) in Megabytes that each Super SIM assigned to the Fleet can consume during a billing period (normally one month). Value must be between 1MB (1) and 2TB (2,000,000). Defaults to 250MB. :ivar data_metering: :ivar sms_commands_enabled: Defines whether SIMs in the Fleet are capable of sending and receiving machine-to-machine SMS via Commands. Defaults to `false`. :ivar sms_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an SMS from your device to the SMS Commands number. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :ivar sms_commands_method: A string representing the HTTP method to use when making a request to `sms_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. :ivar network_access_profile_sid: The SID of the Network Access Profile that controls which cellular networks the Fleet's SIMs can connect to. :ivar ip_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an IP Command from your device to a special IP address. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :ivar ip_commands_method: A string representing the HTTP method to use when making a request to `ip_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. 
""" def __init__( self, version: Version, payload: Dict[str, Any], sid: Optional[str] = None ): super().__init__(version) self.account_sid: Optional[str] = payload.get("account_sid") self.sid: Optional[str] = payload.get("sid") self.unique_name: Optional[str] = payload.get("unique_name") self.date_created: Optional[datetime] = deserialize.iso8601_datetime( payload.get("date_created") ) self.date_updated: Optional[datetime] = deserialize.iso8601_datetime( payload.get("date_updated") ) self.url: Optional[str] = payload.get("url") self.data_enabled: Optional[bool] = payload.get("data_enabled") self.data_limit: Optional[int] = deserialize.integer(payload.get("data_limit")) self.data_metering: Optional["FleetInstance.DataMetering"] = payload.get( "data_metering" ) self.sms_commands_enabled: Optional[bool] = payload.get("sms_commands_enabled") self.sms_commands_url: Optional[str] = payload.get("sms_commands_url") self.sms_commands_method: Optional[str] = payload.get("sms_commands_method") self.network_access_profile_sid: Optional[str] = payload.get( "network_access_profile_sid" ) self.ip_commands_url: Optional[str] = payload.get("ip_commands_url") self.ip_commands_method: Optional[str] = payload.get("ip_commands_method") self._solution = { "sid": sid or self.sid, } self._context: Optional[FleetContext] = None @property def _proxy(self) -> "FleetContext": """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FleetContext for this FleetInstance """ if self._context is None: self._context = FleetContext( self._version, sid=self._solution["sid"], ) return self._context def fetch(self) -> "FleetInstance": """ Fetch the FleetInstance :returns: The fetched FleetInstance """ return self._proxy.fetch() async def fetch_async(self) -> "FleetInstance": """ Asynchronous coroutine to fetch the FleetInstance :returns: The fetched FleetInstance """ return await self._proxy.fetch_async() def update( self, unique_name: Union[str, object] = values.unset, network_access_profile: Union[str, object] = values.unset, ip_commands_url: Union[str, object] = values.unset, ip_commands_method: Union[str, object] = values.unset, sms_commands_url: Union[str, object] = values.unset, sms_commands_method: Union[str, object] = values.unset, data_limit: Union[int, object] = values.unset, ) -> "FleetInstance": """ Update the FleetInstance :param unique_name: An application-defined string that uniquely identifies the resource. It can be used in place of the resource's `sid` in the URL to address the resource. :param network_access_profile: The SID or unique name of the Network Access Profile that will control which cellular networks the Fleet's SIMs can connect to. :param ip_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an IP Command from your device to a special IP address. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :param ip_commands_method: A string representing the HTTP method to use when making a request to `ip_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. :param sms_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an SMS from your device to the SMS Commands number. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. 
:param sms_commands_method: A string representing the HTTP method to use when making a request to `sms_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. :param data_limit: The total data usage (download and upload combined) in Megabytes that each Super SIM assigned to the Fleet can consume during a billing period (normally one month). Value must be between 1MB (1) and 2TB (2,000,000). Defaults to 1GB (1,000). :returns: The updated FleetInstance """ return self._proxy.update( unique_name=unique_name, network_access_profile=network_access_profile, ip_commands_url=ip_commands_url, ip_commands_method=ip_commands_method, sms_commands_url=sms_commands_url, sms_commands_method=sms_commands_method, data_limit=data_limit, ) async def update_async( self, unique_name: Union[str, object] = values.unset, network_access_profile: Union[str, object] = values.unset, ip_commands_url: Union[str, object] = values.unset, ip_commands_method: Union[str, object] = values.unset, sms_commands_url: Union[str, object] = values.unset, sms_commands_method: Union[str, object] = values.unset, data_limit: Union[int, object] = values.unset, ) -> "FleetInstance": """ Asynchronous coroutine to update the FleetInstance :param unique_name: An application-defined string that uniquely identifies the resource. It can be used in place of the resource's `sid` in the URL to address the resource. :param network_access_profile: The SID or unique name of the Network Access Profile that will control which cellular networks the Fleet's SIMs can connect to. :param ip_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an IP Command from your device to a special IP address. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :param ip_commands_method: A string representing the HTTP method to use when making a request to `ip_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. :param sms_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an SMS from your device to the SMS Commands number. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :param sms_commands_method: A string representing the HTTP method to use when making a request to `sms_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. :param data_limit: The total data usage (download and upload combined) in Megabytes that each Super SIM assigned to the Fleet can consume during a billing period (normally one month). Value must be between 1MB (1) and 2TB (2,000,000). Defaults to 1GB (1,000). :returns: The updated FleetInstance """ return await self._proxy.update_async( unique_name=unique_name, network_access_profile=network_access_profile, ip_commands_url=ip_commands_url, ip_commands_method=ip_commands_method, sms_commands_url=sms_commands_url, sms_commands_method=sms_commands_method, data_limit=data_limit, ) def __repr__(self) -> str: """ Provide a friendly representation :returns: Machine friendly representation """ context = " ".join("{}={}".format(k, v) for k, v in self._solution.items()) return "<Twilio.Supersim.V1.FleetInstance {}>".format(context) class FleetContext(InstanceContext): def __init__(self, version: Version, sid: str): """ Initialize the FleetContext :param version: Version that contains the resource :param sid: The SID of the Fleet resource to update. 
""" super().__init__(version) # Path Solution self._solution = { "sid": sid, } self._uri = "/Fleets/{sid}".format(**self._solution) def fetch(self) -> FleetInstance: """ Fetch the FleetInstance :returns: The fetched FleetInstance """ payload = self._version.fetch( method="GET", uri=self._uri, ) return FleetInstance( self._version, payload, sid=self._solution["sid"], ) async def fetch_async(self) -> FleetInstance: """ Asynchronous coroutine to fetch the FleetInstance :returns: The fetched FleetInstance """ payload = await self._version.fetch_async( method="GET", uri=self._uri, ) return FleetInstance( self._version, payload, sid=self._solution["sid"], ) def update( self, unique_name: Union[str, object] = values.unset, network_access_profile: Union[str, object] = values.unset, ip_commands_url: Union[str, object] = values.unset, ip_commands_method: Union[str, object] = values.unset, sms_commands_url: Union[str, object] = values.unset, sms_commands_method: Union[str, object] = values.unset, data_limit: Union[int, object] = values.unset, ) -> FleetInstance: """ Update the FleetInstance :param unique_name: An application-defined string that uniquely identifies the resource. It can be used in place of the resource's `sid` in the URL to address the resource. :param network_access_profile: The SID or unique name of the Network Access Profile that will control which cellular networks the Fleet's SIMs can connect to. :param ip_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an IP Command from your device to a special IP address. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :param ip_commands_method: A string representing the HTTP method to use when making a request to `ip_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. :param sms_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an SMS from your device to the SMS Commands number. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :param sms_commands_method: A string representing the HTTP method to use when making a request to `sms_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. :param data_limit: The total data usage (download and upload combined) in Megabytes that each Super SIM assigned to the Fleet can consume during a billing period (normally one month). Value must be between 1MB (1) and 2TB (2,000,000). Defaults to 1GB (1,000). 
:returns: The updated FleetInstance """ data = values.of( { "UniqueName": unique_name, "NetworkAccessProfile": network_access_profile, "IpCommandsUrl": ip_commands_url, "IpCommandsMethod": ip_commands_method, "SmsCommandsUrl": sms_commands_url, "SmsCommandsMethod": sms_commands_method, "DataLimit": data_limit, } ) payload = self._version.update( method="POST", uri=self._uri, data=data, ) return FleetInstance(self._version, payload, sid=self._solution["sid"]) async def update_async( self, unique_name: Union[str, object] = values.unset, network_access_profile: Union[str, object] = values.unset, ip_commands_url: Union[str, object] = values.unset, ip_commands_method: Union[str, object] = values.unset, sms_commands_url: Union[str, object] = values.unset, sms_commands_method: Union[str, object] = values.unset, data_limit: Union[int, object] = values.unset, ) -> FleetInstance: """ Asynchronous coroutine to update the FleetInstance :param unique_name: An application-defined string that uniquely identifies the resource. It can be used in place of the resource's `sid` in the URL to address the resource. :param network_access_profile: The SID or unique name of the Network Access Profile that will control which cellular networks the Fleet's SIMs can connect to. :param ip_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an IP Command from your device to a special IP address. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :param ip_commands_method: A string representing the HTTP method to use when making a request to `ip_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. :param sms_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an SMS from your device to the SMS Commands number. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :param sms_commands_method: A string representing the HTTP method to use when making a request to `sms_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. :param data_limit: The total data usage (download and upload combined) in Megabytes that each Super SIM assigned to the Fleet can consume during a billing period (normally one month). Value must be between 1MB (1) and 2TB (2,000,000). Defaults to 1GB (1,000). 
:returns: The updated FleetInstance """ data = values.of( { "UniqueName": unique_name, "NetworkAccessProfile": network_access_profile, "IpCommandsUrl": ip_commands_url, "IpCommandsMethod": ip_commands_method, "SmsCommandsUrl": sms_commands_url, "SmsCommandsMethod": sms_commands_method, "DataLimit": data_limit, } ) payload = await self._version.update_async( method="POST", uri=self._uri, data=data, ) return FleetInstance(self._version, payload, sid=self._solution["sid"]) def __repr__(self) -> str: """ Provide a friendly representation :returns: Machine friendly representation """ context = " ".join("{}={}".format(k, v) for k, v in self._solution.items()) return "<Twilio.Supersim.V1.FleetContext {}>".format(context) class FleetPage(Page): def get_instance(self, payload: Dict[str, Any]) -> FleetInstance: """ Build an instance of FleetInstance :param payload: Payload response from the API """ return FleetInstance(self._version, payload) def __repr__(self) -> str: """ Provide a friendly representation :returns: Machine friendly representation """ return "<Twilio.Supersim.V1.FleetPage>" class FleetList(ListResource): def __init__(self, version: Version): """ Initialize the FleetList :param version: Version that contains the resource """ super().__init__(version) self._uri = "/Fleets" def create( self, network_access_profile: str, unique_name: Union[str, object] = values.unset, data_enabled: Union[bool, object] = values.unset, data_limit: Union[int, object] = values.unset, ip_commands_url: Union[str, object] = values.unset, ip_commands_method: Union[str, object] = values.unset, sms_commands_enabled: Union[bool, object] = values.unset, sms_commands_url: Union[str, object] = values.unset, sms_commands_method: Union[str, object] = values.unset, ) -> FleetInstance: """ Create the FleetInstance :param network_access_profile: The SID or unique name of the Network Access Profile that will control which cellular networks the Fleet's SIMs can connect to. :param unique_name: An application-defined string that uniquely identifies the resource. It can be used in place of the resource's `sid` in the URL to address the resource. :param data_enabled: Defines whether SIMs in the Fleet are capable of using 2G/3G/4G/LTE/CAT-M data connectivity. Defaults to `true`. :param data_limit: The total data usage (download and upload combined) in Megabytes that each Super SIM assigned to the Fleet can consume during a billing period (normally one month). Value must be between 1MB (1) and 2TB (2,000,000). Defaults to 1GB (1,000). :param ip_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an IP Command from your device to a special IP address. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :param ip_commands_method: A string representing the HTTP method to use when making a request to `ip_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. :param sms_commands_enabled: Defines whether SIMs in the Fleet are capable of sending and receiving machine-to-machine SMS via Commands. Defaults to `true`. :param sms_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an SMS from your device to the SMS Commands number. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :param sms_commands_method: A string representing the HTTP method to use when making a request to `sms_commands_url`. Can be one of `POST` or `GET`. 
Defaults to `POST`. :returns: The created FleetInstance """ data = values.of( { "NetworkAccessProfile": network_access_profile, "UniqueName": unique_name, "DataEnabled": data_enabled, "DataLimit": data_limit, "IpCommandsUrl": ip_commands_url, "IpCommandsMethod": ip_commands_method, "SmsCommandsEnabled": sms_commands_enabled, "SmsCommandsUrl": sms_commands_url, "SmsCommandsMethod": sms_commands_method, } ) payload = self._version.create( method="POST", uri=self._uri, data=data, ) return FleetInstance(self._version, payload) async def create_async( self, network_access_profile: str, unique_name: Union[str, object] = values.unset, data_enabled: Union[bool, object] = values.unset, data_limit: Union[int, object] = values.unset, ip_commands_url: Union[str, object] = values.unset, ip_commands_method: Union[str, object] = values.unset, sms_commands_enabled: Union[bool, object] = values.unset, sms_commands_url: Union[str, object] = values.unset, sms_commands_method: Union[str, object] = values.unset, ) -> FleetInstance: """ Asynchronously create the FleetInstance :param network_access_profile: The SID or unique name of the Network Access Profile that will control which cellular networks the Fleet's SIMs can connect to. :param unique_name: An application-defined string that uniquely identifies the resource. It can be used in place of the resource's `sid` in the URL to address the resource. :param data_enabled: Defines whether SIMs in the Fleet are capable of using 2G/3G/4G/LTE/CAT-M data connectivity. Defaults to `true`. :param data_limit: The total data usage (download and upload combined) in Megabytes that each Super SIM assigned to the Fleet can consume during a billing period (normally one month). Value must be between 1MB (1) and 2TB (2,000,000). Defaults to 1GB (1,000). :param ip_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an IP Command from your device to a special IP address. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :param ip_commands_method: A string representing the HTTP method to use when making a request to `ip_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. :param sms_commands_enabled: Defines whether SIMs in the Fleet are capable of sending and receiving machine-to-machine SMS via Commands. Defaults to `true`. :param sms_commands_url: The URL that will receive a webhook when a Super SIM in the Fleet is used to send an SMS from your device to the SMS Commands number. Your server should respond with an HTTP status code in the 200 range; any response body will be ignored. :param sms_commands_method: A string representing the HTTP method to use when making a request to `sms_commands_url`. Can be one of `POST` or `GET`. Defaults to `POST`. 
:returns: The created FleetInstance """ data = values.of( { "NetworkAccessProfile": network_access_profile, "UniqueName": unique_name, "DataEnabled": data_enabled, "DataLimit": data_limit, "IpCommandsUrl": ip_commands_url, "IpCommandsMethod": ip_commands_method, "SmsCommandsEnabled": sms_commands_enabled, "SmsCommandsUrl": sms_commands_url, "SmsCommandsMethod": sms_commands_method, } ) payload = await self._version.create_async( method="POST", uri=self._uri, data=data, ) return FleetInstance(self._version, payload) def stream( self, network_access_profile: Union[str, object] = values.unset, limit: Optional[int] = None, page_size: Optional[int] = None, ) -> Iterator[FleetInstance]: """ Streams FleetInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param str network_access_profile: The SID or unique name of the Network Access Profile that controls which cellular networks the Fleet's SIMs can connect to. :param limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results """ limits = self._version.read_limits(limit, page_size) page = self.page( network_access_profile=network_access_profile, page_size=limits["page_size"] ) return self._version.stream(page, limits["limit"]) async def stream_async( self, network_access_profile: Union[str, object] = values.unset, limit: Optional[int] = None, page_size: Optional[int] = None, ) -> AsyncIterator[FleetInstance]: """ Asynchronously streams FleetInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param str network_access_profile: The SID or unique name of the Network Access Profile that controls which cellular networks the Fleet's SIMs can connect to. :param limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results """ limits = self._version.read_limits(limit, page_size) page = await self.page_async( network_access_profile=network_access_profile, page_size=limits["page_size"] ) return self._version.stream_async(page, limits["limit"]) def list( self, network_access_profile: Union[str, object] = values.unset, limit: Optional[int] = None, page_size: Optional[int] = None, ) -> List[FleetInstance]: """ Lists FleetInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param str network_access_profile: The SID or unique name of the Network Access Profile that controls which cellular networks the Fleet's SIMs can connect to. 
:param limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: list that will contain up to limit results """ return list( self.stream( network_access_profile=network_access_profile, limit=limit, page_size=page_size, ) ) async def list_async( self, network_access_profile: Union[str, object] = values.unset, limit: Optional[int] = None, page_size: Optional[int] = None, ) -> List[FleetInstance]: """ Asynchronously lists FleetInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param str network_access_profile: The SID or unique name of the Network Access Profile that controls which cellular networks the Fleet's SIMs can connect to. :param limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: list that will contain up to limit results """ return [ record async for record in await self.stream_async( network_access_profile=network_access_profile, limit=limit, page_size=page_size, ) ] def page( self, network_access_profile: Union[str, object] = values.unset, page_token: Union[str, object] = values.unset, page_number: Union[int, object] = values.unset, page_size: Union[int, object] = values.unset, ) -> FleetPage: """ Retrieve a single page of FleetInstance records from the API. Request is executed immediately :param network_access_profile: The SID or unique name of the Network Access Profile that controls which cellular networks the Fleet's SIMs can connect to. :param page_token: PageToken provided by the API :param page_number: Page Number, this value is simply for client state :param page_size: Number of records to return, defaults to 50 :returns: Page of FleetInstance """ data = values.of( { "NetworkAccessProfile": network_access_profile, "PageToken": page_token, "Page": page_number, "PageSize": page_size, } ) response = self._version.page(method="GET", uri=self._uri, params=data) return FleetPage(self._version, response) async def page_async( self, network_access_profile: Union[str, object] = values.unset, page_token: Union[str, object] = values.unset, page_number: Union[int, object] = values.unset, page_size: Union[int, object] = values.unset, ) -> FleetPage: """ Asynchronously retrieve a single page of FleetInstance records from the API. Request is executed immediately :param network_access_profile: The SID or unique name of the Network Access Profile that controls which cellular networks the Fleet's SIMs can connect to. 
:param page_token: PageToken provided by the API :param page_number: Page Number, this value is simply for client state :param page_size: Number of records to return, defaults to 50 :returns: Page of FleetInstance """ data = values.of( { "NetworkAccessProfile": network_access_profile, "PageToken": page_token, "Page": page_number, "PageSize": page_size, } ) response = await self._version.page_async( method="GET", uri=self._uri, params=data ) return FleetPage(self._version, response) def get_page(self, target_url: str) -> FleetPage: """ Retrieve a specific page of FleetInstance records from the API. Request is executed immediately :param target_url: API-generated URL for the requested results page :returns: Page of FleetInstance """ response = self._version.domain.twilio.request("GET", target_url) return FleetPage(self._version, response) async def get_page_async(self, target_url: str) -> FleetPage: """ Asynchronously retrieve a specific page of FleetInstance records from the API. Request is executed immediately :param target_url: API-generated URL for the requested results page :returns: Page of FleetInstance """ response = await self._version.domain.twilio.request_async("GET", target_url) return FleetPage(self._version, response) def get(self, sid: str) -> FleetContext: """ Constructs a FleetContext :param sid: The SID of the Fleet resource to update. """ return FleetContext(self._version, sid=sid) def __call__(self, sid: str) -> FleetContext: """ Constructs a FleetContext :param sid: The SID of the Fleet resource to update. """ return FleetContext(self._version, sid=sid) def __repr__(self) -> str: """ Provide a friendly representation :returns: Machine friendly representation """ return "<Twilio.Supersim.V1.FleetList>"
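
# --- Editor's note: a minimal usage sketch, not part of the generated file. ---
# It shows how the FleetList / FleetContext classes above are normally reached
# through the top-level twilio Client. The credentials, Fleet values, and the
# Network Access Profile SID below are placeholders, not real values.
#
#   from twilio.rest import Client
#
#   client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
#
#   # FleetList.create() -> POST /Fleets
#   fleet = client.supersim.v1.fleets.create(
#       network_access_profile="HAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
#       unique_name="my-fleet",
#   )
#
#   # FleetContext.update() -> POST /Fleets/{sid}
#   fleet = client.supersim.v1.fleets(fleet.sid).update(data_limit=500)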
f6f31601e12188dd400ea3ce445ca58618149f7c
a63d907ad63ba6705420a6fb2788196d1bd3763c
/src/api/dataflow/tests/test_uc/test_graph/test_unified_computing_graph.py
48fa0dcd61af95a225ec05b75ce70e603ee71062
[ "MIT" ]
permissive
Tencent/bk-base
a38461072811667dc2880a13a5232004fe771a4b
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
refs/heads/master
2022-07-30T04:24:53.370661
2022-04-02T10:30:55
2022-04-02T10:30:55
381,257,882
101
51
NOASSERTION
2022-04-02T10:30:56
2021-06-29T06:10:01
Python
UTF-8
Python
false
false
3,232
py
test_unified_computing_graph.py
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available. Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. BK-BASE 蓝鲸基础平台 is licensed under the MIT License. License for BK-BASE 蓝鲸基础平台: -------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from rest_framework.test import APITestCase from dataflow.uc.graph.unified_computing_graph import UnifiedComputingGraph class TestUnifiedComputingGraph(APITestCase): def test_set_chaining(self): graph = UnifiedComputingGraph() assert graph.chaining graph.set_chaining(False) assert not graph.chaining def test_add_node(self): node_conf = {"processing_id": "123"} graph = UnifiedComputingGraph() graph.add_node(node_conf) assert "123" in graph.nodes def test_add_edge(self): source_node_conf = {"processing_id": "123_source"} target_node_conf = {"processing_id": "123_target"} graph = UnifiedComputingGraph() graph.add_node(source_node_conf) graph.add_node(target_node_conf) graph.add_edge("123_source", "123_target") assert "123_target" == graph.nodes["123_source"].out_edges[0].target_id assert "123_source" == graph.nodes["123_target"].in_edges[0].source_id def test_get_job_graph(self): source_node_conf = { "processing_id": "123_source", "component_type": "spark", "processor_type": "code", "implement_type": "code", "programming_language": "python", } target_node_conf = { "processing_id": "123_target", "component_type": "spark", "processor_type": "code", "implement_type": "code", "programming_language": "python", } graph = UnifiedComputingGraph() graph.add_node(source_node_conf) graph.add_node(target_node_conf) graph.add_edge("123_source", "123_target") graph.add_head("123_source") job_graph = graph.get_job_graph() assert 2 == len(job_graph.vertices)
9f7d10d578db62ce7ff6837a9b93e7f9cb79dd76
f576f0ea3725d54bd2551883901b25b863fe6688
/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/resource_configuration.py
b41718715b3337ded997f8b76aaeacd6b46ae366
[ "LicenseRef-scancode-generic-cla", "MIT", "LGPL-2.1-or-later", "LicenseRef-scancode-python-cwi", "PSF-2.0", "LGPL-2.0-or-later", "GPL-3.0-or-later", "GPL-1.0-or-later", "LicenseRef-scancode-warranty-disclaimer", "LGPL-2.1-only", "Python-2.0", "MPL-2.0", "LicenseRef-scancode-other-copyleft", "HPND", "ODbL-1.0", "GPL-3.0-only", "ZPL-2.1", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause", "LicenseRef-scancode-free-unknown" ]
permissive
Azure/azure-sdk-for-python
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
c2ca191e736bb06bfbbbc9493e8325763ba990bb
refs/heads/main
2023-09-06T09:30:13.135012
2023-09-06T01:08:06
2023-09-06T01:08:06
4,127,088
4,046
2,755
MIT
2023-09-14T21:48:49
2012-04-24T16:46:12
Python
UTF-8
Python
false
false
4,041
py
resource_configuration.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import json import logging from typing import Any, Dict, Optional from azure.ai.ml._restclient.v2023_04_01_preview.models import ResourceConfiguration as RestResourceConfiguration from azure.ai.ml.constants._job.job import JobComputePropertyFields from azure.ai.ml.entities._mixins import DictMixin, RestTranslatableMixin module_logger = logging.getLogger(__name__) class ResourceConfiguration(RestTranslatableMixin, DictMixin): """Resource configuration for a job. This class should not be instantiated directly. Instead, use its subclasses. :keyword instance_count: The number of instances to use for the job. :paramtype instance_count: Optional[int] :keyword instance_type: The type of instance to use for the job. :paramtype instance_type: Optional[str] :keyword properties: The resource's property dictionary. :paramtype properties: Optional[dict[str, Any]] """ def __init__( self, *, instance_count: Optional[int] = None, instance_type: Optional[str] = None, properties: Optional[Dict[str, Any]] = None, **kwargs # pylint: disable=unused-argument ) -> None: self.instance_count = instance_count self.instance_type = instance_type self.properties = {} if properties is not None: for key, value in properties.items(): if key == JobComputePropertyFields.AISUPERCOMPUTER: self.properties[JobComputePropertyFields.SINGULARITY.lower()] = value else: self.properties[key] = value def _to_rest_object(self) -> RestResourceConfiguration: serialized_properties = {} if self.properties: for key, value in self.properties.items(): try: if ( key.lower() == JobComputePropertyFields.SINGULARITY.lower() or key.lower() == JobComputePropertyFields.AISUPERCOMPUTER.lower() ): # Map Singularity -> AISupercomputer in SDK until MFE does mapping key = JobComputePropertyFields.AISUPERCOMPUTER # recursively convert Ordered Dict to dictionary serialized_properties[key] = json.loads(json.dumps(value)) except Exception: # pylint: disable=broad-except pass return RestResourceConfiguration( instance_count=self.instance_count, instance_type=self.instance_type, properties=serialized_properties, ) @classmethod def _from_rest_object( # pylint: disable=arguments-renamed cls, rest_obj: Optional[RestResourceConfiguration] ) -> Optional["ResourceConfiguration"]: if rest_obj is None: return None return ResourceConfiguration( instance_count=rest_obj.instance_count, instance_type=rest_obj.instance_type, properties=rest_obj.properties, deserialize_properties=True, ) def __eq__(self, other: object) -> bool: if not isinstance(other, ResourceConfiguration): return NotImplemented return self.instance_count == other.instance_count and self.instance_type == other.instance_type def __ne__(self, other: object) -> bool: if not isinstance(other, ResourceConfiguration): return NotImplemented return not self.__eq__(other) def _merge_with(self, other: "ResourceConfiguration") -> None: if other: if other.instance_count: self.instance_count = other.instance_count if other.instance_type: self.instance_type = other.instance_type if other.properties: self.properties = other.properties
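
# --- Editor's note: a minimal sketch, not part of the SDK source. ---
# It exercises only what is defined above: construction, merging, and the
# REST translation (including the Singularity -> AISupercomputer key mapping).
# The instance type string is an illustrative placeholder.
#
#   base = ResourceConfiguration(instance_count=1, instance_type="STANDARD_D2_V2")
#   override = ResourceConfiguration(instance_count=4)
#   base._merge_with(override)     # instance_count becomes 4, instance_type kept
#   rest = base._to_rest_object()  # RestResourceConfiguration for the service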
5144410eadc40660fe339b11f0847daf50739bbd
6189f34eff2831e3e727cd7c5e43bc5b591adffc
/WebMirror/management/rss_parser_funcs/feed_parse_extractChrononTranslations.py
f967f0c8613e3c88ba7b7aae509599d9a47e71a2
[ "BSD-3-Clause" ]
permissive
fake-name/ReadableWebProxy
24603660b204a9e7965cfdd4a942ff62d7711e27
ca2e086818433abc08c014dd06bfd22d4985ea2a
refs/heads/master
2023-09-04T03:54:50.043051
2023-08-26T16:08:46
2023-08-26T16:08:46
39,611,770
207
20
BSD-3-Clause
2023-09-11T15:48:15
2015-07-24T04:30:43
Python
UTF-8
Python
false
false
1,651
py
feed_parse_extractChrononTranslations.py
def extractChrononTranslations(item): """ """ vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol or frag) or 'preview' in item['title'].lower(): return None item['title'] = item['title'].replace('’', '') if 'Weapons cheat'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'Modern weapons cheat in another world', vol, chp, frag=frag, postfix=postfix) if 'Heavenly Tribulation'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'Heavenly Tribulation', vol, chp, frag=frag, postfix=postfix) if 'I can speak'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'I Can Speak with Animals and Demons', vol, chp, frag=frag, postfix=postfix) if 'I Bought a Girl'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'I Bought a Girl', vol, chp, frag=frag, postfix=postfix) if 'Girl Corps'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'Girl Corps', vol, chp, frag=frag, postfix=postfix) if 'Modern Weapons'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'Modern weapons cheat in another world', vol, chp, frag=frag, postfix=postfix) if 'Upper World'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'Reincarnation ~ From the lower world to the upper world', vol, chp, frag=frag, postfix=postfix) if 'I work as a healer'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, "I Work As A Healer In Another World's Labyrinth City", vol, chp, frag=frag, postfix=postfix) return False
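
# --- Editor's note, not part of the original module: this extractor expects an
# RSS item dict with at least a 'title' key and relies on two helpers defined
# elsewhere in the project (extractVolChapterFragmentPostfix and
# buildReleaseMessageWithType), neither of which is shown in this file. Roughly:
#
#   item = {'title': 'Girl Corps chapter 12'}
#   msg = extractChrononTranslations(item)  # release-message dict, or False/None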
a34e7410e522eb674682abd5dd5eebc8cf6d3306
975b2d421d3661e6770b601929d5f11d981d8985
/msgraph/generated/models/security/ediscovery_estimate_operation.py
c73cc3fab2c682f0a34b738213f71f16c9af6d23
[ "MIT" ]
permissive
microsoftgraph/msgraph-sdk-python
a7c551b85daadeebf76ec4ae12668664ea639b42
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
refs/heads/main
2023-09-03T21:45:27.989672
2023-08-31T06:22:18
2023-08-31T06:22:18
534,665,999
135
18
MIT
2023-09-14T11:04:11
2022-09-09T14:00:17
Python
UTF-8
Python
false
false
3,923
py
ediscovery_estimate_operation.py
from __future__ import annotations
from dataclasses import dataclass, field
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union

if TYPE_CHECKING:
    from .case_operation import CaseOperation
    from .ediscovery_search import EdiscoverySearch

from .case_operation import CaseOperation

@dataclass
class EdiscoveryEstimateOperation(CaseOperation):
    # The estimated count of items for the search that matched the content query.
    indexed_item_count: Optional[int] = None
    # The estimated size of items for the search that matched the content query.
    indexed_items_size: Optional[int] = None
    # The number of mailboxes that had search hits.
    mailbox_count: Optional[int] = None
    # The OdataType property
    odata_type: Optional[str] = None
    # eDiscovery search.
    search: Optional[EdiscoverySearch] = None
    # The number of sites that had search hits.
    site_count: Optional[int] = None
    # The estimated count of unindexed items for the collection.
    unindexed_item_count: Optional[int] = None
    # The estimated size of unindexed items for the collection.
    unindexed_items_size: Optional[int] = None

    @staticmethod
    def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> EdiscoveryEstimateOperation:
        """
        Creates a new instance of the appropriate class based on discriminator value
        Args:
            parse_node: The parse node to use to read the discriminator value and create the object
        Returns: EdiscoveryEstimateOperation
        """
        if not parse_node:
            raise TypeError("parse_node cannot be null.")
        return EdiscoveryEstimateOperation()

    def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
        """
        The deserialization information for the current model
        Returns: Dict[str, Callable[[ParseNode], None]]
        """
        from .case_operation import CaseOperation
        from .ediscovery_search import EdiscoverySearch

        fields: Dict[str, Callable[[Any], None]] = {
            "indexedItemCount": lambda n : setattr(self, 'indexed_item_count', n.get_int_value()),
            "indexedItemsSize": lambda n : setattr(self, 'indexed_items_size', n.get_int_value()),
            "mailboxCount": lambda n : setattr(self, 'mailbox_count', n.get_int_value()),
            "search": lambda n : setattr(self, 'search', n.get_object_value(EdiscoverySearch)),
            "siteCount": lambda n : setattr(self, 'site_count', n.get_int_value()),
            "unindexedItemCount": lambda n : setattr(self, 'unindexed_item_count', n.get_int_value()),
            "unindexedItemsSize": lambda n : setattr(self, 'unindexed_items_size', n.get_int_value()),
        }
        super_fields = super().get_field_deserializers()
        fields.update(super_fields)
        return fields

    def serialize(self,writer: SerializationWriter) -> None:
        """
        Serializes information the current object
        Args:
            writer: Serialization writer to use to serialize this model
        """
        if not writer:
            raise TypeError("writer cannot be null.")
        super().serialize(writer)
        writer.write_int_value("indexedItemCount", self.indexed_item_count)
        writer.write_int_value("indexedItemsSize", self.indexed_items_size)
        writer.write_int_value("mailboxCount", self.mailbox_count)
        writer.write_object_value("search", self.search)
        writer.write_int_value("siteCount", self.site_count)
        writer.write_int_value("unindexedItemCount", self.unindexed_item_count)
        writer.write_int_value("unindexedItemsSize", self.unindexed_items_size)
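
# --- Editor's note: a small sketch, not part of the generated model. ---
# The dataclass can be populated directly; (de)serialization is normally driven
# by kiota's ParseNode / SerializationWriter machinery via the request adapter.
#
#   op = EdiscoveryEstimateOperation()
#   op.indexed_item_count = 120
#   op.site_count = 3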
d6c7e9474c86227d0e7af968125e3ae2d38e998b
a5a99f646e371b45974a6fb6ccc06b0a674818f2
/Validation/RecoEgamma/python/egammaPostValidation_cff.py
40ddc0c77bf59eb178f5eb33ef90e56785459bd0
[ "Apache-2.0" ]
permissive
cms-sw/cmssw
4ecd2c1105d59c66d385551230542c6615b9ab58
19c178740257eb48367778593da55dcad08b7a4f
refs/heads/master
2023-08-23T21:57:42.491143
2023-08-22T20:22:40
2023-08-22T20:22:40
10,969,551
1,006
3,696
Apache-2.0
2023-09-14T19:14:28
2013-06-26T14:09:07
C++
UTF-8
Python
false
false
262
py
egammaPostValidation_cff.py
import FWCore.ParameterSet.Config as cms from Validation.RecoEgamma.photonPostProcessor_cff import * from Validation.RecoEgamma.electronPostValidationSequence_cff import * egammaPostValidation = cms.Sequence(photonPostProcessor+electronPostValidationSequence)
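
# --- Editor's note: a minimal usage sketch, not part of the original config. ---
# The sequence is meant to be scheduled inside a cms.Process; the process and
# path names below are illustrative assumptions only.
#
#   process = cms.Process("EgammaPostVal")
#   process.load("Validation.RecoEgamma.egammaPostValidation_cff")
#   process.postValidationPath = cms.Path(process.egammaPostValidation)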
50092aa1fe288ba143ead833c788902d77ea516d
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
/tests/st/ops/gpu/test_dataformatdimmap_op.py
9ceda9b18b843fffd51576b3168b1c51dcbe4123
[ "Apache-2.0", "LicenseRef-scancode-proprietary-license", "MPL-1.0", "OpenSSL", "LGPL-3.0-only", "LicenseRef-scancode-warranty-disclaimer", "BSD-3-Clause-Open-MPI", "MIT", "MPL-2.0-no-copyleft-exception", "NTP", "BSD-3-Clause", "GPL-1.0-or-later", "0BSD", "MPL-2.0", "LicenseRef-scancode-free-unknown", "AGPL-3.0-only", "Libpng", "MPL-1.1", "IJG", "GPL-2.0-only", "BSL-1.0", "Zlib", "LicenseRef-scancode-public-domain", "LicenseRef-scancode-python-cwi", "BSD-2-Clause", "LicenseRef-scancode-gary-s-brown", "LGPL-2.1-only", "LicenseRef-scancode-other-permissive", "Python-2.0", "LicenseRef-scancode-mit-nagy", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-unknown-license-reference", "Unlicense" ]
permissive
mindspore-ai/mindspore
ca7d5bb51a3451c2705ff2e583a740589d80393b
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
refs/heads/master
2023-07-29T09:17:11.051569
2023-07-17T13:14:15
2023-07-17T13:14:15
239,714,835
4,178
768
Apache-2.0
2023-07-26T22:31:11
2020-02-11T08:43:48
C++
UTF-8
Python
false
false
3,472
py
test_dataformatdimmap_op.py
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

from mindspore.common.api import jit
from mindspore.common.api import _pynative_executor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops.functional import vmap
from mindspore import Tensor
from mindspore import context


def np_all_close_with_loss(out, expect):
    """np_all_close_with_loss"""
    return np.allclose(out, expect, 0.0005, 0.0005, equal_nan=True)


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize("data_type", [np.int32, np.int64])
def test_data_format_dim_map_gpu(data_type):
    """
    Feature: DataFormatDimMapNet gpu kernel.
    Description: test the correctness of DataFormatDimMapNet gpu kernel.
    Expectation: Success.
    """
    x_np_1_gpu = np.array([-4, -3, -2, -1, 0, 1, 2, 3]).astype(data_type)

    # Default mapping is NHWC -> NCHW: N->0, H->2, W->3, C->1.
    output_1_gpu = P.DataFormatDimMap()(Tensor(x_np_1_gpu))
    output_1_expect_gpu = np.array([0, 2, 3, 1, 0, 2, 3, 1]).astype(data_type)
    assert np.allclose(output_1_gpu.asnumpy(), output_1_expect_gpu)
    output_2_gpu = P.DataFormatDimMap(src_format="NHWC", dst_format="NHWC")(Tensor(x_np_1_gpu))
    output_2_expect_gpu = np.array([0, 1, 2, 3, 0, 1, 2, 3]).astype(data_type)
    assert np.allclose(output_2_gpu.asnumpy(), output_2_expect_gpu)
    # NCHW -> NHWC: N->0, C->3, H->1, W->2.
    output_3_gpu = P.DataFormatDimMap(src_format="NCHW", dst_format="NHWC")(Tensor(x_np_1_gpu))
    output_3_expect_gpu = np.array([0, 3, 1, 2, 0, 3, 1, 2]).astype(data_type)
    assert np.allclose(output_3_gpu.asnumpy(), output_3_expect_gpu)


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize("data_type", [np.int32, np.int64])
def test_data_format_dim_map_vmap_gpu(data_type):
    """
    Feature: DataFormatDimMapNet gpu kernel
    Description: test the correctness of DataFormatDimMapNet gpu kernel vmap feature.
    Expectation: Success.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

    def data_format_dim_map_fun_gpu(x):
        """data_format_dim_map_fun_gpu"""
        return P.DataFormatDimMap()(x)

    x_np_gpu = np.random.randint(low=-4, high=4, size=(100, 100)).astype(data_type)
    x_gpu = Tensor(x_np_gpu)
    x_gpu = F.sub(x_gpu, 0)
    output_vmap_gpu = vmap(data_format_dim_map_fun_gpu, in_axes=(0,))(x_gpu)
    _pynative_executor.sync()

    @jit
    def manually_batched_gpu(xs):
        """manually_batched_gpu"""
        output_gpu = []
        for i in range(xs.shape[0]):
            output_gpu.append(data_format_dim_map_fun_gpu(xs[i]))
        return F.stack(output_gpu)

    output_manually_gpu = manually_batched_gpu(x_gpu)
    _pynative_executor.sync()
    assert np_all_close_with_loss(output_vmap_gpu.asnumpy(), output_manually_gpu.asnumpy())
a33b9f722c947aa42f8d157c86d89cce4d691719
c3fb19c2e228fd1d32516fe64f38a9006772d260
/botfw/liquid/trade.py
559e2c54f6bee759c5b9f2571429735a75479e9f
[ "MIT" ]
permissive
penta2019/btc_bot_framework
0642725cfd5c7b9f1c4e011f30b292cc4fc7ba50
3793c4c1d170db31a8017096f16946610c0a062e
refs/heads/master
2022-01-24T01:25:17.346272
2022-01-18T10:29:28
2022-01-18T10:29:28
224,544,310
131
43
MIT
2022-01-18T10:29:29
2019-11-28T01:11:14
Python
UTF-8
Python
false
false
724
py
trade.py
import json from ..base.trade import TradeBase from .websocket import LiquidWebsocket class LiquidTrade(TradeBase): def __init__(self, symbol, ws=None): super().__init__() self.symbol = symbol self.ws = ws or LiquidWebsocket() market_id = self.symbol.replace('/', '').lower() self.ws.subscribe( f'execution_details_cash_{market_id}', self.__on_message) def __on_message(self, msg): data = json.loads(msg['data']) ts = data['created_at'] price = data['price'] size = float(data['quantity']) if data['taker_side'] == 'sell': size *= -1 self.ltp = price self._trigger_callback(ts, price, size)
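
# --- Editor's note: a minimal sketch, not part of the original module. ---
# LiquidTrade subscribes itself on construction; ticks reach consumers through
# the callback mechanism inherited from TradeBase. Its registration method is
# defined elsewhere in the framework, so the name below is an assumption:
#
#   trade = LiquidTrade('BTC/JPY')
#   trade.add_callback(lambda ts, price, size: print(ts, price, size))  # hypothetical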
413f72421274506c2819fa70bb1d4fc7d31e2e5a
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
/alipay/aop/api/domain/Command.py
060ae5d99343502c16d76ca8ee07e10f3fd3b691
[ "Apache-2.0" ]
permissive
alipay/alipay-sdk-python-all
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
1fad300587c9e7e099747305ba9077d4cd7afde9
refs/heads/master
2023-08-27T21:35:01.778771
2023-08-23T07:12:26
2023-08-23T07:12:26
133,338,689
247
70
Apache-2.0
2023-04-25T04:54:02
2018-05-14T09:40:54
Python
UTF-8
Python
false
false
2,173
py
Command.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * class Command(object): def __init__(self): self._commander = None self._content = None self._executor = None self._key = None @property def commander(self): return self._commander @commander.setter def commander(self, value): self._commander = value @property def content(self): return self._content @content.setter def content(self, value): self._content = value @property def executor(self): return self._executor @executor.setter def executor(self, value): self._executor = value @property def key(self): return self._key @key.setter def key(self, value): self._key = value def to_alipay_dict(self): params = dict() if self.commander: if hasattr(self.commander, 'to_alipay_dict'): params['commander'] = self.commander.to_alipay_dict() else: params['commander'] = self.commander if self.content: if hasattr(self.content, 'to_alipay_dict'): params['content'] = self.content.to_alipay_dict() else: params['content'] = self.content if self.executor: if hasattr(self.executor, 'to_alipay_dict'): params['executor'] = self.executor.to_alipay_dict() else: params['executor'] = self.executor if self.key: if hasattr(self.key, 'to_alipay_dict'): params['key'] = self.key.to_alipay_dict() else: params['key'] = self.key return params @staticmethod def from_alipay_dict(d): if not d: return None o = Command() if 'commander' in d: o.commander = d['commander'] if 'content' in d: o.content = d['content'] if 'executor' in d: o.executor = d['executor'] if 'key' in d: o.key = d['key'] return o
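
# --- Editor's note: a round-trip sketch using only the methods defined above,
# not part of the generated class. ---
#
#   cmd = Command()
#   cmd.key = 'restart'
#   cmd.content = '{"delay": 5}'
#   d = cmd.to_alipay_dict()            # {'key': 'restart', 'content': '...'}
#   same = Command.from_alipay_dict(d)  # rebuilds an equivalent Command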
d3c899446b439c28c9780aa8e0d7767f1fb755c1
3c41443364da8b44c74dce08ef94a1acd1b66b3e
/osf/models/action.py
d9ad78afa840493d60e7c2e04052dc402634c60c
[ "BSD-3-Clause", "MIT", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-warranty-disclaimer", "AGPL-3.0-only", "LGPL-2.0-or-later", "LicenseRef-scancode-proprietary-license", "MPL-1.1", "CPAL-1.0", "LicenseRef-scancode-unknown-license-reference", "BSD-2-Clause", "Apache-2.0" ]
permissive
CenterForOpenScience/osf.io
71d9540be7989f7118a33e15bc4a6ce2d2492ac1
a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e
refs/heads/develop
2023-09-04T03:21:14.970917
2023-08-31T14:49:20
2023-08-31T14:49:20
10,199,599
683
390
Apache-2.0
2023-09-14T17:07:52
2013-05-21T15:53:37
Python
UTF-8
Python
false
false
3,616
py
action.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models

from osf.models.base import BaseModel, ObjectIDMixin
from osf.utils.workflows import (
    ApprovalStates,
    DefaultStates,
    DefaultTriggers,
    ReviewStates,
    ReviewTriggers,
    RegistrationModerationTriggers,
    RegistrationModerationStates,
    SchemaResponseTriggers,
    CollectionSubmissionStates,
    CollectionSubmissionsTriggers,
)
from osf.utils import permissions
from osf.utils.fields import NonNaiveDateTimeField


class BaseAction(ObjectIDMixin, BaseModel):
    class Meta:
        abstract = True

    creator = models.ForeignKey('OSFUser', related_name='+', on_delete=models.CASCADE)

    trigger = models.CharField(max_length=31, choices=DefaultTriggers.choices())
    from_state = models.CharField(max_length=31, choices=DefaultStates.choices())
    to_state = models.CharField(max_length=31, choices=DefaultStates.choices())

    comment = models.TextField(blank=True)

    is_deleted = models.BooleanField(default=False)
    auto = models.BooleanField(default=False)

    @property
    def target(self):
        raise NotImplementedError()


class ReviewAction(BaseAction):
    target = models.ForeignKey('Preprint', related_name='actions', on_delete=models.CASCADE)

    trigger = models.CharField(max_length=31, choices=ReviewTriggers.choices())
    from_state = models.CharField(max_length=31, choices=ReviewStates.choices())
    to_state = models.CharField(max_length=31, choices=ReviewStates.choices())


class NodeRequestAction(BaseAction):
    target = models.ForeignKey('NodeRequest', related_name='actions', on_delete=models.CASCADE)
    permissions = models.CharField(
        max_length=5,
        choices=[(permission, permission.title()) for permission in permissions.API_CONTRIBUTOR_PERMISSIONS],
        default=permissions.READ
    )
    visible = models.BooleanField(default=True)


class PreprintRequestAction(BaseAction):
    target = models.ForeignKey('PreprintRequest', related_name='actions', on_delete=models.CASCADE)


class RegistrationAction(BaseAction):
    target = models.ForeignKey('Registration', related_name='actions', on_delete=models.CASCADE)

    trigger = models.CharField(
        max_length=31, choices=RegistrationModerationTriggers.char_field_choices())
    from_state = models.CharField(
        max_length=31, choices=RegistrationModerationStates.char_field_choices())
    to_state = models.CharField(
        max_length=31, choices=RegistrationModerationStates.char_field_choices())


class SchemaResponseAction(BaseAction):
    target = models.ForeignKey('SchemaResponse', related_name='actions', on_delete=models.CASCADE)
    trigger = models.CharField(max_length=31, choices=SchemaResponseTriggers.char_field_choices())
    from_state = models.CharField(max_length=31, choices=ApprovalStates.char_field_choices())
    to_state = models.CharField(max_length=31, choices=ApprovalStates.char_field_choices())


class CollectionSubmissionAction(ObjectIDMixin, BaseModel):
    creator = models.ForeignKey('OSFUser', related_name='+', on_delete=models.CASCADE)
    target = models.ForeignKey('CollectionSubmission', related_name='actions', on_delete=models.CASCADE)
    trigger = models.IntegerField(choices=CollectionSubmissionsTriggers.int_field_choices())
    from_state = models.IntegerField(choices=CollectionSubmissionStates.int_field_choices())
    to_state = models.IntegerField(choices=CollectionSubmissionStates.int_field_choices())
    comment = models.TextField(blank=True)
    deleted = NonNaiveDateTimeField(null=True, blank=True)
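
# --- Editor's note: a minimal ORM sketch, not part of the original module. ---
# Creating an action requires a configured Django environment and existing
# related rows; the trigger/state members below are illustrative assumptions.
#
#   ReviewAction.objects.create(
#       creator=user,
#       target=preprint,
#       trigger=ReviewTriggers.ACCEPT.value,
#       from_state=ReviewStates.PENDING.value,
#       to_state=ReviewStates.ACCEPTED.value,
#   )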
a447fe4a49bbf2217fc76fc80a2aacb2feae37f6
056f10d9f99506bb9b5abf7e91633f3ad0c76061
/CheckMembership.py
ca0020f47c3018de73b39bc1201184f649d3b929
[]
no_license
taers232c/GAM-Scripts3
5f171b620b2ac19514ab7198e39720f59a60ba9e
a59c5adb7b03b6bc9a4e054b9b41eabae2779f13
refs/heads/master
2023-08-31T06:43:57.645295
2023-08-22T17:32:21
2023-08-22T17:32:21
108,921,186
176
46
null
2023-02-28T15:52:32
2017-10-30T23:48:44
Python
UTF-8
Python
false
false
2,579
py
CheckMembership.py
#!/usr/bin/env python3
"""
# Purpose: For a list of group members and a list of users, produce a CSV file that lists the users that are not group members
# Note: This script can use Basic or Advanced GAM:
#  https://github.com/GAM-team/GAM
#  https://github.com/taers232c/GAMADV-XTD3
# Python: Use python or python3 below as appropriate to your system; verify that you have version 3
#  $ python -V   or   python3 -V
#  Python 3.x.y
# Usage:
# 1: Get group members
#  $ gam group <GroupName> print > Members.csv
# 2: Get users; replace <UserTypeEntity> as desired, e.g. ou /Teachers
#  $ gam <UserTypeEntity> print > Users.csv
# 3: Make a CSV file NonMembers.csv that lists the users that are not group members
#  $ python3 CheckMembership.py Members.csv Users.csv NonMembers.csv
"""

import csv
import sys

# Default is that Members.csv does not have a header row; the following sets a field name
MembersEmailField = 'primaryEmail'
MembersFieldNames = [MembersEmailField]
# If Members.csv does have a header row, edit the following line and remove the # from both lines
#MembersEmailField = 'primaryEmail'
#MembersFieldNames = None

# Default is that Users.csv does not have a header row; the following sets a field name
UsersEmailField = 'primaryEmail'
UsersFieldNames = [UsersEmailField]
# If Users.csv does have a header row, edit the following line and remove the # from both lines
#UsersEmailField = 'primaryEmail'
#UsersFieldNames = None

# Edit the following row if you want a different header for NonMembers.csv
NonMembersEmailField = 'primaryEmail'
NonMembersFieldNames = [NonMembersEmailField]

QUOTE_CHAR = '"' # Adjust as needed
LINE_TERMINATOR = '\n' # On Windows, you probably want '\r\n'

MembersSet = set()
inputFile = open(sys.argv[1], 'r', encoding='utf-8')
inputCSV = csv.DictReader(inputFile, fieldnames=MembersFieldNames, quotechar=QUOTE_CHAR)
for row in inputCSV:
  MembersSet.add(row[MembersEmailField])
inputFile.close()

inputFile = open(sys.argv[2], 'r', encoding='utf-8')
inputCSV = csv.DictReader(inputFile, fieldnames=UsersFieldNames, quotechar=QUOTE_CHAR)

# The output file is sys.argv[3]; '-' means write to stdout
if (len(sys.argv) > 3) and (sys.argv[3] != '-'):
  outputFile = open(sys.argv[3], 'w', encoding='utf-8', newline='')
else:
  outputFile = sys.stdout
outputCSV = csv.DictWriter(outputFile, NonMembersFieldNames, lineterminator=LINE_TERMINATOR, quotechar=QUOTE_CHAR)
outputCSV.writeheader()

for row in inputCSV:
  if row[UsersEmailField] not in MembersSet:
    outputCSV.writerow({NonMembersEmailField: row[UsersEmailField]})

inputFile.close()
if outputFile != sys.stdout:
  outputFile.close()
771aedbd9f2a08f3fcb2bc1dd2b98a7563d8a52e
064adb670110b02615b9a5feceb39bd6cc56f640
/example_services/benchmark/client.py
2b8e9d676d3b0912d7e0ba57b932b054d2faacaf
[ "BSD-2-Clause" ]
permissive
almarklein/mypaas
3e7f00562e83d936f3feadc9f9fd75f448252a8b
d3f7559f66cda89245e55416a2e704234a65da6f
refs/heads/main
2023-05-25T01:33:30.749796
2023-05-10T11:29:33
2023-05-10T11:29:33
219,710,639
334
18
BSD-2-Clause
2023-05-10T11:29:34
2019-11-05T09:49:45
Python
UTF-8
Python
false
false
1,546
py
client.py
""" Run this from your workstation, or anywhere, to connect to your server and stress it! """ import time import random import threading import requests names = [f"foo{i}" for i in range(1000)] ## # Run this once to create a bunch of databases at the server if True: for name in names: print(name) items = [dict(key=str(i), value=random.random()) for i in range(10000)] r = requests.put( f"https://mypaas2.canpute.com/dbtest/sqlite/{name}", json={"items": items} ) assert r.status_code == 200 ## N = 99 nthreads = 20 counters = [0 for i in range(nthreads)] statuses = [0 for i in range(nthreads)] def print_status(clear=True): text = " ".join([f"{x:02d}" for x in counters]) if clear: text = "\b" * len(text) + text print(text, end="") def make_query_some(thread_index): def query_some(): try: for i in range(N): user = names[random.randint(0, len(names) - 1)] r = requests.get("https://mypaas2.canpute.com/dbtest/sqlite/" + user) assert r.status_code == 200 counters[thread_index] += 1 finally: statuses[thread_index] = 1 return query_some threads = [threading.Thread(target=make_query_some(i)) for i in range(nthreads)] t0 = time.perf_counter() for t in threads: t.start() print_status(False) while not all(statuses): time.sleep(0.2) print_status() print() t1 = time.perf_counter() print((N * nthreads) / (t1 - t0), "RPS")
9ecee25f2e4b5013575b5bb0c5434b99c1cf4d3f
01857ef455ea60eccaf03b5a9059ec83e9803c2e
/nicegui/tailwind_types/backdrop_grayscale.py
95c09b97621f1b2cb3aa3fdebbeadfb3f89227c9
[ "MIT" ]
permissive
zauberzeug/nicegui
f08312cc1f393deca79e0e84a2506d3a35efff16
c61b1315f29d51e26cc1168207f5616b302f8df0
refs/heads/main
2023-08-18T18:09:30.937322
2023-08-18T15:04:00
2023-08-18T15:04:00
365,250,183
5,128
271
MIT
2023-09-14T01:50:56
2021-05-07T13:55:05
Python
UTF-8
Python
false
false
76
py
backdrop_grayscale.py
from typing import Literal BackdropGrayscale = Literal[ '0', '', ]
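
# --- Editor's note, not part of the original file: this Literal backs the
# generated Tailwind helper API; assuming the matching method name on the
# Tailwind class, usage looks roughly like:
#
#   ui.label('frosted').tailwind.backdrop_grayscale('0')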
3e45709b997f9bce07d5a577fe2e96503c692319
5105403f2b75990654519438d8ceabcf80962ebf
/tests/unit/bokeh/plotting/test__stack.py
5668bbce2c00f738c34ceb3cf1879d9d7197459c
[ "BSD-3-Clause" ]
permissive
bokeh/bokeh
ed1d81eb07d27d27c6710c9fec9114886047f528
310cb2cbeabc4c4b8180cbda566df16039737cdc
refs/heads/branch-3.3
2023-08-31T23:53:06.537061
2023-08-30T03:43:05
2023-08-30T03:43:05
3,834,332
17,174
5,251
BSD-3-Clause
2023-09-14T11:37:23
2012-03-26T15:40:01
Python
UTF-8
Python
false
false
9,034
py
test__stack.py
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip

import pytest ; pytest

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

# Module under test
import bokeh.plotting._stack as bps # isort:skip

#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------

class Test_single_stack:
    def test_raises_when_spec_in_kwargs(self) -> None:
        with pytest.raises(ValueError) as e:
            bps.single_stack(['a', 'b'], 'foo', foo=10)
        assert str(e.value).endswith("Stack property 'foo' cannot appear in keyword args")

    def test_raises_when_kwargs_list_lengths_differ(self) -> None:
        with pytest.raises(ValueError) as e:
            bps.single_stack(['a', 'b'], 'foo', baz=[1, 2], quux=[3,4,5])
        assert str(e.value).endswith("Keyword argument sequences for broadcasting must all be the same lengths. Got lengths: [2, 3]")

    def test_raises_when_kwargs_list_lengths_and_stackers_lengths_differ(self) -> None:
        with pytest.raises(ValueError) as e:
            bps.single_stack(['a', 'b', 'c'], 'foo', baz=[1, 2], quux=[3,4])
        assert str(e.value).endswith("Keyword argument sequences for broadcasting must be the same length as stackers")

    def test_broadcast_with_no_kwargs(self) -> None:
        stackers = ['a', 'b', 'c', 'd']
        kws = bps.single_stack(stackers, 'start')
        assert len(kws) == len(stackers)
        for i, kw in enumerate(kws):
            assert {"start", "name"} == set(kw.keys())
            assert list(kw["start"]["expr"].fields) == stackers[: i + 1]

    def test_broadcast_with_scalar_kwargs(self) -> None:
        stackers = ['a', 'b', 'c', 'd']
        kws = bps.single_stack(stackers, 'start', foo=10, bar="baz")
        assert len(kws) == len(stackers)
        for i, kw in enumerate(kws):
            assert {"start", "foo", "bar", "name"} == set(kw.keys())
            assert list(kw["start"]["expr"].fields) == stackers[: i + 1]
            assert kw["foo"] == 10
            assert kw["bar"] == "baz"
            assert kw["name"] == stackers[i]

    def test_broadcast_with_list_kwargs(self) -> None:
        stackers = ['a', 'b', 'c', 'd']
        kws = bps.single_stack(stackers, 'start', foo=[10, 20, 30, 40], bar="baz")
        assert len(kws) == len(stackers)
        for i, kw in enumerate(kws):
            assert {"start", "foo", "bar", "name"} == set(kw.keys())
            assert list(kw["start"]["expr"].fields) == stackers[: i + 1]
            assert kw["foo"] == [10, 20, 30, 40][i]
            assert kw["bar"] == "baz"
            assert kw["name"] == stackers[i]

    def test_broadcast_name_scalar_overrides(self) -> None:
        stackers = ['a', 'b', 'c', 'd']
        kws = bps.single_stack(stackers, 'start', foo=[10, 20, 30, 40], bar="baz", name="name")
        assert len(kws) == len(stackers)
        for i, kw in enumerate(kws):
            assert {"start", "foo", "bar", "name"} == set(kw.keys())
            assert list(kw["start"]["expr"].fields) == stackers[: i + 1]
            assert kw["foo"] == [10, 20, 30, 40][i]
            assert kw["bar"] == "baz"
            assert kw["name"] == "name"

    def test_broadcast_name_list_overrides(self) -> None:
        names = ["aa", "bb", "cc", "dd"]
        stackers = ['a', 'b', 'c', 'd']
        kws = bps.single_stack(stackers, 'start', foo=[10, 20, 30, 40], bar="baz", name=names)
        assert len(kws) == len(stackers)
        for i, kw in enumerate(kws):
            assert {"start", "foo", "bar", "name"} == set(kw.keys())
            assert list(kw["start"]["expr"].fields) == stackers[: i + 1]
            assert kw["foo"] == [10, 20, 30, 40][i]
            assert kw["bar"] == "baz"
            assert kw["name"] == names[i]

class Test_double_stack:
    def test_raises_when_spec_in_kwargs(self) -> None:
        with pytest.raises(ValueError) as e:
            bps.double_stack(['a', 'b'], 'foo', 'bar', foo=10)
        assert str(e.value).endswith("Stack property 'foo' cannot appear in keyword args")

        with pytest.raises(ValueError) as e:
            bps.double_stack(['a', 'b'], 'foo', 'bar', bar=10)
        assert str(e.value).endswith("Stack property 'bar' cannot appear in keyword args")

    def test_raises_when_kwargs_list_lengths_differ(self) -> None:
        with pytest.raises(ValueError) as e:
            bps.double_stack(['a', 'b'], 'foo', 'bar', baz=[1, 2], quux=[3,4,5])
        assert str(e.value).endswith("Keyword argument sequences for broadcasting must all be the same lengths. Got lengths: [2, 3]")

    def test_raises_when_kwargs_list_lengths_and_stackers_lengths_differ(self) -> None:
        with pytest.raises(ValueError) as e:
            bps.double_stack(['a', 'b', 'c'], 'foo', 'bar', baz=[1, 2], quux=[3,4])
        assert str(e.value).endswith("Keyword argument sequences for broadcasting must be the same length as stackers")

    def test_broadcast_with_no_kwargs(self) -> None:
        stackers = ['a', 'b', 'c', 'd']
        kws = bps.double_stack(stackers, 'start', 'end')
        assert len(kws) == len(stackers)
        for i, kw in enumerate(kws):
            assert {"start", "end", "name"} == set(kw.keys())
            assert list(kw["start"]["expr"].fields) == stackers[:i]
            assert list(kw["end"]["expr"].fields) == stackers[: (i + 1)]

    def test_broadcast_with_scalar_kwargs(self) -> None:
        stackers = ['a', 'b', 'c', 'd']
        kws = bps.double_stack(stackers, 'start', 'end', foo=10, bar="baz")
        assert len(kws) == len(stackers)
        for i, kw in enumerate(kws):
            assert {"start", "end", "foo", "bar", "name"} == set(kw.keys())
            assert list(kw["start"]["expr"].fields) == stackers[:i]
            assert list(kw["end"]["expr"].fields) == stackers[: (i + 1)]
            assert kw["foo"] == 10
            assert kw["bar"] == "baz"
            assert kw["name"] == stackers[i]

    def test_broadcast_with_list_kwargs(self) -> None:
        stackers = ['a', 'b', 'c', 'd']
        kws = bps.double_stack(stackers, 'start', 'end', foo=[10, 20, 30, 40], bar="baz")
        assert len(kws) == len(stackers)
        for i, kw in enumerate(kws):
            assert {"start", "end", "foo", "bar", "name"} == set(kw.keys())
            assert list(kw["start"]["expr"].fields) == stackers[:i]
            assert list(kw["end"]["expr"].fields) == stackers[: (i + 1)]
            assert kw["foo"] == [10, 20, 30, 40][i]
            assert kw["bar"] == "baz"
            assert kw["name"] == stackers[i]

    def test_broadcast_name_scalar_overrides(self) -> None:
        stackers = ['a', 'b', 'c', 'd']
        kws = bps.double_stack(stackers, 'start', 'end', foo=[10, 20, 30, 40], bar="baz", name="name")
        assert len(kws) == len(stackers)
        for i, kw in enumerate(kws):
            assert {"start", "end", "foo", "bar", "name"} == set(kw.keys())
            assert list(kw["start"]["expr"].fields) == stackers[:i]
            assert list(kw["end"]["expr"].fields) == stackers[: (i + 1)]
            assert kw["foo"] == [10, 20, 30, 40][i]
            assert kw["bar"] == "baz"
            assert kw["name"] == "name"

    def test_broadcast_name_list_overrides(self) -> None:
        names = ["aa", "bb", "cc", "dd"]
        stackers = ['a', 'b', 'c', 'd']
        kws = bps.double_stack(stackers, 'start', 'end', foo=[10, 20, 30, 40], bar="baz", name=names)
        assert len(kws) == len(stackers)
        for i, kw in enumerate(kws):
            assert {"start", "end", "foo", "bar", "name"} == set(kw.keys())
            assert list(kw["start"]["expr"].fields) == stackers[:i]
            assert list(kw["end"]["expr"].fields) == stackers[: (i + 1)]
            assert kw["foo"] == [10, 20, 30, 40][i]
            assert kw["bar"] == "baz"
            assert kw["name"] == names[i]

#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
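An illustration derived from the assertions above (not part of bokeh's test file): each dict the helpers return carries a cumulative stack() expression over successive prefixes of the stackers, plus broadcast keyword arguments and a per-stacker "name".

import bokeh.plotting._stack as bps

# list kwargs must match the number of stackers, per the tests above
kws = bps.double_stack(["q1", "q2", "q3"], "bottom", "top",
                       color=["red", "green", "blue"])
for kw in kws:
    # kw["bottom"]["expr"] / kw["top"]["expr"] stack over growing prefixes;
    # "color" is broadcast element-wise; "name" defaults to the stacker name.
    print(kw["name"], kw["color"])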
4a7d6b740a2c7f4ceecf8d36b34b499bd34b728c
b58d2a25194bf70b05787a71e3158eed63609fd6
/preprocess-features.py
9394318332de6b5981e45522a184ff8d38e90ec6
[ "GPL-3.0-only" ]
permissive
KaihuaTang/VQA2.0-Recent-Approachs-2018.pytorch
22818d8d39106a37c0fb83486761187bb8e55579
1a4aa392a510a6fb7e1037ca90d06c2eb0dd6cd9
refs/heads/master
2022-02-02T23:38:55.718214
2022-01-12T13:54:10
2022-01-12T13:54:10
162,952,449
316
57
MIT
2019-10-11T09:28:44
2018-12-24T05:14:21
Python
UTF-8
Python
false
false
2,611
py
preprocess-features.py
import sys
import argparse
import base64
import os
import csv
import itertools

csv.field_size_limit(sys.maxsize)

import h5py
import torch.utils.data
import numpy as np
from tqdm import tqdm

import config
import data
import utils


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--test', action='store_true')
    args = parser.parse_args()

    FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
    features_shape = (
        82783 + 40504 if not args.test else 81434,  # number of images in trainval or in test
        config.output_features,
        config.output_size,
    )
    boxes_shape = (
        features_shape[0],
        4,
        config.output_size,
    )

    if not args.test:
        path = config.preprocessed_trainval_path
    else:
        path = config.preprocessed_test_path
    with h5py.File(path, libver='latest') as fd:
        features = fd.create_dataset('features', shape=features_shape, dtype='float32')
        boxes = fd.create_dataset('boxes', shape=boxes_shape, dtype='float32')
        coco_ids = fd.create_dataset('ids', shape=(features_shape[0],), dtype='int32')
        widths = fd.create_dataset('widths', shape=(features_shape[0],), dtype='int32')
        heights = fd.create_dataset('heights', shape=(features_shape[0],), dtype='int32')

        readers = []
        if not args.test:
            path = config.bottom_up_trainval_path
        else:
            path = config.bottom_up_test_path
        for filename in os.listdir(path):
            if '.tsv' not in filename:
                continue
            full_filename = os.path.join(path, filename)
            fd = open(full_filename, 'r')
            reader = csv.DictReader(fd, delimiter='\t', fieldnames=FIELDNAMES)
            readers.append(reader)

        reader = itertools.chain.from_iterable(readers)
        for i, item in enumerate(tqdm(reader, total=features_shape[0])):
            coco_ids[i] = int(item['image_id'])
            widths[i] = int(item['image_w'])
            heights[i] = int(item['image_h'])

            # base64.decodestring was removed in Python 3.9; decodebytes is the equivalent.
            buf = base64.decodebytes(item['features'].encode('utf8'))
            array = np.frombuffer(buf, dtype='float32')
            array = array.reshape((-1, config.output_features)).transpose()
            features[i, :, :array.shape[1]] = array

            buf = base64.decodebytes(item['boxes'].encode('utf8'))
            array = np.frombuffer(buf, dtype='float32')
            array = array.reshape((-1, 4)).transpose()
            boxes[i, :, :array.shape[1]] = array


if __name__ == '__main__':
    main()
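A hedged sketch of reading one image's record back from the HDF5 file this script writes; the dataset names match those created above, while the file path and the nonzero-column heuristic for recovering the box count are assumptions:

import h5py
import numpy as np

with h5py.File("genome-trainval.h5", "r") as fd:  # path is an assumption
    i = 0
    frame = fd["features"][i]                       # (output_features, output_size)
    num = int(np.count_nonzero(frame.any(axis=0)))  # columns actually filled
    feats = frame[:, :num]                          # per-box feature vectors
    bxs = fd["boxes"][i, :, :num]                   # (4, num) box coordinates
    image_id = int(fd["ids"][i])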
3391b01294ca34984626c54d90a77ff14a532647
b57aab2567c3e17f7460be188e58c736eeed743a
/python/text/textindex.py
5cb5d71a4904d52073d42a9bd5d557e873251339
[ "Apache-2.0" ]
permissive
GoogleCloudPlatform/cloud-vision
af1e0b9337d047cbf85a57ffd1cd48f49234a90d
7d50fb62950ab3ebd46f230ed6bd1510c553d1f3
refs/heads/master
2023-04-01T10:30:40.615451
2022-04-18T16:37:45
2022-04-18T16:37:45
50,685,592
1,217
1,119
Apache-2.0
2022-04-08T20:44:08
2016-01-29T19:22:07
Python
UTF-8
Python
false
false
10,855
py
textindex.py
#!/usr/bin/env python

# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script uses the Vision API's OCR capabilities to find and index any text
in a set of images. It builds an inverted index, and uses redis
(http://redis.io/) to persist the index. By default, the script assumes a
local redis install set up to persist to disk. Assuming the redis database is
persisted between runs, the script can be run multiple times on the same set
of files without redoing previous work.

The script also uses nltk (http://www.nltk.org/index.html) to do stemming and
tokenizing.

To run the example, install the necessary libraries by running:

    pip install -r requirements.txt

Then, follow the instructions here:
http://www.nltk.org/data.html
to download the necessary nltk data.

Run the script on a directory of images to create the index, E.g.:

    ./textindex.py <path-to-image-directory>

Then, instantiate an instance of the Index() object (via a script or the
Python interpreter) and use it to look up words via the Index.lookup() or
Index.print_lookup() methods. E.g.:

    import textindex
    index = textindex.Index()
    index.print_lookup('cats', 'dogs')

This will return all the images that include both 'cats' and 'dogs' in
recognizable text. More exactly, it will return all images that include text
with the same stems.
"""

import argparse
# [START detect_text]
import base64
import os
import re
import sys

from googleapiclient import discovery
from googleapiclient import errors
import nltk
from nltk.stem.snowball import EnglishStemmer
from oauth2client.client import GoogleCredentials
import redis

DISCOVERY_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'  # noqa
BATCH_SIZE = 10


class VisionApi:
    """Construct and use the Google Vision API service."""

    def __init__(self, api_discovery_file='vision_api.json'):
        self.credentials = GoogleCredentials.get_application_default()
        self.service = discovery.build(
            'vision', 'v1', credentials=self.credentials,
            discoveryServiceUrl=DISCOVERY_URL)

    def detect_text(self, input_filenames, num_retries=3, max_results=6):
        """Uses the Vision API to detect text in the given file."""
        images = {}
        for filename in input_filenames:
            with open(filename, 'rb') as image_file:
                images[filename] = image_file.read()

        batch_request = []
        for filename in images:
            batch_request.append({
                'image': {
                    'content': base64.b64encode(
                        images[filename]).decode('UTF-8')
                },
                'features': [{
                    'type': 'TEXT_DETECTION',
                    'maxResults': max_results,
                }]
            })
        request = self.service.images().annotate(
            body={'requests': batch_request})

        try:
            responses = request.execute(num_retries=num_retries)
            if 'responses' not in responses:
                return {}
            text_response = {}
            for filename, response in zip(images, responses['responses']):
                if 'error' in response:
                    print("API Error for %s: %s" % (
                        filename,
                        response['error']['message']
                        if 'message' in response['error'] else ''))
                    continue
                if 'textAnnotations' in response:
                    text_response[filename] = response['textAnnotations']
                else:
                    text_response[filename] = []
            return text_response
        except errors.HttpError as e:
            print("Http Error for %s: %s" % (filename, e))
        except KeyError as e2:
            print("Key error: %s" % e2)
# [END detect_text]


# The inverted index is based in part on this example:
# http://tech.swamps.io/simple-inverted-index-using-nltk/
class Index:
    """Inverted index datastructure"""

    def __init__(self, tokenizer=nltk.word_tokenize,
                 stemmer=EnglishStemmer(),
                 stopwords=nltk.corpus.stopwords.words('english')):
        """Create an inverted index.

        Args:
            tokenizer -- NLTK compatible tokenizer function
            stemmer -- NLTK compatible stemmer
            stopwords -- list of ignored words

        This code assumes that a local redis server is running, and assumes
        that you're not already using 'db0' and 'db1' of that installation
        for some other purpose. Change these client calls if necessary for
        your redis config.
        """
        # db 0 holds the token (words) inverted index.
        self.redis_token_client = redis.StrictRedis(db=0)
        # db 1 holds the filename--> text mapping.
        self.redis_docs_client = redis.StrictRedis(db=1)
        # Do an initial check on the redis connection. If redis is not up,
        # the constructor call will fail.
        self.redis_docs_client.ping()
        self.tokenizer = tokenizer
        self.stemmer = stemmer
        self.__unique_id = 0
        self.stopwords = set(stopwords) if stopwords else set()

    def lookup(self, *words):
        """Look up words in the index; return the intersection of the hits."""
        conjunct = set()
        for word in words:
            word = word.lower()
            if self.stemmer:
                word = self.stemmer.stem(word)
            docs_with_word = self.redis_token_client.smembers(word)
            hits = set([
                (id, self.redis_docs_client.get(id))
                for id in docs_with_word
            ])
            conjunct = conjunct & hits if conjunct else hits
        return conjunct

    def print_lookup(self, *words):
        """Print lookup results to stdout."""
        hits = self.lookup(*words)
        if not hits:
            print("No hits found.")
            return
        for i in hits:
            print("***Image %s has text:\n%s" % i)

    def document_is_processed(self, filename):
        """Check whether a document (image file) has already been
        processed."""
        res = self.redis_docs_client.get(filename)
        if res:
            print("%s already added to index." % filename)
            return True
        if res == '':
            print('File %s was already checked, and contains no text.'
                  % filename)
            return True
        return False

    def set_contains_no_text(self, filename):
        """Add bookkeeping to indicate that the given file had no
        discernible text."""
        self.redis_docs_client.set(filename, '')

    def add(self, filename, document):
        """Add a document string to the index."""
        # You can uncomment the following line to see the words found in
        # each image.
        # print("Words found in %s: %s" % (filename, document))
        for token in [t.lower() for t in nltk.word_tokenize(document)]:
            if token in self.stopwords:
                continue
            if token in ['.', ',', ':', '']:
                continue
            if self.stemmer:
                token = self.stemmer.stem(token)
            # Add the filename to the set associated with the token.
            self.redis_token_client.sadd(token, filename)
        # store the 'document text' for the filename.
        self.redis_docs_client.set(filename, document)


def get_words(text):
    return re.compile(r'\w+').findall(text)


# [START extract_descrs]
def extract_description(texts):
    """Returns all the text in text annotations as a single string"""
    document = ''
    for text in texts:
        try:
            document += text['description']
        except KeyError as e:
            print('KeyError: %s\n%s' % (e, text))
    return document


def extract_descriptions(input_filename, index, texts):
    """Gets and indexes the text that was detected in the image."""
    if texts:
        document = extract_description(texts)
        index.add(input_filename, document)
        sys.stdout.write('.')  # Output a progress indicator.
        sys.stdout.flush()
    else:
        if texts == []:
            print('%s had no discernible text.' % input_filename)
        index.set_contains_no_text(input_filename)
# [END extract_descrs]


# [START get_text]
def get_text_from_files(vision, index, input_filenames):
    """Call the Vision API on a file and index the results."""
    texts = vision.detect_text(input_filenames)
    for filename, text in texts.items():
        extract_descriptions(filename, index, text)


def batch(iterable, batch_size=BATCH_SIZE):
    """Group an iterable into batches of size batch_size.

    >>> tuple(batch([1, 2, 3, 4, 5], batch_size=2))
    ((1, 2), (3, 4), (5,))
    """
    b = []
    for i in iterable:
        b.append(i)
        if len(b) == batch_size:
            yield tuple(b)
            b = []
    if b:
        yield tuple(b)


def main(input_dir):
    """Walk through all the not-yet-processed image files in the given
    directory, extracting any text from them and adding that text to an
    inverted index.
    """
    # Create a client object for the Vision API
    vision = VisionApi()
    # Create an Index object to build and query the inverted index.
    index = Index()

    allfileslist = []
    # Recursively construct a list of all the files in the given input
    # directory.
    for folder, subs, files in os.walk(input_dir):
        for filename in files:
            allfileslist.append(os.path.join(folder, filename))

    fileslist = []
    for filename in allfileslist:
        # Look for text in any files that have not yet been processed.
        if index.document_is_processed(filename):
            continue
        fileslist.append(filename)

    for filenames in batch(fileslist):
        get_text_from_files(vision, index, filenames)
# [END get_text]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Detects text in the images in the given directory.')
    parser.add_argument(
        'input_directory',
        help='the image directory you\'d like to detect text in.')
    args = parser.parse_args()

    main(args.input_directory)
ea08b2f0bf7160cfa596991985bc808163e07ad7
f6d5108d05e97f58ddb24c3669a7a52c413ffe03
/docstring_parser/tests/test_google.py
201f14159212b8a810b28fe182e21fd6850d8b7a
[ "MIT" ]
permissive
rr-/docstring_parser
cc5364c2fc892f3a4e13baa4befa0aa076ddca0c
703c6cafd96d4889b01a70f7481d9caad5b84dda
refs/heads/master
2023-07-07T02:45:13.684287
2022-09-18T09:04:53
2022-09-18T09:04:53
129,903,472
161
51
MIT
2023-06-30T23:52:00
2018-04-17T12:47:17
Python
UTF-8
Python
false
false
26,577
py
test_google.py
"""Tests for Google-style docstring routines.""" import typing as T import pytest from docstring_parser.common import ParseError, RenderingStyle from docstring_parser.google import ( GoogleParser, Section, SectionType, compose, parse, ) def test_google_parser_unknown_section() -> None: """Test parsing an unknown section with default GoogleParser configuration. """ parser = GoogleParser() docstring = parser.parse( """ Unknown: spam: a """ ) assert docstring.short_description == "Unknown:" assert docstring.long_description == "spam: a" assert len(docstring.meta) == 0 def test_google_parser_custom_sections() -> None: """Test parsing an unknown section with custom GoogleParser configuration. """ parser = GoogleParser( [ Section("DESCRIPTION", "desc", SectionType.SINGULAR), Section("ARGUMENTS", "param", SectionType.MULTIPLE), Section("ATTRIBUTES", "attribute", SectionType.MULTIPLE), Section("EXAMPLES", "examples", SectionType.SINGULAR), ], title_colon=False, ) docstring = parser.parse( """ DESCRIPTION This is the description. ARGUMENTS arg1: first arg arg2: second arg ATTRIBUTES attr1: first attribute attr2: second attribute EXAMPLES Many examples More examples """ ) assert docstring.short_description is None assert docstring.long_description is None assert len(docstring.meta) == 6 assert docstring.meta[0].args == ["desc"] assert docstring.meta[0].description == "This is the description." assert docstring.meta[1].args == ["param", "arg1"] assert docstring.meta[1].description == "first arg" assert docstring.meta[2].args == ["param", "arg2"] assert docstring.meta[2].description == "second arg" assert docstring.meta[3].args == ["attribute", "attr1"] assert docstring.meta[3].description == "first attribute" assert docstring.meta[4].args == ["attribute", "attr2"] assert docstring.meta[4].description == "second attribute" assert docstring.meta[5].args == ["examples"] assert docstring.meta[5].description == "Many examples\nMore examples" def test_google_parser_custom_sections_after() -> None: """Test parsing an unknown section with custom GoogleParser configuration that was set at a runtime. 
""" parser = GoogleParser(title_colon=False) parser.add_section(Section("Note", "note", SectionType.SINGULAR)) docstring = parser.parse( """ short description Note: a note """ ) assert docstring.short_description == "short description" assert docstring.long_description == "Note:\n a note" docstring = parser.parse( """ short description Note a note """ ) assert docstring.short_description == "short description" assert docstring.long_description == "Note a note" docstring = parser.parse( """ short description Note a note """ ) assert len(docstring.meta) == 1 assert docstring.meta[0].args == ["note"] assert docstring.meta[0].description == "a note" @pytest.mark.parametrize( "source, expected", [ ("", None), ("\n", None), ("Short description", "Short description"), ("\nShort description\n", "Short description"), ("\n Short description\n", "Short description"), ], ) def test_short_description(source: str, expected: str) -> None: """Test parsing short description.""" docstring = parse(source) assert docstring.short_description == expected assert docstring.long_description is None assert not docstring.meta @pytest.mark.parametrize( "source, expected_short_desc, expected_long_desc, expected_blank", [ ( "Short description\n\nLong description", "Short description", "Long description", True, ), ( """ Short description Long description """, "Short description", "Long description", True, ), ( """ Short description Long description Second line """, "Short description", "Long description\nSecond line", True, ), ( "Short description\nLong description", "Short description", "Long description", False, ), ( """ Short description Long description """, "Short description", "Long description", False, ), ( "\nShort description\nLong description\n", "Short description", "Long description", False, ), ( """ Short description Long description Second line """, "Short description", "Long description\nSecond line", False, ), ], ) def test_long_description( source: str, expected_short_desc: str, expected_long_desc: str, expected_blank: bool, ) -> None: """Test parsing long description.""" docstring = parse(source) assert docstring.short_description == expected_short_desc assert docstring.long_description == expected_long_desc assert docstring.blank_after_short_description == expected_blank assert not docstring.meta @pytest.mark.parametrize( "source, expected_short_desc, expected_long_desc, " "expected_blank_short_desc, expected_blank_long_desc", [ ( """ Short description Args: asd: """, "Short description", None, False, False, ), ( """ Short description Long description Args: asd: """, "Short description", "Long description", False, False, ), ( """ Short description First line Second line Args: asd: """, "Short description", "First line\n Second line", False, False, ), ( """ Short description First line Second line Args: asd: """, "Short description", "First line\n Second line", True, False, ), ( """ Short description First line Second line Args: asd: """, "Short description", "First line\n Second line", True, True, ), ( """ Args: asd: """, None, None, False, False, ), ], ) def test_meta_newlines( source: str, expected_short_desc: T.Optional[str], expected_long_desc: T.Optional[str], expected_blank_short_desc: bool, expected_blank_long_desc: bool, ) -> None: """Test parsing newlines around description sections.""" docstring = parse(source) assert docstring.short_description == expected_short_desc assert docstring.long_description == expected_long_desc assert docstring.blank_after_short_description == 
expected_blank_short_desc assert docstring.blank_after_long_description == expected_blank_long_desc assert len(docstring.meta) == 1 def test_meta_with_multiline_description() -> None: """Test parsing multiline meta documentation.""" docstring = parse( """ Short description Args: spam: asd 1 2 3 """ ) assert docstring.short_description == "Short description" assert len(docstring.meta) == 1 assert docstring.meta[0].args == ["param", "spam"] assert docstring.meta[0].arg_name == "spam" assert docstring.meta[0].description == "asd\n1\n 2\n3" def test_default_args() -> None: """Test parsing default arguments.""" docstring = parse( """A sample function A function the demonstrates docstrings Args: arg1 (int): The firsty arg arg2 (str): The second arg arg3 (float, optional): The third arg. Defaults to 1.0. arg4 (Optional[Dict[str, Any]], optional): The last arg. Defaults to None. arg5 (str, optional): The fifth arg. Defaults to DEFAULT_ARG5. Returns: Mapping[str, Any]: The args packed in a mapping """ ) assert docstring is not None assert len(docstring.params) == 5 arg4 = docstring.params[3] assert arg4.arg_name == "arg4" assert arg4.is_optional assert arg4.type_name == "Optional[Dict[str, Any]]" assert arg4.default == "None" assert arg4.description == "The last arg. Defaults to None." def test_multiple_meta() -> None: """Test parsing multiple meta.""" docstring = parse( """ Short description Args: spam: asd 1 2 3 Raises: bla: herp yay: derp """ ) assert docstring.short_description == "Short description" assert len(docstring.meta) == 3 assert docstring.meta[0].args == ["param", "spam"] assert docstring.meta[0].arg_name == "spam" assert docstring.meta[0].description == "asd\n1\n 2\n3" assert docstring.meta[1].args == ["raises", "bla"] assert docstring.meta[1].type_name == "bla" assert docstring.meta[1].description == "herp" assert docstring.meta[2].args == ["raises", "yay"] assert docstring.meta[2].type_name == "yay" assert docstring.meta[2].description == "derp" def test_params() -> None: """Test parsing params.""" docstring = parse("Short description") assert len(docstring.params) == 0 docstring = parse( """ Short description Args: name: description 1 priority (int): description 2 sender (str?): description 3 ratio (Optional[float], optional): description 4 """ ) assert len(docstring.params) == 4 assert docstring.params[0].arg_name == "name" assert docstring.params[0].type_name is None assert docstring.params[0].description == "description 1" assert not docstring.params[0].is_optional assert docstring.params[1].arg_name == "priority" assert docstring.params[1].type_name == "int" assert docstring.params[1].description == "description 2" assert not docstring.params[1].is_optional assert docstring.params[2].arg_name == "sender" assert docstring.params[2].type_name == "str" assert docstring.params[2].description == "description 3" assert docstring.params[2].is_optional assert docstring.params[3].arg_name == "ratio" assert docstring.params[3].type_name == "Optional[float]" assert docstring.params[3].description == "description 4" assert docstring.params[3].is_optional docstring = parse( """ Short description Args: name: description 1 with multi-line text priority (int): description 2 """ ) assert len(docstring.params) == 2 assert docstring.params[0].arg_name == "name" assert docstring.params[0].type_name is None assert docstring.params[0].description == ( "description 1\nwith multi-line text" ) assert docstring.params[1].arg_name == "priority" assert docstring.params[1].type_name == "int" assert 
docstring.params[1].description == "description 2" def test_attributes() -> None: """Test parsing attributes.""" docstring = parse("Short description") assert len(docstring.params) == 0 docstring = parse( """ Short description Attributes: name: description 1 priority (int): description 2 sender (str?): description 3 ratio (Optional[float], optional): description 4 """ ) assert len(docstring.params) == 4 assert docstring.params[0].arg_name == "name" assert docstring.params[0].type_name is None assert docstring.params[0].description == "description 1" assert not docstring.params[0].is_optional assert docstring.params[1].arg_name == "priority" assert docstring.params[1].type_name == "int" assert docstring.params[1].description == "description 2" assert not docstring.params[1].is_optional assert docstring.params[2].arg_name == "sender" assert docstring.params[2].type_name == "str" assert docstring.params[2].description == "description 3" assert docstring.params[2].is_optional assert docstring.params[3].arg_name == "ratio" assert docstring.params[3].type_name == "Optional[float]" assert docstring.params[3].description == "description 4" assert docstring.params[3].is_optional docstring = parse( """ Short description Attributes: name: description 1 with multi-line text priority (int): description 2 """ ) assert len(docstring.params) == 2 assert docstring.params[0].arg_name == "name" assert docstring.params[0].type_name is None assert docstring.params[0].description == ( "description 1\nwith multi-line text" ) assert docstring.params[1].arg_name == "priority" assert docstring.params[1].type_name == "int" assert docstring.params[1].description == "description 2" def test_returns() -> None: """Test parsing returns.""" docstring = parse( """ Short description """ ) assert docstring.returns is None assert docstring.many_returns is not None assert len(docstring.many_returns) == 0 docstring = parse( """ Short description Returns: description """ ) assert docstring.returns is not None assert docstring.returns.type_name is None assert docstring.returns.description == "description" assert docstring.many_returns is not None assert len(docstring.many_returns) == 1 assert docstring.many_returns[0] == docstring.returns docstring = parse( """ Short description Returns: description with: a colon! """ ) assert docstring.returns is not None assert docstring.returns.type_name is None assert docstring.returns.description == "description with: a colon!" 
assert docstring.many_returns is not None assert len(docstring.many_returns) == 1 assert docstring.many_returns[0] == docstring.returns docstring = parse( """ Short description Returns: int: description """ ) assert docstring.returns is not None assert docstring.returns.type_name == "int" assert docstring.returns.description == "description" assert docstring.many_returns is not None assert len(docstring.many_returns) == 1 assert docstring.many_returns[0] == docstring.returns docstring = parse( """ Returns: Optional[Mapping[str, List[int]]]: A description: with a colon """ ) assert docstring.returns is not None assert docstring.returns.type_name == "Optional[Mapping[str, List[int]]]" assert docstring.returns.description == "A description: with a colon" assert docstring.many_returns is not None assert len(docstring.many_returns) == 1 assert docstring.many_returns[0] == docstring.returns docstring = parse( """ Short description Yields: int: description """ ) assert docstring.returns is not None assert docstring.returns.type_name == "int" assert docstring.returns.description == "description" assert docstring.many_returns is not None assert len(docstring.many_returns) == 1 assert docstring.many_returns[0] == docstring.returns docstring = parse( """ Short description Returns: int: description with much text even some spacing """ ) assert docstring.returns is not None assert docstring.returns.type_name == "int" assert docstring.returns.description == ( "description\nwith much text\n\neven some spacing" ) assert docstring.many_returns is not None assert len(docstring.many_returns) == 1 assert docstring.many_returns[0] == docstring.returns def test_raises() -> None: """Test parsing raises.""" docstring = parse( """ Short description """ ) assert len(docstring.raises) == 0 docstring = parse( """ Short description Raises: ValueError: description """ ) assert len(docstring.raises) == 1 assert docstring.raises[0].type_name == "ValueError" assert docstring.raises[0].description == "description" def test_examples() -> None: """Test parsing examples.""" docstring = parse( """ Short description Example: example: 1 Examples: long example more here """ ) assert len(docstring.examples) == 2 assert docstring.examples[0].description == "example: 1" assert docstring.examples[1].description == "long example\n\nmore here" def test_broken_meta() -> None: """Test parsing broken meta.""" with pytest.raises(ParseError): parse("Args:") with pytest.raises(ParseError): parse("Args:\n herp derp") def test_unknown_meta() -> None: """Test parsing unknown meta.""" docstring = parse( """Short desc Unknown 0: title0: content0 Args: arg0: desc0 arg1: desc1 Unknown1: title1: content1 Unknown2: title2: content2 """ ) assert docstring.params[0].arg_name == "arg0" assert docstring.params[0].description == "desc0" assert docstring.params[1].arg_name == "arg1" assert docstring.params[1].description == "desc1" def test_broken_arguments() -> None: """Test parsing broken arguments.""" with pytest.raises(ParseError): parse( """This is a test Args: param - poorly formatted """ ) def test_empty_example() -> None: """Test parsing empty examples section.""" docstring = parse( """Short description Example: Raises: IOError: some error """ ) assert len(docstring.examples) == 1 assert docstring.examples[0].args == ["examples"] assert docstring.examples[0].description == "" @pytest.mark.parametrize( "source, expected", [ ("", ""), ("\n", ""), ("Short description", "Short description"), ("\nShort description\n", "Short description"), ("\n Short 
description\n", "Short description"), ( "Short description\n\nLong description", "Short description\n\nLong description", ), ( """ Short description Long description """, "Short description\n\nLong description", ), ( """ Short description Long description Second line """, "Short description\n\nLong description\nSecond line", ), ( "Short description\nLong description", "Short description\nLong description", ), ( """ Short description Long description """, "Short description\nLong description", ), ( "\nShort description\nLong description\n", "Short description\nLong description", ), ( """ Short description Long description Second line """, "Short description\nLong description\nSecond line", ), ( """ Short description Meta: asd """, "Short description\nMeta:\n asd", ), ( """ Short description Long description Meta: asd """, "Short description\nLong description\nMeta:\n asd", ), ( """ Short description First line Second line Meta: asd """, "Short description\n" "First line\n" " Second line\n" "Meta:\n" " asd", ), ( """ Short description First line Second line Meta: asd """, "Short description\n" "\n" "First line\n" " Second line\n" "Meta:\n" " asd", ), ( """ Short description First line Second line Meta: asd """, "Short description\n" "\n" "First line\n" " Second line\n" "\n" "Meta:\n" " asd", ), ( """ Short description Meta: asd 1 2 3 """, "Short description\n" "\n" "Meta:\n" " asd\n" " 1\n" " 2\n" " 3", ), ( """ Short description Meta1: asd 1 2 3 Meta2: herp Meta3: derp """, "Short description\n" "\n" "Meta1:\n" " asd\n" " 1\n" " 2\n" " 3\n" "Meta2:\n" " herp\n" "Meta3:\n" " derp", ), ( """ Short description Args: name: description 1 priority (int): description 2 sender (str, optional): description 3 message (str, optional): description 4, defaults to 'hello' multiline (str?): long description 5, defaults to 'bye' """, "Short description\n" "\n" "Args:\n" " name: description 1\n" " priority (int): description 2\n" " sender (str?): description 3\n" " message (str?): description 4, defaults to 'hello'\n" " multiline (str?): long description 5,\n" " defaults to 'bye'", ), ( """ Short description Raises: ValueError: description """, "Short description\nRaises:\n ValueError: description", ), ], ) def test_compose(source: str, expected: str) -> None: """Test compose in default mode.""" assert compose(parse(source)) == expected @pytest.mark.parametrize( "source, expected", [ ( """ Short description Args: name: description 1 priority (int): description 2 sender (str, optional): description 3 message (str, optional): description 4, defaults to 'hello' multiline (str?): long description 5, defaults to 'bye' """, "Short description\n" "\n" "Args:\n" " name: description 1\n" " priority (int): description 2\n" " sender (str, optional): description 3\n" " message (str, optional): description 4, defaults to 'hello'\n" " multiline (str, optional): long description 5,\n" " defaults to 'bye'", ), ], ) def test_compose_clean(source: str, expected: str) -> None: """Test compose in clean mode.""" assert ( compose(parse(source), rendering_style=RenderingStyle.CLEAN) == expected ) @pytest.mark.parametrize( "source, expected", [ ( """ Short description Args: name: description 1 priority (int): description 2 sender (str, optional): description 3 message (str, optional): description 4, defaults to 'hello' multiline (str?): long description 5, defaults to 'bye' """, "Short description\n" "\n" "Args:\n" " name:\n" " description 1\n" " priority (int):\n" " description 2\n" " sender (str, optional):\n" " description 3\n" 
" message (str, optional):\n" " description 4, defaults to 'hello'\n" " multiline (str, optional):\n" " long description 5,\n" " defaults to 'bye'", ), ], ) def test_compose_expanded(source: str, expected: str) -> None: """Test compose in expanded mode.""" assert ( compose(parse(source), rendering_style=RenderingStyle.EXPANDED) == expected )
7875a644d798dd3ebb690af1fef72e1fc51b1bce
0d7bcd65699549f615a9a529d733f7f50001cd3b
/back-end/www/show_video_transforms.py
ad854671472a09e07867886040b28935256e6edf
[ "BSD-3-Clause", "CC0-1.0" ]
permissive
CMU-CREATE-Lab/deep-smoke-machine
99ad9936faf089117a927a59684a853463a2094e
2501963c5bdb56202ae1792051f94adfd1ecfc65
refs/heads/master
2023-05-02T18:15:02.406452
2023-04-21T15:30:02
2023-04-21T15:30:02
184,317,473
103
27
BSD-3-Clause
2020-12-08T09:32:16
2019-04-30T19:10:41
Python
UTF-8
Python
false
false
1,220
py
show_video_transforms.py
import os

thread = "1"
os.environ["MKL_NUM_THREADS"] = thread
os.environ["NUMEXPR_NUM_THREADS"] = thread
os.environ["OMP_NUM_THREADS"] = thread
os.environ["VECLIB_MAXIMUM_THREADS"] = thread
os.environ["OPENBLAS_NUM_THREADS"] = thread

import cv2
cv2.setNumThreads(0)

import sys
import matplotlib
matplotlib.use("TkAgg")  # a fix for Mac OS X error
from optical_flow.optical_flow import OpticalFlow
import numpy as np
from base_learner import BaseLearner


class TestLearner(BaseLearner):
    def fit(self):
        pass

    def predict(self):
        pass


def main(argv):
    if len(argv) < 2:
        print("Usage: python show_video_transforms.py [video_file_path]")
        return

    # Read frames
    op = OpticalFlow(rgb_vid_in_p=argv[1])
    rgb_4d = op.vid_to_frames().astype(np.uint8)  # ColorJitter needs uint8
    tl = TestLearner()
    T = tl.get_transform("rgb", phase="train")
    rgb_4d = T(rgb_4d).numpy().transpose(1, 2, 3, 0)
    print(np.amin(rgb_4d), np.amax(rgb_4d))
    print(rgb_4d.shape)
    rgb_4d = cv2.normalize(rgb_4d, None, alpha=0, beta=255,
                           norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    op.frames_to_vid(rgb_4d, "../data/transformed.mp4")


if __name__ == "__main__":
    main(sys.argv)
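A usage sketch under the script's own assumptions (an OpticalFlow-readable video and a writable ../data/ directory; the input file name is illustrative):

# python show_video_transforms.py ../data/some_clip.mp4
#
# Reads frames, applies the training-phase RGB augmentation from the learner,
# rescales the result back to 0-255, and writes ../data/transformed.mp4 so the
# augmentations can be inspected visually.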
900c9d3a3b91117c335e212c89ac27245ec3f6ec
187414dcb264fb49d82507a099fd5fdca6e55e38
/python/pyspark/pandas/tests/computation/test_any_all.py
64f293c48d64a588319b7dac66dd1aa3217599b3
[ "BSD-3-Clause", "CC0-1.0", "CDDL-1.1", "Apache-2.0", "LicenseRef-scancode-public-domain", "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "EPL-2.0", "CDDL-1.0", "MIT", "LGPL-2.0-or-later", "Python-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "EPL-1.0", "Classpath-exception-2.0", "GCC-exception-3.1", "CC-BY-SA-3.0", "LGPL-2.1-only", "LicenseRef-scancode-unicode", "CPL-1.0", "LicenseRef-scancode-other-permissive", "GPL-2.0-only", "CC-PDDC", "NAIST-2003", "LicenseRef-scancode-other-copyleft" ]
permissive
apache/spark
8aeba2d80465a262acc95781ede105a5b5886f6d
60d8fc49bec5dae1b8cf39a0670cb640b430f520
refs/heads/master
2023-09-04T04:33:36.058199
2023-09-04T03:48:52
2023-09-04T03:48:52
17,165,658
39,983
32,449
Apache-2.0
2023-09-14T19:46:24
2014-02-25T08:00:08
Scala
UTF-8
Python
false
false
6,364
py
test_any_all.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import unittest

import numpy as np
import pandas as pd

from pyspark import pandas as ps
from pyspark.testing.pandasutils import ComparisonTestBase
from pyspark.testing.sqlutils import SQLTestUtils


class FrameAnyAllMixin:
    @property
    def pdf(self):
        return pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
            index=np.random.rand(9),
        )

    @property
    def df_pair(self):
        pdf = self.pdf
        psdf = ps.from_pandas(pdf)
        return pdf, psdf

    def test_all(self):
        pdf = pd.DataFrame(
            {
                "col1": [False, False, False],
                "col2": [True, False, False],
                "col3": [0, 0, 1],
                "col4": [0, 1, 2],
                "col5": [False, False, None],
                "col6": [True, False, None],
            },
            index=np.random.rand(3),
        )
        pdf.name = "x"
        psdf = ps.from_pandas(pdf)

        self.assert_eq(psdf.all(), pdf.all())
        self.assert_eq(psdf.all(bool_only=True), pdf.all(bool_only=True))
        self.assert_eq(psdf.all(bool_only=False), pdf.all(bool_only=False))
        self.assert_eq(psdf[["col5"]].all(bool_only=True), pdf[["col5"]].all(bool_only=True))
        self.assert_eq(psdf[["col5"]].all(bool_only=False), pdf[["col5"]].all(bool_only=False))

        columns = pd.MultiIndex.from_tuples(
            [
                ("a", "col1"),
                ("a", "col2"),
                ("a", "col3"),
                ("b", "col4"),
                ("b", "col5"),
                ("c", "col6"),
            ]
        )
        pdf.columns = columns
        psdf.columns = columns

        self.assert_eq(psdf.all(), pdf.all())
        self.assert_eq(psdf.all(bool_only=True), pdf.all(bool_only=True))
        self.assert_eq(psdf.all(bool_only=False), pdf.all(bool_only=False))

        columns.names = ["X", "Y"]
        pdf.columns = columns
        psdf.columns = columns

        self.assert_eq(psdf.all(), pdf.all())
        self.assert_eq(psdf.all(bool_only=True), pdf.all(bool_only=True))
        self.assert_eq(psdf.all(bool_only=False), pdf.all(bool_only=False))

        with self.assertRaisesRegex(
            NotImplementedError, 'axis should be either 0 or "index" currently.'
        ):
            psdf.all(axis=1)

        # Test skipna
        pdf = pd.DataFrame({"A": [True, True], "B": [1, np.nan], "C": [True, None]})
        pdf.name = "x"
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psdf[["A", "B"]].all(skipna=False), pdf[["A", "B"]].all(skipna=False))
        self.assert_eq(psdf[["A", "C"]].all(skipna=False), pdf[["A", "C"]].all(skipna=False))
        self.assert_eq(psdf[["B", "C"]].all(skipna=False), pdf[["B", "C"]].all(skipna=False))
        self.assert_eq(psdf.all(skipna=False), pdf.all(skipna=False))
        self.assert_eq(psdf.all(skipna=True), pdf.all(skipna=True))
        self.assert_eq(psdf.all(), pdf.all())
        self.assert_eq(
            ps.DataFrame([np.nan]).all(skipna=False),
            pd.DataFrame([np.nan]).all(skipna=False),
            almost=True,
        )
        self.assert_eq(
            ps.DataFrame([None]).all(skipna=True),
            pd.DataFrame([None]).all(skipna=True),
            almost=True,
        )

    def test_any(self):
        pdf = pd.DataFrame(
            {
                "col1": [False, False, False],
                "col2": [True, False, False],
                "col3": [0, 0, 1],
                "col4": [0, 1, 2],
                "col5": [False, False, None],
                "col6": [True, False, None],
            },
            index=np.random.rand(3),
        )
        pdf.name = "x"
        psdf = ps.from_pandas(pdf)

        self.assert_eq(psdf.any(), pdf.any())
        self.assert_eq(psdf.any(bool_only=True), pdf.any(bool_only=True))
        self.assert_eq(psdf.any(bool_only=False), pdf.any(bool_only=False))
        self.assert_eq(psdf[["col5"]].all(bool_only=True), pdf[["col5"]].all(bool_only=True))
        self.assert_eq(psdf[["col5"]].all(bool_only=False), pdf[["col5"]].all(bool_only=False))

        columns = pd.MultiIndex.from_tuples(
            [
                ("a", "col1"),
                ("a", "col2"),
                ("a", "col3"),
                ("b", "col4"),
                ("b", "col5"),
                ("c", "col6"),
            ]
        )
        pdf.columns = columns
        psdf.columns = columns

        self.assert_eq(psdf.any(), pdf.any())
        self.assert_eq(psdf.any(bool_only=True), pdf.any(bool_only=True))
        self.assert_eq(psdf.any(bool_only=False), pdf.any(bool_only=False))

        columns.names = ["X", "Y"]
        pdf.columns = columns
        psdf.columns = columns

        self.assert_eq(psdf.any(), pdf.any())
        self.assert_eq(psdf.any(bool_only=True), pdf.any(bool_only=True))
        self.assert_eq(psdf.any(bool_only=False), pdf.any(bool_only=False))

        with self.assertRaisesRegex(
            NotImplementedError, 'axis should be either 0 or "index" currently.'
        ):
            psdf.any(axis=1)


class FrameAnyAllTests(FrameAnyAllMixin, ComparisonTestBase, SQLTestUtils):
    pass


if __name__ == "__main__":
    from pyspark.pandas.tests.computation.test_any_all import *  # noqa: F401

    try:
        import xmlrunner

        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
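A minimal sketch of the pandas parity these tests assert, assuming a running Spark session with pandas-on-Spark available:

import pandas as pd
from pyspark import pandas as ps

pdf = pd.DataFrame({"a": [True, True, False], "b": [1, 0, 1]})
psdf = ps.from_pandas(pdf)

# pandas-on-Spark should agree with plain pandas column-wise:
assert psdf.all().to_list() == pdf.all().to_list()  # column-wise AND
assert psdf.any().to_list() == pdf.any().to_list()  # column-wise OR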
6a7f62fb4c3e6bac12718db44577ce405a027520
cf759ef1264f39a97709f47d0ea6fe2c5e0338d1
/andriller/gui/wa_crypt.py
9ec696eebb53df3cd1c16a9dc04b5d41f77d69d1
[ "MIT" ]
permissive
den4uk/andriller
809595d0845726c090fddd5b34a4fa771290b4c7
3b8c9f7b8d9bb4f567ea05c03522341033ce2d3b
refs/heads/master
2022-07-15T01:15:42.042722
2022-06-27T22:00:44
2022-06-27T22:00:44
227,929,809
1,188
205
MIT
2022-06-27T22:00:45
2019-12-13T22:06:36
Python
UTF-8
Python
false
false
6,382
py
wa_crypt.py
import os.path
import pathlib
import tkinter as tk
from tkinter import ttk, messagebox

from .core import BaseWindow
from .. import decrypts
from .. import statics
from ..utils import threaded, human_bytes


# WhatsApp Crypt --------------------------------------------------------------
class WhatsAppCrypt(BaseWindow):
    KEY_SIZE = decrypts.WhatsAppCrypt.KEY_SIZE
    DECODED_DIR = decrypts.WhatsAppCrypt.DECODED_DIR
    DECODED_EXT = decrypts.WhatsAppCrypt.DECODED_EXT

    def __init__(self, root=None, title='WhatsApp Crypt Decryptor'):
        super().__init__(root=root, title=title)
        self.guide = statics.WHATSAPP_CRYPT
        self.work_dir = None
        self.crypts = {}
        self.key_file = None
        self.supported = self.get_supported()
        self._info = tk.StringVar()
        self._info_but = tk.StringVar()
        self._info_but.set('Show Info')
        ttk.Label(self.mainframe, text=title, font=self.FontTitle).grid(row=1, column=0, columnspan=2)
        tk.Button(self.mainframe, textvariable=self._info_but, relief='flat', command=self.info_toggle)\
            .grid(row=1, column=2, columnspan=1, sticky=tk.E)
        ttk.Label(self.mainframe, textvar=self._info).grid(row=5, column=0, columnspan=3, sticky=self.WE)

        self.dir_label = tk.StringVar()
        self.dir_but = ttk.Button(self.mainframe, text='Select directory', command=self.set_dir)
        self.dir_but.grid(row=10, column=0, columnspan=1, sticky=tk.W)
        ttk.Label(self.mainframe, textvar=self.dir_label).grid(row=10, column=1, columnspan=2, sticky=tk.W)

        self.key_label = tk.StringVar()
        self.key_but = ttk.Button(self.mainframe, text="Select 'key' file", command=self.set_key)
        self.key_but.grid(row=11, column=0, columnspan=1, sticky=tk.W)
        ttk.Label(self.mainframe, textvar=self.key_label).grid(row=11, column=1, columnspan=2, sticky=tk.W)

        self.file_box = ttk.Treeview(self.mainframe, columns=['size', 'done'], selectmode=tk.EXTENDED)
        self.file_box.heading('#0', text='File Name')
        self.file_box.heading('size', text='Size')
        self.file_box.heading('done', text='Decrypted')
        self.file_box.column('size', width=30)
        self.file_box.column('done', width=20)
        self.file_box.tag_configure('success', background='light green')
        self.file_box.tag_configure('failure', background='#ff8080')
        self.file_box.grid(row=20, column=0, columnspan=3, sticky=self.WE)

        self.dec_all = ttk.Button(self.mainframe, text='Decrypt All', command=self.decrypt_all)
        self.dec_all.grid(row=30, column=0, sticky=tk.W)
        self.dec_sel = ttk.Button(self.mainframe, text='Decrypt Selected', command=self.decrypt_sel)
        self.dec_sel.grid(row=30, column=2, sticky=tk.E)

    def info_toggle(self):
        (self._info.set(''), self._info_but.set('Show Info')) if self._info.get() \
            else (self._info.set(statics.WHATSAPP_CRYPT), self._info_but.set('Hide Info'))

    def controls_state(self, state):
        for c in [self.dir_but, self.key_but, self.dec_all, self.dec_sel]:
            c.configure(state=state)

    def set_dir(self):
        dialog = self.get_dir()
        if dialog:
            self.work_dir = dialog
            self.dir_label.set(self.work_dir)
            self.check_dir()
            self.try_key_file()

    def set_key(self, key=None):
        dialog = key or self.get_file('key', fsize=self.KEY_SIZE)
        if dialog:
            self.key_file = None
            self.key_label.set('')
            self.key_file = dialog
            self.key_label.set(self.key_file)

    def try_key_file(self):
        key = os.path.join(self.work_dir, 'key')
        if os.path.isfile(key) and os.path.getsize(key) == self.KEY_SIZE:
            self.logger.info('WhatsAppCrypt: key file was detected & automatically selected')
            self.set_key(key=key)

    def check_dir(self):
        self.crypts.clear()
        self.file_box.delete(*self.file_box.get_children())
        path_ = pathlib.Path(self.work_dir)
        for f in path_.glob('*.crypt*'):
            done = f.parent.joinpath(self.DECODED_DIR, f'{f.name}{self.DECODED_EXT}').exists()
            size = human_bytes(os.path.getsize(f))
            item = self.file_box.insert('', tk.END, text=f.name, values=[size, done])
            self.crypts[item] = str(f)

    def tree_update(self, iid, values):
        self.file_box.item(iid, values=values)

    def decrypt_all(self):
        self.file_box.selection_add(self.file_box.get_children())
        self.decrypt_sel()

    def decrypt_sel(self):
        sel = self.file_box.selection()
        if not sel:
            messagebox.showwarning('No selection made', 'Select at least one database to decrypt.')
            return
        self.run_decrypt(sel)

    @threaded
    def run_decrypt(self, sel):
        try:
            self.controls_state(tk.DISABLED)
            for i in sel:
                file_ = self.crypts[i]
                fname = os.path.basename(file_)
                file_ext = file_.split('.')[-1].lower()
                decrypter = self.supported.get(file_ext)
                if decrypter:
                    try:
                        wadec = decrypter(
                            pathlib.Path(file_),
                            pathlib.Path(self.key_file)
                        )
                        if wadec.decrypt():
                            vals = self.file_box.item(i)['values']
                            vals[1] = True
                            self.file_box.item(i, values=vals, tags='success')
                            self.logger.info(f'WhatsAppCrypt: {fname} successfully decrypted.')
                    except decrypts.WhatsAppCryptError as err:
                        self.logger.error(f'WhatsAppCrypt: {err}')
                        self.file_box.item(i, tags='failure')
                        messagebox.showerror('WhatsApp decryption error', str(err))
                    except Exception as err:
                        self.logger.exception(f'WhatsAppCrypt: {fname}: {err}')
                        self.file_box.item(i, tags='failure')
        finally:
            self.file_box.selection_set()
            self.controls_state(tk.NORMAL)

    def get_supported(self):
        return {kls.CRYPT: kls for kls in decrypts.WhatsAppCrypt.__subclasses__()}
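get_supported() discovers decrypters via a subclass registry: every subclass of decrypts.WhatsAppCrypt advertises the crypt extension it handles through a CRYPT attribute. A hedged, generic sketch of the pattern (class names here are illustrative, not andriller's):

class BaseCodec:
    CRYPT = None  # subclasses set the extension they handle


class Crypt12(BaseCodec):
    CRYPT = "crypt12"


class Crypt14(BaseCodec):
    CRYPT = "crypt14"


# Build the extension -> class map by introspecting direct subclasses.
supported = {kls.CRYPT: kls for kls in BaseCodec.__subclasses__()}
# {'crypt12': <class 'Crypt12'>, 'crypt14': <class 'Crypt14'>}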
ecb7530fd1d8f41376c069edb87d6c6f22b4db2f
8f7320c10f2c5fc8475753dc5256d1a66067e15c
/pykeops/pykeops/examples/pytorch/plot_generic_syntax_pytorch_LSE.py
8b3a0dc0e68bad8a6eb9916d00a8d8c506ca094b
[ "MIT" ]
permissive
getkeops/keops
947a5409710379893c6c7a46d0a256133a6d8aff
52ed22a7fbbcf4bd02dbdf5dc2b00bf79cceddf5
refs/heads/main
2023-08-25T12:44:22.092925
2023-08-09T13:33:58
2023-08-09T13:33:58
182,054,091
910
69
MIT
2023-09-03T20:35:44
2019-04-18T09:04:07
Python
UTF-8
Python
false
false
6,211
py
plot_generic_syntax_pytorch_LSE.py
""" LogSumExp reduction ============================== """ #################################################################### # Let's compute the (3000,1) tensor :math:`c` whose entries # :math:`c_i` are given by: # # .. math:: # c_i = \log \left[ \sum_j \exp\left( (p-a_j)^2 \exp(x_i+y_j) \right) \right] # # where # # * :math:`x` is a (3000,1) tensor, with entries :math:`x_i`. # * :math:`y` is a (5000,1) tensor, with entries :math:`y_j`. # * :math:`a` is a (5000,1) tensor, with entries :math:`a_j`. # * :math:`p` is a scalar, encoded as a vector of size (1,). # #################################################################### # Setup # ----- # # Standard imports: import time import torch from matplotlib import pyplot as plt from torch.autograd import grad from pykeops.torch import Genred ##################################################################### # Declare random inputs: M = 3000 N = 5000 dtype = "float32" # Could be 'float32' or 'float64' torchtype = torch.float32 if dtype == "float32" else torch.float64 x = torch.rand(M, 1, dtype=torchtype) y = torch.rand(N, 1, dtype=torchtype, requires_grad=True) a = torch.rand(N, 1, dtype=torchtype) p = torch.rand(1, dtype=torchtype) #################################################################### # Define a custom formula # ----------------------- formula = "Square(p-a)*Exp(x+y)" variables = [ "x = Vi(1)", # First arg : i-variable, of size 1 (scalar) "y = Vj(1)", # Second arg : j-variable, of size 1 (scalar) "a = Vj(1)", # Third arg : j-variable, of size 1 (scalar) "p = Pm(1)", ] # Fourth arg : Parameter, of size 1 (scalar) start = time.time() #################################################################### # Our log-sum-exp reduction is performed over the index :math:`j`, # i.e. on the axis ``1`` of the kernel matrix. # The output c is an :math:`x`-variable indexed by :math:`i`. my_routine = Genred(formula, variables, reduction_op="LogSumExp", axis=1) c = my_routine(x, y, a, p, backend="CPU") # N.B.: By specifying backend='CPU', we can make sure that the result is computed using a simple C++ for loop. print( "Time to compute the convolution operation on the cpu: ", round(time.time() - start, 5), "s", end=" ", ) ####################################################################### # We compare with the unstable, naive computation "Log of Sum of Exp": my_routine2 = Genred( "Exp(" + formula + ")", variables, reduction_op="Sum", axis=1, dtype=dtype ) c2 = torch.log(my_routine2(x, y, a, p, backend="CPU")) print("(relative error: ", ((c2 - c).norm() / c.norm()).item(), ")") # Plot the results next to each other: plt.plot(c.detach().cpu().numpy()[:40], "-", label="KeOps - Stable") plt.plot(c2.detach().cpu().numpy()[:40], "--", label="KeOps - Unstable") plt.legend(loc="lower right") plt.tight_layout() plt.show() #################################################################### # Compute the gradient # -------------------- # Now, let's compute the gradient of :math:`c` with # respect to :math:`y`. Since :math:`c` is not scalar valued, # its "gradient" :math:`\partial c` should be understood as the adjoint of the # differential operator, i.e. as the linear operator that: # # - takes as input a new tensor :math:`e` with the shape of :math:`c` # - outputs a tensor :math:`g` with the shape of :math:`y` # # such that for all variation :math:`\delta y` of :math:`y` we have: # # .. math:: # # \langle \text{d} c . \delta y , e \rangle = \langle g , \delta y \rangle = \langle \delta y , \partial c . 
e \rangle # # Backpropagation is all about computing the tensor :math:`g=\partial c . e` efficiently, for arbitrary values of :math:`e`: # Declare a new tensor of shape (M,1) used as the input of the gradient operator. # It can be understood as a "gradient with respect to the output c" # and is thus called "grad_output" in the documentation of PyTorch. e = torch.rand_like(c) # Call the gradient op: start = time.time() g = grad(c, y, e)[0] # PyTorch remark : grad(c, y, e) alone outputs a length 1 tuple, hence the need for [0] at the end. print( "Time to compute gradient of convolution operation on the cpu: ", round(time.time() - start, 5), "s", end=" ", ) #################################################################### # We compare with gradient of Log of Sum of Exp: g2 = grad(c2, y, e)[0] print("(relative error: ", ((g2 - g).norm() / g.norm()).item(), ")") # Plot the results next to each other: plt.plot(g.detach().cpu().numpy()[:40], "-", label="KeOps - Stable") plt.plot(g2.detach().cpu().numpy()[:40], "--", label="KeOps - Unstable") plt.legend(loc="lower right") plt.tight_layout() plt.show() #################################################################### # Same operations performed on the Gpu # ------------------------------------ # # Of course, this will only work if you own a Gpu... if torch.cuda.is_available(): # first transfer data on gpu pc, ac, xc, yc, ec = p.cuda(), a.cuda(), x.cuda(), y.cuda(), e.cuda() # then call the operations start = time.time() c3 = my_routine(xc, yc, ac, pc, backend="GPU") print( "Time to compute convolution operation on the gpu:", round(time.time() - start, 5), "s ", end="", ) print("(relative error:", float(torch.abs((c2 - c3.cpu()) / c2).mean()), ")") start = time.time() g3 = grad(c3, yc, ec)[0] print( "Time to compute gradient of convolution operation on the gpu:", round(time.time() - start, 5), "s ", end="", ) print("(relative error:", float(torch.abs((g2 - g3.cpu()) / g2).mean()), ")") # Plot the results next to each other: plt.plot(c.detach().cpu().numpy()[:40], "-", label="KeOps - CPU") plt.plot(c3.detach().cpu().numpy()[:40], "--", label="KeOps - GPU") plt.legend(loc="lower right") plt.tight_layout() plt.show() # Plot the results next to each other: plt.plot(g.detach().cpu().numpy()[:40], "-", label="KeOps - CPU") plt.plot(g3.detach().cpu().numpy()[:40], "--", label="KeOps - GPU") plt.legend(loc="lower right") plt.tight_layout() plt.show()
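The stable LogSumExp reduction used above relies on the classic shift trick, log(sum(exp(v))) = m + log(sum(exp(v - m))) with m = max(v), so that exp never overflows. A plain NumPy sketch, independent of KeOps:

import numpy as np


def logsumexp(v):
    m = v.max()
    return m + np.log(np.exp(v - m).sum())  # exp(v - m) <= 1, no overflow


v = np.array([1000.0, 1001.0])
print(np.log(np.exp(v).sum()))  # naive: exp(1000) overflows to inf
print(logsumexp(v))             # stable: ~1001.3133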
66b4640024fb8f4ea96bed47af7550756bbb5147
2bb7bc07df02a17735c2cacc7b2ba0c6de77b63c
/tests/mesos_test.py
38f99769551d2bb7e5d260c09c7080b6fdb38904
[ "Apache-2.0" ]
permissive
Yelp/Tron
2c30a301055a732c3b33a39e05dbdcfc84ac8e02
958a2e22a6ac733cba043bc4238f3bf2b8048f4b
refs/heads/master
2023-08-29T11:35:11.716532
2023-08-21T19:27:45
2023-08-21T19:27:45
899,771
226
53
NOASSERTION
2023-08-21T19:26:45
2010-09-09T20:54:04
Python
UTF-8
Python
false
false
23,731
py
mesos_test.py
from collections import namedtuple
from unittest import mock

import staticconf.testing
from testifycompat import assert_equal
from testifycompat import setup_teardown
from testifycompat import TestCase

from tron.mesos import MesosCluster
from tron.mesos import MesosClusterRepository
from tron.mesos import MesosTask


class TestMesosClusterRepository(TestCase):
    @setup_teardown
    def mock_cluster(self):
        # Ensure a different mock is returned each time the class is instantiated
        def init_cluster(*args, **kwargs):
            return mock.MagicMock(spec_set=MesosCluster)

        with mock.patch(
            "tron.mesos.MesosCluster", side_effect=init_cluster, autospec=True,
        ) as self.cluster_cls:
            yield

    def test_get_cluster_repeated_mesos_address(self):
        first = MesosClusterRepository.get_cluster("master-a.com")
        second = MesosClusterRepository.get_cluster("master-a.com")
        assert_equal(first, second)
        assert_equal(self.cluster_cls.call_count, 1)

    def test_shutdown(self):
        clusters = [MesosClusterRepository.get_cluster(address) for address in ["a", "b", "c"]]
        assert_equal(self.cluster_cls.call_count, 3)
        MesosClusterRepository.shutdown()
        for cluster in clusters:
            assert_equal(cluster.stop.call_count, 1)

    def test_configure(self):
        clusters = [MesosClusterRepository.get_cluster(address) for address in ["d", "e"]]
        mock_volume = mock.Mock()
        options = mock.Mock(
            master_port=5000,
            secret="/dev/null",
            principal="fake-principal",
            role="tron",
            enabled=False,
            default_volumes=[mock_volume],
            dockercfg_location="auth",
            offer_timeout=1000,
        )
        with mock.patch(
            "tron.mesos.get_secret_from_file", autospec=True, return_value="test-secret",
        ):
            MesosClusterRepository.configure(options)

        expected_volume = mock_volume._asdict.return_value
        for cluster in clusters:
            cluster.set_enabled.assert_called_once_with(False)
            cluster.configure_tasks.assert_called_once_with(
                default_volumes=[expected_volume],
                dockercfg_location="auth",
                offer_timeout=1000,
            )

        # The next cluster we get should be initialized with the same settings
        MesosClusterRepository.get_cluster("f")
        self.cluster_cls.assert_called_with(
            mesos_address="f",
            mesos_master_port=5000,
            secret="test-secret",
            principal="fake-principal",
            mesos_role="tron",
            framework_id=None,
            enabled=False,
            default_volumes=[expected_volume],
            dockercfg_location="auth",
            offer_timeout=1000,
        )


def mock_task_event(task_id, platform_type, raw=None, terminal=False, success=False, **kwargs):
    return mock.MagicMock(
        kind="task",
        task_id=task_id,
        platform_type=platform_type,
        raw=raw or {},
        terminal=terminal,
        success=success,
        **kwargs,
    )


class TestMesosTask(TestCase):
    @setup_teardown
    def setup(self):
        TaskConfig = namedtuple("TaskConfig", "cmd task_id cpus mem disk env")
        self.action_run_id = "my_service.job.1.action"
        self.task_id = "123abcuuid"
        with mock.patch(
            "tron.mesos.logging.getLogger",
            return_value=mock.Mock(handlers=[mock.Mock()]),
            autospec=None,
        ):
            self.task = MesosTask(
                id=self.action_run_id,
                task_config=TaskConfig(
                    cmd="echo hello world",
                    task_id=self.task_id,
                    cpus=0.1,
                    mem=100,
                    disk=100,
                    env={
                        "INITIAL_VAR": "baz",
                        "AWS_SECRET_ACCESS_KEY": "THISISASECRET",
                        "SOME_VAR": "bar",
                        "AWS_ACCESS_KEY_ID": "THISISASECRETTOO",
                        "SOME_OTHER_VAR": "foo",
                    },
                ),
            )
            yield

    def test_aws_credentials_redacted(self):
        assert all(["THISISASECRET" not in text[0][0] for text in self.task.log.info.call_args_list])
        assert all(["foo" in text[0][0] for text in self.task.log.info.call_args_list])
        assert all(["bar" in text[0][0] for text in self.task.log.info.call_args_list])
        assert all(["baz" in text[0][0] for text in self.task.log.info.call_args_list])

    def test_handle_staging(self):
        event = mock_task_event(task_id=self.task_id, platform_type="staging")
        self.task.handle_event(event)
        assert self.task.state == MesosTask.PENDING

    def test_handle_starting(self):
        event = mock_task_event(task_id=self.task_id, platform_type="starting")
        self.task.handle_event(event)
        assert self.task.state == MesosTask.RUNNING

    def test_handle_running(self):
        event = mock_task_event(task_id=self.task_id, platform_type="running")
        self.task.handle_event(event)
        assert self.task.state == MesosTask.RUNNING

    def test_handle_running_for_other_task(self):
        event = mock_task_event(task_id="other321", platform_type="running")
        self.task.handle_event(event)
        assert self.task.state == MesosTask.PENDING

    def test_handle_finished(self):
        self.task.started()
        event = mock_task_event(task_id=self.task_id, platform_type="finished", terminal=True, success=True)
        self.task.handle_event(event)
        assert self.task.is_complete

    def test_handle_failed(self):
        self.task.started()
        event = mock_task_event(task_id=self.task_id, platform_type="failed", terminal=True, success=False)
        self.task.handle_event(event)
        assert self.task.is_failed
        assert self.task.is_done

    def test_handle_killed(self):
        self.task.started()
        event = mock_task_event(task_id=self.task_id, platform_type="killed", terminal=True, success=False)
        self.task.handle_event(event)
        assert self.task.is_failed
        assert self.task.is_done

    def test_handle_lost(self):
        self.task.started()
        event = mock_task_event(task_id=self.task_id, platform_type="lost", terminal=True, success=False)
        self.task.handle_event(event)
        assert self.task.is_unknown
        assert self.task.is_done

    def test_handle_error(self):
        self.task.started()
        event = mock_task_event(task_id=self.task_id, platform_type="error", terminal=True, success=False)
        self.task.handle_event(event)
        assert self.task.is_failed
        assert self.task.is_done

    def test_handle_terminal_event_offer_timeout(self):
        self.task.started()
        event = mock_task_event(
            task_id=self.task_id,
            platform_type=None,
            terminal=True,
            success=False,
            raw="failed due to offer timeout",
            message="stop",
        )
        self.task.handle_event(event)
        assert self.task.is_failed
        assert self.task.is_done

    def test_handle_success_sequence(self):
        self.task.handle_event(mock_task_event(task_id=self.task_id, platform_type="staging"))
        self.task.handle_event(mock_task_event(task_id=self.task_id, platform_type="starting"))
        self.task.handle_event(mock_task_event(task_id=self.task_id, platform_type="running"))
        self.task.handle_event(
            mock_task_event(task_id=self.task_id, platform_type="finished", terminal=True, success=True),
        )
        assert self.task.is_complete

    def test_log_event_error(self):
        with mock.patch.object(self.task, "log_event_info") as mock_log_event, mock.patch.object(
            self.task.log, "warning",
        ) as mock_log:
            mock_log_event.side_effect = Exception
            self.task.handle_event(mock_task_event(task_id=self.task_id, platform_type="running"))
            assert mock_log_event.called
            assert mock_log.called
            assert self.task.state == MesosTask.RUNNING

    def test_get_event_logger_add_unique_handlers(self):
        """
        Ensures that only a single handler (for stderr) is added to the
        MesosTask event logger, to prevent duplicate log output.
        """
        # Call 2 times to make sure the 2nd call doesn't add another handler
        logger = self.task.get_event_logger()
        logger = self.task.get_event_logger()
        assert len(logger.handlers) == 1


class TestMesosCluster(TestCase):
    @setup_teardown
    def setup_mocks(self):
        with mock.patch("tron.mesos.PyDeferredQueue", autospec=True) as queue_cls, mock.patch(
            "tron.mesos.TaskProcessor", autospec=True,
        ) as processor_cls, mock.patch("tron.mesos.Subscription", autospec=True) as runner_cls, mock.patch(
            "tron.mesos.get_mesos_leader", autospec=True,
        ) as mock_get_leader:
            self.mock_queue = queue_cls.return_value
            self.mock_processor = processor_cls.return_value
            self.mock_runner_cls = runner_cls
            self.mock_runner_cls.return_value.configure_mock(
                stopping=False, TASK_CONFIG_INTERFACE=mock.Mock(),
            )
            self.mock_get_leader = mock_get_leader
            yield

    @mock.patch("tron.mesos.socket", autospec=True)
    def test_init(self, mock_socket):
        mock_socket.gethostname.return_value = "hostname"
        cluster = MesosCluster(
            mesos_address="mesos-cluster-a.me",
            mesos_master_port=5000,
            secret="my_secret",
            mesos_role="tron",
            framework_id="fake_framework_id",
            principal="fake-principal",
        )

        assert_equal(cluster.queue, self.mock_queue)
        assert_equal(cluster.processor, self.mock_processor)

        self.mock_get_leader.assert_called_once_with("mesos-cluster-a.me", 5000)
        self.mock_processor.executor_from_config.assert_has_calls(
            [
                mock.call(
                    provider="mesos_task",
                    provider_config={
                        "secret": "my_secret",
                        "principal": "fake-principal",
                        "mesos_address": self.mock_get_leader.return_value,
                        "role": "tron",
                        "framework_name": "tron-hostname",
                        "framework_id": "fake_framework_id",
                        "failover": True,
                    },
                ),
                mock.call(provider="logging", provider_config=mock.ANY),
            ]
        )
        self.mock_runner_cls.assert_called_once_with(
            self.mock_processor.executor_from_config.return_value, self.mock_queue,
        )
        assert_equal(cluster.runner, self.mock_runner_cls.return_value)

        get_event_deferred = cluster.deferred
        assert_equal(get_event_deferred, self.mock_queue.get.return_value)
        get_event_deferred.addCallback.assert_has_calls(
            [mock.call(cluster._process_event), mock.call(cluster.handle_next_event)]
        )

    def test_init_disabled(self):
        cluster = MesosCluster("mesos-cluster-a.me", enabled=False)
        assert_equal(cluster.queue, self.mock_queue)
        assert_equal(cluster.processor, self.mock_processor)
        assert_equal(self.mock_processor.executor_from_config.call_count, 0)
        assert cluster.runner is None

    def test_set_enabled_off(self):
        cluster = MesosCluster("mesos-cluster-a.me", enabled=True)
        mock_task = mock.Mock()
        cluster.tasks = {"task": mock_task}
        cluster.set_enabled(False)
        assert not cluster.enabled
        assert cluster.runner.stop.call_count == 1
        assert cluster.tasks == {}
        assert mock_task.exited.call_count == 1

    def test_set_enabled_on(self):
        cluster = MesosCluster("mesos-cluster-a.me", enabled=False)
        cluster.set_enabled(True)
        assert_equal(cluster.enabled, True)
        # Basically the same as regular initialization
        assert_equal(self.mock_processor.executor_from_config.call_count, 2)
        self.mock_runner_cls.assert_called_once_with(
            self.mock_processor.executor_from_config.return_value, self.mock_queue,
        )
        assert_equal(cluster.runner, self.mock_runner_cls.return_value)

        get_event_deferred = cluster.deferred
        assert_equal(get_event_deferred, self.mock_queue.get.return_value)
        get_event_deferred.addCallback.assert_has_calls(
            [mock.call(cluster._process_event), mock.call(cluster.handle_next_event)]
        )

    def test_set_enabled_on_already(self):
        cluster = MesosCluster("mesos-cluster-a.me", enabled=True)
        cluster.set_enabled(True)
        assert_equal(cluster.enabled, True)
        # The runner should only have been created once
        assert_equal(self.mock_runner_cls.call_count, 1)

    def test_configure_tasks(self):
        cluster = MesosCluster(
            "mesos-cluster-a.me", default_volumes=[], dockercfg_location="first", offer_timeout=60,
        )
        assert_equal(cluster.default_volumes, [])
        assert_equal(cluster.dockercfg_location, "first")
        assert_equal(cluster.offer_timeout, 60)

        expected_volumes = [{"container_path": "/tmp", "host_path": "/host", "mode": "RO"}]
        cluster.configure_tasks(
            default_volumes=expected_volumes, dockercfg_location="second", offer_timeout=300,
        )
        assert_equal(cluster.default_volumes, expected_volumes)
        assert_equal(cluster.dockercfg_location, "second")
        assert_equal(cluster.offer_timeout, 300)

    def test_submit(self):
        mock_clusterman_metrics = mock.MagicMock()
        cluster = MesosCluster("mesos-cluster-a.me")
        mock_task = mock.MagicMock(get_config=mock.Mock(return_value={"environment": {}}))
        mock_task.get_mesos_id.return_value = "this_task"
        with mock.patch(
            "tron.mesos.get_clusterman_metrics", return_value=(mock_clusterman_metrics), autospec=True,
        ):
            cluster.submit(mock_task)

        assert "this_task" in cluster.tasks
        assert cluster.tasks["this_task"] == mock_task
        cluster.runner.run.assert_called_once_with(mock_task.get_config.return_value)
        assert mock_clusterman_metrics.ClustermanMetricsBotoClient.call_count == 0

    def test_submit_with_clusterman(self):
        mock_clusterman_metrics = mock.MagicMock()
        cluster = MesosCluster("mesos-cluster-a.me")
        mock_task = mock.MagicMock(
            get_config=mock.Mock(
                return_value={
                    "environment": {
                        "CLUSTERMAN_RESOURCES": '{"required_cpus|blah=x": 4}',
                        "EXECUTOR_CLUSTER": "fake-cluster",
                        "EXECUTOR_POOL": "fake-pool",
                    },
                },
            ),
        )
        mock_task.get_mesos_id.return_value = "this_task"
        with mock.patch(
            "tron.mesos.get_clusterman_metrics", return_value=mock_clusterman_metrics, autospec=True,
        ), staticconf.testing.MockConfiguration(
            {"clusters": {"fake-cluster": {"aws_region": "fake-region"}}}, namespace="clusterman",
        ):
            cluster.submit(mock_task)

        assert "this_task" in cluster.tasks
        assert cluster.tasks["this_task"] == mock_task
        cluster.runner.run.assert_called_once_with(mock_task.get_config.return_value)
        assert mock_clusterman_metrics.ClustermanMetricsBotoClient.call_count == 1

    def test_submit_disabled(self):
        cluster = MesosCluster("mesos-cluster-a.me", enabled=False)
        mock_task = mock.MagicMock()
        mock_task.get_mesos_id.return_value = "this_task"
        with mock.patch(
            "tron.mesos.get_clusterman_metrics", return_value=(None, None), autospec=True,
        ):
            cluster.submit(mock_task)

        assert "this_task" not in cluster.tasks
        mock_task.exited.assert_called_once_with(1)

    def test_recover(self):
        cluster = MesosCluster("mesos-cluster-a.me")
        mock_task = mock.MagicMock()
        mock_task.get_mesos_id.return_value = "this_task"
        cluster.recover(mock_task)
        assert "this_task" in cluster.tasks
        assert cluster.tasks["this_task"] == mock_task
        cluster.runner.reconcile.assert_called_once_with(mock_task.get_config.return_value)
        assert mock_task.started.call_count == 1

    def test_recover_disabled(self):
        cluster = MesosCluster("mesos-cluster-a.me", enabled=False)
        mock_task = mock.MagicMock()
        mock_task.get_mesos_id.return_value = "this_task"
        cluster.recover(mock_task)
        assert "this_task" not in cluster.tasks
        mock_task.exited.assert_called_once_with(None)

    @mock.patch("tron.mesos.MesosTask", autospec=True)
    def test_create_task_defaults(self, mock_task):
        cluster = MesosCluster("mesos-cluster-a.me")
        mock_serializer = mock.MagicMock()
        task = cluster.create_task(
            action_run_id="action_c",
            command="echo hi",
            cpus=1,
            mem=10,
            disk=20,
            constraints=[],
            docker_image="container:latest",
            docker_parameters=[],
            env={"TESTING": "true"},
            extra_volumes=[],
            serializer=mock_serializer,
        )
        cluster.runner.TASK_CONFIG_INTERFACE.assert_called_once_with(
            name="action_c",
            cmd="echo hi",
            cpus=1,
            mem=10,
            disk=20,
            constraints=[],
            image="container:latest",
            docker_parameters=[],
            environment={"TESTING": "true"},
            volumes=[],
            uris=[],
            offer_timeout=None,
        )
        assert_equal(task, mock_task.return_value)
        mock_task.assert_called_once_with(
            "action_c", cluster.runner.TASK_CONFIG_INTERFACE.return_value, mock_serializer,
        )

    @mock.patch("tron.mesos.MesosTask", autospec=True)
    def test_create_task_with_task_id(self, mock_task):
        cluster = MesosCluster("mesos-cluster-a.me")
        mock_serializer = mock.MagicMock()
        task_id = "task.0123-fabc"
        task = cluster.create_task(
            action_run_id="action_c",
            command="echo hi",
            cpus=1,
            mem=10,
            disk=20,
            constraints=[],
            docker_image="container:latest",
            docker_parameters=[],
            env={"TESTING": "true"},
            extra_volumes=[],
            serializer=mock_serializer,
            task_id=task_id,
        )
        assert cluster.runner.TASK_CONFIG_INTERFACE.call_count == 1
        assert task == mock_task.return_value

        task_config = cluster.runner.TASK_CONFIG_INTERFACE.return_value
        task_config.set_task_id.assert_called_once_with(task_id)
        mock_task.assert_called_once_with(
            "action_c", task_config.set_task_id.return_value, mock_serializer,
        )

    @mock.patch("tron.mesos.MesosTask", autospec=True)
    def test_create_task_disabled(self, mock_task):
        # If Mesos is disabled, should return None
        cluster = MesosCluster("mesos-cluster-a.me", enabled=False)
        mock_serializer = mock.MagicMock()
        task = cluster.create_task(
            action_run_id="action_c",
            command="echo hi",
            cpus=1,
            mem=10,
            disk=20,
            constraints=[],
            docker_image="container:latest",
            docker_parameters=[],
            env={"TESTING": "true"},
            extra_volumes=[],
            serializer=mock_serializer,
        )
        assert task is None

    @mock.patch("tron.mesos.MesosTask", autospec=True)
    def test_create_task_with_configuration(self, mock_task):
        cluster = MesosCluster(
            "mesos-cluster-a.me",
            default_volumes=[
                {"container_path": "/tmp", "host_path": "/host", "mode": "RO"},
                {"container_path": "/other", "host_path": "/other", "mode": "RW"},
            ],
            dockercfg_location="some_place",
            offer_timeout=202,
        )
        mock_serializer = mock.MagicMock()
        task = cluster.create_task(
            action_run_id="action_c",
            command="echo hi",
            cpus=1,
            mem=10,
            disk=20,
            constraints=[],
            docker_image="container:latest",
            docker_parameters=[],
            env={"TESTING": "true"},
            # This should override the default volume for /tmp
            extra_volumes=[{"container_path": "/tmp", "host_path": "/custom", "mode": "RW"}],
            serializer=mock_serializer,
        )
        cluster.runner.TASK_CONFIG_INTERFACE.assert_called_once_with(
            name="action_c",
            cmd="echo hi",
            cpus=1,
            mem=10,
            disk=20,
            constraints=[],
            image="container:latest",
            docker_parameters=[],
            environment={"TESTING": "true"},
            volumes=[
                {"container_path": "/tmp", "host_path": "/custom", "mode": "RW"},
                {"container_path": "/other", "host_path": "/other", "mode": "RW"},
            ],
            uris=["some_place"],
            offer_timeout=202,
        )
        assert_equal(task, mock_task.return_value)
        mock_task.assert_called_once_with(
            "action_c", cluster.runner.TASK_CONFIG_INTERFACE.return_value, mock_serializer,
        )

    def test_process_event_task(self):
        event = mock_task_event("this_task", "some_platform_type")
        cluster = MesosCluster("mesos-cluster-a.me")
        mock_task = mock.MagicMock(spec_set=MesosTask)
        mock_task.get_mesos_id.return_value = "this_task"
        cluster.tasks["this_task"] = mock_task
        cluster._process_event(event)
        mock_task.handle_event.assert_called_once_with(event)

    def test_process_event_task_id_invalid(self):
        event = mock_task_event("other_task", "some_platform_type")
        cluster = MesosCluster("mesos-cluster-a.me")
        mock_task = mock.MagicMock(spec_set=MesosTask)
        mock_task.get_mesos_id.return_value = "this_task"
        cluster.tasks["this_task"] = mock_task
        cluster._process_event(event)
        assert_equal(mock_task.handle_event.call_count, 0)

    def test_process_event_control_stop(self):
        event = mock.MagicMock(kind="control", message="stop")
        cluster = MesosCluster("mesos-cluster-a.me")
        cluster._process_event(event)
        assert cluster.runner.stop.call_count == 1
        assert cluster.deferred is None

    def test_stop_default(self):
        # When stopping, tasks should not exit. They will be recovered
        cluster = MesosCluster("mesos-cluster-a.me")
        mock_task = mock.MagicMock()
        cluster.tasks = {"task_id": mock_task}
        cluster.stop()
        assert cluster.runner.stop.call_count == 1
        assert cluster.deferred is None
        assert mock_task.exited.call_count == 0
        assert len(cluster.tasks) == 1

    def test_stop_disabled(self):
        # Shouldn't raise an error
        cluster = MesosCluster("mesos-cluster-a.me", enabled=False)
        cluster.stop()

    def test_kill(self):
        cluster = MesosCluster("mesos-cluster-a.me")
        cluster.kill("fake_task_id")
        cluster.runner.kill.assert_called_once_with("fake_task_id")
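# Aside (a standalone sketch, not part of this test suite): the tests above
# lean on mock.MagicMock(spec_set=MesosCluster). With spec_set, the mock only
# exposes attributes the real class has, so typos in test code fail loudly
# instead of silently passing. Hypothetical Example class for illustration:
from unittest import mock


class Example:
    def stop(self):
        ...


m = mock.MagicMock(spec_set=Example)
m.stop()  # fine: matches the real API
try:
    m.nonexistent  # raises AttributeError under spec_set
except AttributeError:
    pass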
3e89fd6081b88f9ed651c3874e663036b2fbe7a5
3779caa500c53f0ee12baa039cda4cde1e7391b9
/Generate Parentheses.py
25ecc56e0c1cf005a52936aff266319d5497ccbb
[]
no_license
kongzhidea/leetcode
924d66bcbc8b7b0f793399184870d4f8da309dba
57fc31718ee9cd6a4282c752382e538e42ff02ce
refs/heads/master
2021-10-29T07:07:41.603363
2021-10-27T13:49:29
2021-10-27T13:49:29
63,241,496
125
2
null
null
null
null
UTF-8
Python
false
false
455
py
Generate Parentheses.py
class Solution:
    def __init__(self):
        self.ret = []

    # @param an integer
    # @return a list of string
    def generateParenthesis(self, n):
        self.dfs("", 0, 0, n)
        return self.ret

    def dfs(self, tmp, lc, rc, n):
        # lc/rc count the opening/closing parentheses placed so far
        if lc + rc == n * 2:
            self.ret.append(tmp)
            return
        if lc < n:
            self.dfs(tmp + "(", lc + 1, rc, n)
        if rc < n and lc > rc:
            self.dfs(tmp + ")", lc, rc + 1, n)
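# Quick usage sketch (not part of the original solution): for n=2 the DFS
# should enumerate exactly the two balanced strings. Note that the result
# list lives on the instance, so use a fresh Solution per call.
if __name__ == "__main__":
    assert sorted(Solution().generateParenthesis(2)) == ["(())", "()()"]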
f384559345601f6d01c33bc8445b984e1ec3ff7b
db6f3e6486ad8367c62163a4f124da185a64ab5d
/scripts/type_extractor/type_extractor/lti_types.py
7c0af5b8c6dcb2b5a4261523558e71c2035be2c5
[ "MIT", "Zlib", "JSON", "LicenseRef-scancode-unknown-license-reference", "MPL-2.0", "BSD-3-Clause", "GPL-2.0-only", "NCSA", "WTFPL", "BSL-1.0", "LicenseRef-scancode-proprietary-license", "Apache-2.0" ]
permissive
avast/retdec
c199854e06454a0e41f5af046ba6f5b9bfaaa4b4
b9791c884ad8f5b1c1c7f85c88301316010bc6f2
refs/heads/master
2023-08-31T16:03:49.626430
2023-08-07T08:15:07
2023-08-14T14:09:09
113,967,646
3,111
483
MIT
2023-08-17T05:02:35
2017-12-12T09:04:24
C++
UTF-8
Python
false
false
1,654
py
lti_types.py
"""Dictionary of types used in headers and types for lti.""" LTI_TYPES = { # default C types and some typedefs '__int64': 'i64', 'bool': 'i1', 'double': 'double', 'long double': 'double', 'float': 'float', 'char': 'i8', 'int': 'i32', 'long': 'i32', 'long int': 'i64', 'long long': 'i64', 'long long int': 'i64', 'short': 'i16', 'short int': 'i16', 'size_t': 'i32', 'va_list': '...', 'void': 'void', 'wchar_t': '%wchar_t', # WINAPI 'BOOL': 'i1', 'COLORREF': 'i32', 'DWORD': 'i32', 'DWORD_PTR': 'i32*', 'FINDEX_INFO_LEVELS': 'i32', 'FINDEX_SEARCH_OPS': 'i32', 'HANDLE': 'i32*', 'HBITMAP': 'i32*', 'HDC': 'i32*', 'HGLOBAL': 'i32*', 'HKEY': 'i32*', 'HLOCAL': 'i32*', 'HMENU': 'i32*', 'HPALETTE': 'i32*', 'HRESULT': 'i32', 'HWND': 'i32*', 'INT': 'i32', 'INT_PTR': 'i32*', 'LONG': 'i32', 'LPARAM': 'i32*', 'LPCTSTR': '%wchar_t*', 'LPCVOID': 'i8*', 'LPSTR': '%wchar_t*', 'LPTSTR': '%wchar_t*', 'LPVOID': 'i8*', 'LRESULT': 'i32*', 'PFORMAT_STRING': 'i8*', 'PSTR': '%wchar_t*', 'PDWORD_PTR': 'i32*', 'SIZE_T': 'i32*', 'UINT': 'i32', 'UINT_PTR': 'i32*', 'ULONG_PTR': 'i32*', 'USHORT': 'i16', 'VOID': 'void', 'WORD': 'i16', 'WPARAM': 'i32*', # modifiers '*': '*', 'const': '', 'signed': '', 'unsigned': '', # specials '_In_': '', '_In_opt_': '', '_Inout_': 'OUT ', '_Inout_opt_': 'OUT ', '_Out_': 'OUT ', '_Out_opt_': 'OUT ', '_Reserved_': '', # structs 'struct': '%struct.', }
7cf8dfe4467ca1a78dd0508e275bf8d196d2c8de
fce81b804cae23f525a5ad4370b684bf0dc531a5
/numpy/matrixlib/defmatrix.pyi
9d0d1ee50b6600bce80f1f5b1363e5ee3102a02a
[ "Zlib", "BSD-3-Clause", "MIT", "Apache-2.0" ]
permissive
numpy/numpy
ba2abcc1d2d46affbb6aabe5aed6407b4b57507e
dc2ff125493777a1084044e6cd6857a42ee323d4
refs/heads/main
2023-09-05T10:10:52.767363
2023-09-04T18:03:29
2023-09-04T18:03:29
908,607
25,725
11,968
BSD-3-Clause
2023-09-14T21:26:09
2010-09-13T23:02:39
Python
UTF-8
Python
false
false
451
pyi
defmatrix.pyi
from collections.abc import Sequence, Mapping
from typing import Any

from numpy import matrix as matrix
from numpy._typing import ArrayLike, DTypeLike, NDArray

__all__: list[str]

def bmat(
    obj: str | Sequence[ArrayLike] | NDArray[Any],
    ldict: None | Mapping[str, Any] = ...,
    gdict: None | Mapping[str, Any] = ...,
) -> matrix[Any, Any]: ...

def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) -> matrix[Any, Any]: ...

mat = asmatrix
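# For context only (this is runtime code, not part of the .pyi stub): a
# minimal sketch of the two public functions typed above, using the numpy API.
import numpy as np

A = np.eye(2)
m = np.asmatrix(A)        # matrix view of an array-like
b = np.bmat("A A; A A")   # 4x4 block matrix built from the local name "A"
assert b.shape == (4, 4) and isinstance(m, np.matrix)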
986f6d4f1b7c482ac11dd9bf836c2d8d71bc90bf
974d04d2ea27b1bba1c01015a98112d2afb78fe5
/test/cinn/ops/test_cholesky_op.py
661c51e17980722d9f93940d010ef8f6a8149613
[ "Apache-2.0" ]
permissive
PaddlePaddle/Paddle
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
refs/heads/develop
2023-08-17T21:27:30.568889
2023-08-17T12:38:22
2023-08-17T12:38:22
65,711,522
20,414
5,891
Apache-2.0
2023-09-14T19:20:51
2016-08-15T06:59:08
C++
UTF-8
Python
false
false
5,561
py
test_cholesky_op.py
#!/usr/bin/env python3

# Copyright (c) 2021 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
from cinn.common import is_compiled_with_cuda
from cinn.frontend import NetBuilder
from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper

import paddle


@OpTestTool.skip_if(
    not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestCholeskyOp(OpTest):
    def setUp(self):
        print(f"\nRunning {self.__class__.__name__}: {self.case}")
        self.inputs = {}
        self.prepare_inputs()

    def prepare_inputs(self):
        # Build a symmetric positive definite input by multiplying a random
        # matrix with its transpose, optionally stacked along a batch dim.
        if "batch_dim" in self.case and self.case["batch_dim"] > 0:
            x = []
            for _ in range(self.case["batch_dim"]):
                matrix = self.random(self.case["shape"], self.case["dtype"], -1.0, 1.0)
                matrix_t = np.transpose(matrix, [1, 0])
                x.append(np.dot(matrix, matrix_t))
            x = np.stack(x)
        else:
            matrix = self.random(self.case["shape"], self.case["dtype"], -1.0, 1.0)
            matrix_t = np.transpose(matrix, [1, 0])
            x = np.dot(matrix, matrix_t)
        self.inputs = {"x": x}
        self.upper = self.case["upper"]

    def build_paddle_program(self, target):
        x = paddle.to_tensor(self.inputs["x"], stop_gradient=False)
        y = paddle.linalg.cholesky(x, upper=self.upper)
        self.paddle_outputs = [y]

    def build_cinn_program(self, target):
        builder = NetBuilder("cholesky")
        x = builder.create_input(
            self.nptype2cinntype(self.inputs["x"].dtype),
            self.inputs["x"].shape,
            "x",
        )
        out = builder.cholesky(x, self.upper)
        prog = builder.build()
        res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out], passes=[])
        self.cinn_outputs = [res[0]]

    def test_check_results(self):
        self.check_outputs_and_grads()


class TestCholeskyOpShape(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestCholeskyOpShape"
        self.cls = TestCholeskyOp
        self.inputs = [
            {"shape": [1, 1]},
            {"shape": [8, 8]},
            {"shape": [10, 10]},
        ]
        self.dtypes = [{"dtype": "float32"}]
        self.attrs = [{"upper": False}]


class TestCholeskyOpLargeShape(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestCholeskyOpLargeShape"
        self.cls = TestCholeskyOp
        self.inputs = [
            {"shape": [1024, 1024]},
            {"shape": [2048, 2048]},
        ]
        self.dtypes = [{"dtype": "float64"}]
        self.attrs = [
            {"upper": False, "batch_dim": 2},
            {"upper": False, "batch_dim": 4},
            {"upper": True, "batch_dim": 8},
        ]


class TestCholeskyOpDtype(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestCholeskyOpDtype"
        self.cls = TestCholeskyOp
        self.inputs = [
            {"shape": [1, 1]},
            {"shape": [8, 8]},
            {"shape": [10, 10]},
        ]
        self.dtypes = [
            {"dtype": "float32"},
            {"dtype": "float64"},
        ]
        self.attrs = [{"upper": False}]


class TestCholeskyOpBatch(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestCholeskyOpBatch"
        self.cls = TestCholeskyOp
        self.inputs = [
            {"shape": [1, 1]},
            {"shape": [8, 8]},
            {"shape": [10, 10]},
        ]
        self.dtypes = [{"dtype": "float32"}]
        self.attrs = [
            {"upper": False, "batch_dim": 1},
            {"upper": False, "batch_dim": 4},
            {"upper": False, "batch_dim": 8},
        ]


class TestCholeskyOpAttrs(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestCholeskyOpAttrs"
        self.cls = TestCholeskyOp
        self.inputs = [
            {"shape": [1, 1]},
            {"shape": [8, 8]},
            {"shape": [10, 10]},
        ]
        self.dtypes = [
            {"dtype": "float32"},
            {"dtype": "float64"},
        ]
        self.attrs = [{"upper": True}]


if __name__ == "__main__":
    TestCholeskyOpShape().run()
    TestCholeskyOpLargeShape().run()
    TestCholeskyOpDtype().run()
    TestCholeskyOpBatch().run()
    TestCholeskyOpAttrs().run()
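# Aside (an illustrative numpy-only sketch, independent of CINN/Paddle): the
# tests build inputs as M @ M.T because that product is symmetric and, for a
# random square M, positive definite almost surely, which is exactly the
# precondition for a Cholesky factorization x = L @ L.T.
import numpy as np

rng = np.random.default_rng(0)
m = rng.standard_normal((8, 8))
x = m @ m.T                       # symmetric positive definite input
L = np.linalg.cholesky(x)         # lower-triangular factor
assert np.allclose(L @ L.T, x)    # reconstruction property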
7558dc4ae5eabc9c6d40c0ef406cc0a5df18ef13
29eacf3b29753d65d8ec0ab4a60ea1f7ddecbd68
/tests/api_workflow/test_api_workflow.py
1f5d3fac99ebd3152becbbc18175e01d4f0ea0ae
[ "MIT" ]
permissive
lightly-ai/lightly
5b655fe283b7cc2ddf1d7f5bd098603fc1cce627
5650ee8d4057139acf8aa10c884d5d5cdc2ccb17
refs/heads/master
2023-08-17T11:08:00.135920
2023-08-16T12:43:02
2023-08-16T12:43:02
303,705,119
2,473
229
MIT
2023-09-14T14:47:16
2020-10-13T13:02:56
Python
UTF-8
Python
false
false
4,903
py
test_api_workflow.py
import os
from unittest import mock

import numpy as np

import lightly
from tests.api_workflow import utils
from tests.api_workflow.mocked_api_workflow_client import (
    MockedApiWorkflowClient,
    MockedApiWorkflowSetup,
)


class TestApiWorkflow(MockedApiWorkflowSetup):
    def setUp(self) -> None:
        lightly.api.api_workflow_client.__version__ = lightly.__version__
        self.api_workflow_client = MockedApiWorkflowClient(token="token_xyz")

    @mock.patch.dict(os.environ, {"LIGHTLY_TOKEN": "token_xyz"})
    def test_init_with_env_token(self):
        MockedApiWorkflowClient()

    def test_error_if_init_without_token(self):
        # copy environment variables but remove LIGHTLY_TOKEN if it exists
        env_without_token = {k: v for k, v in os.environ.items() if k != "LIGHTLY_TOKEN"}
        with self.assertRaises(ValueError), mock.patch.dict(
            os.environ, env_without_token, clear=True
        ):
            MockedApiWorkflowClient()

    def test_error_if_version_is_incompatible(self):
        lightly.api.api_workflow_client.__version__ = "0.0.0"
        with self.assertWarns(UserWarning):
            MockedApiWorkflowClient(token="token_xyz")
        lightly.api.api_workflow_client.__version__ = lightly.__version__

    def test_dataset_id_nonexisting(self):
        self.api_workflow_client._datasets_api.reset()
        assert not hasattr(self.api_workflow_client, "_dataset_id")
        with self.assertWarns(UserWarning):
            dataset_id = self.api_workflow_client.dataset_id
        assert dataset_id == self.api_workflow_client._datasets_api.datasets[-1].id

    def test_dataset_id_existing(self):
        dataset_id = utils.generate_id()
        self.api_workflow_client._dataset_id = dataset_id
        assert self.api_workflow_client.dataset_id == dataset_id

    def test_set_dataset_id_existing(self):
        datasets = self.api_workflow_client.get_all_datasets()
        self.api_workflow_client.dataset_id = datasets[1].id

    def test_set_dataset_id_missing(self):
        with self.assertRaises(ValueError):
            self.api_workflow_client.dataset_id = "nonexisting-id"

    def test_reorder_random(self):
        no_random_tries = 100
        for _ in range(no_random_tries):
            numbers_to_choose_from = list(range(100))
            numbers_all = list(np.random.choice(numbers_to_choose_from, 100))
            filenames_on_server = [f"img_{i}" for i in numbers_all]
            api_workflow_client = MockedApiWorkflowClient(
                token="token_xyz", dataset_id="dataset_id_xyz"
            )
            api_workflow_client._mappings_api.sample_names = filenames_on_server

            numbers_in_tag = np.copy(numbers_all)
            np.random.shuffle(numbers_in_tag)
            filenames_for_list = [f"img_{i}" for i in numbers_in_tag]

            list_ordered = api_workflow_client._order_list_by_filenames(
                filenames_for_list, list_to_order=numbers_in_tag
            )
            list_desired_order = [i for i in numbers_all if i in numbers_in_tag]
            assert list_ordered == list_desired_order

    def test_reorder_manual(self):
        filenames_on_server = ["a", "b", "c"]
        api_workflow_client = MockedApiWorkflowClient(
            token="token_xyz", dataset_id="dataset_id_xyz"
        )
        api_workflow_client._mappings_api.sample_names = filenames_on_server

        filenames_for_list = ["c", "a", "b"]
        list_to_order = ["cccc", "aaaa", "bbbb"]
        list_ordered = api_workflow_client._order_list_by_filenames(
            filenames_for_list, list_to_order=list_to_order
        )
        list_desired_order = ["aaaa", "bbbb", "cccc"]
        assert list_ordered == list_desired_order

    def test_reorder_wrong_lengths(self):
        filenames_on_server = ["a", "b", "c"]
        api_workflow_client = MockedApiWorkflowClient(
            token="token_xyz", dataset_id="dataset_id_xyz"
        )
        api_workflow_client._mappings_api.sample_names = filenames_on_server

        filenames_for_list = ["c", "a", "b"]
        list_to_order = ["cccc", "aaaa", "bbbb"]

        with self.subTest("filenames_for_list wrong length"):
            with self.assertRaises(ValueError):
                api_workflow_client._order_list_by_filenames(
                    filenames_for_list[:-1], list_to_order
                )

        with self.subTest("list_to_order wrong length"):
            with self.assertRaises(ValueError):
                api_workflow_client._order_list_by_filenames(
                    filenames_for_list, list_to_order[:-1]
                )

        with self.subTest("filenames_for_list and list_to_order wrong length"):
            with self.assertRaises(ValueError):
                api_workflow_client._order_list_by_filenames(
                    filenames_for_list[:-1], list_to_order[:-1]
                )
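# Aside (a plain-Python reference sketch, not part of the suite): the contract
# the reorder tests exercise is that items come back in the order their
# filenames appear on the server. An equivalent standalone formulation:
filenames_on_server = ["a", "b", "c"]
filenames_for_list = ["c", "a", "b"]
list_to_order = ["cccc", "aaaa", "bbbb"]

rank = {name: i for i, name in enumerate(filenames_on_server)}
pairs = sorted(zip(filenames_for_list, list_to_order), key=lambda p: rank[p[0]])
assert [value for _, value in pairs] == ["aaaa", "bbbb", "cccc"]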
1e2aa85f0965aff2eedf0ef1f39775aa3d5919a0
529e713a78e82de2ae5d44cfb8ef209e0894d72a
/python-bitwise-operators/stegano/bitmap.py
cda7bc4536349b23ae92992756f8898795acefc9
[ "MIT" ]
permissive
realpython/materials
cd2f548276be2c82f134ca03eadb1cd279e0f26e
d2d62756d3854f54a12a767f2bf9470486c0ceef
refs/heads/master
2023-09-05T22:12:29.806738
2023-08-31T20:56:28
2023-08-31T20:56:28
132,374,697
4,678
6,482
MIT
2023-09-12T22:22:06
2018-05-06T20:46:18
HTML
UTF-8
Python
false
false
3,785
py
bitmap.py
""" Bitmap read/write operations. """ import pathlib from dataclasses import dataclass from itertools import islice from mmap import mmap, ACCESS_WRITE from struct import pack, unpack from typing import Any, Union, Iterator class Bitmap: """High-level interface to a bitmap file.""" def __init__(self, path: pathlib.Path) -> None: self._file = path.open(mode="r+b") self._file_bytes = mmap(self._file.fileno(), 0, access=ACCESS_WRITE) self._header = Header.from_bytes(self._file_bytes[:50]) def __enter__(self) -> "Bitmap": return self def __exit__(self, *args, **kwargs) -> None: self._file_bytes.close() self._file.close() def __getattr__(self, name: str) -> Any: return getattr(self._header, name) def __getitem__(self, offset: Union[int, slice]) -> Union[int, bytes]: return self._file_bytes[offset] def __setitem__( self, offset: Union[int, slice], value: Union[int, bytes] ) -> None: self._file_bytes[offset] = value @property def max_bytes(self) -> int: """The maximum number of bytes the bitmap can hide.""" return self.width * self.height * 3 @property def byte_offsets(self) -> Iterator[int]: """Return an iterator over byte offsets (skip the padding).""" start_index = self.pixels_offset end_index = self.pixels_offset + self.pixel_size_bytes scanline_bytes = self.pixel_size_bytes // self.height for scanline in range(start_index, end_index, scanline_bytes): yield from range(scanline, scanline + self.width * 3) @property def byte_slices(self) -> Iterator[slice]: """Generator iterator of 8-byte long slices.""" for byte_index in islice(self.byte_offsets, 0, self.max_bytes, 8): yield slice(byte_index, byte_index + 8) @property def reserved_field(self) -> int: """Return a little-endian 32-bit unsigned integer.""" return unsigned_int(self._file_bytes, 0x06) @reserved_field.setter def reserved_field(self, value: int) -> None: """Store a little-endian 32-bit unsigned integer.""" self._file_bytes.seek(0x06) self._file_bytes.write(pack("<I", value)) @dataclass class Header: """Bitmap metadata from the file header.""" signature: bytes file_size_bytes: int pixel_size_bytes: int pixels_offset: int width: int height: int bit_depth: int compressed: bool has_palette: bool def __post_init__(self): assert self.signature == b"BM", "Unknown file signature" assert not self.compressed, "Compression unsupported" assert not self.has_palette, "Color palette unsupported" assert self.bit_depth == 24, "Only 24-bit depth supported" @staticmethod def from_bytes(data: bytes) -> "Header": """Factory method to deserialize the header from bytes.""" return Header( signature=data[0x00:2], file_size_bytes=unsigned_int(data, 0x02), pixels_offset=unsigned_int(data, 0x0A), width=unsigned_int(data, 0x12), height=unsigned_int(data, 0x16), bit_depth=unsigned_short(data, 0x1C), compressed=unsigned_int(data, 0x1E) != 0, has_palette=unsigned_int(data, 0x2E) != 0, pixel_size_bytes=unsigned_int(data, 0x22), ) def unsigned_int(data: Union[bytes, mmap], offset: int) -> int: """Read a little-endian 32-bit unsigned integer.""" return unpack("<I", data[offset : offset + 4])[0] def unsigned_short(data: Union[bytes, mmap], offset: int) -> int: """Read a little-endian 16-bit unsigned integer.""" return unpack("<H", data[offset : offset + 2])[0]
b2bf48653dcc59084360de3c082c7dbc7d1211aa
dcb823e295bb94de99a89dd4d69314186b9351b2
/dreamcoder/domains/regex/main.py
4c23a3ac6b05be27dac65bbaddfcce9afbc5920d
[ "MIT" ]
permissive
ellisk42/ec
caae2ad9fa7892b2fc456f0d82ee4e3e394ebeb6
cb0e63f5c33cd2de360b791038b0f5272750270e
refs/heads/master
2023-07-05T22:31:26.762022
2022-03-16T17:45:10
2022-03-16T17:45:10
117,295,639
371
137
MIT
2023-02-16T20:24:12
2018-01-12T22:55:36
Slash
UTF-8
Python
false
false
14,146
py
main.py
# analog of list.py for regex tasks. Responsible for actually running the task.

from dreamcoder.domains.regex.makeRegexTasks import (
    makeOldTasks, makeLongTasks, makeShortTasks, makeWordTasks,
    makeNumberTasks, makeHandPickedTasks, makeNewTasks, makeNewNumberTasks,
)
from dreamcoder.domains.regex.regexPrimitives import (
    basePrimitives, altPrimitives, easyWordsPrimitives, alt2Primitives,
    concatPrimitives, reducedConcatPrimitives, strConstConcatPrimitives, PRC,
)
from dreamcoder.dreamcoder import explorationCompression, Task
from dreamcoder.grammar import Grammar
from dreamcoder.likelihoodModel import add_cutoff_values, add_string_constants
from dreamcoder.program import Abstraction, Application
from dreamcoder.type import tpregex
from dreamcoder.utilities import eprint, flatten, testTrainSplit, POSITIVEINFINITY

import random
import math
import pregex as pre
import os

try:
    from dreamcoder.recognition import RecurrentFeatureExtractor, JSONFeatureExtractor

    class LearnedFeatureExtractor(RecurrentFeatureExtractor):
        H = 64
        special = 'regex'

        def tokenize(self, examples):
            def sanitize(l):
                return [z if z in self.lexicon else "?"
                        for z_ in l
                        for z in (z_ if isinstance(z_, list) else [z_])]

            tokenized = []
            for xs, y in examples:
                if isinstance(y, list):
                    y = ["LIST_START"] + y + ["LIST_END"]
                else:
                    y = [y]
                y = sanitize(y)
                if len(y) > self.maximumLength:
                    return None

                serializedInputs = []
                for xi, x in enumerate(xs):
                    if isinstance(x, list):
                        x = ["LIST_START"] + x + ["LIST_END"]
                    else:
                        x = [x]
                    x = sanitize(x)
                    if len(x) > self.maximumLength:
                        return None
                    serializedInputs.append(x)

                tokenized.append((tuple(serializedInputs), y))

            return tokenized

        def __init__(self, tasks, testingTasks=[], cuda=False):
            self.lexicon = set(
                flatten((t.examples for t in tasks + testingTasks),
                        abort=lambda x: isinstance(x, str))
            ).union({"LIST_START", "LIST_END", "?"})

            self.num_examples_list = [len(t.examples) for t in tasks]

            # Calculate the maximum length
            self.maximumLength = POSITIVEINFINITY
            self.maximumLength = max(len(l)
                                     for t in tasks + testingTasks
                                     for xs, y in self.tokenize(t.examples)
                                     for l in [y] + [x for x in xs])

            super(LearnedFeatureExtractor, self).__init__(
                lexicon=list(self.lexicon),
                tasks=tasks,
                cuda=cuda,
                H=self.H,
                bidirectional=True,
            )
            self.parallelTaskOfProgram = False

        def taskOfProgram(self, p, t):
            num_examples = random.choice(self.num_examples_list)
            p = p.visit(ConstantInstantiateVisitor.SINGLE)
            preg = p.evaluate([])(pre.String(""))
            t = Task("Helm", t, [((), list(preg.sample())) for _ in range(num_examples)])
            return t
except Exception:
    pass

# in init: loop over tasks, save lengths


class ConstantInstantiateVisitor(object):
    def __init__(self):
        self.regexes = [
            pre.create(".+"),
            pre.create("\d+"),
            pre.create("\w+"),
            pre.create("\s+"),
            pre.create("\\u+"),
            pre.create("\l+"),
        ]

    def primitive(self, e):
        if e.name == "r_const":
            # return Primitive("STRING", e.tp, random.choice(self.words))
            s = random.choice(self.regexes).sample()  # random string const
            s = pre.String(s)
            e.value = PRC(s, arity=0)
        return e

    def invented(self, e):
        return e.body.visit(self)

    def index(self, e):
        return e

    def application(self, e):
        return Application(e.f.visit(self), e.x.visit(self))

    def abstraction(self, e):
        return Abstraction(e.body.visit(self))


# TODO fix
class MyJSONFeatureExtractor(JSONFeatureExtractor):
    N_EXAMPLES = 5

    def _featuresOfProgram(self, program, tp):
        try:
            preg = program.evaluate([])
            # if 'left_paren' in program.show(False):
            #     eprint("string_pregex:", string_pregex)
        except IndexError:
            # free variable
            return None
        except Exception as e:
            eprint("Exception during evaluation:", e)
            if "Attempt to evaluate fragment variable" in str(e):
                eprint("program (bc fragment error)", program)
            return None

        examples = []
        for _ in range(self.N_EXAMPLES * 5):  # oh this is arbitrary ig
            try:
                y = preg.sample()  # TODO
                # this line should keep inputs short, so that helmholtzbatch
                # can be large; allows it to try other samples
                # (could also return None off the bat... idk which is better)
                # if len(y) > 20:
                #     continue
                examples.append(y)
            except BaseException:
                continue
            if len(examples) >= self.N_EXAMPLES:
                break
        else:
            return None
        return examples  # changed to list_features(examples) from examples


def regex_options(parser):
    parser.add_argument("--maxTasks", type=int, default=500,
                        help="truncate tasks to fit within this boundary")
    parser.add_argument("--maxExamples", type=int, default=10,
                        help="truncate number of examples per task to fit within this boundary")
    parser.add_argument("--tasks", default="long", help="which tasks to use",
                        choices=["old", "short", "long", "words", "number",
                                 "handpicked", "new", "newNumber"])
    parser.add_argument("--primitives", default="concat",
                        help="Which primitive set to use",
                        choices=["base", "alt1", "easyWords", "alt2", "concat",
                                 "reduced", "strConst"])
    parser.add_argument("--extractor", type=str, default="learned",
                        choices=["hand", "deep", "learned", "json"])  # if i switch to json it breaks
    parser.add_argument("--split", metavar="TRAIN_RATIO", type=float, default=0.8,
                        help="split test/train")
    parser.add_argument("-H", "--hidden", type=int, default=256,
                        help="number of hidden units")
    parser.add_argument("--likelihoodModel", default="probabilistic",
                        help="likelihood Model",
                        choices=["probabilistic", "all-or-nothing"])
    parser.add_argument("--topk_use_map", dest="topk_use_only_likelihood",
                        action="store_false")
    parser.add_argument("--debug", dest="debug", action="store_true")
    parser.add_argument("--ll_cutoff", dest="use_ll_cutoff", nargs='*', default=False,
                        help="use ll cutoff for training tasks (for probabilistic "
                             "likelihood model only). default is False,")
    parser.add_argument("--use_str_const", action="store_true",
                        help="use string constants")

    """parser.add_argument("--stardecay",
                        type=float,
                        dest="stardecay",
                        default=0.5,
                        help="p value for kleenestar and plus")"""


# Lucas recommends putting a struct with the definitions of the primitives here.
# TODO:
# Build likelihood function
# modify NN
# make primitives
# make tasks


def main(args):
    """
    Takes the return value of the `commandlineArguments()` function as input and
    trains/tests the model on regular expressions.
    """
    # for dreaming

    # parse use_ll_cutoff
    use_ll_cutoff = args.pop('use_ll_cutoff')
    if use_ll_cutoff is not False:
        # if use_ll_cutoff is a list of strings, then train_ll_cutoff and
        # test_ll_cutoff will be tuples of that string followed by the actual model
        if len(use_ll_cutoff) == 1:
            train_ll_cutoff = use_ll_cutoff[0]  # make_cutoff_model(use_ll_cutoff[0], tasks))
            test_ll_cutoff = use_ll_cutoff[0]   # make_cutoff_model(use_ll_cutoff[0], tasks))
        else:
            assert len(use_ll_cutoff) == 2
            train_ll_cutoff = use_ll_cutoff[0]  # make_cutoff_model(use_ll_cutoff[0], tasks))
            test_ll_cutoff = use_ll_cutoff[1]   # make_cutoff_model(use_ll_cutoff[1], tasks))
    else:
        train_ll_cutoff = None
        test_ll_cutoff = None

    regexTasks = {"old": makeOldTasks,
                  "short": makeShortTasks,
                  "long": makeLongTasks,
                  "words": makeWordTasks,
                  "number": makeNumberTasks,
                  "handpicked": makeHandPickedTasks,
                  "new": makeNewTasks,
                  "newNumber": makeNewNumberTasks,
                  }[args.pop("tasks")]

    tasks = regexTasks()  # TODO
    eprint("Generated", len(tasks), "tasks")

    maxTasks = args.pop("maxTasks")
    if len(tasks) > maxTasks:
        eprint("Unwilling to handle {} tasks, truncating..".format(len(tasks)))
        seed = 42  # previously this was hardcoded and never changed
        random.seed(seed)
        random.shuffle(tasks)
        del tasks[maxTasks:]

    maxExamples = args.pop("maxExamples")

    split = args.pop("split")
    test, train = testTrainSplit(tasks, split)
    eprint("Split tasks into %d/%d test/train" % (len(test), len(train)))

    test = add_cutoff_values(test, test_ll_cutoff)
    train = add_cutoff_values(train, train_ll_cutoff)
    eprint("added cutoff values to tasks, train:", train_ll_cutoff, ", test:", test_ll_cutoff)

    if args.pop("use_str_const"):
        assert args["primitives"] == "strConst" or args["primitives"] == "reduced"
        ConstantInstantiateVisitor.SINGLE = ConstantInstantiateVisitor()
        test = add_string_constants(test)
        train = add_string_constants(train)
        eprint("added string constants to test and train")

    for task in test + train:
        if len(task.examples) > maxExamples:
            task.examples = task.examples[:maxExamples]
        task.specialTask = ("regex", {"cutoff": task.ll_cutoff, "str_const": task.str_const})
        task.examples = [(xs, [y for y in ys]) for xs, ys in task.examples]
        task.maxParameters = 1

    # from list stuff
    primtype = args.pop("primitives")
    prims = {"base": basePrimitives,
             "alt1": altPrimitives,
             "alt2": alt2Primitives,
             "easyWords": easyWordsPrimitives,
             "concat": concatPrimitives,
             "reduced": reducedConcatPrimitives,
             "strConst": strConstConcatPrimitives,
             }[primtype]

    extractor = {
        "learned": LearnedFeatureExtractor,
        "json": MyJSONFeatureExtractor,
    }[args.pop("extractor")]

    extractor.H = args.pop("hidden")

    # stardecay = args.stardecay
    # stardecay = args.pop('stardecay')
    # decaystr = 'd' + str(stardecay)
    import datetime

    timestamp = datetime.datetime.now().isoformat()
    outputDirectory = "experimentOutputs/regex/%s" % timestamp
    os.system("mkdir -p %s" % outputDirectory)

    args.update({
        "featureExtractor": extractor,
        "outputPrefix": "%s/regex" % (outputDirectory),
        "evaluationTimeout": 0.005,
        "topk_use_only_likelihood": True,
        "maximumFrontier": 10,
        "compressor": args.get("compressor", "ocaml"),
    })

    # prim_list = prims(stardecay)
    prim_list = prims()
    specials = ["r_kleene", "r_plus", "r_maybe", "r_alt", "r_concat"]
    n_base_prim = len(prim_list) - len(specials)

    productions = [
        (math.log(0.5 / float(n_base_prim)), prim)
        if prim.name not in specials
        else (math.log(0.10), prim)
        for prim in prim_list]

    baseGrammar = Grammar.fromProductions(productions, continuationType=tpregex)
    # baseGrammar = Grammar.uniform(prims())

    # for i in range(100):
    #     eprint(baseGrammar.sample(tpregex))
    # eprint(baseGrammar)

    # explore
    test_stuff = args.pop("debug")
    if test_stuff:
        eprint(baseGrammar)
        eprint("sampled programs from prior:")
        for i in range(100):
            eprint(baseGrammar.sample(test[0].request, maximumDepth=1000))
        eprint("""half the probability mass is on higher-order primitives.
Therefore half of enumerated programs should have more than one node.
However, we do not observe this.
Instead we see a very small fraction of programs have more than one node.
So something seems to be wrong with grammar.sample.

Furthermore: observe the large print statement above.
This prints the candidates for sampleDistribution in grammar.sample.
The first element of each tuple is the probability passed into sampleDistribution.

Half of the probability mass should be on the functions, but instead they are equally
weighted with the constants. If you look at the grammar above, this is an error!!!!
""")
        assert False

    del args["likelihoodModel"]
    explorationCompression(baseGrammar, train, testingTasks=test, **args)
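# Sanity-check sketch (not part of the original file): the production weights
# above put probability 0.10 on each of the 5 special primitives and split the
# remaining half of the mass evenly over the base primitives, so the total
# probability should sum to 1 regardless of how many base primitives there are.
# The base-primitive count of 12 below is an arbitrary example value.
n_specials, n_base_prim = 5, 12
mass = n_specials * 0.10 + n_base_prim * (0.5 / n_base_prim)
assert abs(mass - 1.0) < 1e-9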
705ea14d682a0aa1ac92b70de976c56b437db98b
72293b4650b92019f9c046133f7de13ea6f69644
/zulip_bots/zulip_bots/bots/trello/test_trello.py
60af4174df85d0d1d2b98bfbf5d244786898e41f
[ "Apache-2.0", "LicenseRef-scancode-free-unknown" ]
permissive
zulip/python-zulip-api
820978c36689db4872abf21730b25ce8abb5fbcf
35a8ff8839ac39cff0638f533fea59665cb9aff3
refs/heads/main
2023-09-03T14:04:46.920347
2023-06-12T21:03:10
2023-08-11T19:36:11
96,455,158
387
437
Apache-2.0
2023-08-11T19:36:12
2017-07-06T17:25:46
Python
UTF-8
Python
false
false
5,223
py
test_trello.py
from unittest.mock import patch

from zulip_bots.bots.trello.trello import TrelloHandler
from zulip_bots.test_lib import BotTestCase, DefaultTests, StubBotHandler

mock_config = {"api_key": "TEST", "access_token": "TEST", "user_name": "TEST"}


class TestTrelloBot(BotTestCase, DefaultTests):
    bot_name = "trello"  # type: str

    def test_bot_responds_to_empty_message(self) -> None:
        with self.mock_config_info(mock_config), patch("requests.get"):
            self.verify_reply("", "Empty Query")

    def test_bot_usage(self) -> None:
        with self.mock_config_info(mock_config), patch("requests.get"):
            self.verify_reply(
                "help",
                """
This interactive bot can be used to interact with Trello.

Use `list-commands` to get information about the supported commands.
""",
            )

    def test_bot_quit_with_invalid_config(self) -> None:
        with self.mock_config_info(mock_config), self.assertRaises(StubBotHandler.BotQuitException):
            with self.mock_http_conversation("invalid_key"):
                TrelloHandler().initialize(StubBotHandler())

    def test_invalid_command(self) -> None:
        with self.mock_config_info(mock_config), patch("requests.get"):
            self.verify_reply("abcd", "Command not supported")

    def test_list_commands_command(self) -> None:
        expected_reply = (
            "**Commands:** \n"
            "1. **help**: Get the bot usage information.\n"
            "2. **list-commands**: Get information about the commands supported by the bot.\n"
            "3. **get-all-boards**: Get all the boards under the configured account.\n"
            "4. **get-all-cards <board_id>**: Get all the cards in the given board.\n"
            "5. **get-all-checklists <card_id>**: Get all the checklists in the given card.\n"
            "6. **get-all-lists <board_id>**: Get all the lists in the given board.\n"
        )
        with self.mock_config_info(mock_config), patch("requests.get"):
            self.verify_reply("list-commands", expected_reply)

    def test_get_all_boards_command(self) -> None:
        with self.mock_config_info(mock_config), patch("requests.get"):
            with self.mock_http_conversation("get_all_boards"):
                self.verify_reply("get-all-boards", "**Boards:**\n")

            with self.mock_http_conversation("get_board_descs"):
                bot_instance = TrelloHandler()
                bot_instance.initialize(StubBotHandler())
                self.assertEqual(bot_instance.get_board_descs(["TEST"]), "1.[TEST](TEST) (`TEST`)")

    def test_get_all_cards_command(self) -> None:
        with self.mock_config_info(mock_config), patch("requests.get"):
            with self.mock_http_conversation("get_cards"):
                self.verify_reply("get-all-cards TEST", "**Cards:**\n1. [TEST](TEST) (`TEST`)")

    def test_get_all_checklists_command(self) -> None:
        with self.mock_config_info(mock_config), patch("requests.get"):
            with self.mock_http_conversation("get_checklists"):
                self.verify_reply(
                    "get-all-checklists TEST",
                    "**Checklists:**\n"
                    "1. `TEST`:\n"
                    " * [X] TEST_1\n * [X] TEST_2\n"
                    " * [-] TEST_3\n * [-] TEST_4",
                )

    def test_get_all_lists_command(self) -> None:
        with self.mock_config_info(mock_config), patch("requests.get"):
            with self.mock_http_conversation("get_lists"):
                self.verify_reply(
                    "get-all-lists TEST",
                    ("**Lists:**\n" "1. TEST_A\n" " * TEST_1\n" "2. TEST_B\n" " * TEST_2"),
                )

    def test_command_exceptions(self) -> None:
        """Add appropriate tests here for all additional commands with try/except
        blocks. This ensures consistency."""
        expected_error_response = "Invalid Response. Please check configuration and parameters."

        with self.mock_config_info(mock_config), patch("requests.get"):
            with self.mock_http_conversation("exception_boards"):
                self.verify_reply("get-all-boards", expected_error_response)

            with self.mock_http_conversation("exception_cards"):
                self.verify_reply("get-all-cards TEST", expected_error_response)

            with self.mock_http_conversation("exception_checklists"):
                self.verify_reply("get-all-checklists TEST", expected_error_response)

            with self.mock_http_conversation("exception_lists"):
                self.verify_reply("get-all-lists TEST", expected_error_response)

    def test_command_invalid_arguments(self) -> None:
        """Add appropriate tests here for all additional commands with more than
        one argument. This ensures consistency."""
        expected_error_response = "Invalid Arguments."

        with self.mock_config_info(mock_config), patch("requests.get"):
            self.verify_reply("get-all-cards", expected_error_response)
            self.verify_reply("get-all-checklists", expected_error_response)
            self.verify_reply("get-all-lists", expected_error_response)
6d0ef467f39918c56c6ac974eee083be51a4a9c4
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
/tests/st/profiler/test_ascend_profiler.py
1df2fc10b58bc975d5e506e73167499cefb3f63d
[ "Apache-2.0", "LicenseRef-scancode-proprietary-license", "MPL-1.0", "OpenSSL", "LGPL-3.0-only", "LicenseRef-scancode-warranty-disclaimer", "BSD-3-Clause-Open-MPI", "MIT", "MPL-2.0-no-copyleft-exception", "NTP", "BSD-3-Clause", "GPL-1.0-or-later", "0BSD", "MPL-2.0", "LicenseRef-scancode-free-unknown", "AGPL-3.0-only", "Libpng", "MPL-1.1", "IJG", "GPL-2.0-only", "BSL-1.0", "Zlib", "LicenseRef-scancode-public-domain", "LicenseRef-scancode-python-cwi", "BSD-2-Clause", "LicenseRef-scancode-gary-s-brown", "LGPL-2.1-only", "LicenseRef-scancode-other-permissive", "Python-2.0", "LicenseRef-scancode-mit-nagy", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-unknown-license-reference", "Unlicense" ]
permissive
mindspore-ai/mindspore
ca7d5bb51a3451c2705ff2e583a740589d80393b
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
refs/heads/master
2023-07-29T09:17:11.051569
2023-07-17T13:14:15
2023-07-17T13:14:15
239,714,835
4,178
768
Apache-2.0
2023-07-26T22:31:11
2020-02-11T08:43:48
C++
UTF-8
Python
false
false
4,305
py
test_ascend_profiler.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test ascend profiling."""
import glob
import tempfile

import numpy as np
import pytest

import mindspore
import mindspore.context as context
import mindspore.dataset as ds
import mindspore.nn as nn
from mindspore import Model
from mindspore import Profiler
from mindspore import Tensor
from mindspore.ops import operations as P
from tests.security_utils import security_off_wrap


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.add = P.Add()

    def construct(self, x_, y_):
        return self.add(x_, y_)


x = np.random.randn(1, 3, 3, 4).astype(np.float32)
y = np.random.randn(1, 3, 3, 4).astype(np.float32)


class NetWork(nn.Cell):
    def __init__(self):
        super(NetWork, self).__init__()
        self.unique = P.Unique()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.add = P.Add()

    def construct(self, a, b):
        val = self.add(a, b)
        size = self.shape(val)
        res = self.reshape(val, size)
        return res


def dataset_generator():
    for i in range(1, 10):
        yield (np.ones((32, 2 * i), dtype=np.float32),
               np.ones((32, 2 * i), dtype=np.float32))


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@security_off_wrap
def test_ascend_profiling():
    """Test ascend profiling"""
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    with tempfile.TemporaryDirectory() as tmpdir:
        profiler = Profiler(output_path=tmpdir, l2_cache=True)
        add = Net()
        add(Tensor(x), Tensor(y))
        profiler.analyse()
        assert len(glob.glob(f"{tmpdir}/profiler*/*PROF*/device_*/data/Framework.task_desc_info*")) == 2
        assert len(glob.glob(f"{tmpdir}/profiler*/*PROF*/device_*/data/Framework.tensor_data_info*")) == 2
        assert len(glob.glob(f"{tmpdir}/profiler*/*PROF*/device_*/data/l2_cache.data*")) >= 2


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@security_off_wrap
def test_ascend_pynative_profiling():
    """
    Feature: Test the ascend pynative model profiling
    Description: Generate the Net op timeline
    Expectation: Timeline generated successfully
    """
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    with tempfile.TemporaryDirectory() as tmpdir:
        profiler = Profiler(output_path=tmpdir)
        add = Net()
        add(Tensor(x), Tensor(y))
        profiler.analyse()
        assert len(glob.glob(f"{tmpdir}/profiler*/output_timeline_data_*.txt")) == 1


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@security_off_wrap
def test_shape():
    """
    Feature: Test the ascend dynamic shape model profiling
    Description: Generate the Net dynamic shape data.
    Expectation: Dynamic shape data generated successfully
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    with tempfile.TemporaryDirectory() as tmpdir:
        network = NetWork()
        profiler = Profiler(output_path=tmpdir)
        dataset = ds.GeneratorDataset(dataset_generator, ["data1", "data2"])
        t0 = Tensor(dtype=mindspore.float32, shape=[32, None])
        t1 = Tensor(dtype=mindspore.float32, shape=[32, None])
        network.set_inputs(t0, t1)
        model = Model(network)
        model.train(1, dataset, dataset_sink_mode=True)
        profiler.analyse()
        assert len(glob.glob(f"{tmpdir}/profiler*/dynamic_shape_*.json")) == 1
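# Aside (a self-contained sketch, not part of the test file): the assertions
# above count profiler artifacts via glob wildcards against a temporary
# directory. The same pattern in miniature, with a hand-made dummy file:
import glob
import os
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    open(os.path.join(tmpdir, "output_timeline_data_1.txt"), "w").close()
    assert len(glob.glob(f"{tmpdir}/output_timeline_data_*.txt")) == 1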
980130a2686cdceff91af9a96175855a7837b05c
96d69d82b4f7cffe20e405b00f26ea15fa988f6e
/tutorials/spectral/plot_SpectralVariance.py
966c1d98a6658bd854ea36ee2964b3c405ccdd6b
[ "Apache-2.0" ]
permissive
neurodsp-tools/neurodsp
5706550da30f5b6a97390f08614580c2d883070a
d449761a334ff5a091c055401b41be0dc6fa8fc1
refs/heads/main
2023-07-27T10:55:16.082678
2023-07-20T19:24:45
2023-07-20T19:24:45
94,924,688
219
55
Apache-2.0
2023-09-13T11:27:50
2017-06-20T18:51:11
Python
UTF-8
Python
false
false
7,366
py
plot_SpectralVariance.py
""" Spectral Domain Analysis: Variance ================================== Apply spectral domain analyses, calculating variance measures. This tutorial primarily covers the ``neurodsp.spectral.variance`` module. """ ################################################################################################### # Overview # -------- # # This tutorial covers computing and displaying a spectral histogram, and # computing the spectral coefficient of variation (SCV). # ################################################################################################### # Import spectral variance functions from neurodsp.spectral import compute_spectral_hist, compute_scv, compute_scv_rs # Import function to compute power spectra from neurodsp.spectral import compute_spectrum # Import utilities for loading and plotting data from neurodsp.utils import create_times from neurodsp.utils.download import load_ndsp_data from neurodsp.plts.time_series import plot_time_series from neurodsp.plts.spectral import (plot_spectral_hist, plot_scv, plot_scv_rs_lines, plot_scv_rs_matrix) ################################################################################################### # Load example neural signal # ~~~~~~~~~~~~~~~~~~~~~~~~~~ # # First, we load the sample data, which is a segment of rat hippocampal LFP # taken from the publicly available database CRCNS (specifically, from the 'hc2' dataset). # # Relevant publication: Mizuseki et al, 2012, Nature Neuro # ################################################################################################### # sphinx_gallery_thumbnail_number = 4 # Download, if needed, and load example data files sig = load_ndsp_data('sample_data_2.npy', folder='data') # Set sampling rate, and create a times vector for plotting fs = 1000 times = create_times(len(sig)/fs, fs) ################################################################################################### # Plot the loaded signal plot_time_series(times, sig, xlim=[0, 3]) ################################################################################################### # # Plotting the data, we observe a strong theta oscillation (~6-8 Hz). # ################################################################################################### # Spectral histogram # ------------------ # # First, let's look at computing spectral histograms, with # :func:`~.compute_spectral_hist`. # # The PSD is an estimate of the central tendency (mean/median) of the signal's power # at each frequency, with the assumption that the signal is relatively stationary and # that the variance around the mean comes from various forms of noise. # # However, in physiological data, we may be interested in visualizing the distribution of # power values around the mean at each frequency, as estimated in sequential slices of # short-time Fourier transform (STFT), since it may reveal non-stationarities in the data # or particular frequencies that are not like the rest. Here, we simply bin the log-power # values across time, in a histogram, to observe the noise distribution at each frequency. 
#

###################################################################################################

# Calculate the spectral histogram
freqs, bins, spect_hist = compute_spectral_hist(sig, fs, nbins=50, f_range=(0, 80),
                                                cut_pct=(0.1, 99.9))

# Calculate a power spectrum, with median Welch
freq_med, psd_med = compute_spectrum(sig, fs, method='welch',
                                     avg_type='median', nperseg=fs*2)

# Plot the spectral histogram
plot_spectral_hist(freqs, bins, spect_hist, freq_med, psd_med)

###################################################################################################
#
# Notice in the plot that not only is theta power higher overall (shifted up),
# it also has lower variance around its mean.
#

###################################################################################################
# Spectral Coefficient of Variation (SCV)
# ---------------------------------------
#
# Next, let's look at computing the spectral coefficient of variation, with
# :func:`~.compute_scv`.
#
# As noted above, the range of log-power values in the theta frequency range is narrower
# compared to other frequencies, while that of 30-100 Hz appears to be quite constant
# across the entire frequency axis (homoscedasticity).
#
# To quantify that, we compute the coefficient of variation (standard deviation/mean) as a
# normalized estimate of variance.
#

###################################################################################################

# Calculate SCV
freqs, scv = compute_scv(sig, fs, nperseg=int(fs), noverlap=0)

###################################################################################################
#
# There is also a plotting function for SCV, :func:`~.plot_scv`.
#

###################################################################################################

# Plot the SCV
plot_scv(freqs, scv)

###################################################################################################
#
# As shown above, SCV calculated from the entire segment of data is quite noisy due to the
# single estimate of mean and standard deviation.
#
# To overcome this, we can compute a bootstrap-resampled estimate of SCV, by randomly drawing
# slices from the non-overlapping spectrogram and taking their average.
#
# The resampled spectral coefficient of variation can be computed with :func:`~.compute_scv_rs`.
#

###################################################################################################

# Calculate SCV with the resampling method
freqs, t_inds, scv_rs = compute_scv_rs(sig, fs, nperseg=fs, method='bootstrap',
                                       rs_params=(20, 200))

###################################################################################################
#
# You can plot the resampled SCV, as lines, with :func:`~.plot_scv_rs_lines`.
#

###################################################################################################

# Plot the SCV, from the resampling method
plot_scv_rs_lines(freqs, scv_rs)

###################################################################################################
#
# Another way to compute the resampled SCV is via a sliding window approach, essentially
# smoothing over consecutive slices of the spectrogram to compute the mean and standard
# deviation estimates.
#

###################################################################################################

# Calculate SCV with the rolling window method
freqs, t_inds, scv_rs = compute_scv_rs(sig, fs, method='rolling', rs_params=(10, 2))

###################################################################################################
#
# You can plot the resampled SCV, as a matrix, with :func:`~.plot_scv_rs_matrix`.
#

###################################################################################################

# Plot the SCV, from the rolling window method
plot_scv_rs_matrix(freqs, t_inds, scv_rs)

###################################################################################################
#
# In the plot above, we see that the theta band (~7 Hz) consistently has a CV of less
# than 1 (negative in log10).
#
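
###################################################################################################
#
# As a sanity check on the definition above, note that the SCV at each frequency is simply
# the standard deviation of the spectrogram power values divided by their mean. The short
# sketch below recomputes it by hand using :func:`scipy.signal.spectrogram`, rather than any
# neurodsp internals, so the exact values may differ slightly from :func:`~.compute_scv`
# depending on windowing defaults.
#

###################################################################################################

# Recompute the SCV manually from a spectrogram, as an illustrative cross-check
from scipy.signal import spectrogram

f_manual, t_manual, spg = spectrogram(sig, fs=fs, nperseg=int(fs), noverlap=0)
scv_manual = spg.std(axis=1) / spg.mean(axis=1)

# Plot the manually computed SCV
plot_scv(f_manual, scv_manual)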
930bf4732b67b8418767480d2508b5acb430f678
4c800425b941243c521f0a878c1b12a8f5a50585
/deepreg/config/parser.py
4f9ab34cd27aba13ee07d30a8f20832ab26ed568
[ "Apache-2.0" ]
permissive
DeepRegNet/DeepReg
f7af4554c89a7a40a53bac9f7fc9939402d1110d
650a2f1a88ad3c6932be305d6a98a36e26feedc6
refs/heads/main
2023-04-06T20:40:38.722315
2022-05-18T21:52:19
2022-05-18T21:52:19
269,365,590
509
78
Apache-2.0
2023-03-11T12:18:21
2020-06-04T13:21:37
Python
UTF-8
Python
false
false
2,638
py
parser.py
import os
from typing import Dict, List, Union

import yaml

from deepreg import log
from deepreg.config.v011 import parse_v011

logger = log.get(__name__)


def update_nested_dict(d: Dict, u: Dict) -> Dict:
    """
    Merge two dicts.

    https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth

    :param d: dict to be overwritten in case of conflicts.
    :param u: dict to be merged into d.
    :return: the merged dict.
    """

    for k, v in u.items():
        if isinstance(v, dict):
            d[k] = update_nested_dict(d.get(k, {}), v)
        else:
            d[k] = v
    return d


def load_configs(config_path: Union[str, List[str]]) -> Dict:
    """
    Load multiple configs and update the nested dictionary.

    :param config_path: list of paths or one path.
    :return: the loaded config
    """
    if isinstance(config_path, str):
        config_path = [config_path]
    # replace ~ with user home path
    config_path = [os.path.expanduser(x) for x in config_path]
    config: Dict = {}
    for config_path_i in config_path:
        with open(config_path_i) as file:
            config_i = yaml.load(file, Loader=yaml.FullLoader)
        config = update_nested_dict(d=config, u=config_i)
    loaded_config = config_sanity_check(config)

    if loaded_config != config:
        # config got updated
        head, tail = os.path.split(config_path[0])
        filename = "updated_" + tail
        save(config=loaded_config, out_dir=head, filename=filename)
        logger.error(
            "The provided configuration file is outdated. "
            "An updated version has been saved at %s.",
            os.path.join(head, filename),
        )

    return loaded_config


def save(config: dict, out_dir: str, filename: str = "config.yaml"):
    """
    Save the config into a yaml file.

    :param config: configuration to be output
    :param out_dir: directory of the output file
    :param filename: name of the output file
    """
    assert filename.endswith(".yaml")
    with open(os.path.join(out_dir, filename), "w+") as f:
        f.write(yaml.dump(config))


def config_sanity_check(config: dict) -> dict:
    """
    Check if the given config satisfies the requirements.

    :param config: entire config.
    :return: the checked (and possibly updated) config.
    """
    # back compatibility support
    config = parse_v011(config)

    # check model
    if config["train"]["method"] == "conditional":
        if config["dataset"]["train"]["labeled"] is False:  # unlabeled
            raise ValueError(
                "For conditional model, data have to be labeled, got unlabeled data."
            )

    return config
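

# A minimal, illustrative sketch of how ``update_nested_dict`` merges configs
# (hypothetical keys, not part of DeepReg's config schema): values from ``u``
# override or extend those in ``d`` at every nesting level.
#
#   base = {"train": {"lr": 0.1, "epochs": 5}}
#   override = {"train": {"lr": 0.01}}
#   update_nested_dict(d=base, u=override)
#   # -> {"train": {"lr": 0.01, "epochs": 5}}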
ad218877fb21e0ecd0037561a0542d2db8ec05e8
b8f61783b1000c04e878c88ee8c5b86a5adc27ae
/piicatcher/output.py
b1eb4e1690a1fe611e9913769f811846d0dc299a
[ "Apache-2.0" ]
permissive
tokern/piicatcher
8d41a85fd86efa68d4cbd59987618582a3c573a5
aa15f90bd25a45fc7b3ab75c52be71d714b8b57a
refs/heads/master
2023-08-06T06:44:43.367430
2023-08-03T06:34:48
2023-08-03T06:34:48
176,927,554
197
81
Apache-2.0
2023-08-03T06:34:50
2019-03-21T11:03:02
Python
UTF-8
Python
false
false
3,792
py
output.py
import datetime from typing import Any, Dict, List, Optional from dbcat.catalog import Catalog, CatSchema, CatSource, CatTable from piicatcher.generators import column_generator def output_dict( catalog: Catalog, source: CatSource, list_all: bool = False, last_run: datetime.datetime = None, include_schema_regex: List[str] = None, exclude_schema_regex: List[str] = None, include_table_regex: List[str] = None, exclude_table_regex: List[str] = None, ) -> Dict[Any, Any]: current_schema: Optional[CatSchema] = None current_table: Optional[CatTable] = None source_dict = {"name": source.name, "schemata": []} schema_dict = {"name": "", "tables": []} table_dict = {"name": "", "columns": []} for schema, table, column in column_generator( catalog=catalog, source=source, last_run=last_run, exclude_schema_regex_str=exclude_schema_regex, include_schema_regex_str=include_schema_regex, exclude_table_regex_str=exclude_table_regex, include_table_regex_str=include_table_regex, ): if current_schema is None or schema != current_schema: if current_schema is not None: if len(table_dict["columns"]) > 0 or list_all: schema_dict["tables"].append(table_dict) # type: ignore if len(schema_dict["tables"]) > 0 or list_all: source_dict["schemata"].append(schema_dict) current_schema = schema schema_dict = {"name": schema.name, "tables": []} current_table = None if current_table is None or table != current_table: if current_table is not None: if len(table_dict["columns"]) > 0 or list_all: schema_dict["tables"].append(table_dict) # type: ignore current_table = table table_dict = {"name": table.name, "columns": []} if column.pii_type is not None or list_all: table_dict["columns"].append( # type: ignore { "name": column.name, "data_type": column.data_type, "sort_order": column.sort_order, "pii_type": column.pii_type.name if column.pii_type is not None else None, "pii_plugin": column.pii_plugin, } ) if len(table_dict["columns"]) > 0 or list_all: schema_dict["tables"].append(table_dict) # type: ignore if len(schema_dict["tables"]) > 0 or list_all: source_dict["schemata"].append(schema_dict) return source_dict if len(source_dict["schemata"]) > 0 or list_all else {} def output_tabular( catalog: Catalog, source: CatSource, list_all: bool = False, last_run: datetime.datetime = None, include_schema_regex: List[str] = None, exclude_schema_regex: List[str] = None, include_table_regex: List[str] = None, exclude_table_regex: List[str] = None, ) -> List[Any]: tabular = [] for schema, table, column in column_generator( catalog=catalog, source=source, last_run=last_run, exclude_schema_regex_str=exclude_schema_regex, include_schema_regex_str=include_schema_regex, exclude_table_regex_str=exclude_table_regex, include_table_regex_str=include_table_regex, ): if list_all or column.pii_type is not None: tabular.append( [ schema.name, table.name, column.name, column.pii_type.name if column.pii_type is not None else None, column.pii_plugin, ] ) return tabular
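

# For illustration, ``output_dict`` produces a nested structure of the shape
# below (field values are hypothetical, not taken from a real scan):
#
#   {
#       "name": "my_source",
#       "schemata": [
#           {
#               "name": "public",
#               "tables": [
#                   {
#                       "name": "users",
#                       "columns": [
#                           {"name": "email", "data_type": "varchar",
#                            "sort_order": 2, "pii_type": "EMAIL",
#                            "pii_plugin": "regex"},
#                       ],
#                   },
#               ],
#           },
#       ],
#   }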
8df45952d32f5e0794ee6976480ec64b189c73a0
eb9f655206c43c12b497c667ba56a0d358b6bc3a
/python/helpers/typeshed/stubs/openpyxl/openpyxl/drawing/drawing.pyi
10f90e212a6826adad150d49e547f85a2756db0c
[ "Apache-2.0", "MIT" ]
permissive
JetBrains/intellij-community
2ed226e200ecc17c037dcddd4a006de56cd43941
05dbd4575d01a213f3f4d69aa4968473f2536142
refs/heads/master
2023-09-03T17:06:37.560889
2023-09-03T11:51:00
2023-09-03T12:12:27
2,489,216
16,288
6,635
Apache-2.0
2023-09-12T07:41:58
2011-09-30T13:33:05
null
UTF-8
Python
false
false
615
pyi
drawing.pyi
from typing import Any class Drawing: count: int name: str description: str coordinates: Any left: int top: int resize_proportional: bool rotation: int anchortype: str anchorcol: int anchorrow: int def __init__(self) -> None: ... @property def width(self): ... @width.setter def width(self, w) -> None: ... @property def height(self): ... @height.setter def height(self, h) -> None: ... def set_dimension(self, w: int = ..., h: int = ...) -> None: ... def get_emu_dimensions(self): ... @property def anchor(self): ...
edbca877463e4672b2771b9fdcd6e33d2af7ccfc
e4a879f0e53e5dd76ea1d1625a9db83ee737d4ff
/.circleci/get-commit-range.py
98429ceec7111fd046a78560ec2f5e7e7fd79d2e
[ "BSD-3-Clause" ]
permissive
berkeley-dsep-infra/datahub
d252eed0a126931438d2519400d75c9675dada44
495ca4ed43b8287358f053203236534564a41205
refs/heads/staging
2023-08-16T16:07:21.489331
2023-08-16T04:47:28
2023-08-16T04:47:28
99,443,505
132
158
BSD-3-Clause
2023-09-14T17:46:05
2017-08-05T19:26:32
Python
UTF-8
Python
false
false
2,631
py
get-commit-range.py
#!/usr/bin/env python3
import os
import argparse
from github import Github
import sys


def from_pr(project, repo, pr_number):
    gh = Github()

    pr = gh.get_repo(f'{project}/{repo}').get_pull(pr_number)

    return f'{pr.base.sha}...{pr.head.sha}'


def from_branch(project, repo, branch_name):
    """
    Return commit_range for a PR from a branch name.

    CircleCI doesn't give us the PR Number when making a PR from the
    same repo, rather than a fork. This is terrible. Until this gets
    fixed, we iterate through the repo's PRs (most recently updated
    first) and find the PR we're operating on.
    """
    gh = Github()

    prs = gh.get_repo(f'{project}/{repo}').get_pulls(state='all', sort='updated')
    for pr in prs:
        if pr.head.ref == branch_name:
            return f'{pr.base.sha}...{pr.head.sha}'

    raise ValueError(f'No PR from branch {branch_name} in upstream repo found')


def main():
    argparser = argparse.ArgumentParser()
    argparser.add_argument(
        'project',
        default=os.environ.get('CIRCLE_PROJECT_USERNAME'),
        nargs='?'
    )
    argparser.add_argument(
        'repo',
        default=os.environ.get('CIRCLE_PROJECT_REPONAME'),
        nargs='?'
    )
    argparser.add_argument(
        '--pr-number',
        type=int,
        nargs='?'
    )
    argparser.add_argument(
        '--branch-name',
        nargs='?'
    )

    args = argparser.parse_args()

    pr_number = None
    branch_name = None
    if args.pr_number:
        pr_number = args.pr_number
    else:
        if 'CIRCLE_PR_NUMBER' in os.environ:
            # When PR is from a fork
            pr_number = int(os.environ['CIRCLE_PR_NUMBER'])
        else:
            if args.branch_name:
                branch_name = args.branch_name
            else:
                if 'CIRCLE_COMPARE_URL' in os.environ:
                    # Post merge, CIRCLE_COMPARE_URL must take precedence
                    # over CIRCLE_BRANCH
                    if '...' in os.environ['CIRCLE_COMPARE_URL']:
                        print(os.environ['CIRCLE_COMPARE_URL'].split('/')[-1])
                        return
                if 'CIRCLE_BRANCH' in os.environ:
                    branch_name = os.environ['CIRCLE_BRANCH']
                else:
                    print("Must provide one of --branch-name or --pr-number", file=sys.stderr)
                    sys.exit(1)

    if pr_number:
        print(from_pr(args.project, args.repo, pr_number))
    elif branch_name:
        print(from_branch(args.project, args.repo, branch_name))
    else:
        raise ValueError('Neither pr_number nor branch_name was set')


if __name__ == '__main__':
    main()
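

# Illustrative invocations (hypothetical org/repo/PR values, not from this repo):
#
#   ./get-commit-range.py myorg myrepo --pr-number 123
#   ./get-commit-range.py myorg myrepo --branch-name my-feature
#   CIRCLE_PR_NUMBER=123 ./get-commit-range.py   # fork PRs on CircleCI
#
# Each prints a commit range such as "<base-sha>...<head-sha>", suitable for
# passing to `git diff` or `git log`.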
f84b62393d16db18ab9f330aee2a3d7e0954d09d
c71b7a8a9dd7bf7c9496b1df2acc1e52a2a913d0
/onadata/apps/logger/migrations/0020_submission_counter_timestamp_as_date.py
659b6b71cfe42fad1aed827efcda494135653953
[ "BSD-2-Clause" ]
permissive
kobotoolbox/kobocat
a5c6fb6a9d3dabe71b5e3c082e4261c4475cbf7f
b8d93d4da649f323af111cf7247206554be7c8b1
refs/heads/main
2023-08-10T00:05:49.384348
2023-07-06T04:47:59
2023-07-06T04:47:59
14,497,749
101
135
BSD-2-Clause
2023-09-13T14:57:13
2013-11-18T16:16:32
Python
UTF-8
Python
false
false
492
py
0020_submission_counter_timestamp_as_date.py
# Generated by Django 2.2.14 on 2021-08-11 14:12 from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('logger', '0019_purge_deleted_instances'), ] operations = [ migrations.AlterField( model_name='submissioncounter', name='timestamp', field=models.DateField(), ), ]
d44e80fb6bfb7fdd45bbebfc7eb44b266d0689b2
9803232b04daa00eb4038be338b833907fd1625f
/library/shared/content_providers/content_detectors/vindictus.py
748fbcdfd29bdbd7614fac1b47226e92eed41bdb
[ "MIT" ]
permissive
REDxEYE/SourceIO
a0ff3cff37504afdb906e4ee20c1077a8daf2912
85661fe057cef1ad2a779a9d48e810ea214f4f07
refs/heads/master
2023-08-08T18:35:28.771447
2023-08-07T22:26:59
2023-08-07T22:26:59
170,197,673
409
53
MIT
2023-08-23T18:40:38
2019-02-11T20:33:55
Python
UTF-8
Python
false
false
883
py
vindictus.py
from pathlib import Path from typing import Dict from .....library.utils.path_utilities import backwalk_file_resolver from ..content_provider_base import ContentProviderBase from ..hfs_provider import HFS1ContentProvider, HFS2ContentProvider from .source1_common import Source1Common class VindictusDetector(Source1Common): @classmethod def scan(cls, path: Path) -> Dict[str, ContentProviderBase]: game_root = None game_exe = backwalk_file_resolver(path, 'Vindictus.exe') if game_exe is not None: game_root = game_exe.parent if game_root is None: return {} hfs_provider = HFS2ContentProvider(game_root / 'hfs') content_providers = {'hfs': hfs_provider} for file in game_root.glob('*.hfs'): content_providers[file.stem] = HFS1ContentProvider(file) return content_providers
5892ece4c848f97d150d7fac3d97f94a5dee597e
b10e501b17337b685a2fef01f63d91c427bf62d6
/raytracing/examples/twoPhotonDescannedDetector.py
a47343051327e0a23b67fc33a9a4aaca3985b4dd
[ "MIT" ]
permissive
DCC-Lab/RayTracing
5ebd1a982e390fab5397a6309aa832efe9c62d20
bbf715b9b9dc8317d3d5d5bd550a726f6908b3bb
refs/heads/master
2023-04-08T16:41:59.211239
2023-02-22T06:02:02
2023-02-22T06:02:02
166,473,288
185
38
MIT
2023-08-14T11:39:05
2019-01-18T21:13:35
Python
UTF-8
Python
false
false
2,193
py
twoPhotonDescannedDetector.py
import envexamples
from raytracing import *
import matplotlib.pyplot as plt

"""
In some optical systems, such as a two-photon microscope with a descanned
detector, the emitted light is scattered in all directions. The size of the
diffused light spot dictates which lenses and detector size to choose. It is
therefore important to find a detector that is well-sized for the optical
system.
"""

# Defines the path. d1 and d2 are the diameters of the lenses, fl1 and fl2 are the focal lengths,
# and d3 is the diameter of the aperture.
def imagingPath(fl1=10, d1=10, fl2=10, d2=10, d3=10, title=""):

    path = ImagingPath()
    path.label=title
    path.append(System4f(f1=fl1, diameter1=d1, f2=fl2, diameter2=d2))
    path.append(Aperture(diameter=d3, label='Detector'))

    return path


def exampleCode():
    nRays = 100000
    minHeight=-0.5
    maxHeight=0.5
    inputRays = RandomLambertianRays(yMax=maxHeight, yMin=minHeight, maxCount=nRays)

    # Three paths with different sets of lenses and apertures.

    ###
    path1 = imagingPath(fl1=75, fl2=75, d1=50, d2=75, d3=0.5, title="")
    outputRays1 = path1.traceManyThrough(inputRays, progress=False)
    efficiency1 = 100*outputRays1.count/inputRays.count
    path1.display(limitObjectToFieldOfView=False, onlyPrincipalAndAxialRays=True)
    outputRays1.display("Output profile {0:.0f}% efficiency".format(efficiency1), showTheta=False)
    print(efficiency1)

    ###
    path2 = imagingPath(fl1=50, fl2=50, d1=25, d2=50, d3=0.5, title="")
    outputRays2 = path2.traceManyThrough(inputRays, progress=False)
    efficiency2 = 100*outputRays2.count/inputRays.count
    path2.display(limitObjectToFieldOfView=False, onlyPrincipalAndAxialRays=True)
    outputRays2.display("Output profile {0:.0f}% efficiency".format(efficiency2), showTheta=False)
    print(efficiency2)

    ###
    path3 = imagingPath(fl1=50, fl2=50, d1=25, d2=50, d3=1, title="")
    outputRays3 = path3.traceManyThrough(inputRays, progress=False)
    efficiency3 = 100*outputRays3.count/inputRays.count
    path3.display(limitObjectToFieldOfView=False, onlyPrincipalAndAxialRays=True)
    outputRays3.display("Output profile {0:.0f}% efficiency".format(efficiency3), showTheta=False)
    print(efficiency3)


if __name__ == "__main__":
    exampleCode()
b9d23a71864836238eb2f9c23801ee18fbf084f8
9ffbe6414664a107b00e7d5fc4ac70834219b170
/test/acceptance/features/steps/nodejs_application.py
dddea2d732361767b7fac8b7dbda2563c44b3607
[ "Apache-2.0" ]
permissive
redhat-developer/service-binding-operator
7371d91b04783b0332c7b8017ecd574192ceb73c
e66264b9df029f52cd1bdf9be76df6587c03e44f
refs/heads/master
2023-09-03T15:33:03.057544
2023-08-25T04:40:56
2023-08-25T04:40:56
191,504,334
116
96
Apache-2.0
2023-09-14T10:59:21
2019-06-12T05:36:31
Go
UTF-8
Python
false
false
3,071
py
nodejs_application.py
from app import App import re import requests import time import polling2 class NodeJSApp(App): pod_name_pattern = "{name}.*$(?<!-build)" def __init__(self, name, namespace, nodejs_app_image="quay.io/pmacik/nodejs-rest-http-crud"): App.__init__(self, name, namespace, nodejs_app_image, "8080") def get_response_from_api(self, endpoint, interval=10, timeout=300): resp = polling2.poll(lambda: requests.get(url=f"http://{self.route_url}{endpoint}"), check_success=lambda r: r.status_code in [200], step=interval, timeout=timeout, ignore_exceptions=(requests.exceptions.ConnectionError,)) return resp.text def get_observed_generation(self): return self.openshift.get_resource_info_by_jsonpath("deployment", self.name, self.namespace, "{.status.observedGeneration}") def get_running_pod_name(self, interval=5, timeout=300): start = 0 while ((start + interval) <= timeout): pod_list = self.openshift.get_pod_lst(self.namespace) for pod in pod_list: if re.fullmatch(self.get_pod_name_pattern(), pod) is not None: if self.openshift.get_pod_status(pod, self.namespace) == "Running": return pod time.sleep(interval) start += interval return None def get_redeployed_pod_name(self, old_pod_name, interval=5, timeout=300): start = 0 while ((start + interval) <= timeout): pod_list = self.openshift.get_pod_lst(self.namespace) for pod in pod_list: if pod != old_pod_name and re.fullmatch(self.get_pod_name_pattern(), pod) is not None: if self.openshift.get_pod_status(pod, self.namespace) == "Running": return pod time.sleep(interval) start += interval return None def get_pod_name_pattern(self): return self.pod_name_pattern.format(name=self.name) def is_redeployed(self, old_generation, interval=5, timeout=300): start = 0 while ((start + interval) <= timeout): current_generation = self.get_generation() pod_list = self.openshift.get_pod_lst(self.namespace) for pod in pod_list: if (current_generation > old_generation) and (re.fullmatch(self.get_pod_name_pattern(), pod) is not None): if self.openshift.get_pod_status(pod, self.namespace) == "Running": return pod time.sleep(interval) start += interval return None def get_generation(self): return self.openshift.get_resource_info_by_jsonpath("deployment", self.name, self.namespace, "{.metadata.generation}") def get_deployment_with_intermediate_secret(self, intermediate_secret_name): return self.openshift.get_deployment_with_intermediate_secret_of_given_pattern( intermediate_secret_name, self.name, self.namespace, wait=True, timeout=120)
d4acd5071dd23c6f1cf3bfc7aea81c46a173a738
d1c2d00078520cd556f60b7213c27856f8b3460d
/sdks/python/apache_beam/typehints/opcodes.py
5a35b56b9321e78e29f28e461d6970186173dff2
[ "BSD-3-Clause", "MIT", "LicenseRef-scancode-protobuf", "Apache-2.0", "Python-2.0" ]
permissive
apache/beam
ed11b9e043465c720659eac20ac71b5b171bfa88
6d5048e05087ea54abc889ce402ae2a0abb9252b
refs/heads/master
2023-09-04T07:41:07.002653
2023-09-01T23:01:05
2023-09-01T23:01:05
50,904,245
7,061
4,522
Apache-2.0
2023-09-14T21:43:38
2016-02-02T08:00:06
Java
UTF-8
Python
false
false
15,736
py
opcodes.py
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Defines the actions various bytecodes have on the frame. Each function here corresponds to a bytecode documented in https://docs.python.org/2/library/dis.html or https://docs.python.org/3/library/dis.html. The first argument is a (mutable) FrameState object, the second the integer opcode argument. Bytecodes with more complicated behavior (e.g. modifying the program counter) are handled inline rather than here. For internal use only; no backwards-compatibility guarantees. """ # pytype: skip-file import inspect import logging import sys import types from functools import reduce from apache_beam.typehints import row_type from apache_beam.typehints import typehints from apache_beam.typehints.trivial_inference import BoundMethod from apache_beam.typehints.trivial_inference import Const from apache_beam.typehints.trivial_inference import element_type from apache_beam.typehints.trivial_inference import key_value_types from apache_beam.typehints.trivial_inference import union from apache_beam.typehints.typehints import Any from apache_beam.typehints.typehints import Dict from apache_beam.typehints.typehints import Iterable from apache_beam.typehints.typehints import List from apache_beam.typehints.typehints import Set from apache_beam.typehints.typehints import Tuple from apache_beam.typehints.typehints import Union # This is missing in the builtin types module. str.upper is arbitrary, any # method on a C-implemented type will do. _MethodDescriptorType = type(str.upper) def pop_one(state, unused_arg): del state.stack[-1:] def pop_two(state, unused_arg): del state.stack[-2:] def pop_three(state, unused_arg): del state.stack[-3:] def push_value(v): def pusher(state, unused_arg): state.stack.append(v) return pusher def nop(unused_state, unused_arg): pass def pop_top(state, unused_arg): state.stack.pop() def rot_n(state, n): state.stack[-n:] = [state.stack[-1]] + state.stack[-n:-1] def rot_two(state, unused_arg): rot_n(state, 2) def rot_three(state, unused_arg): rot_n(state, 3) def rot_four(state, unused_arg): rot_n(state, 4) def dup_top(state, unused_arg): state.stack.append(state.stack[-1]) def unary(state, unused_arg): state.stack[-1] = Const.unwrap(state.stack[-1]) unary_positive = unary_negative = unary_invert = unary def unary_not(state, unused_arg): state.stack[-1] = bool def unary_convert(state, unused_arg): state.stack[-1] = str def get_iter(state, unused_arg): state.stack.append(Iterable[element_type(state.stack.pop())]) def symmetric_binary_op(state, unused_arg): # TODO(robertwb): This may not be entirely correct... 
  b, a = Const.unwrap(state.stack.pop()), Const.unwrap(state.stack.pop())
  if a == b:
    state.stack.append(a)
  elif type(a) == type(b) and isinstance(a, typehints.SequenceTypeConstraint):
    state.stack.append(type(a)(union(element_type(a), element_type(b))))
  else:
    state.stack.append(Any)


# Except for int ** -int
binary_power = inplace_power = symmetric_binary_op
binary_multiply = inplace_multiply = symmetric_binary_op
binary_divide = inplace_divide = symmetric_binary_op
binary_floor_divide = inplace_floor_divide = symmetric_binary_op


def binary_true_divide(state, unused_arg):
  u = union(state.stack.pop(), state.stack.pop())
  if u == int:
    state.stack.append(float)
  else:
    state.stack.append(u)


inplace_true_divide = binary_true_divide

binary_modulo = inplace_modulo = symmetric_binary_op
# TODO(robertwb): Tuple add.
binary_add = inplace_add = symmetric_binary_op
binary_subtract = inplace_subtract = symmetric_binary_op


def binary_subscr(state, unused_arg):
  index = state.stack.pop()
  base = Const.unwrap(state.stack.pop())
  if base is str:
    out = base
  elif (isinstance(index, Const) and isinstance(index.value, int) and
        isinstance(base, typehints.IndexableTypeConstraint)):
    try:
      out = base._constraint_for_index(index.value)
    except IndexError:
      out = element_type(base)
  elif index == slice and isinstance(base, typehints.IndexableTypeConstraint):
    out = base
  else:
    out = element_type(base)
  state.stack.append(out)


# As far as types are concerned.
binary_lshift = inplace_lshift = binary_rshift = inplace_rshift = pop_top

binary_and = inplace_and = symmetric_binary_op
binary_xor = inplace_xor = symmetric_binary_op
binary_or = inplace_or = symmetric_binary_op

binary_op = symmetric_binary_op


def store_subscr(unused_state, unused_args):
  # TODO(robertwb): Update element/value type of iterable/dict.
  pass


print_item = pop_top
print_newline = nop


def list_append(state, arg):
  new_element_type = Const.unwrap(state.stack.pop())
  state.stack[-arg] = List[union(
      element_type(state.stack[-arg]), new_element_type)]


def set_add(state, arg):
  new_element_type = Const.unwrap(state.stack.pop())
  state.stack[-arg] = Set[union(
      element_type(state.stack[-arg]), new_element_type)]


def map_add(state, arg):
  if sys.version_info >= (3, 8):
    # PEP 572 The MAP_ADD expects the value as the first element in the stack
    # and the key as the second element.
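    # e.g. for ``{k: v for k, v in pairs}``, CPython 3.8+ pushes the key and
    # then the value, so the value sits on top of the stack when MAP_ADD runs.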
    new_value_type = Const.unwrap(state.stack.pop())
    new_key_type = Const.unwrap(state.stack.pop())
  else:
    new_key_type = Const.unwrap(state.stack.pop())
    new_value_type = Const.unwrap(state.stack.pop())
  state.stack[-arg] = Dict[Union[state.stack[-arg].key_type, new_key_type],
                           Union[state.stack[-arg].value_type, new_value_type]]


load_locals = push_value(Dict[str, Any])

exec_stmt = pop_three
build_class = pop_three


def unpack_sequence(state, arg):
  t = state.stack.pop()
  if isinstance(t, Const):
    try:
      unpacked = [Const(ti) for ti in t.value]
      if len(unpacked) != arg:
        unpacked = [Any] * arg
    except TypeError:
      unpacked = [Any] * arg
  elif (isinstance(t, typehints.TupleHint.TupleConstraint) and
        len(t.tuple_types) == arg):
    unpacked = list(t.tuple_types)
  else:
    unpacked = [element_type(t)] * arg
  state.stack += reversed(unpacked)


def dup_topx(state, arg):
  state.stack += state.stack[-arg:]


store_attr = pop_two
delete_attr = pop_top
store_global = pop_top
delete_global = nop


def load_const(state, arg):
  state.stack.append(state.const_type(arg))


load_name = push_value(Any)


def build_tuple(state, arg):
  if arg == 0:
    state.stack.append(Tuple[()])
  else:
    state.stack[-arg:] = [Tuple[[Const.unwrap(t) for t in state.stack[-arg:]]]]


def build_list(state, arg):
  if arg == 0:
    state.stack.append(List[Union[()]])
  else:
    state.stack[-arg:] = [List[reduce(union, state.stack[-arg:], Union[()])]]


def build_set(state, arg):
  if arg == 0:
    state.stack.append(Set[Union[()]])
  else:
    state.stack[-arg:] = [Set[reduce(union, state.stack[-arg:], Union[()])]]


# A Dict[Union[], Union[]] is the type of an empty dict.
def build_map(state, arg):
  if arg == 0:
    state.stack.append(Dict[Union[()], Union[()]])
  else:
    state.stack[-2 * arg:] = [
        Dict[reduce(union, state.stack[-2 * arg::2], Union[()]),
             reduce(union, state.stack[-2 * arg + 1::2], Union[()])]
    ]


def build_const_key_map(state, arg):
  key_tuple = state.stack.pop()
  if isinstance(key_tuple, typehints.TupleHint.TupleConstraint):
    key_types = key_tuple.tuple_types
  elif isinstance(key_tuple, Const):
    key_types = [Const(v) for v in key_tuple.value]
  else:
    key_types = [Any]
  state.stack[-arg:] = [
      Dict[reduce(union, key_types, Union[()]),
           reduce(union, state.stack[-arg:], Union[()])]
  ]


def list_to_tuple(state, arg):
  base = state.stack.pop()
  state.stack.append(Tuple[element_type(base), ...])


def list_extend(state, arg):
  tail = state.stack.pop()
  base = state.stack[-arg]
  state.stack[-arg] = List[union(element_type(base), element_type(tail))]


def set_update(state, arg):
  other = state.stack.pop()
  base = state.stack[-arg]
  state.stack[-arg] = Set[union(element_type(base), element_type(other))]


def dict_update(state, arg):
  other = state.stack.pop()
  base = state.stack[-arg]
  if isinstance(base, typehints.Dict.DictConstraint):
    base_key_type = base.key_type
    base_value_type = base.value_type
  else:
    base_key_type = Any
    base_value_type = Any
  if isinstance(other, typehints.Dict.DictConstraint):
    other_key_type = other.key_type
    other_value_type = other.value_type
  else:
    other_key_type, other_value_type = key_value_types(element_type(other))
  state.stack[-arg] = Dict[union(base_key_type, other_key_type),
                           union(base_value_type, other_value_type)]


dict_merge = dict_update


def load_attr(state, arg):
  """Replaces the top of the stack, TOS, with getattr(TOS, co_names[arg])

  Will replace with Any for builtin methods, but these don't have bytecode in
  CPython so that's okay.
""" o = state.stack.pop() name = state.get_name(arg) state.stack.append(_getattr(o, name)) def _getattr(o, name): if isinstance(o, Const) and hasattr(o.value, name): return Const(getattr(o.value, name)) elif (inspect.isclass(o) and isinstance(getattr(o, name, None), (types.MethodType, types.FunctionType))): # TODO(luke-zhu): Support other callable objects func = getattr(o, name) # Python 3 has no unbound methods return Const(BoundMethod(func, o)) elif isinstance(o, row_type.RowTypeConstraint): return o.get_type_for(name) else: return Any def load_method(state, arg): """Like load_attr. Replaces TOS object with method and TOS.""" o = state.stack.pop() name = state.get_name(arg) if isinstance(o, Const): method = Const(getattr(o.value, name)) elif isinstance(o, typehints.AnyTypeConstraint): method = typehints.Any elif hasattr(o, name): attr = getattr(o, name) if isinstance(attr, _MethodDescriptorType): # Skip builtins since they don't disassemble. method = typehints.Any else: method = Const(BoundMethod(attr, o)) else: method = typehints.Any state.stack.append(method) def compare_op(state, unused_arg): # Could really be anything... state.stack[-2:] = [bool] is_op = compare_op contains_op = compare_op def import_name(state, unused_arg): state.stack[-2:] = [Any] import_from = push_value(Any) def load_global(state, arg): if (sys.version_info.major, sys.version_info.minor) >= (3, 11): arg = arg >> 1 state.stack.append(state.get_global(arg)) store_map = pop_two def load_fast(state, arg): state.stack.append(state.vars[arg]) def store_fast(state, arg): state.vars[arg] = state.stack.pop() def delete_fast(state, arg): state.vars[arg] = Any # really an error # bpo-43683 Adds GEN_START in Python 3.10, but removed in Python 3.11 # https://github.com/python/cpython/pull/25138 def gen_start(state, arg): assert len(state.stack) == 0 def load_closure(state, arg): # The arg is no longer offset by len(covar_names) as of 3.11 # See https://docs.python.org/3/library/dis.html#opcode-LOAD_CLOSURE if (sys.version_info.major, sys.version_info.minor) >= (3, 11): arg -= len(state.co.co_varnames) state.stack.append(state.get_closure(arg)) def load_deref(state, arg): # The arg is no longer offset by len(covar_names) as of 3.11 # See https://docs.python.org/3/library/dis.html#opcode-LOAD_DEREF if (sys.version_info.major, sys.version_info.minor) >= (3, 11): arg -= len(state.co.co_varnames) state.stack.append(state.closure_type(arg)) def make_function(state, arg): """Creates a function with the arguments at the top of the stack. """ # TODO(luke-zhu): Handle default argument types globals = state.f.__globals__ # Inherits globals from the current frame tos = state.stack[-1].value # In Python 3.11 lambdas no longer have fully qualified names on the stack, # so we check for this case (AKA the code is top of stack.) if isinstance(tos, types.CodeType): func_name = None func_code = tos pop_count = 1 is_lambda = True else: func_name = tos func_code = state.stack[-2].value pop_count = 2 is_lambda = False closure = None # arg contains flags, with corresponding stack values if positive. # https://docs.python.org/3.6/library/dis.html#opcode-MAKE_FUNCTION pop_count += bin(arg).count('1') if arg & 0x08: # Convert types in Tuple constraint to a tuple of CPython cells. 
# https://stackoverflow.com/a/44670295 if is_lambda: closureTuplePos = -2 else: closureTuplePos = -3 closure = tuple((lambda _: lambda: _)(t).__closure__[0] for t in state.stack[closureTuplePos].tuple_types) func = types.FunctionType(func_code, globals, name=func_name, closure=closure) assert pop_count <= len(state.stack) state.stack[-pop_count:] = [Const(func)] def make_closure(state, arg): state.stack[-arg - 2:] = [Any] # a callable def build_slice(state, arg): state.stack[-arg:] = [slice] # a slice object def _unpack_lists(state, arg): """Extract inner types of Lists and Tuples. Pops arg count items from the stack, concatenates their inner types into 1 list, and returns that list. Example: if stack[-arg:] == [[i1, i2], [i3]], the output is [i1, i2, i3] """ types = [] for i in range(arg, 0, -1): type_constraint = state.stack[-i] if isinstance(type_constraint, typehints.IndexableTypeConstraint): types.extend(type_constraint._inner_types()) elif type_constraint == Union[()]: continue else: logging.debug('Unhandled type_constraint: %r', type_constraint) types.append(typehints.Any) state.stack[-arg:] = [] return types def build_list_unpack(state, arg): """Joins arg count iterables from the stack into a single list.""" state.stack.append(List[Union[_unpack_lists(state, arg)]]) def build_set_unpack(state, arg): """Joins arg count iterables from the stack into a single set.""" state.stack.append(Set[Union[_unpack_lists(state, arg)]]) def build_tuple_unpack(state, arg): """Joins arg count iterables from the stack into a single tuple.""" state.stack.append(Tuple[Union[_unpack_lists(state, arg)], ...]) def build_tuple_unpack_with_call(state, arg): """Same as build_tuple_unpack, with an extra fn argument at the bottom of the stack, which remains untouched.""" build_tuple_unpack(state, arg) def build_map_unpack(state, arg): """Joins arg count maps from the stack into a single dict.""" key_types = [] value_types = [] for _ in range(arg): type_constraint = state.stack.pop() if isinstance(type_constraint, typehints.Dict.DictConstraint): key_types.append(type_constraint.key_type) value_types.append(type_constraint.value_type) else: key_type, value_type = key_value_types(element_type(type_constraint)) key_types.append(key_type) value_types.append(value_type) state.stack.append(Dict[Union[key_types], Union[value_types]])
353473d328d02fffc1e6938b5748636456b74185
8eccea9f715a2a0ce602f1944ed3e812adcacb4d
/tests/api/v2/handlers/test_obfuscators_api.py
fcc3ac9e5410f98f9f00d59ce42cb5b554543298
[ "Apache-2.0" ]
permissive
mitre/caldera
c466cde25bb0191880984cfdf3af84efc8a7c9f4
3140411d4b96d8d5607b2b50476f7bf3d506de00
refs/heads/master
2023-08-23T02:14:23.360314
2023-08-21T18:55:29
2023-08-21T18:55:29
112,409,981
4,685
1,046
Apache-2.0
2023-09-13T16:36:05
2017-11-29T01:25:10
Python
UTF-8
Python
false
false
1,825
py
test_obfuscators_api.py
import pytest from http import HTTPStatus from app.objects.c_obfuscator import Obfuscator from app.utility.base_service import BaseService @pytest.fixture def test_obfuscator(event_loop, api_v2_client): obfuscator = Obfuscator(name='test', description='a test obfuscator', module='testmodule') event_loop.run_until_complete(BaseService.get_service('data_svc').store(obfuscator)) return obfuscator class TestObfuscatorsApi: async def test_get_obfuscators(self, api_v2_client, api_cookies, test_obfuscator): resp = await api_v2_client.get('/api/v2/obfuscators', cookies=api_cookies) obfuscators_list = await resp.json() assert len(obfuscators_list) == 1 obfuscator_dict = obfuscators_list[0] assert obfuscator_dict == test_obfuscator.display_schema.dump(test_obfuscator) async def test_unauthorized_get_obfuscators(self, api_v2_client, test_obfuscator): resp = await api_v2_client.get('/api/v2/obfuscators') assert resp.status == HTTPStatus.UNAUTHORIZED async def test_get_obfuscator_by_id(self, api_v2_client, api_cookies, test_obfuscator): resp = await api_v2_client.get(f'/api/v2/obfuscators/{test_obfuscator.name}', cookies=api_cookies) obfuscator_dict = await resp.json() assert obfuscator_dict == test_obfuscator.display_schema.dump(test_obfuscator) async def test_unauthorized_get_obfuscator_by_id(self, api_v2_client, test_obfuscator): resp = await api_v2_client.get(f'/api/v2/obfuscators/{test_obfuscator.name}') assert resp.status == HTTPStatus.UNAUTHORIZED async def test_get_nonexistent_obfuscator_by_id(self, api_v2_client, api_cookies): resp = await api_v2_client.get('/api/v2/obfuscators/999', cookies=api_cookies) assert resp.status == HTTPStatus.NOT_FOUND
8bdd28c02d777d612f4669fad8d7c46f794150a7
bed34365a9dab825fd9f4a4ff1b0863f441266ac
/neutron/conf/services/metering_agent.py
74d7d56532fc1107542e498b1d4a2fb6cc254de2
[ "Apache-2.0" ]
permissive
openstack/neutron
0913ee3cd69d5bdb9c10aa084d4e1803abee320c
dde31aae392b80341f6440eb38db1583563d7d1f
refs/heads/master
2023-08-31T13:09:41.831598
2023-08-31T11:37:30
2023-08-31T11:37:30
2,400,289
1,174
1,325
Apache-2.0
2022-06-29T08:00:05
2011-09-16T16:04:08
Python
UTF-8
Python
false
false
1,666
py
metering_agent.py
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from neutron._i18n import _


metering_agent_opts = [
    cfg.StrOpt('driver',
               default='neutron.services.metering.drivers.noop.'
                       'noop_driver.NoopMeteringDriver',
               help=_("Metering driver")),
    cfg.IntOpt('measure_interval', default=30,
               help=_("Interval between two metering measures")),
    cfg.IntOpt('report_interval', default=300,
               help=_("Interval between two metering reports")),
    cfg.BoolOpt('granular_traffic_data',
                default=False,
                help=_("Defines if the metering agent driver should present "
                       "traffic data in a granular fashion, instead of "
                       "grouping all of the traffic data for all projects "
                       "and routers to which the labels were assigned. The "
                       "default value is `False` for backward "
                       "compatibility."),
                ),
]


def register_metering_agent_opts(cfg=cfg.CONF):
    cfg.register_opts(metering_agent_opts)
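

# A minimal usage sketch (illustrative; only the import path and option names
# come from this module): a consumer registers the options on the global
# config object and then reads them back.
#
#   from neutron.conf.services import metering_agent
#   metering_agent.register_metering_agent_opts()
#   interval = cfg.CONF.measure_interval  # -> 30 by default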
cfd1432fd979ada620a9d77bc492f635ab429620
09a6d8dbad5b92f93791948b5bf9b75f5cb2e5ce
/tests/qchem/of_tests/test_dipole_of.py
22b5a201f63503d27b2a4845297b3a70655dec36
[ "Apache-2.0" ]
permissive
PennyLaneAI/pennylane
458efd5d9457e90ada31ca2ef0fb6bb96a24e9a7
0843183ff15a013c2622af5e61fea431d18076d3
refs/heads/master
2023-09-03T17:00:43.105784
2023-09-01T16:15:07
2023-09-01T16:15:07
129,936,360
1,431
410
Apache-2.0
2023-09-14T21:30:56
2018-04-17T16:45:42
Python
UTF-8
Python
false
false
8,970
py
test_dipole_of.py
# Copyright 2018-2023 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Unit tests for the ``dipole_of`` function. """ # pylint: disable=too-many-arguments import numpy as np import pytest import pennylane as qml h2 = ["H", "H"] x_h2 = np.array([0.0, 0.0, -0.661, 0.0, 0.0, 0.661]) coeffs_h2 = [] coeffs_h2.append([0.0]) coeffs_h2.append([0.0]) coeffs_h2.append([0.45445016, 0.45445016, 0.45445016, 0.45445016]) ops_h2 = [] ops_h2.append([qml.Identity(wires=[0])]) ops_h2.append([qml.Identity(wires=[0])]) ops_h2.append( [ qml.PauliY(wires=[0]) @ qml.PauliZ(wires=[1]) @ qml.PauliY(wires=[2]), qml.PauliX(wires=[0]) @ qml.PauliZ(wires=[1]) @ qml.PauliX(wires=[2]), qml.PauliY(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliY(wires=[3]), qml.PauliX(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliX(wires=[3]), ] ) h3p = ["H", "H", "H"] x_h3p = np.array([0.028, 0.054, 0.0, 0.986, 1.610, 0.0, 1.855, 0.002, 0.0]) coeffs_h3p = [] coeffs_h3p.append( [ 0.47811232, 0.47811232, -0.39136385, -0.39136385, -0.39136385, -0.39136385, 0.26611147, 0.26611147, 0.26611147, 0.26611147, 0.71447791, 0.71447791, -0.11734959, -0.11734959, -0.11734959, -0.11734959, 0.24190978, 0.24190978, ] ) coeffs_h3p.append( [ 0.27769368, 0.27769368, 0.26614699, 0.26614699, 0.26614699, 0.26614699, 0.39131162, 0.39131162, 0.39131162, 0.39131162, 0.16019825, 0.16019825, -0.23616713, -0.23616713, -0.23616713, -0.23616713, 0.39510807, 0.39510807, ] ) coeffs_h3p.append([0.0]) ops_h3p = [] ops_h3p.append( [ qml.PauliZ(wires=[0]), qml.PauliZ(wires=[1]), qml.PauliY(wires=[0]) @ qml.PauliZ(wires=[1]) @ qml.PauliY(wires=[2]), qml.PauliX(wires=[0]) @ qml.PauliZ(wires=[1]) @ qml.PauliX(wires=[2]), qml.PauliY(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliY(wires=[3]), qml.PauliX(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliX(wires=[3]), qml.PauliY(wires=[0]) @ qml.PauliZ(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliZ(wires=[3]) @ qml.PauliY(wires=[4]), qml.PauliX(wires=[0]) @ qml.PauliZ(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliZ(wires=[3]) @ qml.PauliX(wires=[4]), qml.PauliY(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliZ(wires=[3]) @ qml.PauliZ(wires=[4]) @ qml.PauliY(wires=[5]), qml.PauliX(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliZ(wires=[3]) @ qml.PauliZ(wires=[4]) @ qml.PauliX(wires=[5]), qml.PauliZ(wires=[2]), qml.PauliZ(wires=[3]), qml.PauliY(wires=[2]) @ qml.PauliZ(wires=[3]) @ qml.PauliY(wires=[4]), qml.PauliX(wires=[2]) @ qml.PauliZ(wires=[3]) @ qml.PauliX(wires=[4]), qml.PauliY(wires=[3]) @ qml.PauliZ(wires=[4]) @ qml.PauliY(wires=[5]), qml.PauliX(wires=[3]) @ qml.PauliZ(wires=[4]) @ qml.PauliX(wires=[5]), qml.PauliZ(wires=[4]), qml.PauliZ(wires=[5]), ] ) ops_h3p.append( [ qml.PauliZ(wires=[0]), qml.PauliZ(wires=[1]), qml.PauliY(wires=[0]) @ qml.PauliZ(wires=[1]) @ qml.PauliY(wires=[2]), qml.PauliX(wires=[0]) @ qml.PauliZ(wires=[1]) @ qml.PauliX(wires=[2]), qml.PauliY(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliY(wires=[3]), qml.PauliX(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliX(wires=[3]), qml.PauliY(wires=[0]) 
@ qml.PauliZ(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliZ(wires=[3]) @ qml.PauliY(wires=[4]), qml.PauliX(wires=[0]) @ qml.PauliZ(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliZ(wires=[3]) @ qml.PauliX(wires=[4]), qml.PauliY(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliZ(wires=[3]) @ qml.PauliZ(wires=[4]) @ qml.PauliY(wires=[5]), qml.PauliX(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliZ(wires=[3]) @ qml.PauliZ(wires=[4]) @ qml.PauliX(wires=[5]), qml.PauliZ(wires=[2]), qml.PauliZ(wires=[3]), qml.PauliY(wires=[2]) @ qml.PauliZ(wires=[3]) @ qml.PauliY(wires=[4]), qml.PauliX(wires=[2]) @ qml.PauliZ(wires=[3]) @ qml.PauliX(wires=[4]), qml.PauliY(wires=[3]) @ qml.PauliZ(wires=[4]) @ qml.PauliY(wires=[5]), qml.PauliX(wires=[3]) @ qml.PauliZ(wires=[4]) @ qml.PauliX(wires=[5]), qml.PauliZ(wires=[4]), qml.PauliZ(wires=[5]), ] ) ops_h3p.append([qml.Identity(wires=[0])]) h2o = ["H", "H", "O"] x_h2o = np.array([0.0, 1.431, -0.887, 0.0, -1.431, -0.887, 0.0, 0.0, 0.222]) coeffs_h2o = [] coeffs_h2o.append([-0.03700797, 0.03700797, 0.03700797, -0.03700797]) coeffs_h2o.append([0.0]) coeffs_h2o.append([0.28530461, 0.111, 0.111, -0.3710174, -0.3710174]) ops_h2o = [] ops_h2o.append( [ qml.PauliX(wires=[0]) @ qml.PauliY(wires=[1]) @ qml.PauliY(wires=[2]), qml.PauliY(wires=[0]) @ qml.PauliY(wires=[1]) @ qml.PauliX(wires=[2]), qml.PauliZ(wires=[0]) @ qml.PauliX(wires=[1]) @ qml.PauliZ(wires=[3]), qml.PauliX(wires=[1]) @ qml.PauliZ(wires=[2]), ] ) ops_h2o.append([qml.Identity(wires=[0])]) ops_h2o.append( [ qml.Identity(wires=[0]), qml.PauliZ(wires=[0]), qml.PauliZ(wires=[0]) @ qml.PauliZ(wires=[1]), qml.PauliZ(wires=[2]), qml.PauliZ(wires=[1]) @ qml.PauliZ(wires=[2]) @ qml.PauliZ(wires=[3]), ] ) @pytest.mark.parametrize( ("symbols", "coords", "charge", "core", "active", "mapping", "coeffs", "ops"), [ (h2, x_h2, 0, None, None, "jordan_wigner", coeffs_h2, ops_h2), (h3p, x_h3p, 1, None, None, "jordan_wigner", coeffs_h3p, ops_h3p), (h2o, x_h2o, 0, range(4), [4, 5], "bravyi_kitaev", coeffs_h2o, ops_h2o), ], ) @pytest.mark.usefixtures("skip_if_no_openfermion_support") def test_dipole_obs(symbols, coords, charge, core, active, mapping, coeffs, ops, tol, tmpdir): r"""Tests the correctness of the dipole observable computed by the ``dipole`` function.""" dip = qml.qchem.dipole_of( symbols, coords, charge=charge, core=core, active=active, mapping=mapping, outpath=tmpdir.strpath, ) assert len(dip) == len(ops) for i, _dip in enumerate(dip): calc_coeffs = np.array(_dip.coeffs) exp_coeffs = np.array(coeffs[i]) assert np.allclose(calc_coeffs, exp_coeffs, **tol) assert all(isinstance(o1, o2.__class__) for o1, o2 in zip(_dip.ops, ops[i])) assert all(o1.wires == o2.wires for o1, o2 in zip(_dip.ops, ops[i])) @pytest.mark.parametrize( ("symbols", "coords", "charge", "hf_state", "exp_dipole"), [ (h2, x_h2, 0, np.array([1, 1, 0, 0]), np.array([0.0, 0.0, 0.0])), (h3p, x_h3p, 1, np.array([1, 1, 0, 0, 0, 0]), np.array([0.95655073, 0.55522528, 0.0])), ], ) @pytest.mark.usefixtures("skip_if_no_openfermion_support") def test_dipole(symbols, coords, charge, hf_state, exp_dipole, tol, tmpdir): r"""Tests the correctness of the computed dipole moment.""" n_qubits = len(hf_state) dev = qml.device("default.qubit", wires=n_qubits) dip_obs = qml.qchem.dipole_of(symbols, coords, charge=charge, outpath=tmpdir.strpath) def circuit(params, wires): # pylint: disable=unused-argument qml.BasisState(hf_state, wires=wires) with pytest.warns(UserWarning, match="is deprecated,"): dipole = np.array([qml.ExpvalCost(circuit, obs, dev)(None) for obs in 
dip_obs]) assert np.allclose(dipole, exp_dipole, **tol) @pytest.mark.parametrize( ("symbols", "coords", "mult", "msg_match"), [ (["H", "H"], x_h2, 2, "this functionality is constrained to Hartree-Fock states"), (["H", "Ca"], x_h2, 1, "only first- or second-row elements of the periodic table"), ], ) @pytest.mark.usefixtures("skip_if_no_openfermion_support") def test_exceptions_dipole(symbols, coords, mult, msg_match): """Test exceptions of the ``dipole`` function.""" with pytest.raises(ValueError, match=msg_match): qml.qchem.dipole_of(symbols, coords, mult=mult)
c0958b0820353e9f9aff5ea0b27e3f40a2612897
fac6bd9d5ca6b068e9ec5ce8a6e703c9261476a3
/hockey/player.py
de203b93b1a22551f90a8e18a7ef70a813c03e47
[ "MIT" ]
permissive
TrustyJAID/Trusty-cogs
b72b48a7da37d6985cfc7924dae3a2ff4fcbb39f
5b4c87d461065ef55af4f012a89501925a34a8d5
refs/heads/master
2023-09-04T22:55:34.235209
2023-09-04T22:52:25
2023-09-04T22:52:25
160,604,179
170
252
MIT
2023-06-10T17:01:00
2018-12-06T02:02:25
Python
UTF-8
Python
false
false
29,256
py
player.py
from __future__ import annotations

from dataclasses import dataclass
from datetime import datetime
from typing import Literal, Optional, Union

import aiohttp
import discord
from red_commons.logging import getLogger
from redbot.core.i18n import Translator
from redbot.core.utils.chat_formatting import box
from tabulate import tabulate

from .constants import HEADSHOT_URL, TEAMS

_ = Translator("Hockey", __file__)

log = getLogger("red.trusty-cogs.hockey")

# This is somewhat unnecessary but for consistency we have the expected
# object here to "get" the data from the dict the API provides
# this way we can expect a value for each dataclass and not have to worry about
# a lot of other issues that can arise when doing this type of inheritance

SKATER_STATS = {
    "time_on_ice": "timeOnIce",
    "assists": "assists",
    "goals": "goals",
    "pim": "pim",
    "shots": "shots",
    "games": "games",
    "hits": "hits",
    "powerplay_goals": "powerPlayGoals",
    "powerplay_points": "powerPlayPoints",
    "powerplay_time_on_ice": "powerPlayTimeOnIce",
    "event_time_on_ice": "evenTimeOnIce",
    "penalty_minutes": "penaltyMinutes",
    "face_off_percent": "faceOffPct",
    "shot_percent": "shotPct",
    "game_winning_goals": "gameWinningGoals",
    "over_time_goals": "overTimeGoals",
    "short_handed_goals": "shortHandedGoals",
    "short_handed_points": "shortHandedPoints",
    "short_handed_time_on_ice": "shortHandedTimeOnIce",
    "blocked": "blocked",
    "plusminus": "plusMinus",
    "points": "points",
    "shifts": "shifts",
    "time_on_ice_per_game": "timeOnIcePerGame",
    "even_time_on_ice_per_game": "evenTimeOnIcePerGame",
    "short_handed_time_on_ice_per_game": "shortHandedTimeOnIcePerGame",
    "powerplay_time_on_ice_per_game": "powerPlayTimeOnIcePerGame",
}


GOALIE_STATS = {
    "time_on_ice": "timeOnIce",
    "ot": "ot",
    "shutouts": "shutouts",
    "wins": "wins",
    "ties": "ties",
    "losses": "losses",
    "saves": "saves",
    "powerplay_saves": "powerPlaySaves",
    "shorthanded_saves": "shortHandedSaves",
    "even_saves": "evenSaves",
    "shorthanded_shots": "shortHandedShots",
    "even_shots": "evenShots",
    "powerplay_shots": "powerPlayShots",
    "save_percentage": "savePercentage",
    "goals_against_average": "goalAgainstAverage",
    "games": "games",
    "games_started": "gamesStarted",
    "shots_against": "shotsAgainst",
    "goals_against": "goalsAgainst",
    "time_on_ice_per_game": "timeOnIcePerGame",
    "powerplay_save_percentage": "powerPlaySavePercentage",
    "shorthanded_save_percentage": "shortHandedSavePercentage",
    "even_strength_save_percentage": "evenStrengthSavePercentage",
}


FLAG_LOOKUP = {
    "CAN": ":flag_ca:",
    "USA": ":flag_us:",
    "SWE": ":flag_se:",
    "GBR": ":flag_gb:",
    "CZE": ":flag_cz:",
    "LVA": ":flag_lv:",
    "NLD": ":flag_nl:",
    "FIN": ":flag_fi:",
    "UKR": ":flag_ua:",
    "SRB": ":flag_rs:",
    "FRA": ":flag_fr:",
    "ITA": ":flag_it:",
    "VEN": ":flag_ve:",
    "SVK": ":flag_sk:",
    "IRL": ":flag_ie:",
    "RUS": ":flag_ru:",
    "POL": ":flag_pl:",
    "LBN": ":flag_lb:",
    "DEU": ":flag_de:",
    "BRA": ":flag_br:",
    "CHE": ":flag_ch:",
    "DNK": ":flag_dk:",
    "ZAF": ":flag_za:",
    "TWN": ":flag_tw:",
    "JAM": ":flag_jm:",
    "KOR": ":flag_kr:",
    "PRY": ":flag_py:",
    "NOR": ":flag_no:",
    "HTI": ":flag_ht:",
    "MKD": ":flag_mk:",
    "GUY": ":flag_gy:",
    "HUN": ":flag_hu:",
    "AUS": ":flag_au:",
    "AUT": ":flag_at:",
    "BLR": ":flag_by:",
    "GRC": ":flag_gr:",
    "LTU": ":flag_lt:",
    "BHS": ":flag_bs:",
    "JPN": ":flag_jp:",
    "KAZ": ":flag_kz:",
    "NGA": ":flag_ng:",
    "EST": ":flag_ee:",
    "BEL": ":flag_be:",
    "BRN": ":flag_bn:",
    "TZA": ":flag_tz:",
    "SVN": ":flag_si:",
    "HRV": ":flag_hr:",
    "ROU": ":flag_ro:",
    "THA": ":flag_th:",
    "IDN": ":flag_id:",
    "MNE": ":flag_me:",
    "CHN": ":flag_cn:",
    "BGR": ":flag_bg:",
"MEX": ":flag_mx:", "ISR": ":flag_il:", None: "", } @dataclass class SimplePlayer: id: int full_name: str birth_date: Optional[str] home_town: Optional[str] position: Optional[Literal["L", "R", "C", "D", "G"]] height: Optional[int] weight: Optional[int] birth_city: Optional[str] birth_country: Optional[str] birth_state_province: Optional[str] on_roster: Literal["Y", "N"] sweater_number: Optional[int] last_nhl_team_id: Optional[int] current_team_id: Optional[int] is_rookie: Literal["Y", "N"] is_retired: Literal["Y", "N"] is_junior: Literal["Y", "N"] is_suspended: Literal["Y", "N"] deceased: bool date_of_death: Optional[str] nationality: Optional[str] long_term_injury: Literal["Y", "N"] shoots_catches: Optional[Literal["L", "R"]] ep_player_id: Optional[int] dda_id: Optional[int] def __str__(self) -> str: return "{0.full_name}, born {0.birth_date}".format(self) def __repr__(self) -> str: return "<Player name={0.full_name} id={0.id} number={0.sweater_number}>".format(self) def description(self) -> str: desc = { "birth_date": _("Born: "), "deceased": _("Deceased: "), "home_town": _("Hometown: "), "position": _("Position: "), "height": _("Height: "), "weight": _("Weight: "), "is_rookie": _("Rookie"), "is_junior": _("Junior"), "is_suspended": _("Suspended"), } msg = "" for attr, name in desc.items(): if getattr(self, attr): if attr == "height" and self.height: msg += ( name + f"{self.height//12}' {self.height%12}\" / {int(self.height * 2.54)} cm\n" ) elif attr == "birth_date" and self.birth_date is not None: years = int( (datetime.now() - datetime.strptime(self.birth_date, "%Y-%m-%d")).days / 365.25 ) msg += name + f"{getattr(self, attr)} ({years})\n" flag = FLAG_LOOKUP[self.birth_country] msg += ( ", ".join( [ i for i in [self.birth_city, self.birth_state_province] if i is not None ] ) + f" {flag}\n" ) elif attr == "weight" and self.weight: msg += name + f"{self.weight} lbs / {int(self.weight * 0.453592)} kg\n" elif attr == "home_town": flag = FLAG_LOOKUP[self.nationality] msg += name + f"{getattr(self, attr)} {flag}\n" elif attr == "position": shoots = f"({getattr(self, 'shoots_catches', '')})" ir = "\N{ADHESIVE BANDAGE}" if getattr(self, "long_term_injury") == "Y" else "" msg += name + f"{getattr(self, attr)} {shoots if shoots != '()' else ''}{ir}\n" elif attr == "deceased": death_date = getattr(self, "date_of_death", "") msg += f"{name} {death_date}\n" if getattr(self, attr) else "" elif attr in ["is_rookie", "is_junior", "is_suspended"]: if getattr(self, attr) == "Y": msg += f"{name}\n" elif attr == "dda_id": msg += name.format(dda_id=self.dda_id) + "\n" else: msg += name + f"{getattr(self, attr)}\n" links = [ _("[Elite Prospects]({ep_url})").format(ep_url=self.ep_url()), _("[Cap Friendly]({cf_url})").format(cf_url=self.cap_friendly_url()), ] if getattr(self, "dda_id"): links.append( _( "[HHOF]( https://www.hhof.com/LegendsOfHockey/jsp/SearchPlayer.jsp?player={dda_id})" ).format(dda_id=self.dda_id) ) msg += " | ".join(links) return msg def headshot(self) -> str: return HEADSHOT_URL.format(self.id) def get_embed(self) -> discord.Embed: try: team_id = self.current_team_id or self.last_nhl_team_id log.verbose("SimplePlayer team_id: %s", team_id) team_name = [name for name, team in TEAMS.items() if team["id"] == team_id][0] colour = int(TEAMS[team_name]["home"].replace("#", ""), 16) logo = TEAMS[team_name]["logo"] except IndexError: team_name = _("No Team") colour = 0xFFFFFF logo = "https://cdn.bleacherreport.net/images/team_logos/328x328/nhl.png" em = discord.Embed(colour=colour) em.description 
= self.description() em.set_thumbnail(url=self.headshot()) number = f"#{self.sweater_number}" if self.sweater_number else "" em.set_author(name=f"{self.full_name} {number}", icon_url=logo) em.description = self.description() return em async def get_full_stats( self, season: Optional[str], session: Optional[aiohttp.ClientSession] = None ) -> Union[Player, Goalie, Skater]: url = f"https://statsapi.web.nhl.com/api/v1/people/{self.id}/stats?stats=yearByYear" log.verbose("get_full_stats url: %s", url) log.verbose("get_full_stats url: %s", season) if session is None: async with aiohttp.ClientSession() as new_session: async with new_session.get(url) as resp: data = await resp.json() else: async with session.get(url) as resp: data = await resp.json() for seasons in reversed(data["stats"][0]["splits"]): if seasons["league"].get("id", None) != 133: continue stats_season = seasons["season"] if season in [stats_season, None]: setattr(self, "last_nhl_team_id", seasons["team"].get("id", None)) if self.position == "G": stats = [seasons["stat"].get(v, "") for k, v in GOALIE_STATS.items()] player = Goalie( *self.__dict__.values(), stats_season, *stats, ) return await player.get_full_stats(season or stats_season) else: stats = [seasons["stat"].get(v, "") for v in SKATER_STATS.values()] player = Skater( *self.__dict__.values(), stats_season, *stats, ) return await player.get_full_stats(season or stats_season) log.verbose("Returning %r", self) return self def full_name_url(self) -> str: return self.full_name.replace(" ", "-").lower() def ep_url(self) -> str: return f"https://www.eliteprospects.com/player/{self.ep_player_id}/{self.full_name_url()}" def cap_friendly_url(self) -> str: return f"https://www.capfriendly.com/players/{self.full_name_url()}" @classmethod async def from_id( cls, player_id: int, session: Optional[aiohttp.ClientSession] = None ) -> Player: url = f"https://records.nhl.com/site/api/player/{player_id}" if session is None: async with aiohttp.ClientSession() as new_session: async with new_session.get(url) as resp: data = await resp.json() else: async with session.get(url) as resp: data = await resp.json() log.info("SimplePlayer from_id data %s", data) return cls(*data["data"][0].values()) @dataclass class Player(SimplePlayer): id: int accrued_seasons: Optional[int] add_names: Optional[str] age_signed_waiver: Optional[int] age_signel_fa: Optional[int] alert: Literal["Y", "N"] career_team_id: Optional[int] central_registry_position: Optional[str] club_elect_arb: Literal["Y", "N"] current_team_id: Optional[int] date_of_death: Optional[str] dda_id: Optional[int] deceased: bool ep_player_id: Optional[int] fa_group_after_season: Literal[None] first_name: str first_signed_by_team_id: Optional[int] free_agent_group: Optional[str] group_5_election: Literal["Y", "N"] group_5_seasons_earned: Optional[int] group_6_proration: Literal[None] group_6_seasons_earned: Optional[int] groups_earned_thru_season: Optional[int] hof_induction_year: Optional[int] iihf_hof_induction_year: Optional[int] in_hockey_hof: bool in_iihf_hof: int in_top_100_all_time: int in_us_hockey_hof: bool is_defected: Literal["Y", "N"] is_deleted: Literal["Y", "N"] is_junior: Literal["Y", "N"] is_retired: Literal[None] is_rookie: Literal["Y", "N"] is_suspended: Literal["Y", "N"] last_ameteur_league_id: Optional[int] last_ameteur_team_id: Optional[int] last_nhl_team_id: Optional[int] last_name: str loan_cap_exception: Literal["Y", "N"] long_term_injury: Literal["Y", "N"] message: Optional[str] middle_name: Optional[str] nationality: 
Optional[str] nhl_experience: Optional[int] platform_year: Optional[int] pr_name: str pr_stat: int pro_year_reduction: Optional[int] reenty_waivers: Optional[Literal["Y", "N"]] roster_special_code: Optional[str] salary_arbitration_exp: Optional[int] shoots_catches: Optional[Literal["L", "R"]] update_timestamp: str us_hof_induction_year: Optional[int] vet_cap_exception: Literal["Y", "N"] waiver_amount: Optional[int] waiver_draft: Optional[str] waiver_status: Literal["Y", "N"] weight: Optional[int] years_pro: Optional[int] @dataclass class Skater(SimplePlayer): season: str time_on_ice: str assists: int goals: int pim: int shots: int games: int hits: int powerplay_goals: int powerplay_points: int powerplay_time_on_ice: str event_time_on_ice: str penalty_minutes: str face_off_percent: float shot_percent: float game_winning_goals: int over_time_goals: int short_handed_goals: int short_handed_points: int short_handed_time_on_ice: str blocked: int plusminus: int points: int shifts: int time_on_ice_per_game: str even_time_on_ice_per_game: str shorthanded_time_on_ice_per_game: str powerplay_time_on_ice_per_game: str def __str__(self) -> str: return "{0.full_name}, goals {0.goals}, games {0.games}".format(self) def __repr__(self) -> str: return "<Skater name={0.full_name} id={0.id} number={0.sweater_number}>".format(self) async def get_full_stats( self, season: Optional[str], session: Optional[aiohttp.ClientSession] = None ) -> Union[Skater, SkaterPlayoffs]: url = ( f"https://statsapi.web.nhl.com/api/v1/people/{self.id}/stats?stats=yearByYearPlayoffs" ) log.debug("Skater get_full_stats url: %s", url) log.debug("Skater get_full_stats season: %s", season) if session is None: async with aiohttp.ClientSession() as new_session: async with new_session.get(url) as resp: data = await resp.json() else: async with session.get(url) as resp: data = await resp.json() for seasons in reversed(data["stats"][0]["splits"]): stats_season = seasons["season"] if season in [stats_season, None]: stats = [seasons["stat"].get(v, "") for v in SKATER_STATS.values()] player = SkaterPlayoffs( *self.__dict__.values(), *stats, ) return player return self def time_on_ice_average(self) -> str: if self.time_on_ice: minutes, seconds = self.time_on_ice.split(":") total_seconds = (int(minutes) * 60) + int(seconds) average_min = int((total_seconds / self.games) // 60) average_sec = int((total_seconds / self.games) % 60) if average_sec < 10: average_sec = f"0{average_sec}" return f"{average_min}:{average_sec}" return "" def get_embed(self) -> discord.Embed: try: team_id = self.current_team_id log.debug("Skater get_embed team_id: %s", team_id) team_name = [name for name, team in TEAMS.items() if team["id"] == team_id][0] colour = int(TEAMS[team_name]["home"].replace("#", ""), 16) logo = TEAMS[team_name]["logo"] except IndexError: team_name = _("No Team") colour = 0xFFFFFF logo = "https://cdn.bleacherreport.net/images/team_logos/328x328/nhl.png" try: team_id = self.last_nhl_team_id log.debug("Skater get_embed team_id: %s", team_id) team_name = [name for name, team in TEAMS.items() if team["id"] == team_id][0] emoji = f'<:{TEAMS[team_name]["emoji"]}>' except IndexError: team_name = _("No Team") emoji = "" em = discord.Embed(colour=colour) number = f"#{self.sweater_number}" if self.sweater_number else "" em.set_author(name=f"{self.full_name} {number}", icon_url=logo) em.set_thumbnail(url=self.headshot()) em.description = self.description() post_data = [ [_("GP"), f"[ {self.games} ]"], [_("Shots"), f"[ {self.shots} ]"], [_("Goals"), f"[ 
{self.goals} ]"], [_("Assists"), f"[ {self.assists} ]"], [_("Hits"), f"[ {self.hits} ]"], [_("Faceoff %"), f"[ {self.face_off_percent} ]"], ["+/-", f"[ {self.plusminus} ]"], [_("Blocked Shots"), f"[ {self.blocked} ]"], [_("PIM"), f"[ {self.pim} ]"], [_("Avg. TOI"), f"[ {self.time_on_ice_average()} ]"], ] stats_md = tabulate( post_data, headers=[_("Stats"), f"{self.season[:4]}-{self.season[4:]}"] ) em.set_thumbnail(url=self.headshot()) stats_str = f"{emoji} {team_name} {emoji}\n{box(stats_md, lang='apache')}" em.add_field(name=_("Stats"), value=stats_str) return em @dataclass class SkaterPlayoffs(Skater): p_time_on_ice: str p_assists: int p_goals: int p_pim: int p_shots: int p_games: int p_hits: int p_powerplay_goals: int p_powerplay_points: int p_powerplay_time_on_ice: str p_event_time_on_ice: str p_penalty_minutes: str p_face_off_percent: float p_shot_percent: float p_game_winning_goals: int p_over_time_goals: int p_short_handed_goals: int p_short_handed_points: int p_short_handed_time_on_ice: str p_blocked: int p_plusminus: int p_points: int p_shifts: int p_time_on_ice_per_game: str p_even_time_on_ice_per_game: str p_shorthanded_time_on_ice_per_game: str p_powerplay_time_on_ice_per_game: str def __str__(self) -> str: return "{0.full_name}, goals {0.goals}, games {0.games}".format(self) def __repr__(self) -> str: return "<Skater name={0.full_name} id={0.id} number={0.sweater_number}>".format(self) def p_time_on_ice_average(self) -> str: if self.p_time_on_ice: minutes, seconds = self.p_time_on_ice.split(":") total_seconds = (int(minutes) * 60) + int(seconds) average_min = int((total_seconds / self.p_games) // 60) average_sec = int((total_seconds / self.p_games) % 60) if average_sec < 10: average_sec = f"0{average_sec}" return f"{average_min}:{average_sec}" return "" def get_embed(self) -> discord.Embed: try: team_id = self.current_team_id log.debug("SkaterPlayoffs get_embed team_id: %s", team_id) team_name = [name for name, team in TEAMS.items() if team["id"] == team_id][0] colour = int(TEAMS[team_name]["home"].replace("#", ""), 16) logo = TEAMS[team_name]["logo"] except IndexError: team_name = _("No Team") colour = 0xFFFFFF logo = "https://cdn.bleacherreport.net/images/team_logos/328x328/nhl.png" try: team_id = self.last_nhl_team_id log.debug("SkaterPlayoffs get_embed team_id: %s", team_id) team_name = [name for name, team in TEAMS.items() if team["id"] == team_id][0] emoji = f'<:{TEAMS[team_name]["emoji"]}>' except IndexError: team_name = _("No Team") emoji = "" em = discord.Embed(colour=colour) number = f"#{self.sweater_number}" if self.sweater_number else "" em.set_author(name=f"{self.full_name} {number}", icon_url=logo) em.set_thumbnail(url=self.headshot()) em.description = self.description() post_data = [ [_("GP"), f"[ {self.games} ]", f"[ {self.p_games} ]"], [_("Shots"), f"[ {self.shots} ]", f"[ {self.p_shots} ]"], [_("Goals"), f"[ {self.goals} ]", f"[ {self.p_goals} ]"], [_("Assists"), f"[ {self.assists} ]", f"[ {self.p_assists} ]"], [_("Hits"), f"[ {self.hits} ]", f"[ {self.p_hits} ]"], [_("Faceoff %"), f"[ {self.face_off_percent} ]", f"[ {self.p_face_off_percent} ]"], ["+/-", f"[ {self.plusminus} ]", f"[ {self.p_plusminus} ]"], [_("Blocked"), f"[ {self.blocked} ]", f"[ {self.p_blocked} ]"], [_("PIM"), f"[ {self.pim} ]", f"[ {self.p_pim} ]"], [ _("Avg. 
TOI"), f"[ {self.time_on_ice_average()} ]", f"[ {self.p_time_on_ice_average()} ]", ], ] stats_md = tabulate( post_data, headers=[_("Stats"), f"{self.season[:4]}-{self.season[4:]}", _("Playoffs")] ) em.set_thumbnail(url=self.headshot()) stats_str = f"{emoji} {team_name} {emoji}\n{box(stats_md, lang='apache')}" em.add_field(name=_("Stats"), value=stats_str) return em @dataclass class Goalie(SimplePlayer): season: str time_on_ice: str ot: int shutouts: int ties: int wins: int losses: int saves: int powerplay_saves: int shorthanded_saves: int even_saves: int shorthanded_shots: int even_shots: int powerplay_shots: int save_percentage: float goals_against_average: float games: int games_started: int shots_against: int goals_against: int time_on_ice_per_game: str powerplay_save_percentage: float shorthanded_save_percentage: float even_strength_save_percentage: float def __str__(self) -> str: return "{0.full_name}, GAA {0.goals_against_average}, games {0.games}".format(self) def __repr__(self) -> str: return "<Goalie name={0.full_name} id={0.id} number={0.sweater_number}>".format(self) async def get_full_stats( self, season: Optional[str], session: Optional[aiohttp.ClientSession] = None ) -> Union[Goalie, GoaliePlayoffs]: url = ( f"https://statsapi.web.nhl.com/api/v1/people/{self.id}/stats?stats=yearByYearPlayoffs" ) log.verbose("Goalie get_full_stats url: %s", url) log.verbose("Goalie get_full_stats season: %s", season) if session is None: async with aiohttp.ClientSession() as new_session: async with new_session.get(url) as resp: data = await resp.json() else: async with session.get(url) as resp: data = await resp.json() for seasons in reversed(data["stats"][0]["splits"]): stats_season = seasons["season"] if season in [stats_season, None]: stats = [seasons["stat"].get(v, "") for v in GOALIE_STATS.values()] player = GoaliePlayoffs( *self.__dict__.values(), *stats, ) return player return self def get_embed(self) -> discord.Embed: try: team_id = self.current_team_id log.verbose("Goalie team_id: %s", team_id) team_name = [name for name, team in TEAMS.items() if team["id"] == team_id][0] colour = int(TEAMS[team_name]["home"].replace("#", ""), 16) logo = TEAMS[team_name]["logo"] except IndexError: team_name = _("No Team") colour = 0xFFFFFF logo = "https://cdn.bleacherreport.net/images/team_logos/328x328/nhl.png" try: team_id = self.last_nhl_team_id log.verbose("Goalie team_id: %s", team_id) team_name = [name for name, team in TEAMS.items() if team["id"] == team_id][0] emoji = f'<:{TEAMS[team_name]["emoji"]}>' except IndexError: team_name = _("No Team") emoji = "" em = discord.Embed(colour=colour) number = f"#{self.sweater_number}" if self.sweater_number else "" em.set_author(name=f"{self.full_name} {number}", icon_url=logo) em.set_thumbnail(url=self.headshot()) em.description = self.description() post_data = [ [_("GP"), f"[ {self.games} ]"], [_("SO"), f"[ {self.shutouts} ]"], [_("Saves"), f"[ {self.saves} ]"], [_("Save %"), f"[ {self.save_percentage} ]"], [_("GAA"), f"[ {self.goals_against_average} ]"], [_("Started"), f"[ {self.games_started} ]"], ] stats_md = tabulate( post_data, headers=[_("Stats"), f"{self.season[:4]}-{self.season[4:]}"] ) em.set_thumbnail(url=self.headshot()) stats_str = f"{emoji} {team_name} {emoji}\n{box(stats_md, lang='apache')}" em.add_field(name=_("Stats"), value=stats_str) return em @dataclass class GoaliePlayoffs(Goalie): p_time_on_ice: str p_ot: int p_shutouts: int p_ties: int p_wins: int p_losses: int p_saves: int p_powerplay_saves: int p_shorthanded_saves: int 
p_even_saves: int p_shorthanded_shots: int p_even_shots: int p_powerplay_shots: int p_save_percentage: float p_goals_against_average: float p_games: int p_games_started: int p_shots_against: int p_goals_against: int p_time_on_ice_per_game: str p_powerplay_save_percentage: float p_shorthanded_save_percentage: float p_even_strength_save_percentage: float def __str__(self) -> str: return "{0.full_name}, GAA {0.goals_against_average}, games {0.games}".format(self) def __repr__(self) -> str: return "<Goalie name={0.full_name} id={0.id} number={0.sweater_number}>".format(self) def get_embed(self) -> discord.Embed: try: team_id = self.current_team_id log.verbose("GoaliePlayoffs team_id: %s", team_id) team_name = [name for name, team in TEAMS.items() if team["id"] == team_id][0] colour = int(TEAMS[team_name]["home"].replace("#", ""), 16) logo = TEAMS[team_name]["logo"] except IndexError: team_name = _("No Team") colour = 0xFFFFFF logo = "https://cdn.bleacherreport.net/images/team_logos/328x328/nhl.png" try: team_id = self.last_nhl_team_id log.verbose("GoaliePlayoffs team_id: %s", team_id) team_name = [name for name, team in TEAMS.items() if team["id"] == team_id][0] emoji = f'<:{TEAMS[team_name]["emoji"]}>' except IndexError: team_name = _("No Team") emoji = "" em = discord.Embed(colour=colour) number = f"#{self.sweater_number}" if self.sweater_number else "" em.set_author(name=f"{self.full_name} {number}", icon_url=logo) em.set_thumbnail(url=self.headshot()) em.description = self.description() post_data = [ [_("GP"), f"[ {self.games} ]", f"[ {self.p_games} ]"], [_("SO"), f"[ {self.shutouts} ]", f"[ {self.p_shutouts} ]"], [_("Saves"), f"[ {self.saves} ]", f"[ {self.p_saves} ]"], [_("Save %"), f"[ {self.save_percentage} ]", f"[ {self.p_save_percentage} ]"], [_("GAA"), f"[ {self.goals_against_average} ]", f"[ {self.p_goals_against_average} ]"], [_("Started"), f"[ {self.games_started} ]", f"[ {self.p_games_started} ]"], ] stats_md = tabulate( post_data, headers=[_("Stats"), f"{self.season[:4]}-{self.season[4:]}", _("Playoffs")] ) em.set_thumbnail(url=self.headshot()) stats_str = f"{emoji} {team_name} {emoji}\n{box(stats_md, lang='apache')}" em.add_field(name=_("Stats"), value=stats_str) return em
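

# A minimal usage sketch (hedged): the player ID below and the event-loop
# handling are illustrative assumptions, not part of this module.
#
#     import asyncio
#
#     async def demo() -> None:
#         player = await SimplePlayer.from_id(8471675)  # hypothetical NHL player ID
#         stats = await player.get_full_stats(None)
#         embed = stats.get_embed()
#
#     asyncio.run(demo())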
1874331a37b7071791e7fbef93c9e4e9dece2096
99bdb3251fecee538e0630f15f6574054dfc1468
/bsp/Infineon/libraries/IFX_PSOC6_HAL/SConscript
5ef305f38fdcdee104dad227bf7d842a16f970a1
[ "Apache-2.0", "Zlib", "LicenseRef-scancode-proprietary-license", "MIT", "BSD-3-Clause", "X11", "BSD-4-Clause-UC", "LicenseRef-scancode-unknown-license-reference" ]
permissive
RT-Thread/rt-thread
03a7c52c2aeb1b06a544143b0e803d72f47d1ece
3602f891211904a27dcbd51e5ba72fefce7326b2
refs/heads/master
2023-09-01T04:10:20.295801
2023-08-31T16:20:55
2023-08-31T16:20:55
7,408,108
9,599
5,805
Apache-2.0
2023-09-14T13:37:26
2013-01-02T14:49:21
C
UTF-8
Python
false
false
8,245
SConscript
from building import *
import rtconfig

Import('RTT_ROOT')

# get current directory
cwd = GetCurrentDir()

# The set of source files associated with this SConscript file.
src = Split('''
mtb-hal-cat1/source/cyhal_clock.c
mtb-hal-cat1/source/cyhal_hwmgr.c
mtb-hal-cat1/source/cyhal_syspm.c
mtb-hal-cat1/source/cyhal_system.c
mtb-hal-cat1/source/cyhal_uart.c
mtb-hal-cat1/source/cyhal_gpio.c
mtb-hal-cat1/source/cyhal_scb_common.c
mtb-hal-cat1/source/cyhal_interconnect.c
mtb-hal-cat1/source/cyhal_utils.c
mtb-hal-cat1/source/cyhal_lptimer.c
mtb-hal-cat1/source/cyhal_utils_impl.c
mtb-hal-cat1/source/cyhal_irq_impl.c
mtb-pdl-cat1/drivers/source/cy_sysclk.c
mtb-pdl-cat1/drivers/source/cy_systick.c
mtb-pdl-cat1/drivers/source/cy_gpio.c
mtb-pdl-cat1/drivers/source/cy_sysint.c
mtb-pdl-cat1/drivers/source/cy_syslib.c
mtb-pdl-cat1/drivers/source/cy_scb_i2c.c
mtb-pdl-cat1/drivers/source/cy_syspm.c
mtb-pdl-cat1/drivers/source/cy_mcwdt.c
mtb-pdl-cat1/drivers/source/cy_ipc_pipe.c
mtb-pdl-cat1/drivers/source/cy_ipc_sema.c
mtb-pdl-cat1/drivers/source/cy_ipc_drv.c
mtb-pdl-cat1/drivers/source/cy_trigmux.c
mtb-pdl-cat1/drivers/source/cy_prot.c
mtb-pdl-cat1/drivers/source/cy_scb_common.c
''')

if GetDepend(['SOC_CY8C624ABZI_S2D44']):
    src += ['mtb-pdl-cat1/devices/COMPONENT_CAT1A/source/cy_device.c']
    src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/triggers/cyhal_triggers_psoc6_02.c']
    src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/pin_packages/cyhal_psoc6_02_124_bga.c']
    src += Glob('psoc6cm0p/COMPONENT_CM0P_SLEEP/*.c')

if GetDepend(['SOC_CY8C6245LQI_S3D72']):
    src += ['mtb-pdl-cat1/devices/COMPONENT_CAT1A/source/cy_device.c']
    src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/triggers/cyhal_triggers_psoc6_03.c']
    src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/pin_packages/cyhal_psoc6_03_68_qfn.c']
    src += Glob('psoc6cm0p/COMPONENT_CM0P_SLEEP/*.c')

if GetDepend(['SOC_CY8C624ALQI_S2D42']):
    src += ['mtb-pdl-cat1/devices/COMPONENT_CAT1A/source/cy_device.c']
    src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/triggers/cyhal_triggers_psoc6_02.c']
    src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/pin_packages/cyhal_psoc6_02_68_qfn.c']
    src += Glob('psoc6cm0p/COMPONENT_CM0P_SLEEP/*.c')

if GetDepend(['SOC_CY8C6247BZI_D54']):
    src += ['mtb-pdl-cat1/devices/COMPONENT_CAT1A/source/cy_device.c']
    src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/triggers/cyhal_triggers_psoc6_01.c']
    src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/pin_packages/cyhal_psoc6_01_124_bga.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_flash.c']
    src += Glob('psoc6cm0p/COMPONENT_CM0P_SLEEP/*.c')

if GetDepend(['SOC_CY8C6347BZI_BLD53']):
    src += ['mtb-pdl-cat1/devices/COMPONENT_CAT1A/source/cy_device.c']
    src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/triggers/cyhal_triggers_psoc6_01.c']
    src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/pin_packages/cyhal_psoc6_01_116_bga_ble.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_flash.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_ble_clk.c']
    src += Glob('psoc6cm0p/COMPONENT_CM0P_SLEEP/*.c')

if GetDepend(['SOC_CY8C6244LQI_S4D92']):
    src += ['mtb-pdl-cat1/devices/COMPONENT_CAT1A/source/cy_device.c']
    src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/triggers/cyhal_triggers_psoc6_04.c']
    src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/pin_packages/cyhal_psoc6_04_68_qfn.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_flash.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_ble_clk.c']
    src += Glob('psoc6cm0p/COMPONENT_CM0P_SLEEP/*.c')

if GetDepend(['RT_USING_SERIAL']):
    src += ['mtb-hal-cat1/source/cyhal_uart.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_scb_uart.c']

if GetDepend(['RT_USING_ADC']):
    src += ['mtb-hal-cat1/source/cyhal_dma_dw.c']
    src += ['mtb-hal-cat1/source/cyhal_dma_dmac.c']
    src += ['mtb-hal-cat1/source/cyhal_dma.c']
    src += ['mtb-hal-cat1/source/cyhal_adc_sar.c']
    src += ['mtb-hal-cat1/source/cyhal_analog_common.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_dma.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_sar.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_dmac.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_sysanalog.c']

if GetDepend(['RT_USING_SDIO']) or GetDepend(['BSP_USING_CYW43012_WIFI']):
    src += ['mtb-hal-cat1/source/cyhal_sdhc.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_sd_host.c']

if GetDepend(['RT_USING_PWM']):
    src += ['mtb-hal-cat1/source/cyhal_pwm.c']
    src += ['mtb-hal-cat1/source/cyhal_timer.c']
    src += ['mtb-hal-cat1/source/cyhal_tcpwm_common.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_tcpwm_pwm.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_tcpwm_counter.c']

if GetDepend(['RT_USING_SPI']):
    src += ['mtb-hal-cat1/source/cyhal_spi.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_scb_spi.c']

if GetDepend(['RT_USING_I2C']):
    src += ['mtb-hal-cat1/source/cyhal_i2c.c']

if GetDepend('BSP_USING_RTC'):
    src += ['mtb-hal-cat1/source/cyhal_rtc.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_rtc.c']

if GetDepend('BSP_USING_ON_CHIP_FLASH'):
    src += ['mtb-hal-cat1/source/cyhal_flash.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_flash.c']

if GetDepend(['BSP_USING_SLIDER']):
    src += ['capsense/cy_capsense_control.c']
    src += ['capsense/cy_capsense_sensing.c']
    src += ['capsense/cy_capsense_sensing_v2.c']
    src += ['capsense/cy_capsense_csx_v2.c']
    src += ['capsense/cy_capsense_csd_v2.c']
    src += ['capsense/cy_capsense_processing.c']
    src += ['capsense/cy_capsense_tuner.c']
    src += ['capsense/cy_capsense_structure.c']
    src += ['capsense/cy_capsense_centroid.c']
    src += ['capsense/cy_capsense_filter.c']
    src += ['mtb-pdl-cat1/drivers/source/cy_csd.c']
    if rtconfig.PLATFORM in ['armclang']:
        src += ['lib/cy_capsense.lib']

if GetDepend(['RT_USING_WDT']):
    src += ['mtb-pdl-cat1/drivers/source/cy_wdt.c']
    src += ['mtb-hal-cat1/source/cyhal_wdt.c']

if GetDepend(['RT_USING_DAC']):
    src += ['mtb_shared/csdidac/cy_csdidac.c']

if GetDepend(['RT_USING_HWTIMER']):
    src += ['mtb-hal-cat1/source/cyhal_timer.c']

path = [cwd + '/retarget-io',
        cwd + '/core-lib/include',
        cwd + '/mtb_shared/usbdev',
        cwd + '/mtb_shared/csdidac',
        cwd + '/mtb_shared/serial-flash',
        cwd + '/mtb-pdl-cat1/cmsis/include',
        cwd + '/mtb-pdl-cat1/drivers/include',
        cwd + '/mtb-hal-cat1/include_pvt',
        cwd + '/mtb-hal-cat1/include']

CAT1A_SOCS = [
    'SOC_CY8C624ABZI_S2D44',
    'SOC_CY8C6245LQI_S3D72',
    'SOC_CY8C624ALQI_S2D42',
    'SOC_CY8C6247BZI_D54',
    'SOC_CY8C6347BZI_BLD53',
    'SOC_CY8C6244LQI_S4D92',
]

# Every supported SoC above is a CAT1A part, so they share the same include paths.
if any(GetDepend([soc]) for soc in CAT1A_SOCS):
    path += [cwd + '/psoc6cm0p']
    path += [cwd + '/capsense']
    path += [cwd + '/mtb-hal-cat1/COMPONENT_CAT1A/include']
    path += [cwd + '/mtb-pdl-cat1/devices/COMPONENT_CAT1A/include']

group = DefineGroup('Libraries', src, depend=[''], CPPPATH=path)

Return('group')
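
# A hedged sketch of how another CAT1A part could be wired in; the Kconfig
# symbol and pin-package file below are illustrative assumptions, not real
# options.  The new symbol would also be appended to CAT1A_SOCS above so the
# shared include paths apply:
#
#     if GetDepend(['SOC_CY8C6XXX_EXAMPLE']):
#         src += ['mtb-pdl-cat1/devices/COMPONENT_CAT1A/source/cy_device.c']
#         src += ['mtb-hal-cat1/COMPONENT_CAT1A/source/pin_packages/cyhal_psoc6_0x_example.c']
#         src += Glob('psoc6cm0p/COMPONENT_CM0P_SLEEP/*.c')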
03203cdc3c7793b83c0acc6046a2a6d5305e85d1
9ed4d46aedd4d4acadb48d610e940594b5b7b3fd
/project_euler/problem_042/solution42.py
f8a54e40eaab40ee723072a2e413b33f5df49f42
[ "CC-BY-NC-4.0", "CC-BY-NC-SA-4.0", "MIT" ]
permissive
TheAlgorithms/Python
7596a0e236ed12a61f9db19a7ea68309779cc85b
421ace81edb0d9af3a173f4ca7e66cc900078c1d
refs/heads/master
2023-09-01T17:32:20.190949
2023-08-29T13:18:10
2023-08-29T13:18:10
63,476,337
184,217
48,615
MIT
2023-09-14T02:05:29
2016-07-16T09:44:01
Python
UTF-8
Python
false
false
1,362
py
solution42.py
"""
The nth term of the sequence of triangle numbers is given by, tn = ½n(n+1); so
the first ten triangle numbers are:

1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...

By converting each letter in a word to a number corresponding to its
alphabetical position and adding these values we form a word value. For
example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value
is a triangle number then we shall call the word a triangle word.

Using words.txt (right click and 'Save Link/Target As...'), a 16K text file
containing nearly two-thousand common English words, how many are triangle
words?
"""
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """
    Finds the amount of triangular words in the words file.

    >>> solution()
    162
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""

    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
f2e81ca512636c83bff0ab7c213174982e595736
82f99c9f492551c6050910c4869b2a35ca15df87
/Utilities/ParaView/export-scene-macro.py
a739bacf4ea5d77b075da6f8ba98f7a0bc70bdc6
[ "BSD-3-Clause" ]
permissive
Kitware/vtk-js
e87efd5ed13bc1ee3482a7596de7cab4f75ff184
26bd8da0ec03e881f5dc6af5d0096ebf0f005613
refs/heads/master
2023-09-04T04:35:47.758832
2023-08-31T22:08:06
2023-09-01T15:28:57
57,900,965
1,079
397
BSD-3-Clause
2023-09-14T14:35:17
2016-05-02T15:44:11
JavaScript
UTF-8
Python
false
false
20,532
py
export-scene-macro.py
import zipfile
from urllib.parse import quote
import hashlib
import shutil
import gzip
import json
import errno
import time
import os
import sys

from paraview.vtk import *
from paraview import simple

# ### ----------------------------------------------------------------------- ###
# ### Configure output location                                                ###
# ###
# A top-level export directory will be created (if necessary) and used to
# store all exported scenes.  Use the EXPORT_DIRECTORY pattern below to
# customize where this directory should be.  Automatic replacement will
# be done on the following variables if they appear in the pattern:
#
#   ${USER_HOME} : Will be replaced by the current user's home directory
# ### ----------------------------------------------------------------------- ###

EXPORT_DIRECTORY = '${USER_HOME}/vtkJsExport'
FILENAME_EXTENSION = '.vtkjs'

# ### ----------------------------------------------------------------------- ###
# ### Convenience methods and definitions                                      ###
# ### ----------------------------------------------------------------------- ###

try:
    from vtk.vtkFiltersGeometry import vtkCompositeDataGeometryFilter
except ImportError:
    from vtkFiltersGeometry import vtkCompositeDataGeometryFilter

USER_HOME = os.path.expanduser('~')
ROOT_OUTPUT_DIRECTORY = EXPORT_DIRECTORY.replace('${USER_HOME}', USER_HOME)
ROOT_OUTPUT_DIRECTORY = os.path.normpath(ROOT_OUTPUT_DIRECTORY)

arrayTypesMapping = ' bBhHiIlLfdLs'  # last ones are idtype and string

jsMapping = {
    'b': 'Int8Array',
    'B': 'Uint8Array',
    'h': 'Int16Array',
    'H': 'Uint16Array',
    'i': 'Int32Array',
    'I': 'Uint32Array',
    'l': 'Int32Array',
    'L': 'Uint32Array',
    'f': 'Float32Array',
    'd': 'Float64Array',
    's': 'string',
}

writerMapping = {}

# -----------------------------------------------------------------------------


def getRangeInfo(array, component):
    r = array.GetRange(component)
    compRange = {}
    compRange['min'] = r[0]
    compRange['max'] = r[1]
    compRange['component'] = array.GetComponentName(component)
    return compRange

# -----------------------------------------------------------------------------


def getRef(destDirectory, md5):
    ref = {}
    ref['id'] = md5
    ref['encode'] = 'BigEndian' if sys.byteorder == 'big' else 'LittleEndian'
    ref['basepath'] = destDirectory
    return ref

# -----------------------------------------------------------------------------


objIds = []


def getObjectId(obj):
    try:
        idx = objIds.index(obj)
        return idx + 1
    except ValueError:
        objIds.append(obj)
        return len(objIds)

# -----------------------------------------------------------------------------


def dumpDataArray(datasetDir, dataDir, array, root=None, compress=True):
    if not array:
        return None
    if root is None:
        root = {}
    if array.GetDataType() == 12:
        # IdType needs to be converted to Uint32
        arraySize = array.GetNumberOfTuples() * array.GetNumberOfComponents()
        newArray = vtkTypeUInt32Array()
        newArray.SetNumberOfTuples(arraySize)
        for i in range(arraySize):
            newArray.SetValue(i, -1 if array.GetValue(i) < 0 else array.GetValue(i))
        pBuffer = memoryview(newArray)
    elif array.GetDataType() == 13:
        # vtkStringArray - write as a utf-8 encoded string that contains a
        # JSON array with the strings
        arraySize = array.GetNumberOfTuples() * array.GetNumberOfComponents()
        newArray = json.dumps([array.GetValue(v) for v in range(arraySize)])
        pBuffer = memoryview(newArray.encode('utf-8'))
    else:
        pBuffer = memoryview(array)

    pMd5 = hashlib.md5(pBuffer).hexdigest()
    pPath = os.path.join(dataDir, pMd5)
    with open(pPath, 'wb') as f:
        f.write(pBuffer)

    if compress:
        with open(pPath, 'rb') as f_in, gzip.open(os.path.join(dataDir, pMd5 + '.gz'), 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
        os.remove(pPath)

    root['ref'] = getRef(os.path.relpath(dataDir, datasetDir), pMd5)
    root['vtkClass'] = 'vtkDataArray'
    root['name'] = array.GetName()
    root['dataType'] = jsMapping[arrayTypesMapping[array.GetDataType()]]
    root['numberOfComponents'] = array.GetNumberOfComponents()
    root['size'] = array.GetNumberOfComponents() * array.GetNumberOfTuples()
    root['ranges'] = []
    if array.GetDataType() != 13:
        # ranges do not make sense for vtkStringArray
        if root['numberOfComponents'] > 1:
            for i in range(root['numberOfComponents']):
                root['ranges'].append(getRangeInfo(array, i))
            root['ranges'].append(getRangeInfo(array, -1))
        else:
            root['ranges'].append(getRangeInfo(array, 0))

    return root

# -----------------------------------------------------------------------------


def dumpColorArray(datasetDir, dataDir, colorArrayInfo, root=None, compress=True):
    if root is None:
        root = {}
    colorArray = colorArrayInfo['colorArray']
    location = colorArrayInfo['location']

    dumpedArray = dumpDataArray(datasetDir, dataDir, colorArray, {}, compress)

    if dumpedArray:
        root[location]['activeScalars'] = 0
        root[location]['arrays'].append({'data': dumpedArray})

    return root

# -----------------------------------------------------------------------------


def dumpTCoords(datasetDir, dataDir, dataset, root=None, compress=True):
    if root is None:
        root = {}
    tcoords = dataset.GetPointData().GetTCoords()
    if tcoords:
        dumpedArray = dumpDataArray(datasetDir, dataDir, tcoords, {}, compress)
        root['pointData']['activeTCoords'] = len(root['pointData']['arrays'])
        root['pointData']['arrays'].append({'data': dumpedArray})

# -----------------------------------------------------------------------------


def dumpNormals(datasetDir, dataDir, dataset, root=None, compress=True):
    if root is None:
        root = {}
    normals = dataset.GetPointData().GetNormals()
    if normals:
        dumpedArray = dumpDataArray(datasetDir, dataDir, normals, {}, compress)
        root['pointData']['activeNormals'] = len(root['pointData']['arrays'])
        root['pointData']['arrays'].append({'data': dumpedArray})

# -----------------------------------------------------------------------------


def dumpAllArrays(datasetDir, dataDir, dataset, root=None, compress=True):
    if root is None:
        root = {}
    root['pointData'] = {
        'vtkClass': 'vtkDataSetAttributes',
        "activeGlobalIds": -1,
        "activeNormals": -1,
        "activePedigreeIds": -1,
        "activeScalars": -1,
        "activeTCoords": -1,
        "activeTensors": -1,
        "activeVectors": -1,
        "arrays": []
    }
    root['cellData'] = {
        'vtkClass': 'vtkDataSetAttributes',
        "activeGlobalIds": -1,
        "activeNormals": -1,
        "activePedigreeIds": -1,
        "activeScalars": -1,
        "activeTCoords": -1,
        "activeTensors": -1,
        "activeVectors": -1,
        "arrays": []
    }
    root['fieldData'] = {
        'vtkClass': 'vtkDataSetAttributes',
        "activeGlobalIds": -1,
        "activeNormals": -1,
        "activePedigreeIds": -1,
        "activeScalars": -1,
        "activeTCoords": -1,
        "activeTensors": -1,
        "activeVectors": -1,
        "arrays": []
    }

    # Point data
    pd = dataset.GetPointData()
    pd_size = pd.GetNumberOfArrays()
    for i in range(pd_size):
        array = pd.GetAbstractArray(i)
        if array:
            dumpedArray = dumpDataArray(datasetDir, dataDir, array, {}, compress)
            root['pointData']['activeScalars'] = 0
            root['pointData']['arrays'].append({'data': dumpedArray})

    # Cell data
    cd = dataset.GetCellData()
    cd_size = cd.GetNumberOfArrays()
    for i in range(cd_size):
        array = cd.GetAbstractArray(i)
        if array:
            dumpedArray = dumpDataArray(datasetDir, dataDir, array, {}, compress)
            root['cellData']['activeScalars'] = 0
            root['cellData']['arrays'].append({'data': dumpedArray})

    return root

# -----------------------------------------------------------------------------


def dumpPolyData(datasetDir, dataDir, dataset, colorArrayInfo, root=None, compress=True):
    if root is None:
        root = {}
    root['vtkClass'] = 'vtkPolyData'
    container = root

    # Points
    points = dumpDataArray(datasetDir, dataDir, dataset.GetPoints().GetData(), {}, compress)
    points['vtkClass'] = 'vtkPoints'
    container['points'] = points

    # Cells
    _cells = container

    # Verts
    if dataset.GetVerts() and dataset.GetVerts().GetData().GetNumberOfTuples() > 0:
        _verts = dumpDataArray(datasetDir, dataDir, dataset.GetVerts().GetData(), {}, compress)
        _cells['verts'] = _verts
        _cells['verts']['vtkClass'] = 'vtkCellArray'

    # Lines
    if dataset.GetLines() and dataset.GetLines().GetData().GetNumberOfTuples() > 0:
        _lines = dumpDataArray(datasetDir, dataDir, dataset.GetLines().GetData(), {}, compress)
        _cells['lines'] = _lines
        _cells['lines']['vtkClass'] = 'vtkCellArray'

    # Polys
    if dataset.GetPolys() and dataset.GetPolys().GetData().GetNumberOfTuples() > 0:
        _polys = dumpDataArray(datasetDir, dataDir, dataset.GetPolys().GetData(), {}, compress)
        _cells['polys'] = _polys
        _cells['polys']['vtkClass'] = 'vtkCellArray'

    # Strips
    if dataset.GetStrips() and dataset.GetStrips().GetData().GetNumberOfTuples() > 0:
        _strips = dumpDataArray(datasetDir, dataDir, dataset.GetStrips().GetData(), {}, compress)
        _cells['strips'] = _strips
        _cells['strips']['vtkClass'] = 'vtkCellArray'

    dumpAllArrays(datasetDir, dataDir, dataset, container, compress)
    dumpColorArray(datasetDir, dataDir, colorArrayInfo, container, compress)

    # PointData TCoords
    dumpTCoords(datasetDir, dataDir, dataset, container, compress)
    # dumpNormals(datasetDir, dataDir, dataset, container, compress)

    return root

# -----------------------------------------------------------------------------


writerMapping['vtkPolyData'] = dumpPolyData

# -----------------------------------------------------------------------------


def dumpImageData(datasetDir, dataDir, dataset, colorArrayInfo, root=None, compress=True):
    if root is None:
        root = {}
    root['vtkClass'] = 'vtkImageData'
    container = root

    container['spacing'] = dataset.GetSpacing()
    container['origin'] = dataset.GetOrigin()
    container['extent'] = dataset.GetExtent()

    dumpAllArrays(datasetDir, dataDir, dataset, container, compress)

    return root

# -----------------------------------------------------------------------------


writerMapping['vtkImageData'] = dumpImageData

# -----------------------------------------------------------------------------


def writeDataSet(filePath, dataset, outputDir, colorArrayInfo, newDSName=None, compress=True):
    fileName = quote(newDSName if newDSName else os.path.basename(filePath))
    datasetDir = os.path.join(outputDir, fileName)
    dataDir = os.path.join(datasetDir, 'data')
    if not os.path.exists(dataDir):
        os.makedirs(dataDir)

    root = {}
    root['metadata'] = {}
    root['metadata']['name'] = fileName

    writer = writerMapping.get(dataset.GetClassName())
    if writer:
        writer(datasetDir, dataDir, dataset, colorArrayInfo, root, compress)
    else:
        print(dataset.GetClassName(), 'is not supported')

    with open(os.path.join(datasetDir, "index.json"), 'w') as f:
        f.write(json.dumps(root, indent=2))

    return datasetDir

# -----------------------------------------------------------------------------


def generateSceneName():
    srcs = simple.GetSources()

    nameParts = []
    for key, val in srcs.items():
        proxyGroup = val.SMProxy.GetXMLGroup()
        if 'sources' in proxyGroup:
            nameParts.append(key[0])
    fileName = '-'.join(nameParts)

    # limit to a reasonable number of characters
    fileName = fileName[:12] if len(fileName) > 15 else fileName
    if len(fileName) == 0:
        fileName = 'SceneExport'
    sceneName = '%s' % fileName
    counter = 0
    while os.path.isfile(os.path.join(ROOT_OUTPUT_DIRECTORY, '%s%s' % (sceneName, FILENAME_EXTENSION))):
        counter += 1
        sceneName = '%s (%d)' % (fileName, counter)

    return sceneName

# -----------------------------------------------------------------------------


componentIndex = 0


def getComponentName(actor):
    global componentIndex
    srcs = simple.GetSources()
    duplicates = {}
    for key, val in srcs.items():
        # Prevent name duplication
        nameToUse = key[0]
        if nameToUse in duplicates:
            count = 1
            newName = '%s (%d)' % (nameToUse, count)
            while newName in duplicates:
                count += 1
                newName = '%s (%d)' % (nameToUse, count)
            nameToUse = newName
        duplicates[nameToUse] = True
        actorRep = simple.GetRepresentation(
            val).GetClientSideObject().GetActiveRepresentation().GetActor()
        if actor == actorRep:
            return nameToUse
    nameToUse = '%d' % componentIndex
    componentIndex += 1
    return nameToUse

# ### ----------------------------------------------------------------------- ###
# ### Main script contents                                                     ###
# ### ----------------------------------------------------------------------- ###


def mkdir_p(path):
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise


# Generate timestamp and use it to make subdirectory within the top level output dir
timeStamp = time.strftime("%a-%d-%b-%Y-%H-%M-%S")
outputDir = os.path.join(ROOT_OUTPUT_DIRECTORY, timeStamp)
mkdir_p(outputDir)

doCompressArrays = False

# Get the active view and render window, use it to iterate over renderers
activeView = simple.GetActiveView()
renderWindow = activeView.GetRenderWindow()
renderers = renderWindow.GetRenderers()

scDirs = []
sceneComponents = []
textureToSave = {}

for rIdx in range(renderers.GetNumberOfItems()):
    renderer = renderers.GetItemAsObject(rIdx)
    renProps = renderer.GetViewProps()
    for rpIdx in range(renProps.GetNumberOfItems()):
        renProp = renProps.GetItemAsObject(rpIdx)
        if not renProp.GetVisibility():
            continue
        if hasattr(renProp, 'GetMapper'):
            mapper = renProp.GetMapper()
            dataObject = mapper.GetInputDataObject(0, 0)
            dataset = None

            if dataObject.IsA('vtkCompositeDataSet'):
                if dataObject.GetNumberOfBlocks() == 1:
                    dataset = dataObject.GetBlock(0)
                else:
                    print('Apply geometry filter')
                    gf = vtkCompositeDataGeometryFilter()
                    gf.SetInputData(dataObject)
                    gf.Update()
                    dataset = gf.GetOutput()
            else:
                dataset = mapper.GetInput()

            if dataset and dataset.GetPoints():
                componentName = getComponentName(renProp)
                scalarVisibility = mapper.GetScalarVisibility()
                arrayAccessMode = mapper.GetArrayAccessMode()
                colorArrayName = mapper.GetArrayName() if arrayAccessMode == 1 else mapper.GetArrayId()
                colorMode = mapper.GetColorMode()
                scalarMode = mapper.GetScalarMode()
                lookupTable = mapper.GetLookupTable()

                dsAttrs = None
                arrayLocation = ''

                if scalarVisibility:
                    # VTK_SCALAR_MODE_USE_POINT_FIELD_DATA or VTK_SCALAR_MODE_USE_POINT_DATA
                    if scalarMode == 3 or scalarMode == 1:
                        dsAttrs = dataset.GetPointData()
                        arrayLocation = 'pointData'
                    # VTK_SCALAR_MODE_USE_CELL_FIELD_DATA or VTK_SCALAR_MODE_USE_CELL_DATA
                    elif scalarMode == 4 or scalarMode == 2:
                        dsAttrs = dataset.GetCellData()
                        arrayLocation = 'cellData'

                colorArray = None
                dataArray = None

                if dsAttrs:
                    dataArray = dsAttrs.GetAbstractArray(colorArrayName)

                if dataArray:
                    # component = -1 => let specific instance get scalar from vector before mapping
                    colorArray = lookupTable.MapScalars(dataArray, colorMode, -1)
                    colorArrayName = '__CustomRGBColorArray__'
                    colorArray.SetName(colorArrayName)
                    colorMode = 0
                else:
                    colorArrayName = ''

                colorArrayInfo = {
                    'colorArray': colorArray,
                    'location': arrayLocation
                }

                scDirs.append(writeDataSet('', dataset, outputDir, colorArrayInfo,
                                           newDSName=componentName, compress=doCompressArrays))

                # Handle texture if any
                textureName = None
                if renProp.GetTexture() and renProp.GetTexture().GetInput():
                    textureData = renProp.GetTexture().GetInput()
                    textureName = 'texture_%d' % getObjectId(textureData)
                    textureToSave[textureName] = textureData

                representation = renProp.GetProperty().GetRepresentation(
                ) if hasattr(renProp, 'GetProperty') else 2
                colorToUse = renProp.GetProperty().GetDiffuseColor(
                ) if hasattr(renProp, 'GetProperty') else [1, 1, 1]
                if representation == 1:
                    colorToUse = renProp.GetProperty().GetColor() if hasattr(
                        renProp, 'GetProperty') else [1, 1, 1]
                pointSize = renProp.GetProperty().GetPointSize(
                ) if hasattr(renProp, 'GetProperty') else 1.0
                opacity = renProp.GetProperty().GetOpacity() if hasattr(
                    renProp, 'GetProperty') else 1.0
                edgeVisibility = renProp.GetProperty().GetEdgeVisibility(
                ) if hasattr(renProp, 'GetProperty') else False

                p3dPosition = renProp.GetPosition() if renProp.IsA('vtkProp3D') else [0, 0, 0]
                p3dScale = renProp.GetScale() if renProp.IsA('vtkProp3D') else [1, 1, 1]
                p3dOrigin = renProp.GetOrigin() if renProp.IsA('vtkProp3D') else [0, 0, 0]
                p3dRotateWXYZ = renProp.GetOrientationWXYZ(
                ) if renProp.IsA('vtkProp3D') else [0, 0, 0, 0]

                sceneComponents.append({
                    "name": componentName,
                    "type": "httpDataSetReader",
                    "httpDataSetReader": {
                        "url": componentName
                    },
                    "actor": {
                        "origin": p3dOrigin,
                        "scale": p3dScale,
                        "position": p3dPosition,
                    },
                    "actorRotation": p3dRotateWXYZ,
                    "mapper": {
                        "colorByArrayName": colorArrayName,
                        "colorMode": colorMode,
                        "scalarMode": scalarMode
                    },
                    "property": {
                        "representation": representation,
                        "edgeVisibility": edgeVisibility,
                        "diffuseColor": colorToUse,
                        "pointSize": pointSize,
                        "opacity": opacity
                    },
                    "lookupTable": {
                        "tableRange": lookupTable.GetRange(),
                        "hueRange": lookupTable.GetHueRange() if hasattr(lookupTable, 'GetHueRange') else [0.5, 0]
                    }
                })

                if textureName:
                    sceneComponents[-1]['texture'] = textureName

# Save texture data if any
for key, val in textureToSave.items():
    writeDataSet('', val, outputDir, None, newDSName=key, compress=doCompressArrays)

cameraClippingRange = activeView.GetActiveCamera().GetClippingRange()

sceneDescription = {
    "fetchGzip": doCompressArrays,
    "background": activeView.Background.GetData(),
    "camera": {
        "focalPoint": activeView.CameraFocalPoint.GetData(),
        "position": activeView.CameraPosition.GetData(),
        "viewUp": activeView.CameraViewUp.GetData(),
        "clippingRange": [elt for elt in cameraClippingRange]
    },
    "centerOfRotation": activeView.CenterOfRotation.GetData(),
    "scene": sceneComponents
}

indexFilePath = os.path.join(outputDir, 'index.json')
with open(indexFilePath, 'w') as outfile:
    json.dump(sceneDescription, outfile, indent=4)

# -----------------------------------------------------------------------------

# Now zip up the results and get rid of the temp directory
sceneName = generateSceneName()
sceneFileName = os.path.join(
    ROOT_OUTPUT_DIRECTORY, '%s%s' % (sceneName, FILENAME_EXTENSION))

try:
    import zlib
    compression = zipfile.ZIP_DEFLATED
except ImportError:
    compression = zipfile.ZIP_STORED

zf = zipfile.ZipFile(sceneFileName, mode='w')

try:
    for dirName, subdirList, fileList in os.walk(outputDir):
        for fname in fileList:
            fullPath = os.path.join(dirName, fname)
            relPath = '%s/%s' % (sceneName, os.path.relpath(fullPath, outputDir))
            zf.write(fullPath, arcname=relPath, compress_type=compression)
finally:
    zf.close()

shutil.rmtree(outputDir)

print('Finished exporting dataset to: ', sceneFileName)
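
# Hedged usage note: this file is meant to be run as a ParaView Python macro.
# A typical workflow (an assumption about the reader's setup, not enforced by
# the script itself) is:
#
#     1. In ParaView, choose Macros > Add new macro... and select this file.
#     2. Build the scene (sources, representations, camera) as desired.
#     3. Run the macro; the scene is written under EXPORT_DIRECTORY as a
#        single .vtkjs archive that vtk.js scene importers can load.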