**Dataset schema** (one row per source file; ⌀ marks a nullable column):

| column | dtype | length / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4–721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] ⌀ | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] ⌀ | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6 – 10.2M |
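Rows with this schema can be inspected without downloading the whole dump by streaming it. A minimal sketch using the Hugging Face `datasets` library; the dataset id below is a placeholder, not the actual Hub id of this dump:

```python
from itertools import islice
from datasets import load_dataset

# Hypothetical dataset id; substitute wherever this dump is hosted.
ds = load_dataset("some-org/code-files-dump", split="train", streaming=True)
for row in islice(ds, 3):
    # Column names follow the schema above.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```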
**Row 1 · `ssricardo/anki-plugins` · `/anki-markdown/src/controller.py`**

- blob_id `c3ab53beb50fa9db262769d3eaed48f285f08495` · directory_id `9fd680d1a569fc8e76ba869db522ceebbce49109` · content_id `1f014fe8cda4ada2c44332a31f867a221f4bba28`
- snapshot_id `8bdb60228a7052a2a18f48cde48e176835c26bf3` · revision_id `d79ed453e3e73921be03f20f708462ff683e7111` · branch_name `refs/heads/master`
- detected_licenses `[]` · license_type `no_license` · gha_license_id null
- visit_date 2023-05-12T06:58:29.514918 · revision_date 2023-05-11T21:13:27 · committer_date 2023-05-11T21:13:27 · gha_event_created_at 2023-05-03T18:09:32 · gha_created_at 2018-06-26T03:04:54
- github_id 138,677,820 · star_events_count 105 · fork_events_count 27 · gha_language Python
- language Python · src_encoding UTF-8 · is_vendor false · is_generated false · length_bytes 11,720 · extension `py` · filename `controller.py`

```python
# -*- coding: utf-8 -*-
# Main interface between Anki and this addon components
# This file is part of the anki-markdown-formatter addon
# @author ricardo saturnino
# ------------------------------------------------
from .config import ConfigKey, ConfigService
from .core import Feedback, AppHolder
from .converter import Converter
from .batch import BatchService
from .field_ctrl import NoteFieldControler
from . import html2text
import os
import json
from aqt.editor import Editor, EditorWebView
from aqt.qt import *
from aqt import mw
from PyQt5.QtWidgets import QMenu, QAction, QApplication
from aqt.utils import showInfo, tooltip, showWarning
from anki.hooks import addHook
from aqt import gui_hooks
# Holds references so the GC doesn't kill them
controllerInstance = None
CWD = os.path.dirname(os.path.realpath(__file__))
ICON_FILE = 'icons/markdown-3.png'
# -------------------------- WEB --------------------------------
JS_STYLE_APPENDER = """
var prStyle = `
<style type="text/css">
{}
</style>`;
$(prStyle).appendTo('body');
"""
# ---------------------------- Injected functions -------------------
@staticmethod
def _ankiShowInfo(*args):
tooltip(args)
@staticmethod
def _ankiShowError(*args):
showWarning(str(args))
def _ankiConfigRead(key):
return AppHolder.app.addonManager.getConfig(__name__)[key]
# ------------------------ Init ---------------------------------
def run():
global controllerInstance
Feedback.log('Setting anki-markdown controller')
Feedback.showInfo = _ankiShowInfo
Feedback.showError = _ankiShowError
AppHolder.app = mw
ConfigService._f = _ankiConfigRead
controllerInstance = Controller()
controllerInstance.setupBindings()
# noteFieldCtrl = NoteFieldControler(controllerInstance._converter)
# noteFieldCtrl.setup()
class Controller:
"""
The mediator/adapter between Anki (and its components) and this add-on's specific API
"""
JS_LOCATION = CWD + '/a-md.js'
CSS_LOCATION = CWD + '/a-md.css'
_converter = Converter()
_batchService = BatchService(_converter)
_showButton = None
_shortcutMenu = None
_shortcutButton = None
_editAsMarkdownEnabled = False
_jsContent = None
_cssContent = None
def __init__(self):
self._showButton = ConfigService.read(ConfigKey.SHOW_MARKDOWN_BUTTON, bool)
self._shortcutMenu = ConfigService.read(ConfigKey.SHORTCUT, str)
self._shortcutButton = ConfigService.read(ConfigKey.SHORTCUT_EDIT, str)
self._enablePreview = ConfigService.read(ConfigKey.ENABLE_PREVIEW, bool)
self._disableMdDecoration = ConfigService.read(ConfigKey.DISABLE_MD_STYLE, bool)
try:
with open(Controller.JS_LOCATION, 'r') as f:
self._jsContent = f.read()
with open(Controller.CSS_LOCATION, 'r') as fCss:
self._cssContent = fCss.read()
except Exception as e:
print(e)
Feedback.showError('An error occurred while loading Markdown Preview. You may need to restart Anki.')
# ------------------- Hooks / entry points -------------------------
def setupBindings(self):
"""
Register the entry points / interface with Anki
"""
# Review
gui_hooks.card_will_show.append(self.processField)
Feedback.log('Review Hook set')
# Editing
addHook("setupEditorButtons", self.setupButtons)
addHook("setupEditorShortcuts", self.setupShortcuts)
addHook("loadNote", self.onLoadNote)
addHook('EditorWebView.contextMenuEvent', self._setupContextMenu)
addHook('browser.setupMenus', self._setupBrowserMenu)
addHook('editTimer', lambda n: self._updatePreview())
Editor.setupWeb = self._wrapEditorSetupWeb(Editor.setupWeb)
Editor.toggleBold = self.wrapEditorToggleBold(Editor.toggleBold)
Editor.toggleItalic = self.wrapEditorToggleItalic(Editor.toggleItalic)
html2textFn = html2text.HTML2Text()
self._converter.getTextFromHtml = lambda data: html2textFn.handle(data)
try:
EditorWebView._onPaste = self._wrapOnPaste(EditorWebView._onPaste)
except Exception:
Feedback.log('Markdown: Handling "Paste" is disabled due to an error')
def _wrapEditorSetupWeb(self, fn):
def wrapper(editor):
fn(editor)
editor.web.eval(JS_STYLE_APPENDER.format(self._cssContent))
editor.web.eval("""
%s
""" % self._jsContent)
if self._enablePreview:
editor.web.eval('setPreviewUp()')
return wrapper
def _wrapOnPaste(self, fn):
ref = self
def _onPaste(self, mode):
extended = self.editor.mw.app.queryKeyboardModifiers() & Qt.ShiftModifier
mime = self.editor.mw.app.clipboard().mimeData(mode=mode)
if ref._editAsMarkdownEnabled:
if not (mime.html() and mime.html().startswith("<!--anki-->")):
self.eval("pasteAmdContent(%s);" % json.dumps(mime.text()))
return
html, internal = self._processMime(mime)
if not html:
return
self.editor.doPaste(html, internal, extended)
return _onPaste
# --------------------------- Editing ----------------------------
def toggleMarkdown(self, editor=None):
self.setEditAsMarkdownEnabled(not self._editAsMarkdownEnabled)
self._editorReference.loadNoteKeepingFocus()
def _setupContextMenu(self, webview, menu):
submenu = self._showCustomMenu(menu)
menu.addMenu(submenu)
def _showCustomMenu(self, parent=None):
if not parent:
parent = self._editorReference.web
submenu = QMenu('&Markdown', parent)
act1 = QAction('(&1) Convert to HTML', submenu,
triggered=lambda: self._convertToHTML())
submenu.addAction(act1)
act2 = QAction('(&2) Convert to MD', submenu,
triggered=lambda: self._clearHTML())
submenu.addAction(act2)
act3 = QAction('(&3) Mark as Markdown block', submenu,
triggered=lambda: self._wrapAsMarkdown())
submenu.addAction(act3)
if not isinstance(parent, QMenu):
submenu.popup(parent.mapToGlobal(parent.pos()))
return submenu
def onLoadNote(self, editor):
note = editor.note
if self._editAsMarkdownEnabled:
# Prevent default Enter behavior if as Markdown enabled
self.setEditAsMarkdownEnabled(self._editAsMarkdownEnabled) # initialization
# editor.web.eval("disableAmd();")
editor.web.eval("showMarkDownNotice();")
editor.web.eval("handleNoteAsMD();")
if self._disableMdDecoration:
editor.web.eval('removeMdDecoration()')
else:
# editor.web.eval("disableAmd();")
pass
self._updatePreview()
def setupButtons(self, buttons, editor):
"""Add buttons to editor"""
if not self._showButton:
return buttons
self._editorReference = editor
editor._links['amd-menu'] = self.toggleMarkdown
return buttons + [
editor._addButton(
CWD + '/' + ICON_FILE,
"amd-menu", "Edit as Markdown? ({})".format(self._shortcutButton),
toggleable=True, id='bt_tg_md')]
def setupShortcuts(self, scuts: list, editor):
scuts.append((self._shortcutButton, self.toggleMarkdown))
scuts.append((self._shortcutMenu, self._showCustomMenu))
def wrapEditorToggleBold(self, originalFn):
def onBold(*args):
if not self._editAsMarkdownEnabled:
return originalFn()
else:
self._editorReference.web.eval("wrap('**', '**');")
return
return onBold
def wrapEditorToggleItalic(self, originalFn):
def onItalic(*args):
if not self._editAsMarkdownEnabled:
return originalFn()
else:
self._editorReference.web.eval("wrap('_', '_');")
return
return onItalic
def _clearHTML(self, editor=None):
"""
Convert to Text (MD)
"""
Feedback.log('_convertToMD')
cur = self._editorReference.currentField
note = self._editorReference.note
newValue = self._converter.getTextFromHtml(note.fields[cur])
note.fields[cur] = newValue
self._editorReference.setNote(note)
def _convertToHTML(self, editor=None):
Feedback.log('_convertToHTML')
cur = self._editorReference.currentField
note = self._editorReference.note
newValue = self._converter.convertMarkdown(note.fields[cur])
note.fields[cur] = newValue
self._editorReference.setNote(note)
def setEditAsMarkdownEnabled(self, value: bool):
self._editAsMarkdownEnabled = value
self._editorReference.web.eval('editAsMarkdownEnabled = {};'.format(str(value).lower())) # check is it needed?
def _wrapAsMarkdown(self, editor=None):
if not editor:
if not self._editorReference:
return
editor = self._editorReference
editor.web.eval("wrap('<amd>', '</amd>');")
Feedback.showInfo('Anki Markdown :: Added successfully')
def _updatePreview(self):
if not (self._editorReference and self._editorReference.note):
return
note = self._editorReference.note
self._editorReference.web.eval('cleanPreview();')
for fld, val in list(note.items()):
self._editorReference.web.eval('setFieldPreview("%s", `%s`);' % (fld,
self._converter.convertMarkdown(val)))
def _isEditing(self):
"""Checks Anki's current state: whether an editor is currently open"""
return bool(self._editorReference)
# ------------------------------ Review ------------------------------------------
def processField(self, inpt: str, card, kind: str) -> str:
Feedback.log('processField')
if self._converter.isAmdAreaPresent(inpt):
res = self._converter.convertAmdAreasToMD(inpt, isTypeMode=True)
AppHolder.app.web.eval("""
%s
""" % JS_STYLE_APPENDER.format(self._cssContent))
if self._disableMdDecoration:
AppHolder.app.web.eval('removeMdDecoration()')
return res
return inpt
# --------------------------------------- Browser ------------------------------------
def _setupBrowserMenu(self, browser):
submenu = QMenu('&Markdown Addon', browser.form.menu_Notes)
act1 = QAction('(&1) Convert to HTML', submenu,
triggered=lambda: self._batchConvertHTML(browser))
submenu.addAction(act1)
act2 = QAction('(&2) Convert to MD', submenu,
triggered=lambda: self._batchConvertMD(browser))
submenu.addAction(act2)
browser.form.menu_Notes.addMenu(submenu)
def _batchConvertHTML(self, browser):
selectedItens = browser.selectedNotes()
self._batchService.convertNotesToHTML(selectedItens)
def _batchConvertMD(self, browser):
selectedItens = browser.selectedNotes()
self._batchService.convertNotesToMD(selectedItens)
# ---------------------------------- Events listeners ---------------------------------
```
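The controller above integrates with Anki largely by wrapping existing functions (`Editor.setupWeb`, `EditorWebView._onPaste`) and reassigning them, rather than subclassing. A minimal, Anki-free sketch of that wrap-and-delegate pattern; all names here are illustrative:

```python
class Editor:
    def setup_web(self):
        print("original setup")

def wrap_setup_web(original):
    # Mirrors Controller._wrapEditorSetupWeb: run the original, then the add-on step.
    def wrapper(editor):
        original(editor)
        print("inject add-on CSS/JS here")
    return wrapper

Editor.setup_web = wrap_setup_web(Editor.setup_web)
Editor().setup_web()  # prints the original step, then the injected one
```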
**Row 2 · `mozilla/spidernode` · `/deps/spidershim/spidermonkey/config/expandlibs.py`**

- blob_id `c61284a4e0fe31b67d055ddeb785d02053ce4f92` · directory_id `8191864909f7d8b896f97ff353ce475757f4fbd1` · content_id `ac06c432f2e466a389cc60b842f74c4109aecce5`
- snapshot_id `df5a1e58b54da5bfbc35a585fc4bbb15678f8ca0` · revision_id `aafa9e5273f954f272bb4382fc007af14674b4c2` · branch_name `refs/heads/master`
- detected_licenses `["MIT", "BSD-3-Clause", "LicenseRef-scancode-unicode", "LicenseRef-scancode-openssl", "LicenseRef-scancode-unknown-license-reference", "Artistic-2.0", "NAIST-2003", "NTP", "ICU", "ISC", "Zlib", "LicenseRef-scancode-public-domain", "BSD-2-Clause"]` · license_type `permissive` · gha_license_id NOASSERTION
- visit_date 2023-08-26T19:45:35.703738 · revision_date 2019-06-18T19:01:53 · committer_date 2019-06-18T19:01:53 · gha_event_created_at 2019-06-18T18:59:28 · gha_created_at 2016-04-08T23:38:28
- github_id 55,816,013 · star_events_count 618 · fork_events_count 69 · gha_language JavaScript
- language Python · src_encoding UTF-8 · is_vendor false · is_generated false · length_bytes 5,509 · extension `py` · filename `expandlibs.py`

```python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
'''Expandlibs is a system that allows replacing some libraries with a
descriptor file containing some linking information about them.
The descriptor file format is as follows:
---8<-----
OBJS = a.o b.o ...
LIBS = libfoo.a libbar.a ...
--->8-----
(In the example above, OBJ_SUFFIX is o and LIB_SUFFIX is a).
Expandlibs also canonicalizes how to pass libraries to the linker, such
that only the ${LIB_PREFIX}${ROOT}.${LIB_SUFFIX} form needs to be used:
given a list of files, expandlibs will replace items with the form
${LIB_PREFIX}${ROOT}.${LIB_SUFFIX} following these rules:
- If a ${DLL_PREFIX}${ROOT}.${DLL_SUFFIX} or
${DLL_PREFIX}${ROOT}.${IMPORT_LIB_SUFFIX} file exists, use that instead
- If the ${LIB_PREFIX}${ROOT}.${LIB_SUFFIX} file exists, use it
- If a ${LIB_PREFIX}${ROOT}.${LIB_SUFFIX}.${LIB_DESC_SUFFIX} file exists,
replace ${LIB_PREFIX}${ROOT}.${LIB_SUFFIX} with the OBJS and LIBS the
descriptor contains. And for each of these LIBS, also apply the same
rules.
'''
from __future__ import with_statement
import sys, os, errno
import expandlibs_config as conf
def ensureParentDir(file):
'''Ensures the directory parent to the given file exists'''
dir = os.path.dirname(file)
if dir and not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError, error:
if error.errno != errno.EEXIST:
raise
def relativize(path):
'''Returns a path relative to the current working directory, if it is
shorter than the given path'''
def splitpath(path):
dir, file = os.path.split(path)
if os.path.splitdrive(dir)[1] == os.sep:
return [file]
return splitpath(dir) + [file]
if not os.path.exists(path):
return path
curdir = splitpath(os.path.abspath(os.curdir))
abspath = splitpath(os.path.abspath(path))
while curdir and abspath and curdir[0] == abspath[0]:
del curdir[0]
del abspath[0]
if not curdir and not abspath:
return '.'
relpath = os.path.join(*[os.pardir for i in curdir] + abspath)
if len(path) > len(relpath):
return relpath
return path
def isObject(path):
'''Returns whether the given path points to an object file, that is,
ends with OBJ_SUFFIX or .i_o'''
return os.path.splitext(path)[1] in [conf.OBJ_SUFFIX, '.i_o']
def isDynamicLib(path):
'''Returns whether the given path points to a dynamic library, that is,
ends with DLL_SUFFIX.'''
# On mac, the xul library is named XUL, instead of libxul.dylib. Assume any
# file by that name is a dynamic library.
return os.path.splitext(path)[1] == conf.DLL_SUFFIX or os.path.basename(path) == 'XUL'
class LibDescriptor(dict):
KEYS = ['OBJS', 'LIBS']
def __init__(self, content=None):
'''Creates an instance of a lib descriptor, initialized with contents
from a list of strings when given. This is intended for use with
file.readlines()'''
if isinstance(content, list) and all([isinstance(item, str) for item in content]):
pass
elif content is not None:
raise TypeError("LibDescriptor() arg 1 must be None or a list of strings")
super(LibDescriptor, self).__init__()
for key in self.KEYS:
self[key] = []
if not content:
return
for key, value in [(s.strip() for s in item.split('=', 2)) for item in content if item.find('=') >= 0]:
if key in self.KEYS:
self[key] = value.split()
def __str__(self):
'''Serializes the lib descriptor'''
return '\n'.join('%s = %s' % (k, ' '.join(self[k])) for k in self.KEYS if len(self[k]))
class ExpandArgs(list):
def __init__(self, args):
'''Creates a clone of the |args| list and performs file expansion on
each item it contains'''
super(ExpandArgs, self).__init__()
self._descs = set()
for arg in args:
self += self._expand(arg)
def _expand(self, arg):
'''Internal function doing the actual work'''
(root, ext) = os.path.splitext(arg)
if ext != conf.LIB_SUFFIX or not os.path.basename(root).startswith(conf.LIB_PREFIX):
return [relativize(arg)]
if conf.LIB_PREFIX:
dll = root.replace(conf.LIB_PREFIX, conf.DLL_PREFIX, 1) + conf.DLL_SUFFIX
else:
dll = root + conf.DLL_SUFFIX
if os.path.exists(dll):
if conf.IMPORT_LIB_SUFFIX:
return [relativize(root + conf.IMPORT_LIB_SUFFIX)]
else:
return [relativize(dll)]
return self._expand_desc(arg)
def _expand_desc(self, arg):
'''Internal function taking care of lib descriptor expansion only'''
desc = os.path.abspath(arg + conf.LIBS_DESC_SUFFIX)
if os.path.exists(desc):
if desc in self._descs:
return []
self._descs.add(desc)
with open(desc, 'r') as f:
desc = LibDescriptor(f.readlines())
objs = [relativize(o) for o in desc['OBJS']]
for lib in desc['LIBS']:
objs += self._expand(lib)
return objs
return [relativize(arg)]
if __name__ == '__main__':
print " ".join(ExpandArgs(sys.argv[1:]))
```
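The expansion rules in the docstring are easiest to see on a concrete descriptor. A self-contained sketch of the `KEY = value ...` parsing that `LibDescriptor` performs, reimplemented here rather than imported (the module depends on a generated `expandlibs_config`):

```python
def parse_descriptor(lines):
    # "OBJS = a.o b.o" lines become {'OBJS': ['a.o', 'b.o']}, as in LibDescriptor.
    desc = {"OBJS": [], "LIBS": []}
    for line in lines:
        if "=" not in line:
            continue
        key, _, value = (s.strip() for s in line.partition("="))
        if key in desc:
            desc[key] = value.split()
    return desc

print(parse_descriptor(["OBJS = a.o b.o", "LIBS = libfoo.a libbar.a"]))
# {'OBJS': ['a.o', 'b.o'], 'LIBS': ['libfoo.a', 'libbar.a']}
```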
**Row 3 · `zeroSteiner/rule-engine` · `/tests/types.py`**

- blob_id `fe6e0437ee373904a72380e177a8e4ac843fa739` · directory_id `de549a8e38e76e61e5a4a8bb0ebc4fb6d2933d1a` · content_id `dfae4396d8c38b21322ca6a8fe7104ce08b0c6bd`
- snapshot_id `464acf999d85f385423167fa57da0d65a18016ee` · revision_id `29a37f293a48d3c469c9280a057d035b112555d3` · branch_name `refs/heads/master`
- detected_licenses `["BSD-3-Clause"]` · license_type `permissive` · gha_license_id BSD-3-Clause
- visit_date 2023-08-04T12:53:42.211220 · revision_date 2023-08-03T23:17:57 · committer_date 2023-08-03T23:17:57 · gha_event_created_at 2023-08-01T23:19:01 · gha_created_at 2018-03-28T00:58:06
- github_id 127,063,639 · star_events_count 331 · fork_events_count 49 · gha_language Python
- language Python · src_encoding UTF-8 · is_vendor false · is_generated false · length_bytes 15,242 · extension `py` · filename `types.py`

```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# tests/types.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import datetime
import sys
import typing
import unittest
import rule_engine.types as types
__all__ = ('DataTypeTests', 'MetaDataTypeTests', 'ValueIsTests')
DataType = types.DataType
class DataTypeTests(unittest.TestCase):
class _UnsupportedType(object):
pass
def test_data_type_collections(self):
with self.assertRaises(TypeError):
types._CollectionDataTypeDef('TEST', float)
def test_data_type_equality_array(self):
dt1 = DataType.ARRAY(DataType.STRING)
self.assertIs(dt1.value_type, DataType.STRING)
self.assertEqual(dt1, DataType.ARRAY(DataType.STRING))
self.assertNotEqual(dt1, DataType.ARRAY)
self.assertNotEqual(dt1, DataType.ARRAY(DataType.STRING, value_type_nullable=False))
def test_data_type_equality_function(self):
dt1 = DataType.FUNCTION('test', return_type=DataType.FLOAT, argument_types=(), minimum_arguments=0)
self.assertEqual(dt1.value_name, 'test')
self.assertEqual(dt1, DataType.FUNCTION('otherTest', return_type=DataType.FLOAT, argument_types=(), minimum_arguments=0))
self.assertNotEqual(dt1, DataType.NULL)
self.assertNotEqual(dt1, DataType.FUNCTION('test', return_type=DataType.NULL, argument_types=(), minimum_arguments=0))
self.assertNotEqual(dt1, DataType.FUNCTION('test', return_type=DataType.FLOAT, argument_types=(DataType.FLOAT,), minimum_arguments=0))
self.assertNotEqual(dt1, DataType.FUNCTION('otherTest', return_type=DataType.FLOAT, minimum_arguments=1))
def test_data_type_equality_mapping(self):
dt1 = DataType.MAPPING(DataType.STRING)
self.assertIs(dt1.key_type, DataType.STRING)
self.assertEqual(dt1, DataType.MAPPING(DataType.STRING))
self.assertNotEqual(dt1, DataType.MAPPING)
self.assertNotEqual(dt1, DataType.MAPPING(DataType.STRING, value_type=DataType.STRING))
self.assertNotEqual(dt1, DataType.MAPPING(DataType.STRING, value_type_nullable=False))
def test_data_type_equality_set(self):
dt1 = DataType.SET(DataType.STRING)
self.assertIs(dt1.value_type, DataType.STRING)
self.assertEqual(dt1, DataType.SET(DataType.STRING))
self.assertNotEqual(dt1, DataType.SET)
self.assertNotEqual(dt1, DataType.SET(DataType.STRING, value_type_nullable=False))
def test_data_type_from_name(self):
self.assertIs(DataType.from_name('ARRAY'), DataType.ARRAY)
self.assertIs(DataType.from_name('BOOLEAN'), DataType.BOOLEAN)
self.assertIs(DataType.from_name('DATETIME'), DataType.DATETIME)
self.assertIs(DataType.from_name('TIMEDELTA'), DataType.TIMEDELTA)
self.assertIs(DataType.from_name('FLOAT'), DataType.FLOAT)
self.assertIs(DataType.from_name('MAPPING'), DataType.MAPPING)
self.assertIs(DataType.from_name('NULL'), DataType.NULL)
self.assertIs(DataType.from_name('SET'), DataType.SET)
self.assertIs(DataType.from_name('STRING'), DataType.STRING)
self.assertIs(DataType.from_name('FUNCTION'), DataType.FUNCTION)
def test_data_type_from_name_error(self):
with self.assertRaises(TypeError):
DataType.from_name(1)
with self.assertRaises(ValueError):
DataType.from_name('FOOBAR')
def test_data_type_from_type(self):
self.assertIs(DataType.from_type(list), DataType.ARRAY)
self.assertIs(DataType.from_type(tuple), DataType.ARRAY)
self.assertIs(DataType.from_type(bool), DataType.BOOLEAN)
self.assertIs(DataType.from_type(datetime.date), DataType.DATETIME)
self.assertIs(DataType.from_type(datetime.datetime), DataType.DATETIME)
self.assertIs(DataType.from_type(datetime.timedelta), DataType.TIMEDELTA)
self.assertIs(DataType.from_type(float), DataType.FLOAT)
self.assertIs(DataType.from_type(int), DataType.FLOAT)
self.assertIs(DataType.from_type(dict), DataType.MAPPING)
self.assertIs(DataType.from_type(type(None)), DataType.NULL)
self.assertIs(DataType.from_type(set), DataType.SET)
self.assertIs(DataType.from_type(str), DataType.STRING)
self.assertIs(DataType.from_type(type(lambda: None)), DataType.FUNCTION)
def test_data_type_from_type_hint(self):
# simple compound tests
self.assertEqual(DataType.from_type(typing.List[str]), DataType.ARRAY(DataType.STRING))
self.assertEqual(DataType.from_type(typing.Tuple[str]), DataType.ARRAY(DataType.UNDEFINED))
self.assertEqual(DataType.from_type(typing.Set[int]), DataType.SET(DataType.FLOAT))
self.assertEqual(DataType.from_type(typing.Dict[str, str]), DataType.MAPPING(DataType.STRING, DataType.STRING))
# complex compound tests
self.assertEqual(DataType.from_type(typing.List[list]), DataType.ARRAY(DataType.ARRAY))
self.assertEqual(DataType.from_type(
typing.Dict[str, typing.Dict[str, datetime.datetime]]),
DataType.MAPPING(DataType.STRING, DataType.MAPPING(DataType.STRING, DataType.DATETIME)
))
if sys.version_info >= (3, 9):
self.assertEqual(DataType.from_type(list[str]), DataType.ARRAY(DataType.STRING))
self.assertEqual(DataType.from_type(tuple[str]), DataType.ARRAY(DataType.UNDEFINED))
self.assertEqual(DataType.from_type(set[int]), DataType.SET(DataType.FLOAT))
self.assertEqual(DataType.from_type(dict[str, str]), DataType.MAPPING(DataType.STRING, DataType.STRING))
self.assertEqual(DataType.from_type(list[list]), DataType.ARRAY(DataType.ARRAY))
self.assertEqual(DataType.from_type(
dict[str, dict[str, datetime.datetime]]),
DataType.MAPPING(DataType.STRING, DataType.MAPPING(DataType.STRING, DataType.DATETIME)
))
def test_data_type_from_type_error(self):
with self.assertRaisesRegex(TypeError, r'^from_type argument 1 must be a type or a type hint, not _UnsupportedType$'):
DataType.from_type(self._UnsupportedType())
with self.assertRaisesRegex(ValueError, r'^can not map python type \'_UnsupportedType\' to a compatible data type$'):
DataType.from_type(self._UnsupportedType)
def test_data_type_from_value_compound_array(self):
for value in [list(), range(0), tuple()]:
value = DataType.from_value(value)
self.assertEqual(value, DataType.ARRAY)
self.assertIs(value.value_type, DataType.UNDEFINED)
self.assertIs(value.iterable_type, DataType.UNDEFINED)
value = DataType.from_value(['test'])
self.assertEqual(value, DataType.ARRAY(DataType.STRING))
self.assertIs(value.value_type, DataType.STRING)
self.assertIs(value.iterable_type, DataType.STRING)
def test_data_type_from_value_compound_mapping(self):
value = DataType.from_value({})
self.assertEqual(value, DataType.MAPPING)
self.assertIs(value.key_type, DataType.UNDEFINED)
self.assertIs(value.value_type, DataType.UNDEFINED)
self.assertIs(value.iterable_type, DataType.UNDEFINED)
value = DataType.from_value({'one': 1})
self.assertEqual(value, DataType.MAPPING(DataType.STRING, DataType.FLOAT))
self.assertIs(value.key_type, DataType.STRING)
self.assertIs(value.value_type, DataType.FLOAT)
self.assertIs(value.iterable_type, DataType.STRING)
def test_data_type_from_value_compound_set(self):
value = DataType.from_value(set())
self.assertEqual(value, DataType.SET)
self.assertIs(value.value_type, DataType.UNDEFINED)
self.assertIs(value.iterable_type, DataType.UNDEFINED)
value = DataType.from_value({'test'})
self.assertEqual(value, DataType.SET(DataType.STRING))
self.assertIs(value.value_type, DataType.STRING)
self.assertIs(value.iterable_type, DataType.STRING)
def test_data_type_from_value_scalar(self):
self.assertIs(DataType.from_value(False), DataType.BOOLEAN)
self.assertIs(DataType.from_value(datetime.date.today()), DataType.DATETIME)
self.assertIs(DataType.from_value(datetime.datetime.now()), DataType.DATETIME)
self.assertIs(DataType.from_value(datetime.timedelta()), DataType.TIMEDELTA)
self.assertIs(DataType.from_value(0), DataType.FLOAT)
self.assertIs(DataType.from_value(0.0), DataType.FLOAT)
self.assertIs(DataType.from_value(None), DataType.NULL)
self.assertIs(DataType.from_value(''), DataType.STRING)
self.assertIs(DataType.from_value(lambda: None), DataType.FUNCTION)
self.assertIs(DataType.from_value(print), DataType.FUNCTION)
def test_data_type_from_value_error(self):
with self.assertRaisesRegex(TypeError, r'^can not map python type \'_UnsupportedType\' to a compatible data type$'):
DataType.from_value(self._UnsupportedType())
def test_data_type_function(self):
with self.assertRaises(TypeError, msg='argument_types should be a sequence'):
DataType.FUNCTION('test', argument_types=DataType.NULL)
with self.assertRaises(ValueError, msg='minimum_arguments should be less than or equal to the length of argument_types'):
DataType.FUNCTION('test', argument_types=(), minimum_arguments=1)
def test_data_type_definitions_describe_themselves(self):
for name in DataType:
if name == 'UNDEFINED':
continue
data_type = getattr(DataType, name)
self.assertRegex(repr(data_type), 'name=' + name)
class MetaDataTypeTests(unittest.TestCase):
def test_data_type_is_iterable(self):
self.assertGreater(len(DataType), 0)
for name in DataType:
self.assertIsInstance(name, str)
self.assertRegex(name, r'^[A-Z]+$')
def test_data_type_is_compatible(self):
def _is_compat(*args):
return self.assertTrue(DataType.is_compatible(*args))
def _is_not_compat(*args):
return self.assertFalse(DataType.is_compatible(*args))
_is_compat(DataType.STRING, DataType.STRING)
_is_compat(DataType.STRING, DataType.UNDEFINED)
_is_compat(DataType.UNDEFINED, DataType.STRING)
_is_compat(DataType.UNDEFINED, DataType.ARRAY)
_is_compat(DataType.ARRAY, DataType.ARRAY(DataType.STRING))
_is_not_compat(DataType.STRING, DataType.ARRAY)
_is_not_compat(DataType.STRING, DataType.NULL)
_is_not_compat(DataType.ARRAY(DataType.STRING), DataType.ARRAY(DataType.FLOAT))
_is_compat(DataType.MAPPING, DataType.MAPPING)
_is_compat(
DataType.MAPPING(DataType.STRING),
DataType.MAPPING(DataType.STRING, value_type=DataType.ARRAY)
)
_is_compat(
DataType.MAPPING(DataType.STRING, value_type=DataType.ARRAY),
DataType.MAPPING(DataType.STRING, value_type=DataType.ARRAY(DataType.STRING))
)
_is_not_compat(
DataType.MAPPING(DataType.STRING),
DataType.MAPPING(DataType.FLOAT)
)
_is_not_compat(
DataType.MAPPING(DataType.STRING, value_type=DataType.STRING),
DataType.MAPPING(DataType.STRING, value_type=DataType.FLOAT)
)
with self.assertRaises(TypeError):
DataType.is_compatible(DataType.STRING, None)
def test_data_type_is_compatible_function(self):
def _is_compat(*args):
return self.assertTrue(DataType.is_compatible(*args))
def _is_not_compat(*args):
return self.assertFalse(DataType.is_compatible(*args))
# the function name doesn't matter, it's only for reporting
_is_compat(
DataType.FUNCTION('functionA'),
DataType.FUNCTION('functionB')
)
# return type is UNDEFINED by default which should be compatible
_is_compat(
DataType.FUNCTION('test', return_type=DataType.FLOAT),
DataType.FUNCTION('test')
)
# argument types are UNDEFINED by default which should be compatible
_is_compat(
DataType.FUNCTION('test', argument_types=(DataType.STRING,), minimum_arguments=1),
DataType.FUNCTION('test', minimum_arguments=1)
)
# minimum arguments defaults to the number of arguments
_is_compat(
DataType.FUNCTION('test', argument_types=(DataType.STRING,), minimum_arguments=1),
DataType.FUNCTION('test', argument_types=(DataType.STRING,))
)
_is_not_compat(
DataType.FUNCTION('test', return_type=DataType.FLOAT),
DataType.FUNCTION('test', return_type=DataType.STRING)
)
_is_not_compat(
DataType.FUNCTION('test', argument_types=(DataType.STRING,)),
DataType.FUNCTION('test', argument_types=())
)
_is_not_compat(
DataType.FUNCTION('test', argument_types=(DataType.FLOAT,)),
DataType.FUNCTION('test', argument_types=(DataType.STRING,))
)
_is_not_compat(
DataType.FUNCTION('test', minimum_arguments=0),
DataType.FUNCTION('test', minimum_arguments=1)
)
def test_data_type_is_definition(self):
self.assertTrue(DataType.is_definition(DataType.ARRAY))
self.assertFalse(DataType.is_definition(1))
self.assertFalse(DataType.is_definition(None))
def test_data_type_supports_contains(self):
self.assertIn('STRING', DataType)
def test_data_type_supports_getitem(self):
dt = DataType['STRING']
self.assertEqual(dt, DataType.STRING)
inf = float('inf')
nan = float('nan')
class ValueIsTests(unittest.TestCase):
_Case = collections.namedtuple('_Case', ('value', 'numeric', 'real', 'integer', 'natural'))
cases = (
# value numeric real integer natural
_Case(-inf, True, False, False, False),
_Case(-1.5, True, True, False, False),
_Case(-1.0, True, True, True, False),
_Case(-1, True, True, True, False),
_Case(0, True, True, True, True ),
_Case(1, True, True, True, True ),
_Case(1.0, True, True, True, True ),
_Case(1.5, True, True, False, False),
_Case(inf, True, False, False, False),
_Case(nan, True, False, False, False),
_Case(True, False, False, False, False),
_Case(False, False, False, False, False),
_Case('', False, False, False, False),
_Case(None, False, False, False, False),
)
def test_value_is_integer_number(self):
for case in self.cases:
self.assertEqual(types.is_integer_number(case.value), case.integer)
def test_value_is_natural_number(self):
for case in self.cases:
self.assertEqual(types.is_natural_number(case.value), case.natural)
def test_value_is_numeric(self):
for case in self.cases:
self.assertEqual(types.is_numeric(case.value), case.numeric)
def test_value_is_real_number(self):
for case in self.cases:
self.assertEqual(types.is_real_number(case.value), case.real)
if __name__ == '__main__':
unittest.main()
```
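The assertions above double as documentation for rule-engine's type-introspection surface. A short usage sketch of the same calls the tests exercise:

```python
import datetime
import rule_engine.types as types

DataType = types.DataType
print(DataType.from_value({'one': 1}))    # a MAPPING(STRING, FLOAT) definition, per the tests
print(DataType.from_type(datetime.date))  # DATETIME
print(DataType.is_compatible(DataType.STRING, DataType.UNDEFINED))  # True
```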
**Row 4 · `google-research/federated` · `/generalization/synthesization/gmm_embedding_test.py`**

- blob_id `d8f744f81327eeaed2868ff8b8623f3c7d8fbd52` · directory_id `1bb42bac177fb4e979faa441363c27cb636a43aa` · content_id `cf9cfc16f64cd1107e14cb2970aad9fc4035c489`
- snapshot_id `a6040e80fa0fbf533e0d665c66a9bc549d208b3d` · revision_id `329e60fa56b87f691303638ceb9dfa1fc5083953` · branch_name `refs/heads/master`
- detected_licenses `["BSD-3-Clause", "Apache-2.0"]` · license_type `permissive` · gha_license_id Apache-2.0
- visit_date 2023-08-28T13:10:10.885505 · revision_date 2023-08-22T23:06:08 · committer_date 2023-08-22T23:06:40 · gha_event_created_at 2022-05-12T08:42:53 · gha_created_at 2020-09-14T23:09:07
- github_id 295,559,343 · star_events_count 595 · fork_events_count 187 · gha_language Python
- language Python · src_encoding UTF-8 · is_vendor false · is_generated false · length_bytes 5,547 · extension `py` · filename `gmm_embedding_test.py`

```python
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gmm_embedding."""
import collections
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from generalization.synthesization import gmm_embedding
tfd = tfp.distributions
def _build_fake_dataset(num_elems=1000) -> tf.data.Dataset:
rng = np.random.default_rng(1)
return tf.data.Dataset.from_tensor_slices(
collections.OrderedDict(
image=np.array(
rng.integers(0, 256, (num_elems, 4, 4, 3)), dtype=np.uint8),
label=np.array(rng.integers(0, 4, (num_elems,)), dtype=np.int64)))
def _build_fake_pretrained_model() -> tf.keras.Model:
return tf.keras.models.Sequential([tf.keras.layers.Flatten()])
def _fake_tril(dim):
a = tf.random.normal((dim, dim))
return tf.linalg.cholesky(
tf.matmul(a, a, transpose_a=True) + tf.eye(dim) * 1e-6)
def _pairwise_kl_divergence_between_multivariate_normal_tril_element_wise(
means_1: tf.Tensor, trils_1: tf.Tensor, means_2: tf.Tensor,
trils_2: tf.Tensor) -> tf.Tensor:
"""Compute pairwise KL divergence matrix element-wise."""
pairwise_matrix = tf.Variable(
tf.zeros((means_1.shape[0], means_2.shape[0]), dtype=means_1.dtype))
for i in range(means_1.shape[0]):
for j in range(means_2.shape[0]):
pairwise_matrix[i, j].assign(
tfd.kl_divergence(
tfd.MultivariateNormalTriL(means_1[i, :], trils_1[i, :, :]),
tfd.MultivariateNormalTriL(means_2[j, :], trils_2[j, :, :])))
return tf.convert_to_tensor(pairwise_matrix)
class PairwiseKLTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(((f'batch_size = {batch_size}', batch_size)
for batch_size in [1, 2, 3, 5, 7, 11, 13]))
def test_pairwise_kl_divergence_in_batch_returns_the_same_result_as_element_wise(
self, batch_size):
num_dist_1 = 7
num_dist_2 = 11
dim = 13
means_1 = tf.random.normal((num_dist_1, dim))
means_2 = tf.random.normal((num_dist_2, dim))
trils_1 = tf.stack([_fake_tril(dim) for _ in range(num_dist_1)])
trils_2 = tf.stack([_fake_tril(dim) for _ in range(num_dist_2)])
obtained_pairwise_matrix = gmm_embedding.pairwise_kl_divergence_between_multivariate_normal_tril_in_batch(
means_1, trils_1, means_2, trils_2, batch_size=batch_size)
expected_pairwise_matrix = _pairwise_kl_divergence_between_multivariate_normal_tril_element_wise(
means_1, trils_1, means_2, trils_2)
self.assertAllClose(expected_pairwise_matrix, obtained_pairwise_matrix)
class GmmEmbeddingTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('case 1', None, True),
('case 2', 4, True),
('case 3', None, False),
('case 4', 4, False),
)
def test_gmm_embedding(self, pca_components, use_progressive_matching):
dataset = _build_fake_dataset()
num_clients = 4
cd = gmm_embedding.synthesize_by_gmm_over_pretrained_embedding(
dataset,
pretrained_model_builder=_build_fake_pretrained_model,
num_clients=num_clients,
pca_components=pca_components,
use_progressive_matching=use_progressive_matching)
self.assertCountEqual(cd.client_ids, list(map(str, range(num_clients))))
for client_id in cd.client_ids:
client_ds = cd.create_tf_dataset_for_client(client_id)
self.assertEqual(client_ds.element_spec, dataset.element_spec)
@parameterized.named_parameters(
('case 1', None, True),
('case 2', 4, True),
('case 3', None, False),
('case 4', 4, False),
)
def test_gmm_embedding_use_seed(self, pca_components,
use_progressive_matching):
dataset = _build_fake_dataset()
num_clients = 4
cd1 = gmm_embedding.synthesize_by_gmm_over_pretrained_embedding(
dataset,
pretrained_model_builder=_build_fake_pretrained_model,
num_clients=num_clients,
pca_components=pca_components,
use_progressive_matching=use_progressive_matching,
seed=1)
cd2 = gmm_embedding.synthesize_by_gmm_over_pretrained_embedding(
dataset,
pretrained_model_builder=_build_fake_pretrained_model,
num_clients=num_clients,
pca_components=pca_components,
use_progressive_matching=use_progressive_matching,
seed=1)
self.assertCountEqual(cd1.client_ids, list(map(str, range(num_clients))))
self.assertCountEqual(cd2.client_ids, list(map(str, range(num_clients))))
for client_id in cd1.client_ids:
client_ds1_list = list(cd1.create_tf_dataset_for_client(client_id))
client_ds2_list = list(cd2.create_tf_dataset_for_client(client_id))
self.assertEqual(len(client_ds1_list), len(client_ds2_list))
for elem1, elem2 in zip(client_ds1_list, client_ds2_list):
self.assertAllEqual(elem1['image'], elem2['image'])
if __name__ == '__main__':
tf.test.main()
```
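The batch-vs-element-wise test above relies on `tfd.kl_divergence` having a registered closed form for pairs of `MultivariateNormalTriL` distributions. A standalone sketch of a single pairwise term:

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
dim = 3
a = tf.random.normal((dim, dim))
# Same trick as _fake_tril above: a well-conditioned Cholesky factor.
tril = tf.linalg.cholesky(tf.matmul(a, a, transpose_a=True) + tf.eye(dim) * 1e-6)
p = tfd.MultivariateNormalTriL(tf.zeros(dim), tril)
q = tfd.MultivariateNormalTriL(tf.ones(dim), tril)
print(tfd.kl_divergence(p, q))  # scalar KL(p || q); zero when p and q coincide
```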
**Row 5 · `google/uncertainty-baselines` · `/baselines/diabetic_retinopathy_detection/utils/data_utils.py`**

- blob_id `5bd1a24b3e636e86100f2222bbd2831dc78d11b1` · directory_id `7a15271c7cddd199f43555469a67d26ce0f60836` · content_id `fbf8a583e526423175a1b5e20ebf88e20555307c`
- snapshot_id `b2c339d918bf3949ee066f9eafa6b51232a2ac3d` · revision_id `f5f6f50f82bd441339c9d9efbef3f09e72c5fef6` · branch_name `refs/heads/main`
- detected_licenses `["Apache-2.0"]` · license_type `permissive` · gha_license_id Apache-2.0
- visit_date 2023-09-02T13:59:26.355288 · revision_date 2023-08-14T16:35:22 · committer_date 2023-08-14T16:36:11 · gha_event_created_at 2023-09-11T22:21:48 · gha_created_at 2020-07-16T01:54:32
- github_id 280,026,201 · star_events_count 1,235 · fork_events_count 198 · gha_language Python
- language Python · src_encoding UTF-8 · is_vendor false · is_generated false · length_bytes 11,945 · extension `py` · filename `data_utils.py`

```python
# coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Diabetic Retinopathy Data Loading utils."""
# pylint: disable=g-bare-generic
# pylint: disable=g-doc-args
# pylint: disable=g-doc-return-or-yield
# pylint: disable=g-importing-member
# pylint: disable=g-no-space-after-docstring-summary
# pylint: disable=g-short-docstring-punctuation
# pylint: disable=logging-format-interpolation
# pylint: disable=logging-fstring-interpolation
# pylint: disable=missing-function-docstring
import logging
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
def load_kaggle_severity_shift_dataset(train_batch_size,
eval_batch_size,
flags,
strategy,
load_for_eval=False):
"""Partitioning of the Kaggle/EyePACS Diabetic Retinopathy dataset to hold out certain clinical severity levels as OOD.
Optionally exclude train split (e.g., loading for evaluation) in flags.
See runscripts for more information on loading options.
Args:
train_batch_size: int.
eval_batch_size: int.
flags: FlagValues, runscript flags.
strategy: tf.distribute strategy, used to distribute datasets.
load_for_eval: Bool, if True, does not truncate the last batch (for
standardized evaluation).
Returns:
Dict of datasets, Dict of number of steps per dataset.
"""
assert flags.use_validation
data_dir = flags.data_dir
load_train_split = flags.load_train_split
# Using the decision threshold between severity levels 0 and 1, we define
# the in-domain (training) task as
# Binary classification between examples with severity levels 0 and 1
# We consider examples with levels 2,3,4 as OOD
# This leaves a few thousand examples previously segmented in the Kaggle
# training set (the 2,3,4 ones) which we now group into the OOD test set.
#
# We have split sizes:
if flags.dr_decision_threshold == 'mild':
split_to_num_examples = {
'in_domain_validation': 8850,
'ood_validation': 2056,
'in_domain_test': 34445,
'ood_test': 15098
}
if train_batch_size is not None:
split_to_num_examples['train'] = 28253
elif flags.dr_decision_threshold == 'moderate':
split_to_num_examples = {
'in_domain_validation': 10429,
'ood_validation': 477,
'in_domain_test': 40727,
'ood_test': 3524
}
if train_batch_size is not None:
split_to_num_examples['train'] = 33545
else:
raise NotImplementedError(
f'Unknown decision threshold {flags.dr_decision_threshold}.')
split_to_batch_size = {
'in_domain_validation': eval_batch_size,
'ood_validation': eval_batch_size,
'in_domain_test': eval_batch_size,
'ood_test': eval_batch_size
}
if train_batch_size is not None:
split_to_batch_size['train'] = train_batch_size
split_to_steps_per_epoch = {
split: num_examples // split_to_batch_size[split]
for split, num_examples in split_to_num_examples.items()
}
splits_to_return = ['in_domain_validation', 'ood_validation']
if load_train_split:
splits_to_return = ['train'] + splits_to_return
if flags.use_test:
splits_to_return = splits_to_return + ['in_domain_test', 'ood_test']
dataset_name = (
f'diabetic_retinopathy_severity_shift_{flags.dr_decision_threshold}')
split_to_dataset = {}
for split in splits_to_return:
dataset_builder = ub.datasets.get(
dataset_name,
split=split,
data_dir=data_dir,
cache=(flags.cache_eval_datasets and split != 'train'),
drop_remainder=not load_for_eval,
builder_config=f'{dataset_name}/{flags.preproc_builder_config}')
dataset = dataset_builder.load(batch_size=split_to_batch_size[split])
if strategy is not None:
dataset = strategy.experimental_distribute_dataset(dataset)
split_to_dataset[split] = dataset
return split_to_dataset, split_to_steps_per_epoch
def load_kaggle_aptos_country_shift_dataset(train_batch_size,
eval_batch_size,
flags,
strategy,
load_for_eval=False):
"""Full Kaggle/EyePACS Diabetic Retinopathy dataset, including OOD validation/test sets (APTOS).
Optionally exclude train split (e.g., loading for evaluation) in flags.
See runscripts for more information on loading options.
Args:
train_batch_size: int.
eval_batch_size: int.
flags: FlagValues, runscript flags.
strategy: tf.distribute strategy, used to distribute datasets.
load_for_eval: Bool, if True, does not truncate the last batch.
Returns:
Dict of datasets, Dict of number of steps per dataset.
"""
data_dir = flags.data_dir
load_train_split = flags.load_train_split
# * Load Steps Per Epoch for Each Dataset *
split_to_steps_per_epoch = {}
# As per the Kaggle challenge, we have split sizes for the EyePACS subsets:
# train: 35,126
# validation: 10,906
# test: 42,670
ds_info = tfds.builder('diabetic_retinopathy_detection').info
if load_train_split:
split_to_steps_per_epoch['train'] = (
ds_info.splits['train'].num_examples // train_batch_size)
split_to_steps_per_epoch['in_domain_validation'] = (
ds_info.splits['validation'].num_examples // eval_batch_size)
split_to_steps_per_epoch['in_domain_test'] = (
ds_info.splits['test'].num_examples // eval_batch_size)
# APTOS Evaluation Data
split_to_steps_per_epoch['ood_validation'] = 733 // eval_batch_size
split_to_steps_per_epoch['ood_test'] = 2929 // eval_batch_size
# * Load Datasets *
split_to_dataset = {}
dr_dataset_name = 'ub_diabetic_retinopathy_detection'
# Load validation data
dataset_validation_builder = ub.datasets.get(
dr_dataset_name,
split='validation',
data_dir=data_dir,
is_training=not flags.use_validation,
decision_threshold=flags.dr_decision_threshold,
cache=flags.cache_eval_datasets,
drop_remainder=not load_for_eval,
builder_config=f'{dr_dataset_name}/{flags.preproc_builder_config}')
validation_batch_size = (
eval_batch_size if flags.use_validation else train_batch_size)
dataset_validation = dataset_validation_builder.load(
batch_size=validation_batch_size)
# If `flags.use_validation`, then we distribute the validation dataset
# independently and add as a separate dataset.
# Otherwise, we concatenate it with the training data below.
if flags.use_validation:
# Load APTOS validation dataset
aptos_validation_builder = ub.datasets.get(
'aptos',
split='validation',
data_dir=data_dir,
decision_threshold=flags.dr_decision_threshold,
cache=flags.cache_eval_datasets,
drop_remainder=not load_for_eval,
builder_config=f'aptos/{flags.preproc_builder_config}')
dataset_ood_validation = aptos_validation_builder.load(
batch_size=eval_batch_size)
if strategy is not None:
dataset_validation = strategy.experimental_distribute_dataset(
dataset_validation)
dataset_ood_validation = strategy.experimental_distribute_dataset(
dataset_ood_validation)
split_to_dataset['in_domain_validation'] = dataset_validation
split_to_dataset['ood_validation'] = dataset_ood_validation
if load_train_split:
# Load EyePACS train data
dataset_train_builder = ub.datasets.get(
dr_dataset_name,
split='train',
data_dir=data_dir,
decision_threshold=flags.dr_decision_threshold,
builder_config=f'{dr_dataset_name}/{flags.preproc_builder_config}')
dataset_train = dataset_train_builder.load(batch_size=train_batch_size)
if not flags.use_validation:
# TODO(nband): investigate validation dataset concat bug
# Note that this will not create any mixed batches of
# train and validation images.
# dataset_train = dataset_train.concatenate(dataset_validation)
raise NotImplementedError(
'Existing bug involving the number of steps not being adjusted after '
'concatenating the validation dataset. Needs verifying.')
if strategy is not None:
dataset_train = strategy.experimental_distribute_dataset(dataset_train)
split_to_dataset['train'] = dataset_train
if flags.use_test:
# In-Domain Test
dataset_test_builder = ub.datasets.get(
dr_dataset_name,
split='test',
data_dir=data_dir,
decision_threshold=flags.dr_decision_threshold,
cache=flags.cache_eval_datasets,
drop_remainder=not load_for_eval,
builder_config=f'{dr_dataset_name}/{flags.preproc_builder_config}')
dataset_test = dataset_test_builder.load(batch_size=eval_batch_size)
if strategy is not None:
dataset_test = strategy.experimental_distribute_dataset(dataset_test)
split_to_dataset['in_domain_test'] = dataset_test
# OOD (APTOS) Test
aptos_test_builder = ub.datasets.get(
'aptos',
split='test',
data_dir=data_dir,
decision_threshold=flags.dr_decision_threshold,
cache=flags.cache_eval_datasets,
drop_remainder=not load_for_eval,
builder_config=f'aptos/{flags.preproc_builder_config}')
dataset_ood_test = aptos_test_builder.load(batch_size=eval_batch_size)
if strategy is not None:
dataset_ood_test = strategy.experimental_distribute_dataset(
dataset_ood_test)
split_to_dataset['ood_test'] = dataset_ood_test
return split_to_dataset, split_to_steps_per_epoch
def load_dataset(train_batch_size,
eval_batch_size,
flags,
strategy,
load_for_eval=False):
"""Retrieve the in-domain and OOD datasets for a given distributional shift task in diabetic retinopathy.
Optionally exclude train split (e.g., loading for evaluation) in flags.
See runscripts for more information on loading options.
Args:
train_batch_size: int.
eval_batch_size: int.
flags: FlagValues, runscript flags.
strategy: tf.distribute strategy, used to distribute datasets.
load_for_eval: Bool, if True, does not truncate the last batch.
Returns:
Dict of datasets, Dict of number of steps per dataset.
"""
distribution_shift = flags.distribution_shift
if distribution_shift == 'severity':
datasets, steps = load_kaggle_severity_shift_dataset(
train_batch_size,
eval_batch_size,
flags=flags,
strategy=strategy,
load_for_eval=load_for_eval)
elif distribution_shift == 'aptos' or distribution_shift is None:
datasets, steps = load_kaggle_aptos_country_shift_dataset(
train_batch_size,
eval_batch_size,
flags=flags,
strategy=strategy,
load_for_eval=load_for_eval)
else:
raise NotImplementedError(
'Only support `severity` and `aptos` dataset partitions '
'(None defaults to APTOS).')
logging.info(f'Datasets using builder config {flags.preproc_builder_config}.')
logging.info(f'Successfully loaded the following dataset splits from the '
f'{distribution_shift} shift dataset: {list(datasets.keys())}')
return datasets, steps
```
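All three loaders above turn split sizes into steps per epoch by integer division, dropping any partial batch when `drop_remainder=True`. A tiny worked example, using the 'moderate' threshold's in-domain validation split size from the code and an illustrative batch size:

```python
num_examples = 10429    # 'in_domain_validation' size for the 'moderate' threshold
eval_batch_size = 64    # illustrative; set by the runscript in practice
steps_per_epoch = num_examples // eval_batch_size
print(steps_per_epoch)  # 162 full batches; the 61 leftover examples form the dropped remainder
```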
**Row 6 · `dguenms/Dawn-of-Civilization` · `/Assets/Python/pyHelper/Popup.py`**

- blob_id `5dd79e004748134b276777359b8d8b81eb03ad24` · directory_id `6436d1e6c23f9f43a8025889dc4414a3ad66acf2` · content_id `9f4dc33eecb2960f1d4e3ac6fb8642d4429a7e10`
- snapshot_id `b710195c4f46fe11d9229182c3b1e07b77f42637` · revision_id `a305e7846d085d6edf1e9c472e8dfceee1c07dd4` · branch_name `refs/heads/develop`
- detected_licenses `["MIT"]` · license_type `permissive` · gha_license_id MIT
- visit_date 2023-09-04T04:57:00.086384 · revision_date 2023-09-01T15:24:28 · committer_date 2023-09-01T15:24:28 · gha_event_created_at 2023-02-08T00:18:53 · gha_created_at 2015-11-01T23:52:28
- github_id 45,362,597 · star_events_count 116 · fork_events_count 121 · gha_language C++
- language Python · src_encoding UTF-8 · is_vendor false · is_generated false · length_bytes 10,559 · extension `py` · filename `Popup.py`

```python
## Sid Meier's Civilization 4
## Copyright Firaxis Games 2005
import CvUtil
from CvPythonExtensions import *
class PyPopup:
############## S E T U P F U N C T I O N S ###################
def __init__(self, popupID=-1, contextType=EventContextTypes.NO_EVENTCONTEXT, bDynamic = True):
self.ID = popupID
self.popup = CyPopup(popupID, contextType, bDynamic)
# Misc!
def isNone(self):
"verifies valid instance"
return self.popup.isNone()
def launch(self, bCreateOkButton = True, eState = PopupStates.POPUPSTATE_IMMEDIATE):
"sets attributes to the popup launch"
self.popup.launch(bCreateOkButton, eState)
def setUserData(self, userData):
"sets userData that is passed to OnOkClicked"
self.popup.setUserData(userData)
def setPosition(self, iX, iY):
"set the location of the popup"
self.popup.setPosition(iX, iY)
def setSize(self, iXS, iYS):
"sets the popups size"
self.popup.setSize(iXS, iYS)
def addSeparator(self):
"adds a separator"
self.popup.addSeparator()
# Header String
def setHeaderString( self, strText, uiFlags = CvUtil.FONT_CENTER_JUSTIFY ):
"sets the header text"
self.popup.setHeaderString( strText, uiFlags )
# Body String
def setBodyString( self, strText, uiFlags = CvUtil.FONT_LEFT_JUSTIFY ):
"sets the body text"
self.popup.setBodyString( strText, uiFlags )
def setPythonBodyString( self, strText, strName, strHelpText, uiFlags = CvUtil.FONT_LEFT_JUSTIFY ):
"PYTHON - sets a body string with help text"
self.popup.setPythonBodyString( strText, strName, strHelpText, uiFlags )
def setPythonBodyStringXY( self, strText, strName, strHelpText, iX = -1, iY = -1, uiFlags = CvUtil.FONT_LEFT_JUSTIFY ):
self.popup.setPythonBodyStringXY( strText, strName, strHelpText, iX, iY, uiFlags )
# Radio Buttons
def createRadioButtons( self, iNumButtons, iGroup = 0 ):
"creates radio buttons - only 1 set allowed per popup"
self.popup.createRadioButtons( iNumButtons, iGroup )
def setRadioButtonText( self, iRadioButtonID, strText, iGroup = 0 ):
"sets radio button text - 0 based IDs"
self.popup.setRadioButtonText( iRadioButtonID, strText, iGroup )
def createPythonRadioButtons( self, iNumButtons, iGroup = 0 ):
"creates python radio buttons - only 1 set allowed per popup"
self.popup.createPythonRadioButtons( iNumButtons, iGroup )
def setPythonRadioButtonText( self, iRadioButtonID, strText, strHelpText, iGroup = 0 ):
"sets python radio button text - 0 based IDs"
self.popup.setPythonRadioButtonText( iRadioButtonID, strText, strHelpText, iGroup )
# Check Boxes
def createCheckBoxes( self, iNumBoxes, iGroup = 0 ):
"creates check boxes - only 1 set allowed per popup"
self.popup.createCheckBoxes( iNumBoxes, iGroup )
def setCheckBoxText( self, iCheckBoxID, strText, iGroup = 0 ):
"sets the check box text"
self.popup.setCheckBoxText( iCheckBoxID, strText, iGroup )
def createPythonCheckBoxes( self, iNumBoxes, iGroup = 0 ):
"creates Python check boxes - only 1 set allowed per popup"
self.popup.createPythonCheckBoxes( iNumBoxes, iGroup )
def setPythonCheckBoxText( self, iCheckBoxID, strText, strHelpText, iGroup = 0 ):
"sets the Python check box text"
self.popup.setPythonCheckBoxText( iCheckBoxID, strText, strHelpText, iGroup )
# Edit Boxes
def createEditBox( self, strText, iGroup = 0):
"adds an edit box"
self.popup.createEditBox( strText, iGroup )
def createEditBoxXY( self, strText, iX = -1, iY = -1, iGroup = 0):
"adds an edit box at XY"
self.popup.createEditBox( strText, iX, iY, iGroup )
def createPythonEditBox( self, strText, strHelpText, iGroup = 0):
"adds an Python edit box"
self.popup.createPythonEditBox( strText, strHelpText, iGroup )
def createPythonEditBoxXY( self, strText, strHelpText, iGroup = 0, iX = -1, iY = -1 ):
"adds an Python edit box at XY"
self.popup.createPythonEditBoxXY( strText, strHelpText, iGroup, iX, iY )
def setEditBoxMaxCharCount( self, maxCharCount, preferredCharCount = 32, iGroup = 0 ):
"set the max character count and the preferred character count of the edit box"
self.popup.setEditBoxMaxCharCount( maxCharCount, preferredCharCount, iGroup )
# Pull Down
def createPullDown( self, iGroup = 0 ):
"creates a pulldown menu"
self.popup.createPullDown( iGroup )
def createPullDownXY( self, iGroup = 0, iX = -1, iY = -1 ):
"creates a pulldown menu at XY"
self.popup.createPullDownXY( iGroup, iX, iY )
def addPullDownString( self, strText, iID, iGroup = 0 ):
"adds text to the pulldown"
self.popup.addPullDownString( strText, iID, iGroup )
def createPythonPullDown( self, strHelpText, iGroup = 0 ):
"creates a Python pulldown menu"
self.popup.createPythonPullDown( strHelpText, iGroup )
def createPythonPullDownXY( self, strHelpText, iGroup = 0, iX = -1, iY = -1 ):
"creates a Python pulldown menu at XY"
self.popup.createPythonPullDownXY( strHelpText, iGroup, iX, iY )
# List Box
def createListBox( self, iGroup = 0 ):
"creates a listbox"
self.popup.createListBox( iGroup )
def createListBoxXY( self, iGroup = 0, iX = -1, iY = -1 ):
"creates a listbox at XY"
self.popup.createListBoxXY( iGroup, iX, iY )
def addListBoxString( self, strText, iID, iGroup = 0 ):
"adds list box IDs"
self.popup.addListBoxString( strText, iID, iGroup )
def createPythonListBox( self, strHelpText, iGroup = 0 ):
"creates a Python listbox"
self.popup.createPythonListBox( strHelpText, iGroup )
def createPythonListBoxXY( self, strHelpText, iGroup = 0, iX = -1, iY = -1 ):
"creates a Python listbox at XY"
self.popup.createPythonListBoxXY( strHelpText, iGroup, iX, iY )
# spin Box
def createSpinBox( self, iIndex, strHelpText, iDefault, iIncrement, iMax, iMin ):
"creates a listbox"
self.popup.createSpinBox( iIndex, strHelpText, iDefault, iIncrement, iMax, iMin )
# Buttons
def addButton( self, strText ):
"adds a Button"
self.popup.addButton( strText )
def addButtonXY( self, strText, iX = -1, iY = -1 ):
"adds a Button at XY"
self.popup.addButtonXY( strText, iX, iY )
def addPythonButton( self, strFunctionName, strButtonText, strHelpText, strArtPointer = "Art\Interface\Popups\PopupRadioButton.kfm", iData1 = -1, iData2 = -1, bOption = True):
"adds a python button"
self.popup.addPythonButton( strFunctionName, strButtonText, strHelpText, strArtPointer, iData1, iData2, bOption )
def addPythonButtonXY( self, strFunctionName, strButtonText, strHelpText, strArtPointer = "Art\Interface\Popups\PopupRadioButton.kfm", iData1 = -1, iData2 = -1, bOption = True, iX = -1, iY = -1 ):
"adds a python button at XY"
# Unofficial Patch begin
self.popup.addPythonButtonXY( strFunctionName, strButtonText, strHelpText, strArtPointer, iData1, iData2, bOption, iX, iY )
# Unofficial Patch end
# Graphics
def addDDS( self, strImageLocation, iX, iY, iWidth, iHeight ):
"adds a DDS"
self.popup.addDDS( strImageLocation, iX, iY, iWidth, iHeight )
def addPythonDDS( self, strImageLocation, strHelpText, iX, iY, iWidth, iHeight ):
"adds a DDS"
self.popup.addPythonDDS( strImageLocation, strHelpText, iX, iY, iWidth, iHeight )
############## T A B L E F U N C T I O N S ###################
def createTable( self, iRows, iColumns, iGroup = 0 ):
"creates a table that is size X, Y with GroupID"
self.popup.createTable( iRows, iColumns, iGroup )
#CvUtil.pyPrint( "py.Popup createTable( %d, %d )" %(iRows, iColumns) )
def setTableCellSize( self, iCol, iPixels, iGroup = 0 ):
"set the size of the Cell - required before info is added"
self.popup.setTableCellSize( iCol, iPixels, iGroup )
def setTableYSize( self, iRow, iSize, iGroup = 0 ):
"sets the size of the Row"
self.popup.setTableYSize( iRow, iSize, iGroup )
def addTableCellText( self, iRow, iCol, strText, iGroup = 0):
"adds text to a Cell"
if strText == 0 or strText == False or strText == 'None':
self.addTableBlank( iRow, iCol, iGroup )
return
self.popup.addTableCellText( iRow, iCol, unicode(strText), iGroup )
def addTableBlank( self, iRow, iCol, iGroup = 0 ):
"adds a blank entry to a table"
self.addTableCellText( iRow, iCol, "", iGroup )
def addTableCellImage( self, iRow, iCol, strImageLocation, iGroup = 0 ):
"sets a table cell to locate a cell in the table and have it use an image"
if strImageLocation:
self.popup.addTableCellImage( iRow, iCol, str(strImageLocation), iGroup )
return
self.addTableBlank( iRow, iCol, iGroup )
def addTableCellDDS( self, iRow, iCol, strImageLocation, iX = 5, iY = 5, iWidth = 20, iHeight = 20, iGroup = 0 ):
"adds a DDS image to the popup - iX/iY are location, iWidth/iHeight adjust the DDS size"
if strImageLocation:
self.popup.addTableCellDDS( iRow, iCol, str(strImageLocation), iX, iY, iWidth, iHeight, iGroup )
return
self.addTableBlank( iRow, iCol, iGroup )
    def completeTableAndAttach( self, iGroup = 0, iX = -1, iY = -1 ):
        "completes a table and attaches it to the popup (iX/iY are accepted for API symmetry but not forwarded)"
        self.popup.completeTableAndAttach( iGroup )
############ G R O U P T A B L E S #########################
    def addTitleData(self, TitleList, SizeYTitle = 34):
        "Takes a list of (iType, strName, iSize) title tuples; for DDS entries the size slot holds (iX, iY, iWidth, iHeight)"
        TEXT = 0
        DDS = 1
        IMG = 2
for i in range(len(TitleList)):
titleType, titleName, titleSize = TitleList[i]
# handle setting title size
self.setTableCellSize(i, titleSize)
if titleType: # if image type
strFileLocation = titleName #file location is the 2nd entry
                if titleType == DDS:
                    # Unofficial Patch start
                    #data = loopTitle[3]
                    #iX, iY, iWidth, iHeight = data
                    iX, iY, iWidth, iHeight = titleSize
                    # Unofficial Patch end
                    self.addTableCellDDS(0, i, strFileLocation, iX, iY, iWidth, iHeight)
elif titleType == IMG:
self.popup.addTableCellImage(0, i, strFileLocation)
else:
self.addTableCellText(0, i, titleName)
self.setTableYSize(0, SizeYTitle)
def addTableData(self, TableData):
'Adds data to the table'
iLenTable = len(TableData)
for i in range(iLenTable):
loopRowData = TableData[i]
self.addTableCellText(i+1, 0, loopRowData[0])
for j in range(len(loopRowData[1])):
self.addTableCellText(i+1, j+1, loopRowData[1][j])
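    # A hedged usage sketch of the table helpers above. `popup` stands for an
    # instance of this wrapper class (its constructor is not shown here); the
    # shapes follow addTitleData/addTableData: a 3x2 table with plain-text
    # titles (iType 0), then one label plus one value column per data row.
    #
    # popup.createTable(3, 2)
    # popup.addTitleData([(0, "Name", 120), (0, "Score", 60)])
    # popup.addTableData([("Alice", ["10"]), ("Bob", ["7"])])
    # popup.completeTableAndAttach()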
# ==== huawei-noah/xingtian :: /zeus/networks/tensorflow/losses/__init__.py (MIT) ====
from .cross_entropy_loss import CrossEntropyLoss
from .mix_auxiliary_loss import MixAuxiliaryLoss
# ==== inventree/InvenTree :: /InvenTree/company/apps.py (MIT) ====
"""Config for the 'company' app"""
from django.apps import AppConfig
class CompanyConfig(AppConfig):
"""Config class for the 'company' app"""
name = 'company'
def ready(self):
"""This function is called whenever the Company app is loaded."""
pass
# ==== HarshCasper/Rotten-Scripts :: /Python/Download_Mangas/download_mangas.py (MIT) ====
import argparse
import requests
from PIL import Image
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from webdriver_manager.chrome import ChromeDriverManager
my_parser = argparse.ArgumentParser()
my_parser.add_argument(
"--name",
action="store",
type=str,
required=True,
help="Enter the name of the manga",
)
my_parser.add_argument(
"--start",
action="store",
type=int,
required=False,
help="Enter the starting chapter for your download",
)
my_parser.add_argument(
"--end",
action="store",
type=int,
required=False,
help="Enter the ending chapter for your download",
)
my_parser.add_argument(
"--chapter",
action="store",
type=int,
required=False,
help="Enter a specific chapter number to download",
)
args = my_parser.parse_args()
name = args.name
starting_chapter = args.start
ending_chapter = args.end
one_chapter = args.chapter
# main function to download each chapter
def chapterDownload(one_chapter, name):
name = name.lower()
name = name.replace(" ", "-")
# webdriver is used to get the number of pages in each chapter
driver = webdriver.Chrome(ChromeDriverManager().install())
URL = "http://www.mangareader.net/" + name + "/" + str(one_chapter)
driver.get(URL)
# the element pageMenu is a drop-down which has options for each page
page = driver.find_element_by_name("pageMenu")
number_of_pages = [x for x in page.find_elements_by_tag_name("option")]
last_page = int(number_of_pages[-1].get_attribute("value").split("/")[-1])
first_page = 1
driver.quit()
images = []
for i in range(first_page, last_page + 1):
# since we now have the number of pages, we can use beautiful soup to get the images
url = (
"http://www.mangareader.net/" + name + "/" + str(one_chapter) + "/" + str(i)
)
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")
image = soup.find("img")
imageURL = image["src"]
data = requests.get(imageURL, stream=True).raw
images.append(data)
main_image = Image.open(images[0]).convert("RGB")
image_list = []
for i in range(1, len(images)):
img = Image.open(images[i]).convert("RGB")
image_list.append(img)
# conversion of a list of images to pdf
filename = name + "-chapter-" + str(one_chapter) + ".pdf"
main_image.save(filename, save_all=True, append_images=image_list)
# to download a single chapter
if one_chapter is not None:
chapterDownload(one_chapter, name)
# to download a range of chapters
elif starting_chapter is not None and ending_chapter is not None:
for i in range(starting_chapter, ending_chapter + 1):
chapterDownload(i, name)
# if nothing is mentioned besides the name of the manga
else:
chapterDownload(1, name)
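# Example invocations (a sketch; assumes Chrome plus a matching chromedriver
# are available and that mangareader.net still serves this URL scheme):
#   python download_mangas.py --name "one piece" --chapter 42
#   python download_mangas.py --name "one piece" --start 1 --end 3
# Each chapter is written as its own PDF, e.g. one-piece-chapter-42.pdf.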
# ==== josiah-wolf-oberholtzer/supriya :: /supriya/ugens/delay.py (MIT) ====
from .bases import UGen, param, ugen
@ugen(ar=True, kr=True, is_pure=True)
class AllpassC(UGen):
"""
A cubic-interpolating allpass delay line unit generator.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> allpass_c = supriya.ugens.AllpassC.ar(source=source)
>>> allpass_c
AllpassC.ar()
"""
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
decay_time = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class AllpassL(UGen):
"""
    A linear-interpolating allpass delay line unit generator.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> allpass_l = supriya.ugens.AllpassL.ar(source=source)
>>> allpass_l
AllpassL.ar()
"""
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
decay_time = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class AllpassN(UGen):
"""
A non-interpolating allpass delay line unit generator.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> allpass_n = supriya.ugens.AllpassN.ar(source=source)
>>> allpass_n
AllpassN.ar()
"""
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
decay_time = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class BufAllpassC(UGen):
"""
A buffer-based cubic-interpolating allpass delay line unit generator.
::
>>> buffer_id = 0
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.BufAllpassC.ar(
... buffer_id=buffer_id,
... source=source,
... )
BufAllpassC.ar()
"""
buffer_id = param(None)
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
decay_time = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class BufAllpassL(UGen):
"""
A buffer-based linear-interpolating allpass delay line unit generator.
::
>>> buffer_id = 0
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.BufAllpassL.ar(
... buffer_id=buffer_id,
... source=source,
... )
BufAllpassL.ar()
"""
buffer_id = param(None)
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
decay_time = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class BufAllpassN(UGen):
"""
A buffer-based non-interpolating allpass delay line unit generator.
::
>>> buffer_id = 0
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.BufAllpassN.ar(
... buffer_id=buffer_id,
... source=source,
... )
BufAllpassN.ar()
"""
buffer_id = param(None)
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
decay_time = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class BufCombC(UGen):
"""
A buffer-based cubic-interpolating comb delay line unit generator.
::
>>> buffer_id = 0
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.BufCombC.ar(
... buffer_id=buffer_id,
... source=source,
... )
BufCombC.ar()
"""
buffer_id = param(None)
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
decay_time = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class BufCombL(UGen):
"""
A buffer-based linear-interpolating comb delay line unit generator.
::
>>> buffer_id = 0
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.BufCombL.ar(
... buffer_id=buffer_id,
... source=source,
... )
BufCombL.ar()
"""
buffer_id = param(None)
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
decay_time = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class BufCombN(UGen):
"""
A buffer-based non-interpolating comb delay line unit generator.
::
>>> buffer_id = 0
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.BufCombN.ar(
... buffer_id=buffer_id,
... source=source,
... )
BufCombN.ar()
"""
buffer_id = param(None)
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
decay_time = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class BufDelayC(UGen):
"""
A buffer-based cubic-interpolating delay line unit generator.
::
>>> buffer_id = 0
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.BufDelayC.ar(
... buffer_id=buffer_id,
... source=source,
... )
BufDelayC.ar()
"""
buffer_id = param(None)
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
@ugen(ar=True, kr=True, is_pure=True)
class BufDelayL(UGen):
"""
A buffer-based linear-interpolating delay line unit generator.
::
>>> buffer_id = 0
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.BufDelayL.ar(
... buffer_id=buffer_id,
... source=source,
... )
BufDelayL.ar()
"""
buffer_id = param(None)
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
@ugen(ar=True, kr=True, is_pure=True)
class BufDelayN(UGen):
"""
A buffer-based non-interpolating delay line unit generator.
::
>>> buffer_id = 0
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.BufDelayN.ar(
... buffer_id=buffer_id,
... source=source,
... )
BufDelayN.ar()
"""
buffer_id = param(None)
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
@ugen(ar=True, kr=True, is_pure=True)
class CombC(UGen):
"""
A cubic-interpolating comb delay line unit generator.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.CombC.ar(source=source)
CombC.ar()
"""
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
decay_time = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class CombL(UGen):
"""
    A linear-interpolating comb delay line unit generator.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.CombL.ar(source=source)
CombL.ar()
"""
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
decay_time = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class CombN(UGen):
"""
A non-interpolating comb delay line unit generator.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.CombN.ar(source=source)
CombN.ar()
"""
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
decay_time = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class DelTapRd(UGen):
"""
A delay tap reader unit generator.
::
>>> buffer_id = 0
>>> source = supriya.ugens.In.ar(bus=0)
>>> tapin = supriya.ugens.DelTapWr.ar(
... buffer_id=buffer_id,
... source=source,
... )
::
>>> tapin
DelTapWr.ar()
::
>>> tapout = supriya.ugens.DelTapRd.ar(
... buffer_id=buffer_id,
... phase=tapin,
... delay_time=0.1,
... interpolation=True,
... )
::
>>> tapout
DelTapRd.ar()
"""
buffer_id = param(None)
phase = param(None)
delay_time = param(0.0)
interpolation = param(1.0)
@ugen(ar=True, kr=True, is_pure=True)
class DelTapWr(UGen):
"""
A delay tap writer unit generator.
::
>>> buffer_id = 0
>>> source = supriya.ugens.In.ar(bus=0)
>>> tapin = supriya.ugens.DelTapWr.ar(
... buffer_id=buffer_id,
... source=source,
... )
::
>>> tapin
DelTapWr.ar()
::
>>> tapout = supriya.ugens.DelTapRd.ar(
... buffer_id=buffer_id,
... phase=tapin,
... delay_time=0.1,
... interpolation=True,
... )
::
>>> tapout
DelTapRd.ar()
"""
buffer_id = param(None)
source = param(None)
@ugen(ar=True, kr=True, is_pure=True)
class DelayC(UGen):
"""
A cubic-interpolating delay line unit generator.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.DelayC.ar(source=source)
DelayC.ar()
"""
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
@ugen(ar=True, kr=True, is_pure=True)
class DelayL(UGen):
"""
A linear-interpolating delay line unit generator.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.DelayL.ar(source=source)
DelayL.ar()
"""
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
@ugen(ar=True, kr=True, is_pure=True)
class DelayN(UGen):
"""
A non-interpolating delay line unit generator.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.DelayN.ar(source=source)
DelayN.ar()
"""
source = param(None)
maximum_delay_time = param(0.2)
delay_time = param(0.2)
@ugen(ar=True, kr=True, is_pure=True)
class Delay1(UGen):
"""
A one-sample delay line unit generator.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.Delay1.ar(source=source)
Delay1.ar()
"""
source = param(None)
@ugen(ar=True, kr=True, is_pure=True)
class Delay2(UGen):
"""
A two-sample delay line unit generator.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> supriya.ugens.Delay2.ar(source=source)
Delay2.ar()
"""
source = param(None)
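# A short sketch tying the pieces above together (hedged: assumes the
# `supriya` package is importable and follows the same call style as the
# doctests in this file): a comb echo reading from input bus 0.
#
# import supriya
# source = supriya.ugens.In.ar(bus=0)
# echo = supriya.ugens.CombC.ar(
#     source=source,
#     maximum_delay_time=0.5,
#     delay_time=0.25,
#     decay_time=2.0,
# )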
# ==== walkccc/LeetCode :: /solutions/0031. Next Permutation/0031.py (MIT) ====
from typing import List
class Solution:
def nextPermutation(self, nums: List[int]) -> None:
n = len(nums)
# From back to front, find the first num < nums[i + 1]
i = n - 2
while i >= 0:
if nums[i] < nums[i + 1]:
break
i -= 1
# From back to front, find the first num > nums[i], swap it with nums[i]
if i >= 0:
for j in range(n - 1, i, -1):
if nums[j] > nums[i]:
nums[i], nums[j] = nums[j], nums[i]
break
def reverse(nums: List[int], l: int, r: int) -> None:
while l < r:
nums[l], nums[r] = nums[r], nums[l]
l += 1
r -= 1
# Reverse nums[i + 1..n - 1]
reverse(nums, i + 1, len(nums) - 1)
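# A quick sanity check (a sketch, not part of the original solution):
# nextPermutation mutates `nums` in place and returns None.
if __name__ == "__main__":
    nums = [1, 2, 3]
    Solution().nextPermutation(nums)
    assert nums == [1, 3, 2]
    nums = [3, 2, 1]  # a fully descending array wraps around to sorted order
    Solution().nextPermutation(nums)
    assert nums == [1, 2, 3]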
# ==== facebookresearch/ReAgent :: /reagent/training/cb/supervised_trainer.py (BSD-3-Clause) ====
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import torch
from reagent.core.types import CBInput
from reagent.gym.policies.policy import Policy
from reagent.training.cb.base_trainer import BaseCBTrainerWithEval
logger = logging.getLogger(__name__)
LOSS_TYPES = {
"mse": torch.nn.functional.mse_loss,
"mae": torch.nn.functional.l1_loss,
"cross_entropy": torch.nn.functional.binary_cross_entropy,
}
class SupervisedTrainer(BaseCBTrainerWithEval):
"""
The trainer with a supervised learning loss. Supports Cross-Entropy, MSE and MAE losses.
Args:
policy: The policy to be trained.
"""
def __init__(
self,
policy: Policy,
loss_type: str = "mse", # one of the LossTypes names
lr: float = 1e-3,
weight_decay: float = 0.0,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.scorer = policy.scorer
self.lr = lr
self.weight_decay = weight_decay
self.loss = LOSS_TYPES[loss_type]
def configure_optimizers(self) -> torch.optim.Optimizer:
return torch.optim.Adam(
self.parameters(), lr=self.lr, weight_decay=self.weight_decay
)
def cb_training_step(
self, batch: CBInput, batch_idx: int, optimizer_idx: int = 0
) -> torch.Tensor:
assert batch.label is not None # to satisfy Pyre
# compute the NN loss
model_output = self.scorer(batch.features_of_chosen_arm)
pred_label = model_output["pred_label"]
        # The supervised learning model outputs a predicted label with no
        # uncertainty (uncertainty = ucb_alpha * pred_sigma).
        # Weighted average loss:
losses = self.loss(pred_label, batch.label.squeeze(-1), reduction="none")
weight = batch.effective_weight
return (losses * weight.squeeze(-1)).sum() / losses.shape[0]
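# Construction sketch (hedged): `my_policy` is a placeholder for a
# reagent.gym.policies.policy.Policy whose scorer maps chosen-arm features to
# a dict containing "pred_label"; only arguments visible in __init__ above
# are used here.
#
# trainer = SupervisedTrainer(policy=my_policy, loss_type="mae", lr=1e-4,
#                             weight_decay=1e-5)
# optimizer = trainer.configure_optimizers()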
# ==== videoflow/videoflow :: /videoflow/core/processor.py (MIT) ====
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
class Processor:
def process(self, item):
raise NotImplementedError('Subclass must implement method')
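# A minimal subclass sketch showing the intended contract: override `process`
# and return the transformed item. The class and payload below are
# illustrative only.
class UppercaseProcessor(Processor):
    def process(self, item):
        # Transform one item at a time; here, uppercase a string payload.
        return item.upper()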
# ==== pulp-platform/snitch :: /sw/vendor/riscv-tests/debug/targets/RISC-V/spike32.py (Apache-2.0) ====
import targets
import testlib
class spike32_hart(targets.Hart):
xlen = 32
ram = 0x10000000
ram_size = 0x10000000
bad_address = ram - 8
instruction_hardware_breakpoint_count = 4
reset_vectors = [0x1000]
link_script_path = "spike32.lds"
class spike32(targets.Target):
harts = [spike32_hart(misa=0x4034112d)]
openocd_config_path = "spike-1.cfg"
timeout_sec = 30
implements_custom_test = True
support_memory_sampling = False # Needs SBA
freertos_binary = "bin/RTOSDemo32.axf"
def create(self):
# 64-bit FPRs on 32-bit target
return testlib.Spike(self, isa="RV32IMAFDCV", dmi_rti=4,
support_abstract_csr=True, support_haltgroups=False,
# elen must be at least 64 because D is supported.
elen=64)
# ==== idiap/fast-transformers :: /tests/aggregate/test_clustered_aggregate_cpu.py (MIT) ====
#
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
# Apoorv Vyas <avyas@idiap.ch>
#
import unittest
import os
import numpy as np
import time
import torch
try:
from fast_transformers.aggregate import clustered_aggregate, \
clustered_broadcast
except ImportError:
pass
class TestAggregateCPU(unittest.TestCase):
def test_aggregate(self):
N = 2
H = 4
L = 80
E = 2
C = 4
for i in range(30):
C = np.random.randint(5, 10)
L = np.random.randint(1, 30) * C
E = np.random.randint(10, 128)
if os.getenv("VERBOSE_TESTS", ""):
print(("Testing: N H L E C: "
"{} {} {} {} {}").format(N, H, L, E, C))
x = torch.rand((N, H, L, E)).cpu()
g = (torch.arange(L) % C).view(1, 1, L).repeat(N, H, 1).int().cpu()
f = torch.ones(N, H, C).cpu() * (C / L)
counts = torch.ones_like(f, dtype=torch.int32) * (L // C)
y = torch.zeros(N, H, C, E).cpu()
lengths = torch.full((N,), L, dtype=torch.int32).to(x.device)
sorted_g, sorted_gi = torch.sort(g.view(N*H, -1), dim=-1)
sorted_rev_gi = torch.argsort(sorted_gi, dim=-1)
q_offset = torch.arange(N*H, device=x.device).unsqueeze(-1) * L
q_flat = (sorted_gi + q_offset).reshape(-1)
# sorted queries, keys, values
s_x = x.reshape(-1, E).index_select(0, q_flat).view(N, H, L, E)
y = clustered_aggregate(
s_x, sorted_g.view(N, H, -1), f, lengths, y
)
for i in range(C):
self.assertLess(
torch.abs(
x[:, :, i::C, :].mean(2) - y[:, :, i, :]
).max().item(),
1e-6
)
def test_aggregate_masked(self):
N = 10
H = 3
L = 40
E = 32
C = 4
for i in range(30):
C = np.random.randint(5, 10)
L = np.random.randint(2, 30) * C
E = np.random.randint(10, 128)
if os.getenv("VERBOSE_TESTS", ""):
print(("Testing: N H L E C: "
"{} {} {} {} {}").format(N, H, L, E, C))
x = torch.rand((N, H, L, E)).cpu()
g = (torch.arange(L) % C).view(1, 1, L).repeat(N, H, 1).int().cpu()
g[:, :, -C:] = C + 1
c = (L // C) - 1
lengths = torch.full((N,), L-C, dtype=torch.int32).to(x.device)
f = torch.ones(N, H, C).cpu() / float(c)
counts = torch.ones_like(f, dtype=torch.int32) * c
y = torch.zeros(N, H, C, E).cpu()
sorted_g, sorted_gi = torch.sort(g.view(N*H, -1), dim=-1)
sorted_rev_gi = torch.argsort(sorted_gi, dim=-1)
q_offset = torch.arange(N*H, device=x.device).unsqueeze(-1) * L
q_flat = (sorted_gi + q_offset).reshape(-1)
# sorted queries, keys, values
s_x = x.reshape(-1, E).index_select(0, q_flat).view(N, H, L, E)
y = clustered_aggregate(
s_x, sorted_g.view(N, H, -1), f, lengths, y
)
for i in range(C):
x_m = x[:, :, i::C, :][:, :, :-1, :].mean(2)
self.assertLess(
torch.abs(
x_m - y[:, :, i, :]
).max().item(),
1e-6
)
if __name__ == "__main__":
unittest.main()
# ==== SegmentationBLWX/sssegmentation :: /ssseg/modules/models/segmentors/isnet/__init__.py (Apache-2.0) ====
'''initialize'''
from .isnet import ISNet
from .imagelevel import ImageLevelContext
from .semanticlevel import SemanticLevelContext
# ==== google/mobly :: /mobly/snippet/callback_event.py (Apache-2.0) ====
# Copyright 2022 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The class that represents callback events for Mobly Snippet Lib."""
def from_dict(event_dict):
"""Creates a CallbackEvent object from a dictionary.
Args:
event_dict: dict, a dictionary representing an event.
Returns:
A CallbackEvent object.
"""
return CallbackEvent(callback_id=event_dict['callbackId'],
name=event_dict['name'],
creation_time=event_dict['time'],
data=event_dict['data'])
class CallbackEvent:
"""The class that represents callback events for Mobly Snippet Library.
Attributes:
callback_id: str, the callback ID associated with the event.
name: str, the name of the event.
creation_time: int, the epoch time when the event is created on the
RPC server side.
data: dict, the data held by the event. Can be None.
"""
def __init__(self, callback_id, name, creation_time, data):
self.callback_id = callback_id
self.name = name
self.creation_time = creation_time
self.data = data
def __repr__(self):
return (
f'CallbackEvent(callback_id: {self.callback_id}, name: {self.name}, '
f'creation_time: {self.creation_time}, data: {self.data})')
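if __name__ == '__main__':
    # Round-trip sketch with illustrative values, using only the keys that
    # from_dict() consumes above.
    event = from_dict({
        'callbackId': '2-1',
        'name': 'onReceiverStarted',
        'time': 1658900000000,
        'data': {'isSuccess': True},
    })
    print(event)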
# ==== GhostManager/Ghostwriter :: /ghostwriter/reporting/migrations/0004_report_delivered.py (BSD-3-Clause) ====
# Generated by Django 2.2.3 on 2019-08-27 18:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("reporting", "0003_findingnote"),
]
operations = [
migrations.AddField(
model_name="report",
name="delivered",
field=models.BooleanField(
default=False, help_text="Delivery status of the report", verbose_name="Delivered"
),
),
]
# ==== openshift/openshift-tools :: /openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/openshift_health_checker/test/sdn_tests.py (Apache-2.0) ====
import pytest
from openshift_checks.sdn import SDNCheck
from openshift_checks import OpenShiftCheckException
def fake_execute_module(*args):
raise AssertionError('this function should not be called')
def test_check_nodes_missing_node_name():
task_vars = dict(
group_names=['oo_nodes_to_config'],
)
check = SDNCheck(fake_execute_module, task_vars)
check.run()
assert 1 == len(check.failures)
assert 'Could not determine node name' in str(check.failures[0])
def test_check_master():
nodes = [
{
'apiVersion': 'v1',
'kind': 'Node',
'metadata': {
'annotations': {'kubernetes.io/hostname': 'node1'},
'name': 'ip-172-31-50-1.ec2.internal'
},
'status': {
'addresses': [
{'address': '172.31.50.1', 'type': 'InternalIP'},
{'address': '52.0.0.1', 'type': 'ExternalIP'},
{
'address': 'ip-172-31-50-1.ec2.internal',
'type': 'Hostname'
}
]
}
},
{
'apiVersion': 'v1',
'kind': 'Node',
'metadata': {'name': 'ip-172-31-50-2.ec2.internal'},
'status': {
'addresses': [
{'address': '172.31.50.2', 'type': 'InternalIP'},
{'address': '52.0.0.2', 'type': 'ExternalIP'},
{
'address': 'ip-172-31-50-2.ec2.internal',
'type': 'Hostname'
}
]
}
}
]
task_vars = dict(
group_names=['oo_masters_to_config'],
resources=dict(results=[
dict(item='nodes', results=dict(results=[dict(items=nodes)])),
dict(item='pods', results=dict(results=[dict(items={})])),
dict(item='services', results=dict(results=[dict(items={})]))
])
)
node_addresses = {
node['metadata']['name']: {
address['type']: address['address']
for address
in node['status']['addresses']
}
for node in nodes
}
expected_hostnames = [addresses['Hostname']
for addresses in node_addresses.values()]
uri_hostnames = []
resolve_address_hostnames = []
def execute_module(module_name, args, *_):
if module_name == 'uri':
for hostname in expected_hostnames:
if hostname in args['url']:
uri_hostnames.append(hostname)
return {}
raise ValueError('unexpected url: %s' % args['url'])
raise ValueError('not expecting module %s' % module_name)
    def resolve_address(address):
        for hostname in expected_hostnames:
            if address == hostname:
                resolve_address_hostnames.append(hostname)
                return node_addresses[hostname]['InternalIP']
        raise ValueError('unexpected address: %s' % address)
check = SDNCheck(execute_module, task_vars)
check.resolve_address = resolve_address
check.run()
assert 0 == len(check.failures)
assert set(expected_hostnames) == set(uri_hostnames), 'should try to connect to the kubelet'
assert set(expected_hostnames) == set(resolve_address_hostnames), 'should try to resolve the node\'s address'
def test_check_nodes():
nodes = [
{
'apiVersion': 'v1',
'kind': 'Node',
'metadata': {
'annotations': {'kubernetes.io/hostname': 'node1'},
'name': 'ip-172-31-50-1.ec2.internal'
},
'status': {
'addresses': [
{'address': '172.31.50.1', 'type': 'InternalIP'},
{'address': '52.0.0.1', 'type': 'ExternalIP'},
{
'address': 'ip-172-31-50-1.ec2.internal',
'type': 'Hostname'
}
]
}
},
{
'apiVersion': 'v1',
'kind': 'Node',
'metadata': {'name': 'ip-172-31-50-2.ec2.internal'},
'status': {
'addresses': [
{'address': '172.31.50.2', 'type': 'InternalIP'},
{'address': '52.0.0.2', 'type': 'ExternalIP'},
{
'address': 'ip-172-31-50-2.ec2.internal',
'type': 'Hostname'
}
]
}
}
]
hostsubnets = [
{
'metadata': {
'name': 'ip-172-31-50-1.ec2.internal'
},
'subnet': '10.128.0.1/23'
},
{
'metadata': {
'name': 'ip-172-31-50-2.ec2.internal'
},
'subnet': '10.129.0.1/23'
}
]
task_vars = dict(
group_names=['oo_nodes_to_config'],
resources=dict(results=[
dict(item='nodes', results=dict(results=[dict(items=nodes)])),
dict(item='hostsubnets', results=dict(results=[dict(items=hostsubnets)]))
]),
openshift=dict(node=dict(nodename='foo'))
)
def execute_module(module_name, args, *_):
if module_name == 'command':
return dict(stdout='bogus_container_id')
raise ValueError('not expecting module %s' % module_name)
SDNCheck(execute_module, task_vars).run()
def test_resolve_address():
def execute_module(module_name, args, *_):
if module_name != 'command':
raise ValueError('not expecting module %s' % module_name)
        command_args = args['_raw_params'].split()
        if command_args[0] != '/bin/getent':
            raise ValueError('not expecting command: %s' % args['_raw_params'])
# The expected command_args is ['/bin/getent', 'ahostsv4', 'foo'].
if command_args[2] == 'foo':
return {
'rc': 0,
'stdout': '''1.2.3.4 STREAM bar
1.2.3.4 DGRAM
1.2.3.4 RAW
'''
}
return {'rc': 2}
check = SDNCheck(execute_module, None)
assert check.resolve_address('foo') == '1.2.3.4'
with pytest.raises(OpenShiftCheckException):
check.resolve_address('baz')
def test_no_nodes():
task_vars = dict(
group_names=['oo_masters_to_config'],
resources=dict(results=[
dict(item='nodes', results=dict(results=[dict(items={})])),
dict(item='pods', results=dict(results=[dict(items={})])),
dict(item='services', results=dict(results=[dict(items={})]))
])
)
check = SDNCheck(fake_execute_module, task_vars)
check.run()
assert 1 == len(check.failures)
assert 'No nodes' in str(check.failures[0])
@pytest.mark.parametrize('group_names,expected', [
(['oo_masters_to_config'], True),
(['oo_nodes_to_config'], True),
(['oo_masters_to_config', 'oo_nodes_to_config'], True),
(['oo_masters_to_config', 'oo_etcd_to_config'], True),
([], False),
(['oo_etcd_to_config'], False),
(['lb'], False),
(['nfs'], False),
])
def test_sdn_skip_when_not_master_nor_node(group_names, expected):
task_vars = dict(
group_names=group_names,
openshift_is_atomic=True,
)
assert SDNCheck(None, task_vars).is_active() == expected
# ==== bytedance/byteir :: /frontends/torch-frontend/torch-frontend/python/test/test_model.py (Apache-2.0 and others) ====
import torch
import torch_frontend
from torch_frontend import convert_to_mhlo_via_torch_mlir
def test_resnet18_compile():
import torchvision.models as models
resnet18 = models.resnet18(pretrained=True)
resnet18.train(False)
inputs = torch.ones(1, 3, 224, 224)
module = convert_to_mhlo_via_torch_mlir(resnet18, inputs)
print(module.operation.get_asm(large_elements_limit=10, enable_debug_info=False))
# def test_berttiny_compile():
# from functorch import make_fx
# from transformers import BertForMaskedLM
# bert = BertForMaskedLM.from_pretrained("prajjwal1/bert-tiny",
# return_dict=False)
# bert.train(False)
# inputs = [torch.randint(100, (1, 128))]
# bert = torch.jit.trace(bert, inputs)
# # TODO: support unpack tuple in torch-mlir
# # FX rewrite to unpack return tuple
# torch._C._jit_set_nvfuser_enabled(False)
# fx_g = make_fx(bert)(*inputs)
# fx_g = torch_frontend.preprocess_fx_graph(fx_g)
# fx_g.graph.lint()
# fx_g.recompile()
# bert = torch.jit.trace(fx_g, inputs)
# module = convert_to_mhlo_via_torch_mlir(bert, inputs)
# print(module.operation.get_asm(large_elements_limit=10, enable_debug_info=False))
# ==== angr/angr :: /angr/knowledge_base/__init__.py (BSD-2-Clause) ====
from .knowledge_base import KnowledgeBase
# ==== mesonbuild/meson :: /test cases/python/2 extmodule/subinst/printer.py (Apache-2.0) ====
#!/usr/bin/env python3
print('subinst')
# ==== Submitty/Submitty :: /migration/migrator/migrations/system/20190306102812_python_permissions.py (BSD-3-Clause, MIT) ====
from pathlib import Path
import subprocess
def up(config):
for path in Path('/usr/local/lib').glob('python*'):
subprocess.call(['find', Path(path, 'dist-packages'), '-type', 'd', '-exec', 'chmod', '755', '{}', '+'])
subprocess.call(['find', Path(path, 'dist-packages'), '-type', 'f', '-exec', 'chmod', '755', '{}', '+'])
subprocess.call(['find', Path(path, 'dist-packages'), '-type', 'f', '-name', '*.py*', '-exec', 'chmod', '644', '{}', '+'])
if Path(path, 'dist-packages', 'pam.py').exists():
subprocess.call(['chown', 'root:staff', Path(path, 'dist-packages', 'pam.py')])
# ==== panda3d/panda3d :: /direct/src/distributed/DistributedObjectAI.py (BSD-3-Clause, BSD-2-Clause) ====
"""DistributedObjectAI module: contains the DistributedObjectAI class"""
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.distributed.DistributedObjectBase import DistributedObjectBase
from direct.showbase.MessengerGlobal import messenger
from direct.showbase import PythonUtil
#from PyDatagram import PyDatagram
#from PyDatagramIterator import PyDatagramIterator
class DistributedObjectAI(DistributedObjectBase):
notify = directNotify.newCategory("DistributedObjectAI")
QuietZone = 1
def __init__(self, air):
if not hasattr(self, 'DistributedObjectAI_initialized'):
self.DistributedObjectAI_initialized = 1
DistributedObjectBase.__init__(self, air)
self.accountName=''
# Record the repository
self.air = air
# Record our distributed class
className = self.__class__.__name__
self.dclass = self.air.dclassesByName[className]
# init doId pre-allocated flag
self.__preallocDoId = 0
# used to track zone changes across the quiet zone
# NOTE: the quiet zone is defined in OTP, but we need it
# here.
self.lastNonQuietZone = None
self._DOAI_requestedDelete = False
# These are used to implement beginBarrier().
self.__nextBarrierContext = 0
self.__barriers = {}
self.__generated = False
# reference count for multiple inheritance
self.__generates = 0
self._zoneData = None
# Uncomment if you want to debug DO leaks
#def __del__(self):
# """
# For debugging purposes, this just prints out what got deleted
# """
# print ("Destructing: " + self.__class__.__name__)
if __debug__:
def status(self, indent=0):
"""
print out doId(parentId, zoneId) className
and conditionally show generated or deleted
"""
spaces = ' ' * (indent + 2)
try:
print("%s%s:" % (' ' * indent, self.__class__.__name__))
flags = []
if self.__generated:
flags.append("generated")
if self.air is None:
flags.append("deleted")
flagStr = ""
if len(flags) > 0:
flagStr = " (%s)" % (" ".join(flags))
print("%sfrom DistributedObject doId:%s, parent:%s, zone:%s%s" % (
spaces, self.doId, self.parentId, self.zoneId, flagStr))
except Exception as e:
print("%serror printing status %s" % (spaces, e))
def getDeleteEvent(self):
# this is sent just before we get deleted
if hasattr(self, 'doId'):
return 'distObjDelete-%s' % self.doId
return None
def sendDeleteEvent(self):
# this is called just before we get deleted
delEvent = self.getDeleteEvent()
if delEvent:
messenger.send(delEvent)
def getCacheable(self):
""" This method exists only to mirror the similar method on
DistributedObject. AI objects aren't cacheable. """
return False
def deleteOrDelay(self):
""" This method exists only to mirror the similar method on
DistributedObject. AI objects don't have delayDelete, they
just get deleted immediately. """
self.delete()
def getDelayDeleteCount(self):
return 0
def delete(self):
"""
Inheritors should redefine this to take appropriate action on delete
Note that this may be called multiple times if a class inherits
from DistributedObjectAI more than once.
"""
self.__generates -= 1
if self.__generates < 0:
self.notify.debug('DistributedObjectAI: delete() called more times than generate()')
if self.__generates == 0:
# prevent this code from executing multiple times
if self.air is not None:
# self.doId may not exist. The __dict__ syntax works around that.
assert self.notify.debug('delete(): %s' % (self.__dict__.get("doId")))
#if not self._DOAI_requestedDelete:
# # this logs every delete that was not requested by us.
# # TODO: this currently prints warnings for deletes of objects
# # that we did not create. We need to add a 'locally created'
# # flag to every object to filter these out.
#
# DistributedObjectAI.notify.warning(
# 'delete() called but requestDelete never called for %s: %s'
# % (self.__dict__.get('doId'), self.__class__.__name__))
#
# # print a stack trace so we can detect whether this is the
# # result of a network msg.
# # this is slow.
# from direct.showbase.PythonUtil import StackTrace
# DistributedObjectAI.notify.warning(
# 'stack trace: %s' % StackTrace())
self._DOAI_requestedDelete = False
self.releaseZoneData()
# Clean up all the pending barriers.
for barrier in self.__barriers.values():
barrier.cleanup()
self.__barriers = {}
# DCR: I've re-enabled this block of code so that Toontown's
# AI won't leak channels.
# Let me know if it causes trouble.
### Asad: As per Roger's suggestion, turn off the following
### block until a solution is thought out of how to prevent
### this delete message or to handle this message better
# TODO: do we still need this check?
if not getattr(self, "doNotDeallocateChannel", False):
if self.air:
self.air.deallocateChannel(self.doId)
self.air = None
self.parentId = None
self.zoneId = None
self.__generated = False
def isDeleted(self):
"""
Returns true if the object has been deleted,
        or if it is brand new and hasn't yet been generated.
"""
return self.air is None
def isGenerated(self):
"""
Returns true if the object has been generated
"""
return self.__generated
def getDoId(self):
"""
Return the distributed object id
"""
return self.doId
def preAllocateDoId(self):
"""
objects that need to have a doId before they are generated
can call this to pre-allocate a doId for the object
"""
assert not self.__preallocDoId
self.doId = self.air.allocateChannel()
self.__preallocDoId = 1
def announceGenerate(self):
"""
Called after the object has been generated and all
of its required fields filled in. Overwrite when needed.
"""
def b_setLocation(self, parentId, zoneId):
self.d_setLocation(parentId, zoneId)
self.setLocation(parentId, zoneId)
def d_setLocation(self, parentId, zoneId):
self.air.sendSetLocation(self, parentId, zoneId)
def setLocation(self, parentId, zoneId):
# Prevent Duplicate SetLocations for being Called
if self.parentId == parentId and self.zoneId == zoneId:
return
oldParentId = self.parentId
oldZoneId = self.zoneId
self.air.storeObjectLocation(self, parentId, zoneId)
if oldParentId != parentId or oldZoneId != zoneId:
self.releaseZoneData()
messenger.send(self.getZoneChangeEvent(), [zoneId, oldZoneId])
# if we are not going into the quiet zone, send a 'logical' zone
# change message
if zoneId != DistributedObjectAI.QuietZone:
lastLogicalZone = oldZoneId
if oldZoneId == DistributedObjectAI.QuietZone:
lastLogicalZone = self.lastNonQuietZone
self.handleLogicalZoneChange(zoneId, lastLogicalZone)
self.lastNonQuietZone = zoneId
def getLocation(self):
try:
if self.parentId <= 0 and self.zoneId <= 0:
return None
# This is a -1 stuffed into a uint32
if self.parentId == 0xffffffff and self.zoneId == 0xffffffff:
return None
return (self.parentId, self.zoneId)
except AttributeError:
return None
def postGenerateMessage(self):
self.__generated = True
messenger.send(self.uniqueName("generate"), [self])
def updateRequiredFields(self, dclass, di):
dclass.receiveUpdateBroadcastRequired(self, di)
self.announceGenerate()
self.postGenerateMessage()
def updateAllRequiredFields(self, dclass, di):
dclass.receiveUpdateAllRequired(self, di)
self.announceGenerate()
self.postGenerateMessage()
def updateRequiredOtherFields(self, dclass, di):
dclass.receiveUpdateBroadcastRequired(self, di)
# Announce generate after updating all the required fields,
# but before we update the non-required fields.
self.announceGenerate()
self.postGenerateMessage()
dclass.receiveUpdateOther(self, di)
def updateAllRequiredOtherFields(self, dclass, di):
dclass.receiveUpdateAllRequired(self, di)
# Announce generate after updating all the required fields,
# but before we update the non-required fields.
self.announceGenerate()
self.postGenerateMessage()
dclass.receiveUpdateOther(self, di)
def startMessageBundle(self, name):
self.air.startMessageBundle(name)
def sendMessageBundle(self):
self.air.sendMessageBundle(self.doId)
def getZoneChangeEvent(self):
# this event is generated whenever this object changes zones.
# arguments are newZoneId, oldZoneId
# includes the quiet zone.
return DistributedObjectAI.staticGetZoneChangeEvent(self.doId)
def getLogicalZoneChangeEvent(self):
# this event is generated whenever this object changes to a
# non-quiet-zone zone.
# arguments are newZoneId, oldZoneId
# does not include the quiet zone.
return DistributedObjectAI.staticGetLogicalZoneChangeEvent(self.doId)
@staticmethod
def staticGetZoneChangeEvent(doId):
return 'DOChangeZone-%s' % doId
@staticmethod
def staticGetLogicalZoneChangeEvent(doId):
return 'DOLogicalChangeZone-%s' % doId
def handleLogicalZoneChange(self, newZoneId, oldZoneId):
"""this function gets called as if we never go through the
quiet zone. Note that it is called once you reach the newZone,
and not at the time that you leave the oldZone."""
messenger.send(self.getLogicalZoneChangeEvent(),
[newZoneId, oldZoneId])
def getZoneData(self):
# Call this to get an AIZoneData object for the current zone.
# This class will hold onto it as self._zoneData
# setLocation destroys self._zoneData if we move away to
# a different zone
if self._zoneData is None:
from otp.ai.AIZoneData import AIZoneData # type: ignore[import]
self._zoneData = AIZoneData(self.air, self.parentId, self.zoneId)
return self._zoneData
def releaseZoneData(self):
# You can call this to release any AIZoneData object that we might be
# holding onto. If we're the last one for the current zone, the data
# will be destroyed (render, collision traverser, etc.)
# Note that the AIZoneData object that we're holding will be destroyed
# automatically when we move away or are destroyed.
if self._zoneData is not None:
self._zoneData.destroy()
self._zoneData = None
def getRender(self):
# note that this will return a different node if we change zones
#return self.air.getRender(self.zoneId)
return self.getZoneData().getRender()
def getNonCollidableParent(self):
return self.getZoneData().getNonCollidableParent()
def getParentMgr(self):
#return self.air.getParentMgr(self.zoneId)
return self.getZoneData().getParentMgr()
def getCollTrav(self, *args, **kArgs):
return self.getZoneData().getCollTrav(*args, **kArgs)
def sendUpdate(self, fieldName, args = []):
assert self.notify.debugStateCall(self)
if self.air:
self.air.sendUpdate(self, fieldName, args)
def GetPuppetConnectionChannel(self, doId):
return doId + (1001 << 32)
def GetAccountConnectionChannel(self, doId):
return doId + (1003 << 32)
def GetAccountIDFromChannelCode(self, channel):
return channel >> 32
def GetAvatarIDFromChannelCode(self, channel):
return channel & 0xffffffff
def sendUpdateToAvatarId(self, avId, fieldName, args):
assert self.notify.debugStateCall(self)
channelId = self.GetPuppetConnectionChannel(avId)
self.sendUpdateToChannel(channelId, fieldName, args)
def sendUpdateToAccountId(self, accountId, fieldName, args):
assert self.notify.debugStateCall(self)
channelId = self.GetAccountConnectionChannel(accountId)
self.sendUpdateToChannel(channelId, fieldName, args)
def sendUpdateToChannel(self, channelId, fieldName, args):
assert self.notify.debugStateCall(self)
if self.air:
self.air.sendUpdateToChannel(self, channelId, fieldName, args)
def generateWithRequired(self, zoneId, optionalFields=[]):
assert self.notify.debugStateCall(self)
# have we already allocated a doId?
if self.__preallocDoId:
self.__preallocDoId = 0
return self.generateWithRequiredAndId(
self.doId, zoneId, optionalFields)
# The repository is the one that really does the work
parentId = self.air.districtId
self.air.generateWithRequired(self, parentId, zoneId, optionalFields)
self.generate()
self.announceGenerate()
self.postGenerateMessage()
# this is a special generate used for estates, or anything else that
# needs to have a hard coded doId as assigned by the server
def generateWithRequiredAndId(self, doId, parentId, zoneId, optionalFields=[]):
assert self.notify.debugStateCall(self)
# have we already allocated a doId?
if self.__preallocDoId:
assert doId == self.doId
self.__preallocDoId = 0
# The repository is the one that really does the work
self.air.generateWithRequiredAndId(self, doId, parentId, zoneId, optionalFields)
self.generate()
self.announceGenerate()
self.postGenerateMessage()
def generateOtpObject(self, parentId, zoneId, optionalFields=[], doId=None):
assert self.notify.debugStateCall(self)
# have we already allocated a doId?
if self.__preallocDoId:
assert doId is None or doId == self.doId
doId=self.doId
self.__preallocDoId = 0
# Assign it an id
if doId is None:
self.doId = self.air.allocateChannel()
else:
self.doId = doId
# Put the new DO in the dictionaries
self.air.addDOToTables(self, location=(parentId, zoneId))
# Send a generate message
self.sendGenerateWithRequired(self.air, parentId, zoneId, optionalFields)
self.generate()
self.announceGenerate()
self.postGenerateMessage()
def generate(self):
"""
Inheritors should put functions that require self.zoneId or
other networked info in this function.
"""
assert self.notify.debugStateCall(self)
self.__generates += 1
def generateInit(self, repository=None):
"""
First generate (not from cache).
"""
assert self.notify.debugStateCall(self)
def generateTargetChannel(self, repository):
"""
Who to send this to for generate messages
"""
if hasattr(self, "dbObject"):
return self.doId
return repository.serverId
def sendGenerateWithRequired(self, repository, parentId, zoneId, optionalFields=[]):
assert self.notify.debugStateCall(self)
dg = self.dclass.aiFormatGenerate(
self, self.doId, parentId, zoneId,
#repository.serverId,
self.generateTargetChannel(repository),
repository.ourChannel,
optionalFields)
repository.send(dg)
def initFromServerResponse(self, valDict):
assert self.notify.debugStateCall(self)
# This is a special method used for estates, etc., which get
# their fields set from the database indirectly by way of the
# AI. The input parameter is a dictionary of field names to
# datagrams that describes the initial field values from the
# database.
dclass = self.dclass
for key, value in valDict.items():
# Update the field
dclass.directUpdate(self, key, value)
def requestDelete(self):
assert self.notify.debugStateCall(self)
if not self.air:
doId = "none"
if hasattr(self, "doId"):
doId = self.doId
self.notify.warning(
"Tried to delete a %s (doId %s) that is already deleted" %
(self.__class__, doId))
return
self.air.requestDelete(self)
self._DOAI_requestedDelete = True
def taskName(self, taskString):
return "%s-%s" % (taskString, self.doId)
def uniqueName(self, idString):
return "%s-%s" % (idString, self.doId)
def validate(self, avId, bool, msg):
if not bool:
self.air.writeServerEvent('suspicious', avId, msg)
self.notify.warning('validate error: avId: %s -- %s' % (avId, msg))
return bool
def beginBarrier(self, name, avIds, timeout, callback):
# Begins waiting for a set of avatars. When all avatars in
# the list have reported back in or the callback has expired,
# calls the indicated callback with the list of avatars that
# made it through. There may be multiple barriers waiting
# simultaneously on different lists of avatars, although they
# should have different names.
from otp.ai import Barrier # type: ignore[import]
context = self.__nextBarrierContext
# We assume the context number is passed as a uint16.
self.__nextBarrierContext = (self.__nextBarrierContext + 1) & 0xffff
assert self.notify.debug('beginBarrier(%s, %s, %s, %s)' % (context, name, avIds, timeout))
if avIds:
barrier = Barrier.Barrier(
name, self.uniqueName(name), avIds, timeout,
doneFunc = PythonUtil.Functor(
self.__barrierCallback, context, callback))
self.__barriers[context] = barrier
# Send the context number to each involved client.
self.sendUpdate("setBarrierData", [self.getBarrierData()])
else:
# No avatars; just call the callback immediately.
callback(avIds)
return context
def getBarrierData(self):
# Returns the barrier data formatted for sending to the
# clients. This lists all of the current outstanding barriers
# and the avIds waiting for them.
data = []
for context, barrier in self.__barriers.items():
avatars = barrier.pendingAvatars
if avatars:
data.append((context, barrier.name, avatars))
return data
def ignoreBarrier(self, context):
# Aborts a previously-set barrier. The context is the return
# value from the previous call to beginBarrier().
barrier = self.__barriers.get(context)
if barrier:
barrier.cleanup()
del self.__barriers[context]
def setBarrierReady(self, context):
# Generated by the clients to check in after a beginBarrier()
# call.
avId = self.air.getAvatarIdFromSender()
assert self.notify.debug('setBarrierReady(%s, %s)' % (context, avId))
barrier = self.__barriers.get(context)
if barrier is None:
# This may be None if a client was slow and missed an
# earlier timeout. Too bad.
return
barrier.clear(avId)
def __barrierCallback(self, context, callback, avIds):
assert self.notify.debug('barrierCallback(%s, %s)' % (context, avIds))
# The callback that is generated when a barrier is completed.
barrier = self.__barriers.get(context)
if barrier:
barrier.cleanup()
del self.__barriers[context]
callback(avIds)
else:
self.notify.warning("Unexpected completion from barrier %s" % (context))
def isGridParent(self):
# If this distributed object is a DistributedGrid return 1. 0 by default
return 0
def execCommand(self, string, mwMgrId, avId, zoneId):
pass
def _retrieveCachedData(self):
""" This is a no-op on the AI. """
def setAI(self, aiChannel):
self.air.setAI(self.doId, aiChannel)
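# Barrier usage sketch (hedged): `self` is a generated subclass of
# DistributedObjectAI and `avIds` a list of avatar doIds. The method and
# handler names below are illustrative only.
#
# def startMinigame(self, avIds):
#     self.beginBarrier('minigame-ready', avIds, 30, self.__allReady)
#
# def __allReady(self, avIds):
#     # Receives the avatars that checked in via setBarrierReady()
#     # before the 30-second timeout expired.
#     ...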
# ==== nneonneo/eqgrp-free-file :: /Firewall/BLATSTING/BLATSTING_21680/utils/decodeDate.py (no license) ====
#! /usr/bin/env python3
import sys
import re
def decode_status(date_string):
    m = re.search(r'(\d{2}):(\d{2}):(\d{2})\.(\d{3})', date_string)
    if m is None:
        return -1
    hh, mm, ss, ms = m.groups()
    nhh = int(hh, 10)
    nmm = int(mm, 10)
    nss = int(ss, 10)
    nms = int(ms, 10)
    result = nms - ((nhh * nmm * nss) % 1000)
    if result < 0:
        result += 1000
    return result
def usage():
    print('Usage: %s <date> [ <date2> [ <date3> ... ] ]' % sys.argv[0])
if len(sys.argv) == 1:
    usage()
for arg in sys.argv[1:]:
    ret = decode_status(arg)
    if ret != -1:
        print('%s: %d' % (arg, ret))
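# Worked example (a sketch): for '12:34:56.789', (12 * 34 * 56) % 1000 == 848,
# so the decoded value is (789 - 848) + 1000 == 941:
#   $ ./decodeDate.py '12:34:56.789'
#   12:34:56.789: 941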
# ==== AirtestProject/Airtest :: /playground/win_ide.py (Apache-2.0) ====
# -*- coding: utf-8 -*-
import os
import win32gui
from pywinauto.win32structures import RECT
from airtest import aircv
from airtest.core.win.win import Windows, screenshot
class WindowsInIDE(Windows):
"""Windows Device in Airtest-IDE"""
def __init__(self, handle=None, dpifactor=1, **kwargs):
if isinstance(handle, str) and handle.isdigit():
handle = int(handle)
super(WindowsInIDE, self).__init__(handle, dpifactor=dpifactor, **kwargs)
self.handle = handle
def connect(self, **kwargs):
"""
        Connect to the window and bring it to the foreground
Args:
**kwargs: optional arguments
Returns:
None
"""
self.app = self._app.connect(**kwargs)
try:
self._top_window = self.app.top_window().wrapper_object()
if kwargs.get("foreground", True) in (True, "True", "true"):
self.set_foreground()
except RuntimeError:
self._top_window = None
def get_rect(self):
"""
Get rectangle of app or desktop resolution
Returns:
RECT(left, top, right, bottom)
"""
if self.handle:
left, top, right, bottom = win32gui.GetWindowRect(self.handle)
return RECT(left, top, right, bottom)
else:
desktop = win32gui.GetDesktopWindow()
left, top, right, bottom = win32gui.GetWindowRect(desktop)
return RECT(left, top, right, bottom)
def snapshot(self, filename="tmp.png"):
"""
        Take a screenshot and save it to the `tmp.png` file by default
        Args:
            filename: name of the file where the screenshot is stored
        Returns:
            the screenshot image (the temporary file is removed after reading)
"""
if not filename:
filename = "tmp.png"
if self.handle:
try:
screenshot(filename, self.handle)
except win32gui.error:
self.handle = None
screenshot(filename)
else:
screenshot(filename)
img = aircv.imread(filename)
os.remove(filename)
return img
if __name__ == '__main__':
from airtest.core.api import G
from airtest.cli.__main__ import main
G.register_custom_device(WindowsInIDE)
main()
|
53fcc3ccc90964730f2bcfc57a71a910502a3a89
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/legacy_test/test_imperative_star_gan_with_gradient_penalty.py
|
62476046e8ab2a19a031153bbf19413af6304284
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 19,510
|
py
|
test_imperative_star_gan_with_gradient_penalty.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
from paddle import _legacy_C_ops, fluid
from paddle.tensor import random
if fluid.is_compiled_with_cuda():
fluid.core.globals()['FLAGS_cudnn_deterministic'] = True
class Config:
def __init__(self, place, sort_sum_gradient=True):
self.place = place
if isinstance(place, fluid.CPUPlace):
# CPU cases are extremely slow
self.g_base_dims = 1
self.d_base_dims = 1
self.g_repeat_num = 1
self.d_repeat_num = 1
self.image_size = 32
else:
self.g_base_dims = 64
self.d_base_dims = 64
self.g_repeat_num = 6
self.d_repeat_num = 6
self.image_size = 256
self.c_dim = 10
self.batch_size = 1
self.seed = 1
self.lambda_rec = 10
self.lambda_gp = 10
self.iterations = 10
self.sort_sum_gradient = sort_sum_gradient
def create_mnist_dataset(cfg):
def create_target_label(label):
return label
# return (label + 1) % cfg.c_dim # fake label target
def create_one_hot(label):
ret = np.zeros([cfg.c_dim])
ret[label] = 1
return ret
def __impl__():
dataset = paddle.dataset.mnist.train()
image_reals = []
label_orgs = []
label_trgs = []
num = 0
for image_real, label_org in dataset():
image_real = np.reshape(np.array(image_real), [28, 28])
image_real = np.resize(image_real, [cfg.image_size, cfg.image_size])
image_real = np.array([image_real] * 3)
label_trg = create_target_label(label_org)
image_reals.append(np.array(image_real))
label_orgs.append(create_one_hot(label_org))
label_trgs.append(create_one_hot(label_trg))
if len(image_reals) == cfg.batch_size:
image_real_np = np.array(image_reals).astype('float32')
label_org_np = np.array(label_orgs).astype('float32')
label_trg_np = np.array(label_trgs).astype('float32')
yield image_real_np, label_org_np, label_trg_np
num += 1
if num == cfg.iterations:
break
image_reals = []
label_orgs = []
label_trgs = []
return __impl__
class InstanceNorm(paddle.nn.Layer):
def __init__(self, num_channels, epsilon=1e-5):
super().__init__()
self.epsilon = epsilon
self.scale = self.create_parameter(shape=[num_channels], is_bias=False)
self.bias = self.create_parameter(shape=[num_channels], is_bias=True)
def forward(self, input):
if fluid.in_dygraph_mode():
out, _, _ = _legacy_C_ops.instance_norm(
input, self.scale, self.bias, 'epsilon', self.epsilon
)
return out
else:
return paddle.static.nn.instance_norm(
input,
epsilon=self.epsilon,
param_attr=fluid.ParamAttr(self.scale.name),
bias_attr=fluid.ParamAttr(self.bias.name),
)
class Conv2DLayer(paddle.nn.Layer):
def __init__(
self,
num_channels,
num_filters=64,
filter_size=7,
stride=1,
padding=0,
norm=None,
use_bias=False,
relufactor=None,
):
super().__init__()
self._conv = paddle.nn.Conv2D(
in_channels=num_channels,
out_channels=num_filters,
kernel_size=filter_size,
stride=stride,
padding=padding,
bias_attr=None if use_bias else False,
)
if norm is not None:
self._norm = InstanceNorm(num_filters)
else:
self._norm = None
self.relufactor = relufactor
def forward(self, input):
conv = self._conv(input)
if self._norm:
conv = self._norm(conv)
if self.relufactor is not None:
conv = paddle.nn.functional.leaky_relu(conv, self.relufactor)
return conv
class Deconv2DLayer(paddle.nn.Layer):
def __init__(
self,
num_channels,
num_filters=64,
filter_size=7,
stride=1,
padding=0,
norm=None,
use_bias=False,
relufactor=None,
):
super().__init__()
self._deconv = paddle.nn.Conv2DTranspose(
num_channels,
num_filters,
filter_size,
stride=stride,
padding=padding,
bias_attr=None if use_bias else False,
)
if norm is not None:
self._norm = InstanceNorm(num_filters)
else:
self._norm = None
self.relufactor = relufactor
def forward(self, input):
deconv = self._deconv(input)
if self._norm:
deconv = self._norm(deconv)
if self.relufactor is not None:
deconv = paddle.nn.functional.leaky_relu(deconv, self.relufactor)
return deconv
class ResidualBlock(paddle.nn.Layer):
def __init__(self, num_channels, num_filters):
super().__init__()
self._conv0 = Conv2DLayer(
num_channels=num_channels,
num_filters=num_filters,
filter_size=3,
stride=1,
padding=1,
norm=True,
relufactor=0,
)
self._conv1 = Conv2DLayer(
num_channels=num_filters,
num_filters=num_filters,
filter_size=3,
stride=1,
padding=1,
norm=True,
relufactor=None,
)
def forward(self, input):
conv0 = self._conv0(input)
conv1 = self._conv1(conv0)
return input + conv1
class Generator(paddle.nn.Layer):
def __init__(self, cfg, num_channels=3):
super().__init__()
conv_base = Conv2DLayer(
num_channels=cfg.c_dim + num_channels,
num_filters=cfg.g_base_dims,
filter_size=7,
stride=1,
padding=3,
norm=True,
relufactor=0,
)
sub_layers = [conv_base]
cur_channels = cfg.g_base_dims
for i in range(2):
sub_layer = Conv2DLayer(
num_channels=cur_channels,
num_filters=cur_channels * 2,
filter_size=4,
stride=2,
padding=1,
norm=True,
relufactor=0,
)
cur_channels *= 2
sub_layers.append(sub_layer)
self._conv0 = paddle.nn.Sequential(*sub_layers)
repeat_num = cfg.g_repeat_num
sub_layers = []
for i in range(repeat_num):
res_block = ResidualBlock(
num_channels=cur_channels, num_filters=cfg.g_base_dims * 4
)
sub_layers.append(res_block)
self._res_block = paddle.nn.Sequential(*sub_layers)
cur_channels = cfg.g_base_dims * 4
sub_layers = []
for i in range(2):
rate = 2 ** (1 - i)
deconv = Deconv2DLayer(
num_channels=cur_channels,
num_filters=cfg.g_base_dims * rate,
filter_size=4,
stride=2,
padding=1,
relufactor=0,
norm=True,
)
cur_channels = cfg.g_base_dims * rate
sub_layers.append(deconv)
self._deconv = paddle.nn.Sequential(*sub_layers)
self._conv1 = Conv2DLayer(
num_channels=cur_channels,
num_filters=3,
filter_size=7,
stride=1,
padding=3,
relufactor=None,
)
def forward(self, input, label_trg):
shape = input.shape
label_trg_e = paddle.reshape(label_trg, [-1, label_trg.shape[1], 1, 1])
label_trg_e = paddle.expand(label_trg_e, [-1, -1, shape[2], shape[3]])
input1 = paddle.concat([input, label_trg_e], 1)
conv0 = self._conv0(input1)
res_block = self._res_block(conv0)
deconv = self._deconv(res_block)
conv1 = self._conv1(deconv)
out = paddle.tanh(conv1)
return out
class Discriminator(paddle.nn.Layer):
def __init__(self, cfg, num_channels=3):
super().__init__()
cur_dim = cfg.d_base_dims
conv_base = Conv2DLayer(
num_channels=num_channels,
num_filters=cur_dim,
filter_size=4,
stride=2,
padding=1,
relufactor=0.2,
)
repeat_num = cfg.d_repeat_num
sub_layers = [conv_base]
for i in range(1, repeat_num):
sub_layer = Conv2DLayer(
num_channels=cur_dim,
num_filters=cur_dim * 2,
filter_size=4,
stride=2,
padding=1,
relufactor=0.2,
)
cur_dim *= 2
sub_layers.append(sub_layer)
self._conv0 = paddle.nn.Sequential(*sub_layers)
kernel_size = int(cfg.image_size / np.power(2, repeat_num))
self._conv1 = Conv2DLayer(
num_channels=cur_dim,
num_filters=1,
filter_size=3,
stride=1,
padding=1,
)
self._conv2 = Conv2DLayer(
num_channels=cur_dim, num_filters=cfg.c_dim, filter_size=kernel_size
)
def forward(self, input):
conv = self._conv0(input)
out1 = self._conv1(conv)
out2 = self._conv2(conv)
return out1, out2
def loss_cls(cls, label, cfg):
cls_shape = cls.shape
cls = paddle.reshape(cls, [-1, cls_shape[1] * cls_shape[2] * cls_shape[3]])
return (
paddle.sum(
paddle.nn.functional.binary_cross_entropy_with_logits(cls, label)
)
/ cfg.batch_size
)
def calc_gradients(outputs, inputs, no_grad_set):
if fluid.in_dygraph_mode():
return fluid.dygraph.grad(
outputs=outputs,
inputs=inputs,
no_grad_vars=no_grad_set,
create_graph=True,
)
else:
return fluid.gradients(
targets=outputs, inputs=inputs, no_grad_set=no_grad_set
)
def gradient_penalty(f, real, fake, no_grad_set, cfg):
def _interpolate(a, b):
shape = [a.shape[0]]
alpha = random.uniform_random_batch_size_like(
input=a, shape=shape, min=0.1, max=1.0, seed=cfg.seed
)
inner = paddle.tensor.math._multiply_with_axis(
b, 1.0 - alpha, axis=0
) + paddle.tensor.math._multiply_with_axis(a, alpha, axis=0)
return inner
x = _interpolate(real, fake)
pred, _ = f(x)
if isinstance(pred, tuple):
pred = pred[0]
gradient = calc_gradients(
outputs=[pred], inputs=[x], no_grad_set=no_grad_set
)
if gradient is None:
return None
gradient = gradient[0]
grad_shape = gradient.shape
gradient = paddle.reshape(
gradient, [-1, grad_shape[1] * grad_shape[2] * grad_shape[3]]
)
epsilon = 1e-16
norm = paddle.sqrt(paddle.sum(paddle.square(gradient), axis=1) + epsilon)
gp = paddle.mean(paddle.square(norm - 1.0))
return gp
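# Note: the value returned above is the standard WGAN-GP penalty term,
#   gp = E[ (||grad_xhat D(xhat)||_2 - 1)^2 ],
# evaluated at points xhat interpolated between real and fake images;
# the small epsilon inside the sqrt guards against a zero gradient norm.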
def get_generator_loss(
image_real, label_org, label_trg, generator, discriminator, cfg
):
fake_img = generator(image_real, label_trg)
rec_img = generator(fake_img, label_org)
g_loss_rec = paddle.mean(paddle.abs(paddle.subtract(image_real, rec_img)))
pred_fake, cls_fake = discriminator(fake_img)
g_loss_fake = -paddle.mean(pred_fake)
g_loss_cls = loss_cls(cls_fake, label_trg, cfg)
g_loss = g_loss_fake + cfg.lambda_rec * g_loss_rec + g_loss_cls
return g_loss
def get_discriminator_loss(
image_real, label_org, label_trg, generator, discriminator, cfg
):
fake_img = generator(image_real, label_trg)
pred_real, cls_real = discriminator(image_real)
pred_fake, _ = discriminator(fake_img)
d_loss_cls = loss_cls(cls_real, label_org, cfg)
d_loss_fake = paddle.mean(pred_fake)
d_loss_real = -paddle.mean(pred_real)
d_loss = d_loss_real + d_loss_fake + d_loss_cls
d_loss_gp = gradient_penalty(
discriminator,
image_real,
fake_img,
set(discriminator.parameters()),
cfg,
)
if d_loss_gp is not None:
d_loss += cfg.lambda_gp * d_loss_gp
return d_loss
def build_optimizer(layer, cfg, loss=None):
learning_rate = 1e-3
beta1 = 0.5
beta2 = 0.999
if fluid.in_dygraph_mode():
return paddle.optimizer.Adam(
learning_rate=learning_rate,
beta1=beta1,
beta2=beta2,
parameters=layer.parameters(),
)
else:
optimizer = paddle.optimizer.Adam(
learning_rate=learning_rate, beta1=beta1, beta2=beta2
)
optimizer.minimize(loss, parameter_list=layer.parameters())
return optimizer
class DyGraphTrainModel:
def __init__(self, cfg):
paddle.seed(1)
paddle.framework.random._manual_program_seed(1)
self.generator = Generator(cfg)
self.discriminator = Discriminator(cfg)
self.g_optimizer = build_optimizer(self.generator, cfg)
self.d_optimizer = build_optimizer(self.discriminator, cfg)
self.cfg = cfg
fluid.set_flags({'FLAGS_sort_sum_gradient': cfg.sort_sum_gradient})
def clear_gradients(self):
if self.g_optimizer:
self.g_optimizer.clear_gradients()
if self.d_optimizer:
self.d_optimizer.clear_gradients()
def run(self, image_real, label_org, label_trg):
image_real = fluid.dygraph.to_variable(image_real)
label_org = fluid.dygraph.to_variable(label_org)
label_trg = fluid.dygraph.to_variable(label_trg)
g_loss = get_generator_loss(
image_real,
label_org,
label_trg,
self.generator,
self.discriminator,
self.cfg,
)
g_loss.backward()
if self.g_optimizer:
self.g_optimizer.minimize(g_loss)
self.clear_gradients()
d_loss = get_discriminator_loss(
image_real,
label_org,
label_trg,
self.generator,
self.discriminator,
self.cfg,
)
d_loss.backward()
if self.d_optimizer:
self.d_optimizer.minimize(d_loss)
self.clear_gradients()
return float(g_loss), float(d_loss)
class StaticGraphTrainModel:
def __init__(self, cfg):
self.cfg = cfg
def create_data_layer():
image_real = paddle.static.data(
shape=[None, 3, cfg.image_size, cfg.image_size],
dtype='float32',
name='image_real',
)
label_org = paddle.static.data(
shape=[None, cfg.c_dim], dtype='float32', name='label_org'
)
label_trg = paddle.static.data(
shape=[None, cfg.c_dim], dtype='float32', name='label_trg'
)
return image_real, label_org, label_trg
paddle.seed(cfg.seed)
paddle.framework.random._manual_program_seed(cfg.seed)
self.gen_program = fluid.Program()
gen_startup_program = fluid.Program()
with fluid.program_guard(self.gen_program, gen_startup_program):
with fluid.unique_name.guard():
image_real, label_org, label_trg = create_data_layer()
generator = Generator(cfg)
discriminator = Discriminator(cfg)
g_loss = get_generator_loss(
image_real,
label_org,
label_trg,
generator,
discriminator,
cfg,
)
build_optimizer(generator, cfg, loss=g_loss)
self.dis_program = fluid.Program()
dis_startup_program = fluid.Program()
with fluid.program_guard(self.dis_program, dis_startup_program):
with fluid.unique_name.guard():
image_real, label_org, label_trg = create_data_layer()
generator = Generator(cfg)
discriminator = Discriminator(cfg)
d_loss = get_discriminator_loss(
image_real,
label_org,
label_trg,
generator,
discriminator,
cfg,
)
build_optimizer(discriminator, cfg, loss=d_loss)
self.executor = fluid.Executor(cfg.place)
self.scope = fluid.Scope()
with fluid.scope_guard(self.scope):
self.executor.run(gen_startup_program)
self.executor.run(dis_startup_program)
self.g_loss = g_loss
self.d_loss = d_loss
def run(self, image_real, label_org, label_trg):
feed = {
'image_real': image_real,
'label_org': label_org,
'label_trg': label_trg,
}
with fluid.scope_guard(self.scope):
g_loss_val = self.executor.run(
self.gen_program, feed=feed, fetch_list=[self.g_loss]
)[0]
d_loss_val = self.executor.run(
self.dis_program, feed=feed, fetch_list=[self.d_loss]
)[0]
return g_loss_val[0], d_loss_val[0]
class TestStarGANWithGradientPenalty(unittest.TestCase):
def func_main(self):
self.place_test(fluid.CPUPlace())
if fluid.is_compiled_with_cuda():
self.place_test(fluid.CUDAPlace(0))
def place_test(self, place):
cfg = Config(place, False)
dataset = create_mnist_dataset(cfg)
dataset = paddle.reader.cache(dataset)
fluid_dygraph_loss = []
with fluid.dygraph.guard(cfg.place):
fluid_dygraph_model = DyGraphTrainModel(cfg)
for batch_id, (image_real, label_org, label_trg) in enumerate(
dataset()
):
loss = fluid_dygraph_model.run(image_real, label_org, label_trg)
fluid_dygraph_loss.append(loss)
eager_dygraph_loss = []
with fluid.dygraph.guard(cfg.place):
eager_dygraph_model = DyGraphTrainModel(cfg)
for batch_id, (image_real, label_org, label_trg) in enumerate(
dataset()
):
loss = eager_dygraph_model.run(image_real, label_org, label_trg)
eager_dygraph_loss.append(loss)
def test_all_cases(self):
self.func_main()
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
|
d03669dd0da0696cc775c23f0d3e1e023344919f
|
b347bc4b850dee4a8a9a171b563a3f31230ce1c7
|
/sktime/utils/tests/test_check_estimator.py
|
6d44fb7af501e1f1dc4d81b1c4cd1d993ad38969
|
[
"BSD-3-Clause"
] |
permissive
|
sktime/sktime
|
5963962df338c5931a2f9f1794d1203c50ddc27e
|
70b2bfaaa597eb31bc3a1032366dcc0e1f4c8a9f
|
refs/heads/main
| 2023-08-22T18:20:08.022950
| 2023-08-22T15:24:39
| 2023-08-22T15:24:39
| 156,401,841
| 1,117
| 268
|
BSD-3-Clause
| 2023-09-14T20:44:21
| 2018-11-06T15:08:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,123
|
py
|
test_check_estimator.py
|
"""Tests for check_estimator."""
__author__ = ["fkiraly"]
import pytest
from sktime.classification.dummy import DummyClassifier
from sktime.forecasting.dummy import ForecastKnownValues
from sktime.transformations.series.exponent import ExponentTransformer
from sktime.utils.estimator_checks import check_estimator
EXAMPLE_CLASSES = [DummyClassifier, ForecastKnownValues, ExponentTransformer]
@pytest.mark.parametrize("estimator_class", EXAMPLE_CLASSES)
def test_check_estimator_passed(estimator_class):
"""Test that check_estimator returns only passed tests for examples we know pass."""
estimator_instance = estimator_class.create_test_instance()
result_class = check_estimator(estimator_class, verbose=False)
assert all(x == "PASSED" for x in result_class.values())
result_instance = check_estimator(estimator_instance, verbose=False)
assert all(x == "PASSED" for x in result_instance.values())
@pytest.mark.parametrize("estimator_class", EXAMPLE_CLASSES)
def test_check_estimator_does_not_raise(estimator_class):
"""Test that check_estimator does not raise exceptions on examples we know pass."""
estimator_instance = estimator_class.create_test_instance()
check_estimator(estimator_class, raise_exceptions=True, verbose=False)
check_estimator(estimator_instance, raise_exceptions=True, verbose=False)
def test_check_estimator_subset_tests():
"""Test that subsetting by tests_to_run and tests_to_exclude works as intended."""
tests_to_run = [
"test_get_params",
"test_set_params",
"test_clone",
"test_repr",
"test_capability_inverse_tag_is_correct",
"test_remember_data_tag_is_correct",
]
tests_to_exclude = ["test_repr", "test_remember_data_tag_is_correct"]
expected_tests = set(tests_to_run).difference(tests_to_exclude)
results = check_estimator(
ExponentTransformer,
verbose=False,
tests_to_run=tests_to_run,
tests_to_exclude=tests_to_exclude,
)
results_tests = {x.split("[")[0] for x in results.keys()}
assert results_tests == expected_tests
|
4d95f1ecc009afdef5b6b4e74b303ad2bfc44f0e
|
6a468c1650b3c083f102f19ace0b0d6e4d0686f7
|
/sympy/functions/elementary/tests/test_interface.py
|
4ef784378cc90fe03cf8b4de78dbde4765caaf79
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
sympy/sympy
|
a5f8accaa7686c59d9b5c94212fef60d746dac4b
|
69f98fb2b0d845e76874067a381dba37b577e8c5
|
refs/heads/master
| 2023-09-01T15:51:37.886107
| 2023-08-31T20:54:33
| 2023-08-31T20:54:33
| 640,534
| 10,928
| 5,362
|
NOASSERTION
| 2023-09-14T17:29:13
| 2010-04-30T20:37:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
test_interface.py
|
# This test file tests the SymPy function interface, that people use to create
# their own new functions. It should be as easy as possible.
from sympy.core.function import Function
from sympy.core.sympify import sympify
from sympy.functions.elementary.hyperbolic import tanh
from sympy.functions.elementary.trigonometric import (cos, sin)
from sympy.series.limits import limit
from sympy.abc import x
def test_function_series1():
"""Create our new "sin" function."""
class my_function(Function):
def fdiff(self, argindex=1):
return cos(self.args[0])
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg == 0:
return sympify(0)
    # Test that the Taylor series is correct
assert my_function(x).series(x, 0, 10) == sin(x).series(x, 0, 10)
assert limit(my_function(x)/x, x, 0) == 1
def test_function_series2():
"""Create our new "cos" function."""
class my_function2(Function):
def fdiff(self, argindex=1):
return -sin(self.args[0])
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg == 0:
return sympify(1)
    # Test that the Taylor series is correct
assert my_function2(x).series(x, 0, 10) == cos(x).series(x, 0, 10)
def test_function_series3():
"""
Test our easy "tanh" function.
This test tests two things:
* that the Function interface works as expected and it's easy to use
* that the general algorithm for the series expansion works even when the
derivative is defined recursively in terms of the original function,
since tanh(x).diff(x) == 1-tanh(x)**2
"""
class mytanh(Function):
def fdiff(self, argindex=1):
return 1 - mytanh(self.args[0])**2
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg == 0:
return sympify(0)
e = tanh(x)
f = mytanh(x)
assert e.series(x, 0, 6) == f.series(x, 0, 6)
|
401af4eaf399e7f35372db6059a01463a6393b1d
|
364774e29ef2474552ea3839de0951e63cbae0a6
|
/wouso/core/game/models.py
|
60004a6187678d277601fe03fb4dbff19c8e116e
|
[
"Apache-2.0"
] |
permissive
|
rosedu/wouso
|
66c50ef750cf79d6959768f7df93cc08607cc266
|
ed34c62ac925db719388f27fe5acb40376d8d0c1
|
refs/heads/master
| 2022-10-29T14:28:51.818073
| 2022-09-24T18:54:04
| 2022-09-24T18:54:04
| 2,965,476
| 121
| 97
|
NOASSERTION
| 2019-11-15T09:33:50
| 2011-12-12T16:15:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,783
|
py
|
models.py
|
import logging
from django.db import models
from django.core.urlresolvers import reverse
from wouso.core.common import App, CachedItem
class Game(CachedItem, models.Model, App):
""" Generic game class. Each installed application acting like a
game should extend this class.
"""
CACHE_PART = 'name'
name = models.CharField(max_length=100, primary_key=True)
short_name = models.CharField(max_length=64, blank=True)
verbose_name = models.CharField(max_length=128, blank=True)
url = models.CharField(max_length=64, blank=True)
@classmethod
def get_instance(cls):
""" Return the unique instance of a Game, starting from its class model.
"""
name = cls.__name__
return cls.get(name) or cls.add(name)
@property
def game(self):
# TODO: check usage
name = str(self.__class__.__name__)
return Game.objects.get(name=name)
@classmethod
def get_staff_and_permissions(cls):
return []
@classmethod
def get_formulas(cls):
""" Returns a list of formulas used by the game """
return []
@classmethod
def get_coins(cls):
""" Returns a list of game-specific coins (as names)"""
return []
def get_game_absolute_url(self):
""" Return a tuple for django template system """
return reverse(self.url) if self.url else ''
def get_real_object(self):
""" Find a class by name, and instantiate it
"""
from . import get_games
for g in get_games():
if g.__name__ == self.name:
return g.get_instance()
        logging.error('Could not find game class for name %s' % self.name)
return self
def __unicode__(self):
return self.name
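# A minimal sketch (the subclass below is hypothetical, not part of wouso)
# of how an installed game plugs into this model: subclass Game, override
# the hooks you need, and fetch the unique per-class row via get_instance().
#
#     class QuizGame(Game):
#         @classmethod
#         def get_formulas(cls):
#             return ['quiz-points']
#
#     quiz = QuizGame.get_instance()  # looked up (or created) by class name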
|
68aea0ea42d36ac31699e4e6c495f88f3ecc964f
|
7c5fb33929116bb77b438de3ead93b3978b5af71
|
/alf/examples/ppo_breakout_conf.py
|
583803c7bd67810c58784b31751e447e59786073
|
[
"Apache-2.0"
] |
permissive
|
HorizonRobotics/alf
|
d6dac891322a81ccb7e2a9749139627b1eda28cb
|
b00ff2fa5e660de31020338ba340263183fbeaa4
|
refs/heads/pytorch
| 2023-08-21T18:51:41.370566
| 2023-08-16T00:07:22
| 2023-08-16T00:07:22
| 178,459,453
| 288
| 57
|
Apache-2.0
| 2023-09-14T20:40:20
| 2019-03-29T18:44:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,394
|
py
|
ppo_breakout_conf.py
|
# Copyright (c) 2021 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import alf
from alf.algorithms.agent import Agent
from alf.networks import ActorDistributionNetwork, ValueNetwork
from alf.examples import atari_conf, ppo_conf
discount = 0.988
num_envs = 64
alf.config("AverageDiscountedReturnMetric", discount=discount)
# From OpenAI gym wiki:
# "v0 vs v4: v0 has repeat_action_probability of 0.25
# (meaning 25% of the time the previous action will be used instead of the new action),
# while v4 has 0 (always follow your issued action)."
# Because we already implement frame_skip in AtariPreprocessing, we should always
# use 'NoFrameskip' Atari environments from OpenAI gym.
alf.config(
'create_environment',
env_name='BreakoutNoFrameskip-v4',
num_parallel_environments=num_envs)
CONV_LAYER_PARAMS = ((32, 8, 4), (64, 4, 2), (64, 3, 1))
actor_network_cls = functools.partial(
ActorDistributionNetwork,
fc_layer_params=(128, ),
conv_layer_params=CONV_LAYER_PARAMS)
value_network_cls = functools.partial(
ValueNetwork, fc_layer_params=(128, ), conv_layer_params=CONV_LAYER_PARAMS)
alf.config('CategoricalProjectionNetwork', logits_init_output_factor=1e-10)
alf.config(
'PPOLoss',
entropy_regularization=1e-2,
gamma=discount,
normalize_advantages=False)
alf.config(
'ActorCriticAlgorithm',
actor_network_ctor=actor_network_cls,
value_network_ctor=value_network_cls)
alf.config('Agent', optimizer=alf.optimizers.Adam(lr=1e-3))
alf.config(
'TrainerConfig',
unroll_length=8,
mini_batch_size=64,
mini_batch_length=None,
num_updates_per_train_iter=3,
algorithm_ctor=Agent,
num_iterations=0,
num_env_steps=5000000,
evaluate=False,
debug_summaries=True,
summarize_grads_and_vars=True,
summary_interval=50)
|
88336a71d382c9ab93e24aedc4bf3ac2bb2cfe0f
|
725ac5a0bf72829be627bf8dc82fdc51ba0f94ae
|
/Image_Caption/resnet_rnn/data/gen_vocab.py
|
3082a1ea14a7ac525e27a42f53fa8876e4439526
|
[] |
no_license
|
shawroad/NLP_pytorch_project
|
fa14b6e4a156229765e1d552901d0492d8e1def3
|
1272fed2dc8fef78a9ded0f1ae1644d613a3b57b
|
refs/heads/master
| 2023-06-25T02:37:35.503251
| 2023-06-12T10:57:11
| 2023-06-12T10:57:11
| 229,694,655
| 530
| 104
| null | 2020-12-08T09:21:47
| 2019-12-23T06:54:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
gen_vocab.py
|
import json
import jieba
from collections import Counter
from tqdm import tqdm
if __name__ == '__main__':
min_word_freq = 1
samples = json.load(open('ai_challenger_caption_train_20170902/caption_train_annotations_20170902.json', 'r', encoding='utf8'))
temp = json.load(open('ai_challenger_caption_validation_20170910/caption_validation_annotations_20170910.json', 'r', encoding='utf8'))
samples.extend(temp)
    # Count word frequencies over the captions of all samples
word_freq = Counter()
for sample in tqdm(samples):
caption = sample['caption']
for c in caption:
seg_list = jieba.cut(c, cut_all=True)
# Update word frequency
word_freq.update(seg_list)
words = [w for w in word_freq.keys() if word_freq[w] > min_word_freq]
word_map = {k: v + 1 for v, k in enumerate(words)}
word_map['<unk>'] = len(word_map) + 1
word_map['<start>'] = len(word_map) + 1
word_map['<end>'] = len(word_map) + 1
word_map['<pad>'] = 0
print(len(word_map))
print(words[:10])
# Save word map to a JSON
json.dump(word_map, open('WORDMAP.json', 'w', encoding='utf8'), ensure_ascii=False)
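    # Illustrative note: real tokens get ids 1..len(words), '<pad>' is 0, and
    # at encode time an unseen word would fall back to '<unk>', e.g.
    #     ids = [word_map.get(w, word_map['<unk>']) for w in jieba.cut(caption)]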
|
786044b91aa38cc126561e73ceed8d3640af5131
|
751fe2de18f00596e4f1ed342b56bd6f38ee2053
|
/wisdem/rotorse/geometry_tools/geometry.py
|
e53e964003550c7a7bff0c5b9e5d5f3457e433a3
|
[
"Apache-2.0"
] |
permissive
|
WISDEM/WISDEM
|
42fa780915d62fd4e4203050e886093ecc806c8a
|
d7270ebe1c554293a9d36730d67ab555c071cb17
|
refs/heads/master
| 2023-08-04T01:22:43.215105
| 2023-06-22T23:36:07
| 2023-06-22T23:36:07
| 23,678,280
| 120
| 86
|
Apache-2.0
| 2023-06-22T19:26:34
| 2014-09-04T20:30:24
|
Python
|
UTF-8
|
Python
| false
| false
| 11,800
|
py
|
geometry.py
|
from __future__ import print_function
import sys
import numpy as np
from scipy.optimize import minimize
from scipy.interpolate import PchipInterpolator, Akima1DInterpolator, pchip
from wisdem.commonse.utilities import arc_length
from wisdem.rotorse.geometry_tools.distfunc import distfunc
from wisdem.rotorse.geometry_tools.geom_tools import curvature
from wisdem.rotorse.geometry_tools.cubicspline import NaturalCubicSpline
class Curve(object):
def __init__(self, points=None):
super(Curve, self).__init__()
self.length = 0.0 # Total curve length
self.s = np.array([]) # Curve accumulated curve length
self.points = np.array([]) # coordinates of curve
self.ni = 0 # Number of points
if points is not None:
self.initialize(points)
def initialize(self, points):
self.points = points
self.ni = points.shape[0]
self._compute_s()
self._compute_dp()
self._build_splines()
def _compute_s(self):
"""
compute normalized curve length
"""
s = arc_length(self.points)
self.length = s[-1]
self.ds = np.diff(s)
self.s = s / s[-1]
def _compute_dp(self):
"""compute the unit direction vectors along the curve"""
t1 = np.gradient(self.points[:, :])[0]
self.dp = np.array([t1[i, :] / np.linalg.norm(t1[i, :]) for i in range(np.shape(t1)[0])])
def _build_splines(self):
self._splines = []
for j in range(np.shape(self.points)[1]):
self._splines.append(PchipInterpolator(self.s, self.points[:, j]))
def redistribute(self, dist=None, s=None):
if dist is not None:
self.s = distfunc(dist)
else:
self.s = s
self.ni = np.shape(self.s)[0]
points = np.zeros((self.ni, np.shape(self.points)[1]))
for i in range(points.shape[1]):
points[:, i] = self._splines[i](self.s)
self.initialize(points)
def interp_s(self, s):
"""
interpolate (x,y) at some curve fraction s
"""
p = np.zeros(np.shape(self.points)[1])
for i in range(np.shape(self.points)[1]):
p[i] = self._splines[i](s)
return p
class AirfoilShape(Curve):
"""
Base class for airfoil shapes.
The class automatically computes the LE and TE
and can redistribute the points smoothly along the surface.
Points along the surface need to be defined starting at the
TE pressure side ending at the TE suction side.
"""
def initialize(self, points):
self.LE = np.array([]) # Leading edge coordinates
self.TE = np.array([]) # Trailing edge coordinates
self.sLE = 0.0 # Leading edge curve fraction
self.chord = 0.0 # chord length
super(AirfoilShape, self).initialize(points)
self.computeLETE()
def computeLETE(self):
"""
computes the leading and trailing edge of the airfoil.
TE is computed as the mid-point between lower and upper TE points
LE is computed as the point with maximum distance from the TE.
"""
self.TE = np.array([np.average(self.points[[0, -1], 0]), np.average(self.points[[0, -1], 1])])
res = minimize(self._sdist, (0.5), method="SLSQP", bounds=[(0, 1)])
self.sLE = res["x"][0]
xLE = self._splines[0](self.sLE)
yLE = self._splines[1](self.sLE)
self.LE = np.array([xLE, yLE])
self.curvLE = NaturalCubicSpline(self.s, curvature(self.points))(self.sLE)
self.chord = np.linalg.norm(self.LE - self.TE)
def _sdist(self, s):
x = self._splines[0](s)
y = self._splines[1](s)
return -(((x - self.TE[0]) ** 2 + (y - self.TE[1]) ** 2) ** 0.5)
def leading_edge_dist(self, ni):
"""function that returns a suitable cell size based on airfoil LE curvature"""
min_ds1 = 1.0 / ni * 0.1
max_ds1 = 1.0 / ni * 0.5
ds1 = max((min_ds1 - max_ds1) / 30.0 * abs(self.curvLE) + max_ds1, min_ds1)
return ds1
def redistribute(self, ni, even=False, dist=None, dLE=False, dTE=-1.0, s=None):
"""
redistribute the points on the airfoil using fusedwind.lib.distfunc
Parameters
----------
ni : int
total number of points on airfoil
even : bool
flag for getting an even distribution of points
dist : list
optional list of control points with the form
[[s0, ds0, n0], [s1, ds1, n1], ... [s<n>, ds<n>, n<n>]]
where\n
s<n> is the normalized curve fraction at each control point,\n
ds<n> is the normalized cell size at each control point,\n
n<n> is the cell count at each control point.
dLE : bool
optional flag for automatically calculating a suitable leading edge cell
size based on the local curvature
dTE : float
optional trailing edge cell size. If set to -1 the cell size will increase
from the LE to TE according to the tanh distribution function used
in distfunc
"""
        if even:
            dist = [
                [0, 1.0 / float(ni - 1), 1],
                [self.sLE, 1.0 / float(ni - 1), int(ni * self.sLE)],
                [1, 1.0 / float(ni - 1), ni],
            ]
        elif dLE:
            # ni // 2: distfunc expects an integer cell count at the LE control point
            dist = [[0.0, dTE, 1], [self.sLE, self.leading_edge_dist(ni), ni // 2], [1.0, dTE, ni]]
super(AirfoilShape, self).redistribute(dist, s=s)
return self
def redistribute_chordwise(self, dist):
"""
redistribute the airfoil according to a chordwise distribution
"""
# self.redistribute(self.ni, even=True)
iLE = np.argmin(self.points[:, 0])
ni = np.shape(dist)[0]
dist = np.asarray(dist)
points = np.zeros((np.shape(dist)[0] * 2 - 1, np.shape(self.points)[1]))
# interpolate pressure side coordinates
yps = NaturalCubicSpline(self.points[: iLE + 1, 0][::-1], self.points[: iLE + 1, 1][::-1])
ps = yps(dist)
# interpolate suction side coordinates
yss = NaturalCubicSpline(self.points[iLE:, 0], self.points[iLE:, 1])
ss = yss(dist)
points[: ni - 1, 0] = dist[::-1][:-1]
points[ni - 1 :, 0] = dist
points[:, 1] = np.append(ps[::-1][:-1], ss, axis=0)
return AirfoilShape(points)
def s_to_11(self, s):
"""
Transform the s coordinates from AirfoilShape format:
* s=0 at TE pressure side (lower surface)
* s=1 at TE suction side (upper surface)
to the s coordinates from the input definition:
* s=0 at LE
* s=1 at TE suction side (upper surface)
* s=-1 at TE pressure side (lower surface)
"""
if s > self.sLE:
return (s - self.sLE) / (1.0 - self.sLE)
else:
return -1.0 + s / self.sLE
def s_to_01(self, s):
"""
Transform the s coordinates from the input definition:
* s=0 at LE
* s=1 at TE suction side (upper surface)
* s=-1 at TE pressure side (lower surface)
        to the backend definition compatible with AirfoilShape():
* s=0 at TE pressure side (lower surface)
* s=1 at TE suction side (upper surface)
"""
if s >= 0.0:
return s * (1.0 - self.sLE) + self.sLE
else:
return (1.0 + s) * self.sLE
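    # Worked example (illustrative): with sLE = 0.4,
    #   s_to_11(0.7)  == (0.7 - 0.4) / (1 - 0.4) == 0.5    (mid suction side)
    #   s_to_11(0.2)  == -1 + 0.2 / 0.4          == -0.5   (mid pressure side)
    #   s_to_01(-0.5) == (1 - 0.5) * 0.4         == 0.2    (round trip)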
def gurneyflap(self, gf_height, gf_length_factor):
"""add a Gurney flap shaped using a tanh function"""
if gf_height == 0.0:
return
# if the length is not specified it is set to 3 x gf_height
gf_length = gf_length_factor * gf_height
# identify starting point of the gf along the chord
x_gf = 1.0 - gf_length
        id1 = (np.abs(x_gf - self.points[0 : self.ni // 2, 0])).argmin() + 1  # integer division keeps the slice index an int
s = np.linspace(x_gf, self.points[0, 0], 100)
smax = s[-1] - s[0]
h = np.zeros(100)
for i in range(100):
h[i] = (min(0.90 * gf_height, gf_height * (-np.tanh((s[i] - s[0]) / smax * 3) + 1.0))) / 0.90
h = h[::-1]
self.gfs = s
self.gfh = h
# add the gf shape to the airfoil
points = self.points.copy()
for i in range(0, id1):
points[i, 1] = points[i, 1] - np.interp(points[i, 0], s, h)
return AirfoilShape(points)
def open_trailing_edge(self, t):
"""
add thickness to airfoil
"""
t0 = np.abs(self.points[-1, 1] - self.points[0, 1])
dt = (t - t0) / 2.0
print("dt", dt)
# linearly add thickness from LE to TE
iLE = np.argmin(self.points[:, 0])
xLE = self.points[iLE, 0]
tlin = np.array([np.linspace(xLE, self.TE[0], 100), np.linspace(0.0, dt, 100)]).T
tspline = NaturalCubicSpline(tlin[:, 0], tlin[:, 1])
ys = tspline(self.points[iLE:, 0]) + self.points[iLE:, 1]
yp = -tspline(self.points[:iLE, 0][::-1])[::-1] + self.points[:iLE, 1]
self.points[iLE:, 1] = ys
self.points[:iLE, 1] = yp
self.initialize(self.points)
def remap2grid(x_ref, y_ref, x, spline=PchipInterpolator, axis=-1):
try:
if axis != -1:
spline_y = spline(x_ref, y_ref, axis=axis)
else:
spline_y = spline(x_ref, y_ref)
except:
x_ref = np.flip(x_ref, axis=0)
y_ref = np.flip(y_ref, axis=0)
if axis != -1:
spline_y = spline(x_ref, y_ref, axis=axis)
else:
spline_y = spline(x_ref, y_ref)
# error handling for x[-1] - x_ref[-1] > 0 and x[-1]~x_ref[-1]
try:
_ = iter(x)
if x[-1] > max(x_ref) and np.isclose(x[-1], x_ref[-1]):
x[-1] = x_ref[-1]
except:
if np.isclose(x, 0.0):
x = 0.0
if x > max(x_ref) and np.isclose(x, x_ref[-1]):
x = x_ref[-1]
y_out = spline_y(x)
np.place(y_out, y_out < np.min(y_ref), np.min(y_ref))
np.place(y_out, y_out > np.max(y_ref), np.max(y_ref))
return y_out
def trailing_edge_smoothing(data):
# correction to trailing edge shape for interpolated airfoils that smooths out unrealistic geometric errors
# often brought about when transitioning between round, flatback, or sharp trailing edges
# correct for self cross of TE (rare interpolation error)
if data[-1, 1] < data[0, 1]:
temp = data[0, 1]
data[0, 1] = data[-1, 1]
data[-1, 1] = temp
# Find indices on Suction and Pressure side for last 85-95% and 95-100% chordwise
idx_85_95 = [i_x for i_x, xi in enumerate(data[:, 0]) if xi > 0.85 and xi < 0.95]
idx_95_100 = [i_x for i_x, xi in enumerate(data[:, 0]) if xi > 0.95 and xi < 1.0]
idx_85_95_break = [i_idx for i_idx, d_idx in enumerate(np.diff(idx_85_95)) if d_idx > 1][0] + 1
idx_85_95_SS = idx_85_95[:idx_85_95_break]
idx_85_95_PS = idx_85_95[idx_85_95_break:]
idx_95_100_break = [i_idx for i_idx, d_idx in enumerate(np.diff(idx_95_100)) if d_idx > 1][0] + 1
idx_95_100_SS = idx_95_100[:idx_95_100_break]
idx_95_100_PS = idx_95_100[idx_95_100_break:]
# Interpolate the last 5% to the trailing edge
idx_in_PS = idx_85_95_PS + [-1]
x_corrected_PS = data[idx_95_100_PS, 0]
y_corrected_PS = remap2grid(data[idx_in_PS, 0], data[idx_in_PS, 1], x_corrected_PS)
idx_in_SS = [0] + idx_85_95_SS
x_corrected_SS = data[idx_95_100_SS, 0]
y_corrected_SS = remap2grid(data[idx_in_SS, 0], data[idx_in_SS, 1], x_corrected_SS)
# Overwrite profile with corrected TE
data[idx_95_100_SS, 1] = y_corrected_SS
data[idx_95_100_PS, 1] = y_corrected_PS
return data
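# Minimal usage sketch for remap2grid (illustrative values): it interpolates
# y_ref onto new x locations and clamps the output to [min(y_ref), max(y_ref)]:
#
#     x_ref = np.linspace(0.0, 1.0, 20)
#     y_ref = x_ref ** 2
#     y = remap2grid(x_ref, y_ref, np.array([0.25, 0.5, 1.0]))
#     # y is approximately [0.0625, 0.25, 1.0]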
|
5e12be55cac6f00473238785b2b33b6f096ab01b
|
4427aa4ccc6790123f662fce6acb0d5d16c645a9
|
/plextraktsync/config/ConfigMergeMixin.py
|
1e53a45a039bfaadb8c85d011e28abce2a4e8a5b
|
[
"MIT"
] |
permissive
|
Taxel/PlexTraktSync
|
83a02bf0f8f6229e5474f0132e1ff43cbed8c522
|
b536f3d6380c2bcc079bf4059071b9dc18789970
|
refs/heads/main
| 2023-08-31T13:29:57.715497
| 2023-08-30T16:27:56
| 2023-08-30T16:28:07
| 216,659,025
| 1,228
| 138
|
MIT
| 2023-09-14T21:31:02
| 2019-10-21T20:26:32
|
Python
|
UTF-8
|
Python
| false
| false
| 422
|
py
|
ConfigMergeMixin.py
|
class ConfigMergeMixin:
# https://stackoverflow.com/a/20666342/2314626
def merge(self, source, destination):
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
self.merge(value, node)
else:
destination[key] = value
return destination
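# A minimal usage sketch (hypothetical values): nested dicts are merged
# recursively, while scalar keys from `source` overwrite `destination`.
#
#     merger = ConfigMergeMixin()
#     defaults = {"plex": {"timeout": 10, "url": ""}, "dry_run": True}
#     user = {"plex": {"url": "http://localhost:32400"}, "dry_run": False}
#     merged = merger.merge(user, defaults)
#     # {'plex': {'timeout': 10, 'url': 'http://localhost:32400'},
#     #  'dry_run': False}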
|
871e62547c411cedb827066a43c9411fbb0da3f0
|
43c3e02d28c3cdfe98c472f225f0411769c74cf6
|
/samsungtvws/encrypted/__init__.py
|
8a360606119da6232a26b46023c655f50c520777
|
[
"GPL-2.0-only"
] |
permissive
|
xchwarze/samsung-tv-ws-api
|
6dfb1d7edc51c18a25542c85161488e3c7df6ec3
|
8da89843809b3042bc4535d9b29d7218c02c7f53
|
refs/heads/master
| 2023-07-07T12:03:00.279900
| 2023-06-21T20:05:01
| 2023-06-21T20:05:01
| 215,668,241
| 177
| 33
|
MIT
| 2023-05-02T17:43:18
| 2019-10-17T00:18:15
|
Python
|
UTF-8
|
Python
| false
| false
| 42
|
py
|
__init__.py
|
"""API for the encrypted websocket API"""
|
1ca111ab2ad503533b79ec7e2129d0172ab38760
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/PhysicsTools/NanoAOD/python/met_cff.py
|
c06f21372090a844ab24a885cd836f4f52cf8911
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 6,920
|
py
|
met_cff.py
|
import FWCore.ParameterSet.Config as cms
from PhysicsTools.NanoAOD.common_cff import *
from PhysicsTools.NanoAOD.simpleSingletonCandidateFlatTableProducer_cfi import simpleSingletonCandidateFlatTableProducer
##################### Tables for final output and docs ##########################
metTable = simpleSingletonCandidateFlatTableProducer.clone(
src = cms.InputTag("slimmedMETs"),
name = cms.string("MET"),
doc = cms.string("slimmedMET, type-1 corrected PF MET"),
variables = cms.PSet(PTVars,
sumEt = Var("sumEt()", float, doc="scalar sum of Et",precision=10),
covXX = Var("getSignificanceMatrix().At(0,0)",float,doc="xx element of met covariance matrix", precision=8),
covXY = Var("getSignificanceMatrix().At(0,1)",float,doc="xy element of met covariance matrix", precision=8),
covYY = Var("getSignificanceMatrix().At(1,1)",float,doc="yy element of met covariance matrix", precision=8),
significance = Var("metSignificance()", float, doc="MET significance",precision=10),
sumPtUnclustered = Var("metSumPtUnclustered()", float, doc="sumPt used for MET significance",precision=10),
MetUnclustEnUpDeltaX = Var("shiftedPx('UnclusteredEnUp')-px()", float, doc="Delta (METx_mod-METx) Unclustered Energy Up",precision=10),
MetUnclustEnUpDeltaY = Var("shiftedPy('UnclusteredEnUp')-py()", float, doc="Delta (METy_mod-METy) Unclustered Energy Up",precision=10),
),
)
rawMetTable = simpleSingletonCandidateFlatTableProducer.clone(
src = metTable.src,
name = cms.string("RawMET"),
doc = cms.string("raw PF MET"),
variables = cms.PSet(#NOTA BENE: we don't copy PTVars here!
pt = Var("uncorPt", float, doc="pt", precision=10),
phi = Var("uncorPhi", float, doc="phi", precision=10),
sumEt = Var("uncorSumEt", float, doc="scalar sum of Et", precision=10),
),
)
caloMetTable = simpleSingletonCandidateFlatTableProducer.clone(
src = metTable.src,
name = cms.string("CaloMET"),
doc = cms.string("Offline CaloMET (muon corrected)"),
variables = cms.PSet(#NOTA BENE: we don't copy PTVars here!
pt = Var("caloMETPt", float, doc="pt", precision=10),
phi = Var("caloMETPhi", float, doc="phi", precision=10),
sumEt = Var("caloMETSumEt", float, doc="scalar sum of Et", precision=10),
),
)
puppiMetTable = simpleSingletonCandidateFlatTableProducer.clone(
src = cms.InputTag("slimmedMETsPuppi"),
name = cms.string("PuppiMET"),
doc = cms.string("PUPPI MET"),
variables = cms.PSet(PTVars,
sumEt = Var("sumEt()", float, doc="scalar sum of Et",precision=10),
ptJERUp = Var("shiftedPt('JetResUp')", float, doc="JER up pt",precision=10),
ptJERDown = Var("shiftedPt('JetResDown')", float, doc="JER down pt",precision=10),
phiJERUp = Var("shiftedPhi('JetResUp')", float, doc="JER up phi",precision=10),
phiJERDown = Var("shiftedPhi('JetResDown')", float, doc="JER down phi",precision=10),
ptJESUp = Var("shiftedPt('JetEnUp')", float, doc="JES up pt",precision=10),
ptJESDown = Var("shiftedPt('JetEnDown')", float, doc="JES down pt",precision=10),
phiJESUp = Var("shiftedPhi('JetEnUp')", float, doc="JES up phi",precision=10),
phiJESDown = Var("shiftedPhi('JetEnDown')", float, doc="JES down phi",precision=10),
ptUnclusteredUp = Var("shiftedPt('UnclusteredEnUp')", float, doc="Unclustered up pt",precision=10),
ptUnclusteredDown = Var("shiftedPt('UnclusteredEnDown')", float, doc="Unclustered down pt",precision=10),
phiUnclusteredUp = Var("shiftedPhi('UnclusteredEnUp')", float, doc="Unclustered up phi",precision=10),
phiUnclusteredDown = Var("shiftedPhi('UnclusteredEnDown')", float, doc="Unclustered down phi",precision=10),
),
)
rawPuppiMetTable = simpleSingletonCandidateFlatTableProducer.clone(
src = puppiMetTable.src,
name = cms.string("RawPuppiMET"),
doc = cms.string("raw Puppi MET"),
variables = cms.PSet(#NOTA BENE: we don't copy PTVars here!
pt = Var("uncorPt", float, doc="pt", precision=10),
phi = Var("uncorPhi", float, doc="phi", precision=10),
sumEt = Var("uncorSumEt", float, doc="scalar sum of Et", precision=10),
),)
tkMetTable = simpleSingletonCandidateFlatTableProducer.clone(
src = metTable.src,
name = cms.string("TkMET"),
doc = cms.string("Track MET computed with tracks from PV0 ( pvAssociationQuality()>=4 ) "),
variables = cms.PSet(#NOTA BENE: we don't copy PTVars here!
pt = Var("corPt('RawTrk')", float, doc="raw track MET pt",precision=10),
phi = Var("corPhi('RawTrk')", float, doc="raw track MET phi",precision=10),
sumEt = Var("corSumEt('RawTrk')", float, doc="raw track scalar sum of Et",precision=10),
),
)
chsMetTable = simpleSingletonCandidateFlatTableProducer.clone(
src = metTable.src,
name = cms.string("ChsMET"),
doc = cms.string("PF MET computed with CHS PF candidates"),
variables = cms.PSet(#NOTA BENE: we don't copy PTVars here!
pt = Var("corPt('RawChs')", float, doc="raw chs PF MET pt",precision=10),
phi = Var("corPhi('RawChs')", float, doc="raw chs PF MET phi",precision=10),
sumEt = Var("corSumEt('RawChs')", float, doc="raw chs PF scalar sum of Et",precision=10),
),
)
deepMetResolutionTuneTable = simpleSingletonCandidateFlatTableProducer.clone(
# current deepMets are saved in slimmedMETs in MiniAOD,
# in the same way as chsMet/TkMET
src = metTable.src,
name = cms.string("DeepMETResolutionTune"),
doc = cms.string("Deep MET trained with resolution tune"),
variables = cms.PSet(#NOTA BENE: we don't copy PTVars here!
pt = Var("corPt('RawDeepResolutionTune')", float, doc="DeepMET ResolutionTune pt",precision=-1),
phi = Var("corPhi('RawDeepResolutionTune')", float, doc="DeepmET ResolutionTune phi",precision=12),
),
)
deepMetResponseTuneTable = simpleSingletonCandidateFlatTableProducer.clone(
src = metTable.src,
name = cms.string("DeepMETResponseTune"),
doc = cms.string("Deep MET trained with extra response tune"),
variables = cms.PSet(#NOTA BENE: we don't copy PTVars here!
pt = Var("corPt('RawDeepResponseTune')", float, doc="DeepMET ResponseTune pt",precision=-1),
phi = Var("corPhi('RawDeepResponseTune')", float, doc="DeepMET ResponseTune phi",precision=12),
),
)
metMCTable = simpleSingletonCandidateFlatTableProducer.clone(
src = metTable.src,
name = cms.string("GenMET"),
doc = cms.string("Gen MET"),
variables = cms.PSet(
pt = Var("genMET.pt", float, doc="pt", precision=10),
phi = Var("genMET.phi", float, doc="phi", precision=10),
),
)
metTablesTask = cms.Task( metTable, rawMetTable, caloMetTable, puppiMetTable, rawPuppiMetTable, tkMetTable, chsMetTable, deepMetResolutionTuneTable, deepMetResponseTuneTable )
metMCTask = cms.Task( metMCTable )
|
5e311f3295c559b889c285fb8decb60b2fc5b85b
|
cc5031b999f0a47b3cfeb446601ac658cb9cc91a
|
/nbgrader/tests/apps/test_nbgrader_generate_feedback.py
|
cb76d3e139bee8d8f9ad0c34e7d65d31761470b1
|
[
"BSD-3-Clause"
] |
permissive
|
jupyter/nbgrader
|
9b681894e6f725647bb742ab136e360defb70d38
|
6db380039dab377157620516ae49eafcf7537fc8
|
refs/heads/main
| 2023-09-03T21:37:20.701676
| 2023-09-01T20:14:29
| 2023-09-01T20:14:29
| 23,984,796
| 1,274
| 371
|
BSD-3-Clause
| 2023-09-12T14:04:25
| 2014-09-13T03:27:36
|
Python
|
UTF-8
|
Python
| false
| false
| 22,997
|
py
|
test_nbgrader_generate_feedback.py
|
import os
import sys
import pytest
from os.path import join, exists, isfile
from ...utils import remove
from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderFeedback(BaseTestApp):
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["generate_feedback", "--help-all"])
def test_deprecated(self, db, course_dir):
"""Can feedback be generated for an unchanged assignment?"""
run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
run_nbgrader(["db", "student", "add", "foo", "--db", db])
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
run_nbgrader(["autograde", "ps1", "--db", db])
run_nbgrader(["feedback", "ps1", "--db", db])
assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
def test_single_file(self, db, course_dir):
"""Can feedback be generated for an unchanged assignment?"""
run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
run_nbgrader(["db", "student", "add", "foo", "--db", db])
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["assign", "ps1", "--db", db])
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
run_nbgrader(["autograde", "ps1", "--db", db])
run_nbgrader(["generate_feedback", "ps1", "--db", db])
assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
def test_student_id_exclude(self, db, course_dir):
"""Does --CourseDirectory.student_id_exclude=X exclude students?"""
run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
run_nbgrader(["db", "student", "add", "foo", "--db", db])
run_nbgrader(["db", "student", "add", "bar", "--db", db])
run_nbgrader(["db", "student", "add", "baz", "--db", db])
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["assign", "ps1", "--db", db])
for student in ["foo", "bar", "baz"]:
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", student, "ps1", "p1.ipynb"))
run_nbgrader(["autograde", "ps1", "--db", db])
run_nbgrader(["generate_feedback", "ps1", "--db", db, "--CourseDirectory.student_id_exclude=bar,baz"])
for student in ["foo", "bar", "baz"]:
assert exists(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert not exists(join(course_dir, "feedback", "bar", "ps1", "p1.html"))
assert not exists(join(course_dir, "feedback", "baz", "ps1", "p1.html"))
def test_force(self, db, course_dir):
"""Ensure the force option works properly"""
run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
run_nbgrader(["db", "student", "add", "foo", "--db", db])
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo")
self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar")
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo")
self._make_file(join(course_dir, "submitted", "foo", "ps1", "data", "bar.txt"), "bar")
run_nbgrader(["autograde", "ps1", "--db", db])
self._make_file(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"), "asdf")
run_nbgrader(["generate_feedback", "ps1", "--db", db])
assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
# check that it skips the existing directory
remove(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
run_nbgrader(["generate_feedback", "ps1", "--db", db])
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
# force overwrite the supplemental files
run_nbgrader(["generate_feedback", "ps1", "--db", db, "--force"])
assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
# force overwrite
remove(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
run_nbgrader(["generate_feedback", "ps1", "--db", db, "--force"])
assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
def test_force_f(self, db, course_dir):
"""Ensure the force option works properly"""
run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
run_nbgrader(["db", "student", "add", "foo", "--db", db])
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo")
self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar")
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo")
self._make_file(join(course_dir, "submitted", "foo", "ps1", "data", "bar.txt"), "bar")
run_nbgrader(["autograde", "ps1", "--db", db])
self._make_file(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"), "asdf")
run_nbgrader(["generate_feedback", "ps1", "--db", db])
assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
# check that it skips the existing directory
remove(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
run_nbgrader(["generate_feedback", "ps1", "--db", db])
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
# force overwrite the supplemental files
run_nbgrader(["generate_feedback", "ps1", "--db", db, "-f"])
assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
# force overwrite
remove(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
run_nbgrader(["generate_feedback", "ps1", "--db", db, "--force"])
assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
def test_filter_notebook(self, db, course_dir):
"""Does feedback filter by notebook properly?"""
run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
run_nbgrader(["db", "student", "add", "foo", "--db", db])
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo")
self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar")
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo")
self._make_file(join(course_dir, "submitted", "foo", "ps1", "data", "bar.txt"), "bar")
self._make_file(join(course_dir, "submitted", "foo", "ps1", "blah.pyc"), "asdf")
run_nbgrader(["autograde", "ps1", "--db", db])
run_nbgrader(["generate_feedback", "ps1", "--db", db, "--notebook", "p1"])
assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
# check that removing the notebook still causes it to run
remove(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
remove(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
run_nbgrader(["generate_feedback", "ps1", "--db", db, "--notebook", "p1"])
assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
        # check that running it again doesn't do anything
remove(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
run_nbgrader(["generate_feedback", "ps1", "--db", db, "--notebook", "p1"])
assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
        # check that removing the notebook doesn't cause it to run
remove(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
run_nbgrader(["generate_feedback", "ps1", "--db", db])
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
@pytest.mark.parametrize("groupshared", [False, True])
def test_permissions(self, course_dir, groupshared):
"""Are permissions properly set?"""
run_nbgrader(["db", "assignment", "add", "ps1"])
run_nbgrader(["db", "student", "add", "foo"])
with open("nbgrader_config.py", "a") as fh:
if groupshared:
fh.write("""c.CourseDirectory.groupshared = True\n""")
self._empty_notebook(join(course_dir, "source", "ps1", "foo.ipynb"))
run_nbgrader(["generate_assignment", "ps1"])
self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "foo.ipynb"))
run_nbgrader(["autograde", "ps1"])
run_nbgrader(["generate_feedback", "ps1"])
if not groupshared:
if sys.platform == 'win32':
perms = '666'
else:
perms = '644'
else:
if sys.platform == 'win32':
perms = '666'
dirperms = '777'
else:
perms = '664'
dirperms = '2775'
assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.html"))
if groupshared:
# non-groupshared doesn't guarantee anything about directory perms
assert self._get_permissions(join(course_dir, "feedback", "foo", "ps1")) == dirperms
assert self._get_permissions(join(course_dir, "feedback", "foo", "ps1", "foo.html")) == perms
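    # Editor's note: the _get_permissions helper used above is inherited from
    # the shared test base class (not shown in this excerpt). A minimal sketch
    # of what such a helper could look like, stated purely as an assumption:
    #
    #     import os, stat
    #     def _get_permissions(self, path):
    #         return oct(stat.S_IMODE(os.stat(path).st_mode))[2:]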
def test_custom_permissions(self, course_dir):
"""Are custom permissions properly set?"""
run_nbgrader(["db", "assignment", "add", "ps1"])
run_nbgrader(["db", "student", "add", "foo"])
self._empty_notebook(join(course_dir, "source", "ps1", "foo.ipynb"))
run_nbgrader(["generate_assignment", "ps1"])
self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "foo.ipynb"))
run_nbgrader(["autograde", "ps1"])
run_nbgrader(["generate_feedback", "ps1", "--GenerateFeedback.permissions=444"])
assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.html"))
assert self._get_permissions(join(course_dir, "feedback", "foo", "ps1", "foo.html")) == '444'
def test_force_single_notebook(self, course_dir):
run_nbgrader(["db", "assignment", "add", "ps1"])
run_nbgrader(["db", "student", "add", "foo"])
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
run_nbgrader(["generate_assignment", "ps1"])
self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p2.ipynb"))
run_nbgrader(["autograde", "ps1"])
run_nbgrader(["generate_feedback", "ps1"])
assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert exists(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
p1 = self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
p2 = self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb"))
run_nbgrader(["generate_feedback", "ps1", "--notebook", "p1", "--force"])
assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert exists(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
assert p1 != self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert p2 == self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
def test_update_newer(self, course_dir):
run_nbgrader(["db", "assignment", "add", "ps1"])
run_nbgrader(["db", "student", "add", "foo"])
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["generate_assignment", "ps1"])
self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles")
run_nbgrader(["autograde", "ps1"])
run_nbgrader(["generate_feedback", "ps1"])
assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt"))
assert self._file_contents(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) == "2015-02-02 15:58:23.948203 America/Los_Angeles"
p = self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
self._make_file(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"), "2015-02-02 16:58:23.948203 America/Los_Angeles")
run_nbgrader(["generate_feedback", "ps1"])
assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt"))
assert self._file_contents(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) == "2015-02-02 16:58:23.948203 America/Los_Angeles"
assert p != self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
def test_update_newer_single_notebook(self, course_dir):
run_nbgrader(["db", "assignment", "add", "ps1"])
run_nbgrader(["db", "student", "add", "foo"])
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
run_nbgrader(["generate_assignment", "ps1"])
self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p2.ipynb"))
self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles")
run_nbgrader(["autograde", "ps1"])
run_nbgrader(["generate_feedback", "ps1"])
assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert exists(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt"))
assert self._file_contents(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) == "2015-02-02 15:58:23.948203 America/Los_Angeles"
p1 = self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
p2 = self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb"))
self._make_file(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"), "2015-02-02 16:58:23.948203 America/Los_Angeles")
run_nbgrader(["generate_feedback", "ps1", "--notebook", "p1"])
assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert exists(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt"))
assert self._file_contents(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) == "2015-02-02 16:58:23.948203 America/Los_Angeles"
assert p1 != self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert p2 == self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
def test_autotests(self, course_dir):
"""Can feedback be generated for an assignment with autotests?"""
run_nbgrader(["db", "assignment", "add", "ps1"])
run_nbgrader(["db", "student", "add", "foo"])
self._copy_file(join("files", "autotest-simple.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "autotest-simple.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
self._copy_file(join("files", "autotests.yml"), join(course_dir, "autotests.yml"))
run_nbgrader(["generate_assignment", "ps1"])
self._copy_file(join("files", "autotest-simple.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._copy_file(join("files", "autotest-simple.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p2.ipynb"))
self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles")
run_nbgrader(["autograde", "ps1"])
run_nbgrader(["generate_feedback", "ps1"])
assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert exists(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt"))
assert self._file_contents(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) == "2015-02-02 15:58:23.948203 America/Los_Angeles"
p1 = self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
p2 = self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb"))
self._make_file(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"), "2015-02-02 16:58:23.948203 America/Los_Angeles")
run_nbgrader(["generate_feedback", "ps1", "--notebook", "p1"])
assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert exists(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
assert isfile(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt"))
assert self._file_contents(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) == "2015-02-02 16:58:23.948203 America/Los_Angeles"
assert p1 != self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert p2 == self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
def test_single_user(self, course_dir):
run_nbgrader(["db", "assignment", "add", "ps1", "--duedate",
"2015-02-02 14:58:23.948203 America/Los_Angeles"])
run_nbgrader(["db", "student", "add", "foo"])
run_nbgrader(["db", "student", "add", "bar"])
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
run_nbgrader(["assign", "ps1"])
self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p2.ipynb"))
self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p2.ipynb"))
run_nbgrader(["autograde", "ps1"])
run_nbgrader(["generate_feedback", "ps1", "--student", "foo"])
assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
assert exists(join(course_dir, "feedback", "foo", "ps1", "p2.html"))
assert not exists(join(course_dir, "feedback", "bar", "ps1", "p1.html"))
assert not exists(join(course_dir, "feedback", "bar", "ps1", "p2.html"))
|
2bf6ae70250c1a88028b9bfb78ccddd1315d6c2c
|
15f0514701a78e12750f68ba09d68095172493ee
|
/Python3/948.py
|
435f04ca5c6a03f86b2247caeb632c401a8a9325
|
[
"MIT"
] |
permissive
|
strengthen/LeetCode
|
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
|
3ffa6dcbeb787a6128641402081a4ff70093bb61
|
refs/heads/master
| 2022-12-04T21:35:17.872212
| 2022-11-30T06:23:24
| 2022-11-30T06:23:24
| 155,958,163
| 936
| 365
|
MIT
| 2021-11-15T04:02:45
| 2018-11-03T06:47:38
| null |
UTF-8
|
Python
| false
| false
| 1,438
|
py
|
948.py
|
__________________________________________________________________________________________________
sample 56 ms submission
from typing import List  # assumed import so the List[int] annotation resolves outside the LeetCode harness
class Solution:
def bagOfTokensScore(self, tokens: List[int], P: int) -> int:
if len(tokens) == 0:
return 0
        tokens = sorted(tokens)
        if tokens[0] > P:
            return 0
        s = 0
        i = 0
        j = len(tokens) - 1
while i <= j:
if P >= tokens[i]:
P -= tokens[i]
i += 1
s += 1
elif P < tokens[i] and (P + tokens[j]) >= tokens[i]:
P = P + tokens[j] - tokens[i]
i += 1
j -= 1
else:
break
return s
__________________________________________________________________________________________________
sample 13216 kb submission
from collections import deque
from typing import List  # assumed import so the List[int] annotation resolves outside the LeetCode harness
class Solution:
def bagOfTokensScore(self, tokens: List[int], P: int) -> int:
tokens.sort()
points = ans = 0
que = deque(tokens)
        while que and (points or P >= que[0]):
            while que and P >= que[0]:
                P -= que.popleft()
                points += 1
            ans = max(ans, points)
            if points and que:
                P += que.pop()
                points -= 1
return ans
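# --- Editor's sanity check (illustrative; not part of the original submission) ---
# Both submissions implement the same greedy strategy: buy points with the
# cheapest tokens and, when stuck, trade one point for the most expensive token.
if __name__ == "__main__":
    s = Solution()
    assert s.bagOfTokensScore([100], 50) == 0
    assert s.bagOfTokensScore([100, 200], 150) == 1
    assert s.bagOfTokensScore([100, 200, 300, 400], 200) == 2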
__________________________________________________________________________________________________
|
0ca0d034e95acdb8fdf0c44ef14b2c35bf91ec7c
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowRunAllSectionInterface/cli/equal/golden_output_6_expected.py
|
aaaac683252ab6ad56a6b0d830cde9787465fd76
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 437
|
py
|
golden_output_6_expected.py
|
expected_output = {
"interfaces": {
"TenGigabitEthernet0/1/3": {
"mtu": 1468,
"logging_event_link_status": True,
"load_interval": 30,
"shutdown": False,
"medium_p2p": False,
"mka_policy": "MKAPolicy",
"mka_primary_keychain": "KCP256",
"macsec_access_control": "should-secure",
"macsec_enabled": True
}
}
}
|
9acae7590825b9fb4fda4bb06d911d360f0f670d
|
279f415dd1e06c594c6c87deda57e201c73c4542
|
/espnet2/spk/espnet_model.py
|
f947dee6e1e5add351ee592d52277edf8e1f6ff7
|
[
"Apache-2.0"
] |
permissive
|
espnet/espnet
|
f7ba47271c1a6b1ed606dbbfb04a7f14220bb585
|
bcd20948db7846ee523443ef9fd78c7a1248c95e
|
refs/heads/master
| 2023-08-28T23:43:34.238336
| 2023-08-23T02:51:39
| 2023-08-23T02:51:39
| 114,054,873
| 7,242
| 2,244
|
Apache-2.0
| 2023-09-14T08:01:11
| 2017-12-13T00:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 5,667
|
py
|
espnet_model.py
|
# Copyright 2023 Jee-weon Jung
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from typing import Dict, Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.spk.loss.aamsoftmax import AAMSoftmax
from espnet2.spk.loss.abs_loss import AbsLoss
from espnet2.spk.pooling.abs_pooling import AbsPooling
from espnet2.spk.projector.abs_projector import AbsProjector
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
class ESPnetSpeakerModel(AbsESPnetModel):
"""
Speaker embedding extraction model.
    Core model for diverse speaker-related tasks (e.g., verification, open-set
    identification, diarization).
    The model architecture mainly comprises an 'encoder', a 'pooling', and a
    'projector'.
    In the speaker recognition field, the combination of the three is usually
    called the 'speaker_encoder' (or speaker embedding extractor).
    We split it into three for flexibility in future extensions:
- 'encoder' : extract frame-level speaker embeddings.
- 'pooling' : aggregate into single utterance-level embedding.
- 'projector' : (optional) additional processing (e.g., one fully-
connected layer) to derive speaker embedding.
Possibly, in the future, 'pooling' and/or 'projector' can be integrated as
a 'decoder', depending on the extension for joint usage of different tasks
(e.g., ASR, SE, target speaker extraction).
"""
def __init__(
self,
frontend: Optional[AbsFrontend],
specaug: Optional[AbsSpecAug],
normalize: Optional[AbsNormalize],
encoder: Optional[AbsEncoder],
pooling: Optional[AbsPooling],
projector: Optional[AbsProjector],
loss: Optional[AbsLoss],
):
assert check_argument_types()
super().__init__()
self.frontend = frontend
self.specaug = specaug
self.normalize = normalize
self.encoder = encoder
self.pooling = pooling
self.projector = projector
self.loss = loss
def forward(
self,
speech: torch.Tensor,
# speech_lengths: torch.Tensor = None,
spk_labels: torch.Tensor,
extract_embd: bool = False,
**kwargs,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
"""
Feed-forward through encoder layers and aggregate into utterance-level
feature.
Args:
speech: (Batch, samples)
speech_lengths: (Batch,)
            extract_embd: when True, skip the classification head and return
                the speaker embedding directly
spk_labels: (Batch, )
"""
if spk_labels is not None:
assert speech.shape[0] == spk_labels.shape[0], (
speech.shape,
spk_labels.shape,
)
batch_size = speech.shape[0]
# 1. extract low-level feats (e.g., mel-spectrogram or MFCC)
# Will do nothing for raw waveform-based models (e.g., RawNets)
feats, _ = self.extract_feats(speech, None)
frame_level_feats = self.encode_frame(feats)
# 2. aggregation into utterance-level
utt_level_feat = self.pooling(frame_level_feats)
# 3. (optionally) go through further projection(s)
spk_embd = self.project_spk_embd(utt_level_feat)
if extract_embd:
return spk_embd
# 4. calculate loss
loss = self.loss(spk_embd, spk_labels)
stats = dict(loss=loss.detach())
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
return loss, stats, weight
def extract_feats(
self, speech: torch.Tensor, speech_lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
batch_size = speech.shape[0]
speech_lengths = (
speech_lengths
if speech_lengths is not None
else torch.ones(batch_size).int() * speech.shape[1]
)
# 1. extract feats
if self.frontend is not None:
feats, feat_lengths = self.frontend(speech, speech_lengths)
else:
feats = speech
feat_lengths = None
# 2. apply augmentations
if self.specaug is not None and self.training:
feats, _ = self.specaug(feats, feat_lengths)
# 3. normalize
if self.normalize is not None:
feats, _ = self.normalize(feats, feat_lengths)
return feats, feat_lengths
def encode_frame(self, feats: torch.Tensor) -> torch.Tensor:
frame_level_feats = self.encoder(feats)
return frame_level_feats
def aggregate(self, frame_level_feats: torch.Tensor) -> torch.Tensor:
        utt_level_feat = self.pooling(frame_level_feats)  # fixed: __init__ defines self.pooling; there is no self.aggregator
return utt_level_feat
def project_spk_embd(self, utt_level_feat: torch.Tensor) -> torch.Tensor:
if self.projector is not None:
spk_embd = self.projector(utt_level_feat)
else:
spk_embd = utt_level_feat
return spk_embd
def collect_feats(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spk_labels: torch.Tensor = None,
**kwargs,
) -> Dict[str, torch.Tensor]:
feats, feats_lengths = self.extract_feats(speech, speech_lengths)
return {"feats": feats}
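# --- Editor's usage sketch (illustrative; not part of the original file) ---
# With concrete components in place (my_encoder, my_pooling, my_projector and
# my_loss below are hypothetical placeholders, not real espnet2 objects),
# utterance-level embedding extraction would look like:
#
#     model = ESPnetSpeakerModel(frontend=None, specaug=None, normalize=None,
#                                encoder=my_encoder, pooling=my_pooling,
#                                projector=my_projector, loss=my_loss)
#     spk_embd = model(speech, spk_labels=None, extract_embd=True)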
|
0456411c2146480bda697203c6ec4c15aefe4fe8
|
d6aa13cb1021773d88e2ef780bc4450b38455644
|
/apex/contrib/fmha/__init__.py
|
ec2e9c66c8f5b19bbee4028c73201c2f41f80854
|
[
"BSD-3-Clause"
] |
permissive
|
NVIDIA/apex
|
f54a9ced5d8b1c14f777e6bb53f11b3dc3ff2d6b
|
7995de18677295c5edeeab082179edbfdb6ee16a
|
refs/heads/master
| 2023-08-21T13:25:44.408616
| 2023-08-19T04:36:48
| 2023-08-19T04:36:48
| 130,725,814
| 7,932
| 1,381
|
BSD-3-Clause
| 2023-09-13T16:09:42
| 2018-04-23T16:28:52
|
Python
|
UTF-8
|
Python
| false
| false
| 26
|
py
|
__init__.py
|
from .fmha import FMHAFun
|
7d1d582f1a1487df3937ab3783738a8f7755f464
|
5f2863d51f07969deebd04e3639c802aac6574d0
|
/statannotations/Annotation.py
|
9f79398fe3e22301da7034c137cb63a546f5d62c
|
[
"MIT"
] |
permissive
|
trevismd/statannotations
|
5a60deb1dc62a506f6525315bcb978b0cd460299
|
2a5ebe1a4d90c641552db580f3f285559e1bb277
|
refs/heads/master
| 2023-08-08T05:00:38.354339
| 2022-10-19T07:01:10
| 2022-10-19T07:01:10
| 296,015,778
| 468
| 57
|
NOASSERTION
| 2023-07-28T13:03:32
| 2020-09-16T11:44:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,501
|
py
|
Annotation.py
|
from typing import Union
from statannotations.PValueFormat import Formatter
from statannotations.stats.StatResult import StatResult
class Annotation:
"""
Holds data, linked structs and an optional Formatter.
"""
def __init__(self, structs, data: Union[str, StatResult],
formatter: Formatter = None):
"""
:param structs: plot structures concerned by the data
:param data: a string or StatResult to be formatted by a formatter
:param formatter: A Formatter object. Statannotations provides a
PValueFormatter for StatResult objects.
"""
self.structs = structs
self.data = data
self.is_custom = isinstance(data, str)
self.formatter = formatter
@property
def text(self):
if self.is_custom:
return self.data
else:
if self.formatter is None:
raise ValueError("Missing a PValueFormat object to "
"format the statistical result.")
return self.formatter.format_data(self.data)
@property
def formatted_output(self):
if isinstance(self.data, str):
return self.data
else:
return self.data.formatted_output
def print_labels_and_content(self, sep=" vs. "):
labels_string = sep.join(str(struct["label"])
for struct in self.structs)
print(f"{labels_string}: {self.formatted_output}")
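# --- Editor's example (illustrative; not part of the original module) ---
# A custom (string) annotation needs no formatter; its text is returned as-is.
if __name__ == "__main__":
    note = Annotation(structs=[{"label": "A"}, {"label": "B"}], data="p < 0.05")
    assert note.is_custom and note.text == "p < 0.05"
    note.print_labels_and_content()  # prints "A vs. B: p < 0.05"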
|
57b48f0046b962e5b5bac24b3067cb95858577f2
|
866cff63b1c2b71af512e165a00f81ed07a4f31d
|
/modin/core/execution/dask/implementations/pandas_on_dask/partitioning/partition.py
|
6173da7b94b73f169a9ab9914961fc18127a62e4
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
modin-project/modin
|
d313249315a7e0de1fb61d803727b41a53df991d
|
8f6e00378e095817deccd25f4140406c5ee6c992
|
refs/heads/master
| 2023-08-31T21:27:23.108633
| 2023-08-31T19:09:15
| 2023-08-31T19:09:15
| 138,224,079
| 9,241
| 693
|
Apache-2.0
| 2023-09-14T20:40:19
| 2018-06-21T21:35:05
|
Python
|
UTF-8
|
Python
| false
| false
| 12,573
|
py
|
partition.py
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses class that wraps data (block partition) and its metadata."""
from distributed import Future
from distributed.utils import get_ip
from modin.core.dataframe.pandas.partitioning.partition import PandasDataframePartition
from modin.pandas.indexing import compute_sliced_len
from modin.logging import get_logger
from modin.core.execution.dask.common import DaskWrapper
class PandasOnDaskDataframePartition(PandasDataframePartition):
"""
The class implements the interface in ``PandasDataframePartition``.
Parameters
----------
data : distributed.Future
        A reference to the pandas DataFrame that needs to be wrapped by this class.
length : distributed.Future or int, optional
Length or reference to it of wrapped pandas DataFrame.
width : distributed.Future or int, optional
Width or reference to it of wrapped pandas DataFrame.
ip : distributed.Future or str, optional
Node IP address or reference to it that holds wrapped pandas DataFrame.
call_queue : list, optional
Call queue that needs to be executed on wrapped pandas DataFrame.
"""
execution_wrapper = DaskWrapper
def __init__(self, data, length=None, width=None, ip=None, call_queue=None):
super().__init__()
assert isinstance(data, Future)
self._data = data
if call_queue is None:
call_queue = []
self.call_queue = call_queue
self._length_cache = length
self._width_cache = width
self._ip_cache = ip
log = get_logger()
self._is_debug(log) and log.debug(
"Partition ID: {}, Height: {}, Width: {}, Node IP: {}".format(
self._identity,
str(self._length_cache),
str(self._width_cache),
str(self._ip_cache),
)
)
def apply(self, func, *args, **kwargs):
"""
Apply a function to the object wrapped by this partition.
Parameters
----------
func : callable or distributed.Future
A function to apply.
*args : iterable
Additional positional arguments to be passed in `func`.
**kwargs : dict
Additional keyword arguments to be passed in `func`.
Returns
-------
PandasOnDaskDataframePartition
A new ``PandasOnDaskDataframePartition`` object.
Notes
-----
The keyword arguments are sent as a dictionary.
"""
log = get_logger()
self._is_debug(log) and log.debug(f"ENTER::Partition.apply::{self._identity}")
call_queue = self.call_queue + [[func, args, kwargs]]
if len(call_queue) > 1:
self._is_debug(log) and log.debug(
f"SUBMIT::_apply_list_of_funcs::{self._identity}"
)
futures = DaskWrapper.deploy(
func=apply_list_of_funcs,
f_args=(call_queue, self._data),
num_returns=2,
pure=False,
)
else:
# We handle `len(call_queue) == 1` in a different way because
# this improves performance a bit.
func, f_args, f_kwargs = call_queue[0]
futures = DaskWrapper.deploy(
func=apply_func,
f_args=(self._data, func, *f_args),
f_kwargs=f_kwargs,
num_returns=2,
pure=False,
)
self._is_debug(log) and log.debug(f"SUBMIT::_apply_func::{self._identity}")
self._is_debug(log) and log.debug(f"EXIT::Partition.apply::{self._identity}")
return self.__constructor__(futures[0], ip=futures[1])
def drain_call_queue(self):
"""Execute all operations stored in the call queue on the object wrapped by this partition."""
log = get_logger()
self._is_debug(log) and log.debug(
f"ENTER::Partition.drain_call_queue::{self._identity}"
)
if len(self.call_queue) == 0:
return
call_queue = self.call_queue
if len(call_queue) > 1:
self._is_debug(log) and log.debug(
f"SUBMIT::_apply_list_of_funcs::{self._identity}"
)
futures = DaskWrapper.deploy(
func=apply_list_of_funcs,
f_args=(call_queue, self._data),
num_returns=2,
pure=False,
)
else:
# We handle `len(call_queue) == 1` in a different way because
# this improves performance a bit.
func, f_args, f_kwargs = call_queue[0]
self._is_debug(log) and log.debug(f"SUBMIT::_apply_func::{self._identity}")
futures = DaskWrapper.deploy(
func=apply_func,
f_args=(self._data, func, *f_args),
f_kwargs=f_kwargs,
num_returns=2,
pure=False,
)
self._data = futures[0]
self._ip_cache = futures[1]
self._is_debug(log) and log.debug(
f"EXIT::Partition.drain_call_queue::{self._identity}"
)
self.call_queue = []
def wait(self):
"""Wait completing computations on the object wrapped by the partition."""
self.drain_call_queue()
DaskWrapper.wait(self._data)
def mask(self, row_labels, col_labels):
"""
Lazily create a mask that extracts the indices provided.
Parameters
----------
row_labels : list-like, slice or label
The row labels for the rows to extract.
col_labels : list-like, slice or label
The column labels for the columns to extract.
Returns
-------
PandasOnDaskDataframePartition
A new ``PandasOnDaskDataframePartition`` object.
"""
log = get_logger()
self._is_debug(log) and log.debug(f"ENTER::Partition.mask::{self._identity}")
new_obj = super().mask(row_labels, col_labels)
if isinstance(row_labels, slice) and isinstance(self._length_cache, Future):
if row_labels == slice(None):
# fast path - full axis take
new_obj._length_cache = self._length_cache
else:
new_obj._length_cache = DaskWrapper.deploy(
func=compute_sliced_len, f_args=(row_labels, self._length_cache)
)
if isinstance(col_labels, slice) and isinstance(self._width_cache, Future):
if col_labels == slice(None):
# fast path - full axis take
new_obj._width_cache = self._width_cache
else:
new_obj._width_cache = DaskWrapper.deploy(
func=compute_sliced_len, f_args=(col_labels, self._width_cache)
)
self._is_debug(log) and log.debug(f"EXIT::Partition.mask::{self._identity}")
return new_obj
def __copy__(self):
"""
Create a copy of this partition.
Returns
-------
PandasOnDaskDataframePartition
A copy of this partition.
"""
return self.__constructor__(
self._data,
length=self._length_cache,
width=self._width_cache,
ip=self._ip_cache,
call_queue=self.call_queue,
)
@classmethod
def put(cls, obj):
"""
        Put an object into distributed memory and wrap it with a partition object.
Parameters
----------
obj : any
An object to be put.
Returns
-------
PandasOnDaskDataframePartition
A new ``PandasOnDaskDataframePartition`` object.
"""
return cls(DaskWrapper.put(obj, hash=False), len(obj.index), len(obj.columns))
@classmethod
def preprocess_func(cls, func):
"""
Preprocess a function before an ``apply`` call.
Parameters
----------
func : callable
The function to preprocess.
Returns
-------
callable
An object that can be accepted by ``apply``.
"""
return DaskWrapper.put(func, hash=False, broadcast=True)
def length(self, materialize=True):
"""
Get the length of the object wrapped by this partition.
Parameters
----------
materialize : bool, default: True
Whether to forcibly materialize the result into an integer. If ``False``
was specified, may return a future of the result if it hasn't been
materialized yet.
Returns
-------
int or distributed.Future
The length of the object.
"""
if self._length_cache is None:
self._length_cache = self.apply(len)._data
if isinstance(self._length_cache, Future) and materialize:
self._length_cache = DaskWrapper.materialize(self._length_cache)
return self._length_cache
def width(self, materialize=True):
"""
Get the width of the object wrapped by the partition.
Parameters
----------
materialize : bool, default: True
Whether to forcibly materialize the result into an integer. If ``False``
was specified, may return a future of the result if it hasn't been
materialized yet.
Returns
-------
int or distributed.Future
The width of the object.
"""
if self._width_cache is None:
self._width_cache = self.apply(lambda df: len(df.columns))._data
if isinstance(self._width_cache, Future) and materialize:
self._width_cache = DaskWrapper.materialize(self._width_cache)
return self._width_cache
def ip(self):
"""
Get the node IP address of the object wrapped by this partition.
Returns
-------
str
IP address of the node that holds the data.
"""
if self._ip_cache is None:
self._ip_cache = self.apply(lambda df: df)._ip_cache
if isinstance(self._ip_cache, Future):
self._ip_cache = DaskWrapper.materialize(self._ip_cache)
return self._ip_cache
def apply_func(partition, func, *args, **kwargs):
"""
Execute a function on the partition in a worker process.
Parameters
----------
partition : pandas.DataFrame
A pandas DataFrame the function needs to be executed on.
func : callable
The function to perform.
*args : list
Positional arguments to pass to ``func``.
**kwargs : dict
Keyword arguments to pass to ``func``.
Returns
-------
pandas.DataFrame
The resulting pandas DataFrame.
str
The node IP address of the worker process.
Notes
-----
Directly passing a call queue entry (i.e. a list of [func, args, kwargs]) instead of
destructuring it causes a performance penalty.
"""
result = func(partition, *args, **kwargs)
return result, get_ip()
def apply_list_of_funcs(call_queue, partition):
"""
Execute all operations stored in the call queue on the partition in a worker process.
Parameters
----------
call_queue : list
A call queue of ``[func, args, kwargs]`` triples that needs to be executed on the partition.
partition : pandas.DataFrame
A pandas DataFrame the call queue needs to be executed on.
Returns
-------
pandas.DataFrame
The resulting pandas DataFrame.
str
The node IP address of the worker process.
"""
for func, f_args, f_kwargs in call_queue:
partition = func(partition, *f_args, **f_kwargs)
return partition, get_ip()
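# --- Editor's sketch (illustrative; not part of the original module) ---
# Demonstrates, with plain pandas, the [func, args, kwargs] call-queue
# convention that apply_list_of_funcs() above executes on a worker.
if __name__ == "__main__":
    import pandas
    frame = pandas.DataFrame({"a": [1, 2, 3]})
    queue = [
        (lambda df, n: df + n, (1,), {}),  # first queued operation
        (lambda df: df * 2, (), {}),       # second queued operation
    ]
    for fn, f_args, f_kwargs in queue:
        frame = fn(frame, *f_args, **f_kwargs)
    print(frame)  # column "a" becomes [4, 6, 8]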
|
a74a6f094135ff3896b4457e801ae1d2b4675e55
|
7f24023d365e013ec0924844c1a872edfb0c75b4
|
/tests/trac/test-trac-0027.py
|
1134fdc0fbff0e3b5b3006b2a1598e02c566e592
|
[
"Python-2.0",
"MIT",
"Apache-2.0"
] |
permissive
|
pabigot/pyxb
|
cd42c024607572c6363682d389e9296caf3f2857
|
5ee5ba54c9f702dc9c9efc2731ee547ecd4dae4a
|
refs/heads/next
| 2023-05-11T03:23:19.599756
| 2023-04-29T20:38:15
| 2023-04-29T20:45:13
| 20,547,850
| 130
| 63
|
Apache-2.0
| 2021-08-19T16:52:18
| 2014-06-06T01:49:03
|
Python
|
UTF-8
|
Python
| false
| false
| 7,270
|
py
|
test-trac-0027.py
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
# Undeclared XML namespace
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.utils.domutils
from xml.dom import Node
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:attributeGroup name="required">
<xs:attribute name="rattr" use="required" type="xs:int"/>
<xs:attribute name="rattr_fixed" type="xs:int" fixed="30" use="required"/>
</xs:attributeGroup>
<xs:attributeGroup name="optional">
<xs:attribute name="attr" type="xs:int"/>
<xs:attribute name="attr_def" type="xs:int" default="10"/>
<xs:attribute name="attr_fixed" type="xs:int" fixed="20"/>
</xs:attributeGroup>
<xs:complexType name="opt_struct">
<xs:attributeGroup ref="optional"/>
</xs:complexType>
<xs:complexType name="req_struct">
<xs:attributeGroup ref="required"/>
</xs:complexType>
<xs:element name="ireq_struct" type="req_struct"/>
<xs:element name="iopt_struct" type="opt_struct"/>
<xs:complexType name="opt_def">
<!-- This does have three attributes; it just changes one of the ones it inherits -->
<xs:complexContent>
<xs:restriction base="opt_struct">
<xs:attribute name="attr" type="xs:int" default="5"/>
</xs:restriction>
</xs:complexContent>
</xs:complexType>
<xs:element name="iopt_def" type="opt_def"/>
<xs:complexType name="opt_pro">
<xs:complexContent>
<xs:restriction base="opt_struct">
<xs:attribute name="attr" use="prohibited"/>
<xs:attribute name="attr_def" use="prohibited"/>
</xs:restriction>
</xs:complexContent>
</xs:complexType>
<xs:element name="iopt_pro" type="opt_pro"/>
<xs:complexType name="opt_rest">
<xs:complexContent>
<xs:restriction base="opt_struct">
<xs:attribute name="attr" type="xs:byte"/>
</xs:restriction>
</xs:complexContent>
</xs:complexType>
<xs:element name="iopt_rest" type="opt_rest"/>
<!-- TEST: Cannot put back an attribute that was removed.
<xs:complexType name="opt_pro_ext">
<xs:complexContent>
<xs:extension base="opt_pro">
<xs:attribute name="attr" type="xs:float"/>
</xs:extension>
</xs:complexContent>
</xs:complexType>
-->
</xs:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac0027 (unittest.TestCase):
def setRattr_fixed (self, instance, value):
instance.rattr_fixed = value
def setAttr_fixed (self, instance, value):
instance.attr_fixed = value
def setAttr (self, instance, value):
instance.attr = value
def testRequired (self):
self.assertEqual(2, len(req_struct._AttributeMap))
i = ireq_struct()
self.assertRaises(pyxb.MissingAttributeError, i.validateBinding)
self.assertTrue(i.rattr is None)
i.rattr = -4
self.assertEqual(-4, i.rattr)
self.assertTrue(i._AttributeMap['rattr'].provided(i))
self.assertRaises(pyxb.MissingAttributeError, i.validateBinding) # Should fail because rattr_fixed was not explicitly set
self.assertFalse(i._AttributeMap['rattr_fixed'].provided(i))
self.assertEqual(30, i.rattr_fixed)
self.assertRaises(pyxb.AttributeChangeError, self.setRattr_fixed, i, 41)
self.assertFalse(i._AttributeMap['rattr_fixed'].provided(i))
i.rattr_fixed = 30
self.assertTrue(i._AttributeMap['rattr_fixed'].provided(i))
self.assertEqual(30, i.rattr_fixed)
self.assertTrue(i.validateBinding())
self.assertRaises(pyxb.AttributeChangeError, self.setRattr_fixed, i, 41)
def testRequiredCTor (self):
i = ireq_struct(rattr=11, rattr_fixed=30)
self.assertTrue(i.validateBinding())
self.assertRaises(pyxb.AttributeChangeError, ireq_struct, rattr=11, rattr_fixed=31)
def testOptional (self):
self.assertEqual(3, len(opt_struct._AttributeMap))
i = iopt_struct()
self.assertTrue(i.attr is None)
self.assertEqual(i._AttributeMap['attr'].dataType(), xs.int)
self.assertFalse(i._AttributeMap['attr_def'].provided(i))
self.assertEqual(10, i.attr_def)
i.attr_def = 11
self.assertEqual(11, i.attr_def)
self.assertTrue(i._AttributeMap['attr_def'].provided(i))
self.assertFalse(i._AttributeMap['attr_fixed'].provided(i))
self.assertEqual(20, i.attr_fixed)
self.assertRaises(pyxb.AttributeChangeError, self.setAttr_fixed, i, 21)
self.assertFalse(i._AttributeMap['attr_fixed'].provided(i))
self.assertEqual(20, i.attr_fixed)
i.attr_fixed = 20
self.assertTrue(i._AttributeMap['attr_fixed'].provided(i))
self.assertEqual(20, i.attr_fixed)
i.attr = 1000
self.assertEqual(1000, i.attr)
def testOptionalCtor (self):
self.assertEqual(3, len(opt_struct._AttributeMap))
self.assertRaises(pyxb.AttributeChangeError, opt_struct, attr_fixed=21)
i = iopt_struct(attr=1, attr_def=2, attr_fixed=20)
self.assertTrue(i.validateBinding())
self.assertEqual(1, i.attr)
self.assertEqual(2, i.attr_def)
def testOptDef (self):
self.assertEqual(3, len(opt_def._AttributeMap))
self.assertNotEqual(opt_struct._AttributeMap['attr'], opt_def._AttributeMap['attr'])
self.assertEqual(opt_struct._AttributeMap['attr'].key(), opt_def._AttributeMap['attr'].key())
self.assertEqual(opt_struct._AttributeMap['attr_def'], opt_def._AttributeMap['attr_def'])
self.assertEqual(opt_struct._AttributeMap['attr_fixed'], opt_def._AttributeMap['attr_fixed'])
i = opt_def()
self.assertEqual(5, i.attr)
def testOptPro (self):
self.assertEqual(3, len(opt_pro._AttributeMap))
self.assertNotEqual(opt_struct._AttributeMap['attr'], opt_pro._AttributeMap['attr'])
self.assertTrue(opt_pro._AttributeMap['attr'].prohibited())
self.assertNotEqual(opt_struct._AttributeMap['attr_def'], opt_pro._AttributeMap['attr_def'])
self.assertTrue(opt_pro._AttributeMap['attr_def'].prohibited())
self.assertEqual(opt_struct._AttributeMap['attr_fixed'], opt_pro._AttributeMap['attr_fixed'])
i = opt_pro()
self.assertRaises(pyxb.ProhibitedAttributeError, lambda: i.attr)
def testOptProCtor (self):
self.assertRaises(pyxb.ProhibitedAttributeError, opt_pro, attr=1)
def testOptRest (self):
self.assertEqual(3, len(opt_rest._AttributeMap))
i = opt_rest()
self.assertEqual(i._AttributeMap['attr'].dataType(), xs.byte)
self.assertNotEqual(opt_struct._AttributeMap['attr'], opt_rest._AttributeMap['attr'])
self.assertEqual(opt_struct._AttributeMap['attr_def'], opt_rest._AttributeMap['attr_def'])
self.assertEqual(opt_struct._AttributeMap['attr_fixed'], opt_rest._AttributeMap['attr_fixed'])
self.assertRaises(pyxb.SimpleTypeValueError, self.setAttr, i, 1000)
if __name__ == '__main__':
unittest.main()
|
6189bb97050448ab80fc047ce61f21e0cb27af6e
|
ce7d8409dc7da020d82e479ae457f2b9d598b44d
|
/vel/util/module_util.py
|
e08e9eda20997756f4b45fe82d8cbae85a9f2d2e
|
[
"MIT"
] |
permissive
|
MillionIntegrals/vel
|
42f9aa241b0e07f51363e97630e3a0b4750f1e5e
|
f3ce7da64362ad207f40f2c0d58d9300a25df3e8
|
refs/heads/master
| 2022-12-24T12:58:03.377520
| 2019-10-24T19:04:53
| 2019-10-24T19:04:53
| 132,808,324
| 280
| 37
|
MIT
| 2022-12-08T04:50:05
| 2018-05-09T20:16:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,357
|
py
|
module_util.py
|
"""
Code based on
https://github.com/fastai/fastai/blob/master/fastai/model.py
"""
import torch.nn as nn
import collections.abc  # the Sequence ABC lives here; bare collections.Sequence was removed in Python 3.10
import itertools as it
def is_listy(x):
return isinstance(x, (list, tuple))
def model_children(module):
return module if isinstance(module, (list, tuple)) else list(module.children())
def apply_leaf(module, f):
if isinstance(module, nn.Module):
f(module)
children = model_children(module)
for l in children:
apply_leaf(l, f)
def module_apply_broadcast(m, broadcast_fn, args, kwargs):
if hasattr(m, broadcast_fn):
getattr(m, broadcast_fn)(*args, **kwargs)
def module_broadcast(m, broadcast_fn, *args, **kwargs):
""" Call given function in all submodules with given parameters """
apply_leaf(m, lambda x: module_apply_broadcast(x, broadcast_fn, args, kwargs))
def set_train_mode(module):
# Only fix ones which we don't want to "train"
if hasattr(module, 'running_mean') and (getattr(module, 'bn_freeze', False) or not getattr(module, 'trainable', True)):
module.eval()
elif getattr(module, 'drop_freeze', False) and hasattr(module, 'p') and ('drop' in type(module).__name__.lower()):
module.eval()
def set_trainable_attr(module, trainable):
module.trainable = trainable
def set_requires_gradient(module, trainable):
for p in module.parameters():
p.requires_grad = trainable
def freeze_layer(module):
apply_leaf(module, lambda x: set_trainable_attr(x, trainable=False))
set_requires_gradient(module, trainable=False)
def unfreeze_layer(module):
apply_leaf(module, lambda x: set_trainable_attr(x, trainable=True))
set_requires_gradient(module, trainable=True)
def trainable_params_(m):
""" Returns a list of trainable parameters in the model m. (i.e., those that require gradients.) """
    if isinstance(m, collections.abc.Sequence):
return [p for p in m if p.requires_grad]
else:
return [p for p in m.parameters() if p.requires_grad]
def chain_params(p):
if is_listy(p):
return list(it.chain(*[trainable_params_(o) for o in p]))
return trainable_params_(p)
def to_parameter_groups(layer_groups):
""" Convert from list of layer groups into list of parameter settings for an optimizer """
return [{'params': chain_params(x)} for x in layer_groups]
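# --- Editor's example (illustrative; not part of the original module) ---
# Freezing a layer both tags its submodules as non-trainable and disables
# gradients, so trainable_params_ then returns an empty list.
if __name__ == "__main__":
    layer = nn.Linear(4, 2)
    freeze_layer(layer)
    assert trainable_params_(layer) == []
    unfreeze_layer(layer)
    assert all(p.requires_grad for p in layer.parameters())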
|
fe7d27b478dac8d747cd72d7ee400462eb1ca1ff
|
1d535b00f7517b8a9af8264f6c2f3fdee4652573
|
/tests/test_parse_exceptions.py
|
2dd53709f4edfe967d3393d390049077f738bff1
|
[
"Apache-2.0"
] |
permissive
|
rshk/python-pcapng
|
fdb82b012e75951734d273cade1989b933e0466d
|
33e722f6d5cc41154fda56d8dff62e2970078fd5
|
refs/heads/master
| 2023-04-27T11:20:46.801430
| 2023-04-16T11:51:45
| 2023-04-16T11:51:45
| 19,257,118
| 104
| 52
|
Apache-2.0
| 2023-04-13T07:36:39
| 2014-04-28T23:13:02
|
Python
|
UTF-8
|
Python
| false
| false
| 425
|
py
|
test_parse_exceptions.py
|
"""
Tests for errors during parsing
"""
import pytest
from pcapng.blocks import SectionHeader
def test_get_nonexistent_block_attribute():
shb = SectionHeader(
raw=b"\x00\x01\x00\x00" b"\xff\xff\xff\xff\xff\xff\xff\xff" b"\x00\x00\x00\x00",
endianness=">",
)
assert shb.version == (1, 0) # check that parsing was successful
with pytest.raises(AttributeError):
shb.does_not_exist
|
3d1cfb4ede419b999af1f4f452df8c1f965e76cc
|
6e56e6b4bb562cd1db6e38b5f089b863b77e087f
|
/dragonfly/utils/__init__.py
|
f48c488cac047ca3783cea28d4829966221eceb2
|
[
"MIT"
] |
permissive
|
dragonfly/dragonfly
|
aa5f3a64bfe7800c44c32e58b487b5733c40035d
|
3eef7d30bcc2e56f2221a624bd8ec7f933f81e40
|
refs/heads/master
| 2023-08-06T08:34:29.317771
| 2022-10-01T22:21:50
| 2022-10-01T22:21:50
| 130,418,835
| 868
| 374
|
MIT
| 2023-06-19T20:23:17
| 2018-04-20T22:19:50
|
Python
|
UTF-8
|
Python
| false
| false
| 81
|
py
|
__init__.py
|
"""
Some general utility functions we will need.
-- kandasamy@cs.cmu.edu
"""
|
540352f757bffa9825b125148cb7755c8f1c5581
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/mindspore/python/mindspore/boost/dim_reduce.py
|
417b96786a19e570a65a4db7d88b9d3ee44656e5
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 13,160
|
py
|
dim_reduce.py
|
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""dim_reduce"""
from __future__ import absolute_import
import math
import numpy as np
from mindspore.nn.cell import Cell
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore.common.parameter import Parameter, ParameterTuple
from mindspore.common import dtype as mstype
__all__ = ["DimReduce"]
_scale_grad = C.MultitypeFuncGraph("_scale_grad")
@_scale_grad.register("Tensor", "Tensor")
def _scale_grad_process(scale, grad):
grad = F.cast(grad, mstype.float32)
grad = P.Div()(grad, scale)
return grad
_save_weight = C.MultitypeFuncGraph("_save_weight")
@_save_weight.register("Tensor", "Tensor")
def _save_weight_process(parameter, new_parameter):
P.Assign()(parameter, new_parameter)
return parameter
_pca_projection = C.MultitypeFuncGraph("_pca_projection")
@_pca_projection.register("Tensor", "Tensor")
def _pca_projection_process(pca_mat, grad):
grad_k = P.MatMul()(pca_mat, F.reshape(grad, (-1, 1)))
return grad_k
_pca_back_projection = C.MultitypeFuncGraph("_pca_back_projection")
@_pca_back_projection.register("Tensor", "Tensor", "Tensor")
def _pca_back_projection_process(grad_k, pca_mat, grad):
grad_proj = P.MatMul()(F.transpose(pca_mat, (1, 0)), grad_k)
grad_proj_reshape = F.reshape(grad_proj, F.shape(grad))
return grad_proj_reshape
_update_grad_res_momentum = C.MultitypeFuncGraph("_update_grad_res_momentum")
@_update_grad_res_momentum.register("Float32", "Float32", "Tensor", "Tensor", "Tensor")
def _update_grad_res_momentum_process(gamma, alpha, grad_res_momentum, grad, grad_proj):
grad_res_momentum_new = gamma * grad_res_momentum + grad - grad_proj
P.Assign()(grad_res_momentum, grad_res_momentum_new)
res = alpha * grad_res_momentum_new
return res
_get_delta_weight = C.MultitypeFuncGraph("_get_delta_weight")
@_get_delta_weight.register("Tensor", "Tensor", "Tensor")
def _get_delta_weight_process(rho, dn, grad_res_momentum):
delta_weight = grad_res_momentum - rho * dn
return delta_weight
class DimReduce(Cell):
r"""
    Dimension-reduce training is a novel algorithm for accelerating the
    convergence of deep learning models.
.. math::
\begin{align}
grad\_k &= pca\_mat \cdot grad\\
dk &= - bk \cdot grad\_k\\
sk &= rho ^ m \cdot dk\\
delta\_loss &= sigma \cdot grad\_k.T \cdot sk
\end{align}
Here:
- pca_mat (array): Shape :math:`(k*n)`, k is part of n_components, n is the size of weight.
- bk (array): Shape :math:`(k*k)`, is the symmetric positive definite matrix in Quasi-Newton method.
    We need to find the m that satisfies:
.. math::
new\_loss < old\_loss + delta\_loss
Then, get delta_grad to update the weights for model:
.. math::
\begin{align}
grad\_k\_proj &= pca\_mat.T \cdot grad\_k\\
new\_grad\_momentum &= gamma \cdot old\_grad\_momentum + grad - grad\_k\_proj\\
delta\_grad &= alpha \cdot new\_grad\_momentum - pca\_mat.T \cdot sk
\end{align}
Args:
network (Cell): The training network. The network only supports single output.
optimizer (Union[Cell]): Optimizer for updating the weights.
weight (Tuple(Parameter)): Tuple of parameters.
        pca_mat_local (numpy.ndarray): The local slice of the PCA projection
            matrix, k*n, where k is this rank's share of n_components and n is
            the size of the weight.
n_components (int): PCA.components.
rho (float): Coefficient.
gamma (float): Coefficient.
alpha (float): Coefficient.
sigma (float): Coefficient.
rank (int): Rank number.
rank_size (int): Rank size.
Inputs:
- **loss** (Tensor) - Tensor with shape :math:`()`.
- **old_grad** (Tuple(Tensor)) - Tuple of gradient tensors.
- **weight** (Tuple(Tensor)) - Tuple of parameters.
- **weight_clone** (Tuple(Tensor)) - clone of weight
- **\*inputs** (Tuple(Tensor)) - Tuple of input tensors with shape :math:`(N, \ldots)`.
Outputs:
- **loss** (Tensor) - Tensor with shape :math:`()`.
"""
def __init__(self, network, optimizer, weight, pca_mat_local, n_components, rho, gamma, alpha, sigma, rank,
rank_size):
super(DimReduce, self).__init__()
self.network = network
self.optimizer = optimizer
self.rank = rank
self.rank_size = rank_size
self.gamma = gamma
self.alpha = alpha
self.sigma = sigma
self.float_type = mstype.float32
self._set_rho_list(rho)
self._set_local_pca_mat(pca_mat_local, n_components, weight)
self._set_init_parameter(weight)
self.hyper_map = C.HyperMap()
self.concat = P.Concat()
self.matmul = P.MatMul()
self.mul = P.Mul()
self.add = P.Add()
def construct(self, loss, old_grad, loss_scale, weight, weight_clone, *inputs):
gk, old_loss, gk_local = self._generate_gk(weight, loss, old_grad, loss_scale)
_save_weight(self.gk_last_back, self.gk_last)
_save_weight(self.bk_back, self.bk)
dk = self._apply_quasi_newton_update(gk)
if self.dk_pad_flag:
dk_pad = self.concat((dk, self.dk_pad_part))
else:
dk_pad = dk
dk_local = dk_pad[self.start_index: self.end_index, :]
dn_local = self.hyper_map(F.partial(_pca_back_projection, dk_local), self.pca_list_local, old_grad)
grad_proj_local = self.hyper_map(F.partial(_pca_back_projection, gk_local), self.pca_list_local, old_grad)
dn = self.dn_init if self.rank_size > 1 else dn_local
grad_proj = self.grad_proj_init if self.rank_size > 1 else grad_proj_local
if self.rank_size > 1:
for broadcast in self.broadcast_list:
dn_part = broadcast(dn_local)
dn = self.hyper_map(self.add, dn, dn_part)
grad_proj_part = broadcast(grad_proj_local)
grad_proj = self.hyper_map(self.add, grad_proj, grad_proj_part)
rho, find = self._line_search(gk, dk, dn, old_loss, weight, weight_clone, *inputs)
if not find:
_save_weight(self.gk_last, self.gk_last_back)
_save_weight(self.bk, self.bk_back)
clone = self._res_loss(old_grad, grad_proj, weight, weight_clone, rho, dn)
return F.depend(loss, clone)
def _set_rho_list(self, rho):
"""set rho list info."""
self.max_search_time = 2
self.rho_list = []
for i in range(self.max_search_time):
self.rho_list.append(Tensor(np.power(rho, i), dtype=self.float_type))
self.rho_list.append(Tensor(0, dtype=self.float_type))
def _set_local_pca_mat(self, pca_mat_local, n_components, parameter_tuple):
"""set pca info."""
self.n_components = n_components
        local_dim = math.ceil(self.n_components / self.rank_size)  # fixed: true division; ceil over an already-floored // result can never round up
self.start_index = self.rank * local_dim
self.end_index = (self.rank + 1) * local_dim
start = 0
self.pca_list_local = ()
for param in parameter_tuple:
size = np.shape(param.asnumpy().reshape((-1, 1)))[0]
self.pca_list_local += (Tensor(pca_mat_local[:, start:start + size], dtype=self.float_type),)
start += size
self.dk_pad_flag = False
pad_num = self.rank_size * local_dim - self.n_components
if pad_num:
self.dk_pad_flag = True
self.dk_pad_part = Tensor(np.zeros([pad_num, 1]), dtype=self.float_type)
if self.rank_size > 1:
self.broadcast_list = []
for i in range(self.rank_size):
broadcast = P.Broadcast(i)
self.broadcast_list.append(broadcast)
self.allreduce = P.AllReduce()
self.allgather = P.AllGather()
def _set_init_parameter(self, parameter_tuple):
"""init parameters."""
self.true_flag = Tensor(True)
self.false_flag = Tensor(False)
self.epsilon = np.power(10.0, -20)
self.gk_last = Parameter(Tensor(np.zeros([self.n_components, 1]), dtype=self.float_type), name="gk_last")
self.gk_last_init = Parameter(Tensor(False), name="gk_last_init")
self.bk = Parameter(Tensor(np.eye(self.n_components), dtype=self.float_type), name="bk")
self.sk = Parameter(Tensor(np.zeros([self.n_components, 1]), dtype=self.float_type), name="sk")
self.eye = Tensor(np.eye(self.n_components), dtype=self.float_type)
self.grad_res_momentum = ParameterTuple(parameter_tuple).clone(prefix="grad_res_momentum", init="zeros")
self.gk_last_back = Parameter(Tensor(np.zeros([self.n_components, 1]), dtype=self.float_type),
name="gk_last_back")
self.bk_back = Parameter(Tensor(np.eye(self.n_components), dtype=self.float_type), name="bk_back")
self.grad_proj_init = ParameterTuple(parameter_tuple).clone(prefix="grad_proj_init", init="zeros")
self.dn_init = ParameterTuple(parameter_tuple).clone(prefix="dn_init", init="zeros")
def _res_loss(self, old_grad, grad_proj, weight, weight_clone, rho, dn):
"""update loss"""
update_grad = self.hyper_map(F.partial(_update_grad_res_momentum, self.gamma, self.alpha),
self.grad_res_momentum, old_grad, grad_proj)
delta_weight = self.hyper_map(F.partial(_get_delta_weight, rho), dn, update_grad)
update = self.optimizer(delta_weight)
weight = F.depend(weight, update)
clone = self.hyper_map(_save_weight, weight_clone, weight)
return clone
def _generate_gk(self, weight, loss, old_grad, loss_scale):
"""generate gk"""
weight = F.depend(weight, loss)
old_grad = F.depend(old_grad, weight)
old_grad = self.hyper_map(F.partial(_scale_grad, loss_scale), old_grad)
old_loss = self.allreduce(loss) // self.rank_size if self.rank_size > 1 else loss
gk_local = self.hyper_map(_pca_projection, self.pca_list_local, old_grad)
gk_local = F.addn(gk_local)
gk_pad = self.allgather(gk_local) if self.rank_size > 1 else gk_local
gk_pad = F.reshape(gk_pad, (-1, 1))
gk = gk_pad[0:self.n_components, :]
return gk, old_loss, gk_local
def _line_search(self, gk, dk, dn, old_loss, weight, weight_clone, *inputs):
"""line search rho."""
res = self.rho_list[-1]
find = self.false_flag
for i in range(self.max_search_time):
find = self._find_rho(gk, dk, dn, old_loss, weight, weight_clone, self.rho_list[i], *inputs)
if find:
res = self.rho_list[i]
break
return res, find
def _find_rho(self, gk, dk, dn, old_loss, weight, weight_clone, rho, *inputs):
"""search rho."""
res = self.false_flag
sn = self.hyper_map(F.partial(self.mul, -1 * rho), dn)
sn = F.depend(sn, old_loss)
update = self.optimizer(sn)
new_loss = F.depend(self.network(*inputs), update)
if self.rank_size > 1:
new_loss = self.allreduce(new_loss) // self.rank_size
old_loss_delta = old_loss + self.sigma * rho * F.squeeze(self.matmul(F.transpose(gk, (1, 0)), dk))
if old_loss_delta > new_loss:
_save_weight(self.sk, rho * dk)
res = self.true_flag
weight_clone = F.depend(weight_clone, old_loss_delta)
restore = self.hyper_map(_save_weight, weight, weight_clone)
res = F.depend(res, restore)
return res
def _apply_quasi_newton_update(self, gk):
"""apply quasi_newton update."""
if self.gk_last_init:
yk = gk - self.gk_last
g = self.matmul(F.transpose(yk, (1, 0)), self.sk)
g = F.squeeze(g)
if g > self.epsilon:
pk = 1. / g
t1 = self.eye - self.matmul(pk * yk, F.transpose(self.sk, (1, 0)))
new_bk = self.matmul(self.matmul(F.transpose(t1, (1, 0)), self.bk), t1) + \
self.matmul(pk * self.sk, F.transpose(self.sk, (1, 0)))
_save_weight(self.bk, new_bk)
else:
_save_weight(self.gk_last_init, self.true_flag)
_save_weight(self.gk_last, gk)
dk = -1 * self.matmul(self.bk, gk)
return dk
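# --- Editor's sketch (illustrative; not part of the original file) ---
# The BFGS-style update inside _apply_quasi_newton_update(), rewritten with
# plain numpy: bk approximates the inverse Hessian, sk is the last step and
# yk the gradient difference; dk = -bk @ gk is the search direction.
if __name__ == "__main__":
    k = 3
    bk = np.eye(k)
    sk = np.random.randn(k, 1)
    yk = np.random.randn(k, 1)
    g = float(yk.T @ sk)
    if g > 1e-20:
        pk = 1.0 / g
        t1 = np.eye(k) - pk * (yk @ sk.T)
        bk = t1.T @ bk @ t1 + pk * (sk @ sk.T)
    gk = np.random.randn(k, 1)
    dk = -bk @ gk
    print(dk.shape)  # (3, 1)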
|
dca099170e47c101ce48cfd665ed25b7a16dbf6e
|
0577a46d8d28e1fd8636893bbdd2b18270bb8eb8
|
/chromium/chrome/browser/share/core/resources/gen_share_targets_proto.py
|
8008c40e942bb71fe45d2e38a6e410d9dfc67f9b
|
[
"BSD-3-Clause"
] |
permissive
|
ric2b/Vivaldi-browser
|
388a328b4cb838a4c3822357a5529642f86316a5
|
87244f4ee50062e59667bf8b9ca4d5291b6818d7
|
refs/heads/master
| 2022-12-21T04:44:13.804535
| 2022-12-17T16:30:35
| 2022-12-17T16:30:35
| 86,637,416
| 166
| 41
|
BSD-3-Clause
| 2021-03-31T18:49:30
| 2017-03-29T23:09:05
| null |
UTF-8
|
Python
| false
| false
| 3,400
|
py
|
gen_share_targets_proto.py
|
#!/usr/bin/env python3
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Convert the ASCII share_targets.asciipb proto into a binary resource.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
# Import the binary proto generator. Walks up to the root of the source tree,
# which is six directories above, and then finds the protobufs directory from
# there.
proto_generator_path = os.path.normpath(
os.path.join(os.path.abspath(__file__),
*[os.path.pardir] * 6 + ['components/resources/protobufs']))
sys.path.insert(0, proto_generator_path)
from binary_proto_generator import BinaryProtoGenerator
def ParseInputPb(input_pb):
""" Return a protobuf based on input pb """
new_pb = share_target_pb2.MapLocaleTargets()
temp_pb = share_target_pb2.TargetLocalesForParsing()
temp_pb.CopyFrom(input_pb)
new_pb.version_id = temp_pb.version_id
all_targets_pb = share_target_pb2.TmpShareTargetMap()
for s in temp_pb.targets:
all_targets_pb.all_targets[s.nickname].nickname = s.nickname
all_targets_pb.all_targets[s.nickname].url = s.url
all_targets_pb.all_targets[s.nickname].icon = s.icon
all_targets_pb.all_targets[s.nickname].icon_2x = s.icon_2x
all_targets_pb.all_targets[s.nickname].icon_3x = s.icon_3x
for s in temp_pb.locale_mapping:
tmp_share_targets = share_target_pb2.ShareTargets()
for target in s.targets:
added = tmp_share_targets.targets.add()
added.nickname = all_targets_pb.all_targets[target].nickname
added.url = all_targets_pb.all_targets[target].url
added.icon = all_targets_pb.all_targets[target].icon
added.icon_2x = all_targets_pb.all_targets[target].icon_2x
added.icon_3x = all_targets_pb.all_targets[target].icon_3x
for locale in s.locale_keys:
for target in tmp_share_targets.targets:
added = new_pb.map_target_locale_map[locale].targets.add()
added.nickname = target.nickname
added.url = target.url
added.icon = target.icon
added.icon_2x = target.icon_2x
added.icon_3x = target.icon_3x
return new_pb
def ParsePbAndWrite(input_pb, outfile):
parsed_pb = ParseInputPb(input_pb)
binary_pb_str = parsed_pb.SerializeToString()
    with open(outfile, 'wb') as f:
        f.write(binary_pb_str)
class ShareTargetProtoGenerator(BinaryProtoGenerator):
def ImportProtoModule(self):
import share_target_pb2
globals()['share_target_pb2'] = share_target_pb2
def EmptyProtoInstance(self):
return share_target_pb2.TargetLocalesForParsing()
def ValidatePb(self, opts, pb):
""" Validate the basic values of the protobuf."""
assert pb.version_id > 0
assert len(pb.locale_mapping) > 1
assert len(pb.targets) > 1
def ProcessPb(self, opts, pb):
""" Generate one or more binary protos using the parsed proto. """
outfile = os.path.join(opts.outdir, opts.outbasename)
ParsePbAndWrite(pb, outfile)
def VerifyArgs(self, opts):
return True
def main():
return ShareTargetProtoGenerator().Run()
if __name__ == '__main__':
sys.exit(main())
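# --- Illustrative sketch (not part of the original file): the same
# nickname-to-locale expansion that ParseInputPb performs, using plain dicts
# instead of protobufs; all names here are hypothetical.
def expand_locales(all_targets, locale_mapping):
    """all_targets: {nickname: target dict}; locale_mapping: [(locales, nicknames)]."""
    per_locale = {}
    for locales, nicknames in locale_mapping:
        resolved = [all_targets[n] for n in nicknames]  # dereference nicknames once
        for locale in locales:
            # each locale receives its own copy of the resolved targets
            per_locale[locale] = [dict(t) for t in resolved]
    return per_locale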
|
9792546d05c8a0a616f88105068460464ee96b09
|
edac021795dddd5b029b5e4e80fff03cbb4498ff
|
/convert_y.py
|
3edb54f6978b623b37311a1748b6edd9910fd360
|
[
"MIT"
] |
permissive
|
jiny2001/dcscn-super-resolution
|
f1534a926b847864b4a7f5af29be17f2e41ad693
|
d382bc30d41c94ac72f8cb148d61cc4864b3b380
|
refs/heads/master
| 2023-05-31T15:24:50.893444
| 2023-04-06T21:18:09
| 2023-04-06T21:18:09
| 94,180,676
| 737
| 268
|
MIT
| 2020-09-26T08:01:59
| 2017-06-13T07:01:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
convert_y.py
|
"""
Paper: "Fast and Accurate Image Super Resolution by Deep CNN with Skip Connection and Network in Network"
Author: Jin Yamanaka
Github: https://github.com/jiny2001/dcscn-image-super-resolution
Convert RGB(A)-(PNG or Jpeg) Image to Y-BMP images
Put your images under data/[your dataset name]/ and specify [your dataset name] for --dataset.
"""
import os
import tensorflow.compat.v1 as tf
from helper import args, utilty as util
FLAGS = args.get()
def main(not_parsed_args):
if len(not_parsed_args) > 1:
print("Unknown args:%s" % not_parsed_args)
exit()
print("Building Y channel data...")
training_filenames = util.get_files_in_directory(FLAGS.data_dir + "/" + FLAGS.dataset + "/")
target_dir = FLAGS.data_dir + "/" + FLAGS.dataset + "_y/"
util.make_dir(target_dir)
for file_path in training_filenames:
org_image = util.load_image(file_path)
if org_image.shape[2] == 3:
org_image = util.convert_rgb_to_y(org_image)
filename = os.path.basename(file_path)
filename, extension = os.path.splitext(filename)
new_filename = target_dir + filename
util.save_image(new_filename + ".bmp", org_image)
if __name__ == '__main__':
tf.app.run()
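# --- Illustrative sketch (not part of the original file): one common
# ITU-R BT.601 luma formula that a helper like util.convert_rgb_to_y may
# implement; the exact coefficients the repo uses are an assumption.
import numpy as np

def rgb_to_y(image):
    """image: H x W x 3 RGB array -> H x W x 1 luma (Y) array."""
    r, g, b = image[..., 0], image[..., 1], image[..., 2]
    y = 0.299 * r + 0.587 * g + 0.114 * b  # BT.601 luma weights
    return y[..., np.newaxis]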
|
4b283c3a2af87067bd6fa8a34ebc65dd230d2329
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/nxos/tests/ShowIpv6MrouteSummary/cli/equal/golden_output_expected.py
|
96416cc8712ec910f27d6bf3a2bf61690cd6e9aa
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,470
|
py
|
golden_output_expected.py
|
expected_output = {
'vrf': {
'default': {
'address_family': {
'ipv6': {
'count_multicast_starg': 0,
'count_multicast_sg': 0,
'count_multicast_total': 13,
'count_multicast_starg_prefix': 13,
'group_count': 0,
'avg_source_per_group': 0.0,
'groups': {
'ff32::/32': {
'source_count': 0,
'source': {
'(*,G)': {
'packets': 0,
'bytes': 0,
'aps': 0,
'pps': 0,
'bitrate': 0.000,
'bitrate_unit': 'bps',
'oifs': 0,
}
}
},
'ff33::/32': {
'source_count': 0,
'source': {
'(*,G)': {
'packets': 0,
'bytes': 0,
'aps': 0,
'pps': 0,
'bitrate': 0.000,
'bitrate_unit': 'bps',
'oifs': 0,
},
}
}
}
}
}
},
'vxlan-1007': {
'address_family': {
'ipv6': {
'count_multicast_starg': 9,
'count_multicast_sg': 2,
'count_multicast_total': 24,
'count_multicast_starg_prefix': 13,
'group_count': 11,
'avg_source_per_group': 0.1,
'groups': {
'ff33:0:0:1197::1/128': {
'source_count': 1,
'source': {
'2001:180:1:57::1181': {
'packets': 968,
'bytes': 49478,
'aps': 51,
'pps': 0,
'bitrate': 0.000,
'bitrate_unit': 'bps',
'oifs': 1,
}
}
},
'ff33:0:0:11d7::1/128': {
'source_count': 1,
'source': {
'2001:1:1:57::1141': {
'packets': 1027,
'bytes': 52377,
'aps': 51,
'pps': 0,
'bitrate': 0.000,
'bitrate_unit': 'bps',
'oifs': 1,
},
},
}
}
}
}
}
}
}
|
0e660fee93664fc7c6ebd5cfd2357ec3a4930458
|
7653ddbbc2256fae9cc62251f0241d0e9696df7d
|
/pyshtools/legendre/legendre_functions.py
|
b6dd57df266a14680e8093744c3899a9782228d1
|
[
"BSD-3-Clause"
] |
permissive
|
SHTOOLS/SHTOOLS
|
c3415b38da290805ecdfd59699587e5ac5233cc8
|
93e77dcc6b36b2363f07d79d07ec47d86e6cba65
|
refs/heads/master
| 2023-08-31T01:35:49.211882
| 2023-08-28T10:50:08
| 2023-08-28T10:50:08
| 24,725,612
| 315
| 117
|
BSD-3-Clause
| 2023-08-28T10:50:10
| 2014-10-02T15:53:36
|
Python
|
UTF-8
|
Python
| false
| false
| 10,521
|
py
|
legendre_functions.py
|
"""
Convenience functions for computing the associated Legendre functions.
legendre Compute all the associated Legendre functions up to a maximum
degree and order.
legendre_lm Compute the associated Legendre function for specific
degrees l and orders m.
"""
import numpy as _np
import warnings as _warnings
from . import PlmBar as _PlmBar
from . import PlmON as _PlmON
from . import PlmSchmidt as _PlmSchmidt
from . import PLegendreA as _PLegendreA
def legendre(lmax, z, normalization='4pi', csphase=1, cnorm=0, packed=False):
"""
Compute all the associated Legendre functions up to a maximum degree and
order.
Usage
-----
plm = legendre (lmax, z, [normalization, csphase, cnorm, packed])
Returns
-------
plm : float, dimension (lmax+1, lmax+1) or ((lmax+1)*(lmax+2)/2)
An array of associated Legendre functions, plm[l, m], where l and m
are the degree and order, respectively. If packed is True, the array
is 1-dimensional with the index corresponding to l*(l+1)/2+m.
Parameters
----------
lmax : integer
The maximum degree of the associated Legendre functions to be computed.
z : float
The argument of the associated Legendre functions.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for use with geodesy 4pi
normalized, orthonormalized, Schmidt semi-normalized, or unnormalized
spherical harmonic functions, respectively.
csphase : integer, optional, default = 1
If 1 (default), the Condon-Shortley phase will be excluded. If -1, the
Condon-Shortley phase of (-1)^m will be appended to the associated
Legendre functions.
cnorm : integer, optional, default = 0
If 1, the complex normalization of the associated Legendre functions
will be used. The default is to use the real normalization.
packed : bool, optional, default = False
If True, return a 1-dimensional packed array with the index
corresponding to l*(l+1)/2+m, where l and m are respectively the
degree and order.
Notes
-----
legendre will calculate all of the associated Legendre functions up to
degree lmax for a given argument. The Legendre functions are used typically
as a part of the spherical harmonic functions, and three parameters
determine how they are defined. normalization can be either '4pi'
(default), 'ortho', 'schmidt', or 'unnorm' for use with 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized spherical
harmonic functions, respectively. csphase determines whether to include
or exclude (default) the Condon-Shortley phase factor. cnorm determines
whether to normalize the Legendre functions for use with real (default)
or complex spherical harmonic functions.
By default, the routine will return a 2-dimensional array, p[l, m]. If the
optional parameter packed is set to True, the output will instead be a
1-dimensional array where the indices correspond to l*(l+1)/2+m. The
Legendre functions are calculated using the standard three-term recursion
formula, and in order to prevent overflows, the scaling approach of Holmes
and Featherstone (2002) is utilized. The resulting functions are accurate
to about degree 2800. See Wieczorek and Meschede (2018) for exact
definitions on how the Legendre functions are defined.
References
----------
Holmes, S. A., and W. E. Featherstone, A unified approach to the Clenshaw
summation and the recursive computation of very high degree and order
normalised associated Legendre functions, J. Geodesy, 76, 279-299,
doi:10.1007/s00190-002-0216-2, 2002.
Wieczorek, M. A., and M. Meschede. SHTools — Tools for working with
spherical harmonics, Geochem., Geophys., Geosyst., 19, 2574-2592,
doi:10.1029/2018GC007529, 2018.
"""
if lmax < 0:
raise ValueError(
"lmax must be greater or equal to 0. Input value was {:s}."
.format(repr(lmax))
)
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be either 1 or -1. Input value was {:s}."
.format(repr(csphase))
)
if cnorm != 0 and cnorm != 1:
raise ValueError(
"cnorm must be either 0 or 1. Input value was {:s}."
.format(repr(cnorm))
)
if normalization.lower() == 'unnorm' and lmax > 85:
_warnings.warn("Calculations using unnormalized coefficients " +
"are stable only for degrees less than or equal " +
"to 85. lmax for the coefficients will be set to " +
"85. Input value was {:d}.".format(lmax),
category=RuntimeWarning)
lmax = 85
if normalization == '4pi':
p = _PlmBar(lmax, z, csphase=csphase, cnorm=cnorm)
elif normalization == 'ortho':
p = _PlmON(lmax, z, csphase=csphase, cnorm=cnorm)
elif normalization == 'schmidt':
p = _PlmSchmidt(lmax, z, csphase=csphase, cnorm=cnorm)
elif normalization == 'unnorm':
p = _PLegendreA(lmax, z, csphase=csphase)
if packed is True:
return p
else:
plm = _np.zeros((lmax+1, lmax+1))
for l in range(lmax+1):
for m in range(l+1):
plm[l, m] = p[(l*(l+1))//2+m]
return plm
@_np.vectorize
def legendre_lm(l, m, z, normalization='4pi', csphase=1, cnorm=0):
"""
Compute the associated Legendre function for specific degrees and orders.
Usage
-----
plm = legendre_lm (l, m, z, [normalization, csphase, cnorm])
Returns
-------
plm : float, ndarray
The associated Legendre functions for degree l and order m.
Parameters
----------
l : integer, array_like
The spherical harmonic degree.
m : integer, array_like
The spherical harmonic order.
z : float, array_like
The argument of the associated Legendre functions.
normalization : str, array_like, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for use with geodesy 4pi
normalized, orthonormalized, Schmidt semi-normalized, or unnormalized
spherical harmonic functions, respectively.
csphase : integer, array_like, optional, default = 1
If 1 (default), the Condon-Shortley phase will be excluded. If -1, the
Condon-Shortley phase of (-1)^m will be appended to the associated
Legendre functions.
cnorm : integer, array_like, optional, default = 0
If 1, the complex normalization of the associated Legendre functions
will be used. The default is to use the real normalization.
Notes
-----
legendre_lm will calculate the associated Legendre function for specific
degrees l and orders m. The Legendre functions are used typically as a part
of the spherical harmonic functions, and three parameters determine how
they are defined. normalization can be either '4pi' (default), 'ortho',
'schmidt', or 'unnorm' for use with 4pi normalized, orthonormalized,
Schmidt semi-normalized, or unnormalized spherical harmonic functions,
respectively. csphase determines whether to include or exclude (default)
the Condon-Shortley phase factor. cnorm determines whether to normalize
the Legendre functions for use with real (default) or complex spherical
harmonic functions.
The Legendre functions are calculated using the standard three-term
recursion formula, and in order to prevent overflows, the scaling approach
of Holmes and Featherstone (2002) is utilized. The resulting functions are
accurate to about degree 2800. See Wieczorek and Meschede (2018) for exact
definitions on how the Legendre functions are defined.
References
----------
Holmes, S. A., and W. E. Featherstone, A unified approach to the Clenshaw
summation and the recursive computation of very high degree and order
normalised associated Legendre functions, J. Geodesy, 76, 279-299,
doi:10.1007/s00190-002-0216-2, 2002.
Wieczorek, M. A., and M. Meschede. SHTools — Tools for working with
spherical harmonics, Geochem., Geophys., Geosyst., 19, 2574-2592,
doi:10.1029/2018GC007529, 2018.
"""
if l < 0:
raise ValueError(
"The degree l must be greater or equal to 0. Input value was {:s}."
.format(repr(l))
)
if m < 0:
raise ValueError(
"The order m must be greater or equal to 0. Input value was {:s}."
.format(repr(m))
)
if m > l:
raise ValueError(
"The order m must be less than or equal to the degree l. " +
"Input values were l={:s} and m={:s}.".format(repr(l), repr(m))
)
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be either 1 or -1. Input value was {:s}."
.format(repr(csphase))
)
if cnorm != 0 and cnorm != 1:
raise ValueError(
"cnorm must be either 0 or 1. Input value was {:s}."
.format(repr(cnorm))
)
if normalization.lower() == 'unnorm' and l > 85:
raise ValueError("Calculations using unnormalized coefficients " +
"are stable only for degrees less than or equal " +
"to 85. Input value was {:d}.".format(l))
if normalization == '4pi':
p = _PlmBar(l, z, csphase=csphase, cnorm=cnorm)
elif normalization == 'ortho':
p = _PlmON(l, z, csphase=csphase, cnorm=cnorm)
elif normalization == 'schmidt':
p = _PlmSchmidt(l, z, csphase=csphase, cnorm=cnorm)
elif normalization == 'unnorm':
p = _PLegendreA(l, z, csphase=csphase)
return p[(l*(l+1))//2+m]
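# --- Illustrative usage sketch (not part of the original file): the packed
# and 2-D layouts agree via the l*(l+1)/2+m index described in the docstrings.
# p2d = legendre(3, 0.5)               # shape (4, 4)
# p1d = legendre(3, 0.5, packed=True)  # shape (10,)
# l, m = 2, 1
# assert p2d[l, m] == p1d[(l * (l + 1)) // 2 + m]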
|
8d6c3c30b7bdbdbf36ee28570c39125270594981
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/maximum-performance-of-a-team.py
|
c9293ba7a37f33c4226e78c4b8dadab394c0f30a
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 665
|
py
|
maximum-performance-of-a-team.py
|
# Time: O(nlogn)
# Space: O(n)
import itertools
import heapq
class Solution(object):
def maxPerformance(self, n, speed, efficiency, k):
"""
:type n: int
:type speed: List[int]
:type efficiency: List[int]
:type k: int
:rtype: int
"""
MOD = 10**9 + 7
result, s_sum = 0, 0
min_heap = []
for e, s in sorted(itertools.izip(efficiency, speed), reverse=True):
s_sum += s
heapq.heappush(min_heap, s)
if len(min_heap) > k:
s_sum -= heapq.heappop(min_heap)
result = max(result, s_sum*e)
return result % MOD
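# --- Illustrative walk-through (not part of the original file), using the
# LeetCode sample n=6, speed=[2,10,3,1,5,8], efficiency=[5,4,3,9,7,2], k=2.
# Pairs sorted by efficiency desc: (9,1) (7,5) (5,2) (4,10) (3,3) (2,8).
# At (7,5): s_sum = 1+5 = 6, min efficiency = 7 -> 42.
# At (4,10): the size-2 heap keeps speeds {5,10}, s_sum = 15, e = 4 -> 60,
# which is the maximum. A Python 3 port only needs zip() in place of
# itertools.izip().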
|
b1aeaae5317c29b58a606304c889b43b09302069
|
07ca66b6bc4d7a94ae78e6c622899458f1bb54fd
|
/sdk/python/tests/integration/e2e/test_validation.py
|
f49ed80a26507a0d23649a41b58a9377bfd0e78a
|
[
"Apache-2.0"
] |
permissive
|
feast-dev/feast
|
8136454dec73275d18133b96f74c3fec8abc57e8
|
58aff346832ebde1695a47cf724da3d65a4a8c53
|
refs/heads/master
| 2023-08-31T03:35:16.188051
| 2023-08-28T06:25:39
| 2023-08-28T06:25:39
| 161,133,770
| 3,956
| 754
|
Apache-2.0
| 2023-09-14T11:17:15
| 2018-12-10T07:20:15
|
Python
|
UTF-8
|
Python
| false
| false
| 13,478
|
py
|
test_validation.py
|
import datetime
import shutil
import pandas as pd
import pyarrow as pa
import pytest
from great_expectations.core import ExpectationSuite
from great_expectations.dataset import PandasDataset
from feast import FeatureService
from feast.dqm.errors import ValidationFailed
from feast.dqm.profilers.ge_profiler import ge_profiler
from feast.feature_logging import (
LOG_TIMESTAMP_FIELD,
FeatureServiceLoggingSource,
LoggingConfig,
)
from feast.protos.feast.serving.ServingService_pb2 import FieldStatus
from feast.utils import make_tzaware
from feast.wait import wait_retry_backoff
from tests.integration.feature_repos.repo_configuration import (
construct_universal_feature_views,
)
from tests.integration.feature_repos.universal.entities import (
customer,
driver,
location,
)
from tests.utils.cli_repo_creator import CliRunner
from tests.utils.test_log_creator import prepare_logs
_features = [
"customer_profile:current_balance",
"customer_profile:avg_passenger_count",
"customer_profile:lifetime_trip_count",
"order:order_is_success",
"global_stats:num_rides",
"global_stats:avg_ride_length",
]
@pytest.mark.integration
@pytest.mark.universal_offline_stores
def test_historical_retrieval_with_validation(environment, universal_data_sources):
store = environment.feature_store
(entities, datasets, data_sources) = universal_data_sources
feature_views = construct_universal_feature_views(data_sources)
store.apply([driver(), customer(), location(), *feature_views.values()])
# Create two identical retrieval jobs
entity_df = datasets.entity_df.drop(
columns=["order_id", "origin_id", "destination_id"]
)
reference_job = store.get_historical_features(
entity_df=entity_df,
features=_features,
)
job = store.get_historical_features(
entity_df=entity_df,
features=_features,
)
# Save dataset using reference job and retrieve it
store.create_saved_dataset(
from_=reference_job,
name="my_training_dataset",
storage=environment.data_source_creator.create_saved_dataset_destination(),
allow_overwrite=True,
)
saved_dataset = store.get_saved_dataset("my_training_dataset")
    # If validation passes, no exception is raised at this point
reference = saved_dataset.as_reference(name="ref", profiler=configurable_profiler)
job.to_df(validation_reference=reference)
@pytest.mark.integration
def test_historical_retrieval_fails_on_validation(environment, universal_data_sources):
store = environment.feature_store
(entities, datasets, data_sources) = universal_data_sources
feature_views = construct_universal_feature_views(data_sources)
store.apply([driver(), customer(), location(), *feature_views.values()])
entity_df = datasets.entity_df.drop(
columns=["order_id", "origin_id", "destination_id"]
)
reference_job = store.get_historical_features(
entity_df=entity_df,
features=_features,
)
store.create_saved_dataset(
from_=reference_job,
name="my_other_dataset",
storage=environment.data_source_creator.create_saved_dataset_destination(),
allow_overwrite=True,
)
job = store.get_historical_features(
entity_df=entity_df,
features=_features,
)
ds = store.get_saved_dataset("my_other_dataset")
profiler_expectation_suite = ds.get_profile(
profiler=profiler_with_unrealistic_expectations
)
assert len(profiler_expectation_suite.expectation_suite["expectations"]) == 3
with pytest.raises(ValidationFailed) as exc_info:
job.to_df(
validation_reference=store.get_saved_dataset(
"my_other_dataset"
).as_reference(name="ref", profiler=profiler_with_unrealistic_expectations)
)
failed_expectations = exc_info.value.report.errors
assert len(failed_expectations) == 2
assert failed_expectations[0].check_name == "expect_column_max_to_be_between"
assert failed_expectations[0].column_name == "current_balance"
assert failed_expectations[1].check_name == "expect_column_values_to_be_in_set"
assert failed_expectations[1].column_name == "avg_passenger_count"
@pytest.mark.integration
@pytest.mark.universal_offline_stores
def test_logged_features_validation(environment, universal_data_sources):
store = environment.feature_store
(_, datasets, data_sources) = universal_data_sources
feature_views = construct_universal_feature_views(data_sources)
feature_service = FeatureService(
name="test_service",
features=[
feature_views.customer[
["current_balance", "avg_passenger_count", "lifetime_trip_count"]
],
feature_views.order[["order_is_success"]],
feature_views.global_fv[["num_rides", "avg_ride_length"]],
],
logging_config=LoggingConfig(
destination=environment.data_source_creator.create_logged_features_destination()
),
)
store.apply(
[driver(), customer(), location(), feature_service, *feature_views.values()]
)
entity_df = datasets.entity_df.drop(
columns=["order_id", "origin_id", "destination_id"]
)
# add some non-existing entities to check NotFound feature handling
for i in range(5):
entity_df = pd.concat(
[
entity_df,
pd.DataFrame.from_records(
[
{
"customer_id": 2000 + i,
"driver_id": 6000 + i,
"event_timestamp": datetime.datetime.now(),
}
]
),
]
)
store_fs = store.get_feature_service(feature_service.name)
reference_dataset = store.create_saved_dataset(
from_=store.get_historical_features(
entity_df=entity_df, features=store_fs, full_feature_names=True
),
name="reference_for_validating_logged_features",
storage=environment.data_source_creator.create_saved_dataset_destination(),
allow_overwrite=True,
)
log_source_df = store.get_historical_features(
entity_df=entity_df, features=store_fs, full_feature_names=False
).to_df()
logs_df = prepare_logs(log_source_df, feature_service, store)
schema = FeatureServiceLoggingSource(
feature_service=feature_service, project=store.project
).get_schema(store._registry)
store.write_logged_features(
pa.Table.from_pandas(logs_df, schema=schema), source=feature_service
)
def validate():
"""
Return Tuple[succeed, completed]
        Succeed will be True if no ValidationFailed exception was raised
"""
try:
store.validate_logged_features(
feature_service,
start=logs_df[LOG_TIMESTAMP_FIELD].min(),
end=logs_df[LOG_TIMESTAMP_FIELD].max() + datetime.timedelta(seconds=1),
reference=reference_dataset.as_reference(
name="ref", profiler=profiler_with_feature_metadata
),
)
except ValidationFailed:
return False, True
except Exception:
# log table is still being created
return False, False
return True, True
success = wait_retry_backoff(validate, timeout_secs=30)
assert success, "Validation failed (unexpectedly)"
@pytest.mark.integration
def test_e2e_validation_via_cli(environment, universal_data_sources):
runner = CliRunner()
store = environment.feature_store
(_, datasets, data_sources) = universal_data_sources
feature_views = construct_universal_feature_views(data_sources)
feature_service = FeatureService(
name="test_service",
features=[
feature_views.customer[
["current_balance", "avg_passenger_count", "lifetime_trip_count"]
],
],
logging_config=LoggingConfig(
destination=environment.data_source_creator.create_logged_features_destination()
),
)
store.apply([customer(), feature_service, feature_views.customer])
entity_df = datasets.entity_df.drop(
columns=["order_id", "origin_id", "destination_id", "driver_id"]
)
retrieval_job = store.get_historical_features(
entity_df=entity_df,
features=store.get_feature_service(feature_service.name),
full_feature_names=True,
)
logs_df = prepare_logs(retrieval_job.to_df(), feature_service, store)
saved_dataset = store.create_saved_dataset(
from_=retrieval_job,
name="reference_for_validating_logged_features",
storage=environment.data_source_creator.create_saved_dataset_destination(),
allow_overwrite=True,
)
reference = saved_dataset.as_reference(
name="test_reference", profiler=configurable_profiler
)
schema = FeatureServiceLoggingSource(
feature_service=feature_service, project=store.project
).get_schema(store._registry)
store.write_logged_features(
pa.Table.from_pandas(logs_df, schema=schema), source=feature_service
)
with runner.local_repo(example_repo_py="", offline_store="file") as local_repo:
local_repo.apply(
[customer(), feature_views.customer, feature_service, reference]
)
local_repo._registry.apply_saved_dataset(saved_dataset, local_repo.project)
validate_args = [
"validate",
"--feature-service",
feature_service.name,
"--reference",
reference.name,
(datetime.datetime.now() - datetime.timedelta(days=7)).isoformat(),
datetime.datetime.now().isoformat(),
]
p = runner.run(validate_args, cwd=local_repo.repo_path)
assert p.returncode == 0, p.stderr.decode()
assert "Validation successful" in p.stdout.decode(), p.stderr.decode()
# make sure second validation will use cached profile
shutil.rmtree(saved_dataset.storage.file_options.uri)
# Add some invalid data that would lead to failed validation
invalid_data = pd.DataFrame(
data={
"customer_id": [0],
"current_balance": [0],
"avg_passenger_count": [0],
"lifetime_trip_count": [0],
"event_timestamp": [
make_tzaware(datetime.datetime.utcnow())
- datetime.timedelta(hours=1)
],
}
)
invalid_logs = prepare_logs(invalid_data, feature_service, store)
store.write_logged_features(
pa.Table.from_pandas(invalid_logs, schema=schema), source=feature_service
)
p = runner.run(validate_args, cwd=local_repo.repo_path)
assert p.returncode == 1, p.stdout.decode()
assert "Validation failed" in p.stdout.decode(), p.stderr.decode()
# Great expectations profilers created for testing
@ge_profiler
def configurable_profiler(dataset: PandasDataset) -> ExpectationSuite:
from great_expectations.profile.user_configurable_profiler import (
UserConfigurableProfiler,
)
return UserConfigurableProfiler(
profile_dataset=dataset,
ignored_columns=["event_timestamp"],
excluded_expectations=[
"expect_table_columns_to_match_ordered_list",
"expect_table_row_count_to_be_between",
],
value_set_threshold="few",
).build_suite()
@ge_profiler(with_feature_metadata=True)
def profiler_with_feature_metadata(dataset: PandasDataset) -> ExpectationSuite:
from great_expectations.profile.user_configurable_profiler import (
UserConfigurableProfiler,
)
# always present
dataset.expect_column_values_to_be_in_set(
"global_stats__avg_ride_length__status", {FieldStatus.PRESENT}
)
    # present in at least 70% of rows
dataset.expect_column_values_to_be_in_set(
"customer_profile__current_balance__status", {FieldStatus.PRESENT}, mostly=0.7
)
return UserConfigurableProfiler(
profile_dataset=dataset,
ignored_columns=["event_timestamp"]
+ [
c
for c in dataset.columns
if c.endswith("__timestamp") or c.endswith("__status")
],
excluded_expectations=[
"expect_table_columns_to_match_ordered_list",
"expect_table_row_count_to_be_between",
],
value_set_threshold="few",
).build_suite()
@ge_profiler
def profiler_with_unrealistic_expectations(dataset: PandasDataset) -> ExpectationSuite:
# note: there are 4 expectations here and only 3 are returned from the profiler
# need to create dataframe with corrupted data first
df = pd.DataFrame()
df["current_balance"] = [-100]
df["avg_passenger_count"] = [0]
other_ds = PandasDataset(df)
other_ds.expect_column_max_to_be_between("current_balance", -1000, -100)
other_ds.expect_column_values_to_be_in_set("avg_passenger_count", value_set={0})
# this should pass
other_ds.expect_column_min_to_be_between("avg_passenger_count", 0, 1000)
# this should fail
other_ds.expect_column_to_exist("missing random column")
return other_ds.get_expectation_suite()
|
e2887a87e03c54b391d053e7a03c79d7226d0d9a
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/govern/data-meta/OpenMetadata/ingestion/src/metadata/interfaces/profiler_protocol.py
|
0d2dc86efaa5d1884a78bc8c9cc693fa98be08c9
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 2,936
|
py
|
profiler_protocol.py
|
# Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interfaces with database for all database engine
supporting sqlalchemy abstraction layer
"""
from abc import ABC, abstractmethod
from typing import Any, Dict, Optional, Union
from pydantic import BaseModel
from sqlalchemy import Column, MetaData
from metadata.generated.schema.entity.data.table import PartitionProfilerConfig, Table
from metadata.generated.schema.entity.services.connections.database.datalakeConnection import (
DatalakeConnection,
)
from metadata.generated.schema.entity.services.databaseService import DatabaseConnection
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.orm_profiler.api.models import ProfileSampleConfig
from metadata.orm_profiler.metrics.registry import Metrics
class ProfilerInterfaceArgs(BaseModel):
"""Profiler Interface Args Model"""
service_connection_config: Any
sqa_metadata_obj: Optional[MetaData]
ometa_client: Optional[OpenMetadata]
thread_count: Optional[float]
table_entity: Optional[Union[Table, Any]]
profile_sample_config: Optional[ProfileSampleConfig] = None
table_sample_query: Optional[Union[int, str]]
table_partition_config: Optional[PartitionProfilerConfig]
timeout_seconds: Optional[int]
class Config:
arbitrary_types_allowed = True
class ProfilerProtocol(ABC):
"""Protocol interface for the profiler processor"""
@abstractmethod
def __init__(
self,
ometa_client: OpenMetadata,
service_connection_config: Union[DatabaseConnection, DatalakeConnection],
):
"""Required attribute for the interface"""
raise NotImplementedError
@property
@abstractmethod
def table(self):
"""OM Table entity"""
raise NotImplementedError
@abstractmethod
def _get_metrics(self, *args, **kwargs):
"""Get metrics"""
raise NotImplementedError
@abstractmethod
def get_all_metrics(self, metric_funcs) -> dict:
"""run profiler metrics"""
raise NotImplementedError
@abstractmethod
def get_composed_metrics(
self, column: Column, metric: Metrics, column_results: Dict
) -> dict:
"""run profiler metrics"""
raise NotImplementedError
@abstractmethod
def fetch_sample_data(self, table) -> dict:
"""run profiler metrics"""
raise NotImplementedError
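# --- Illustrative sketch (not part of the original file): a minimal,
# hypothetical subclass showing the shape a concrete ProfilerProtocol
# implementation takes; every name below is invented for illustration.
class _SketchProfiler(ProfilerProtocol):
    """Minimal in-memory implementation, used only to illustrate the protocol."""

    def __init__(self, ometa_client, service_connection_config):
        self._client = ometa_client
        self._config = service_connection_config
        self._table = None

    @property
    def table(self):
        return self._table

    def _get_metrics(self, *args, **kwargs):
        return {}

    def get_all_metrics(self, metric_funcs) -> dict:
        # evaluate each metric callable and key the results by function name
        return {fn.__name__: fn() for fn in metric_funcs}

    def get_composed_metrics(self, column, metric, column_results) -> dict:
        return {metric.name: column_results.get(str(column))}

    def fetch_sample_data(self, table) -> dict:
        return {"rows": []}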
|
82c405339c0f5dc3670e4f6ee273a11fc7692007
|
60cffd76b5d2f499ed845116c7af0794093af041
|
/tests/test_acl.py
|
863959d1a4c33a8a338f27c7aecdca4bd65b5807
|
[
"MIT"
] |
permissive
|
jrxFive/python-nomad
|
2ed5a1dc6c7520c6acb63c1876a57aef0d36925d
|
41df8692ed3fca69ff92cf5e806ad969fe0e0ffa
|
refs/heads/master
| 2023-08-28T17:53:12.371524
| 2023-06-02T21:59:17
| 2023-06-02T21:59:17
| 58,209,578
| 135
| 92
|
MIT
| 2023-08-02T09:59:40
| 2016-05-06T13:34:56
|
Python
|
UTF-8
|
Python
| false
| false
| 5,617
|
py
|
test_acl.py
|
import pytest
import tests.common as common
import json
import os
# integration tests require nomad Vagrant VM or Binary running
# IMPORTANT: without token activated
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
@pytest.mark.run(order=0)
def test_create_bootstrap(nomad_setup):
bootstrap = nomad_setup.acl.generate_bootstrap()
assert "SecretID" in bootstrap
common.NOMAD_TOKEN = bootstrap["SecretID"]
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
@pytest.mark.run(order=1)
def test_list_tokens(nomad_setup):
assert "Bootstrap Token" in nomad_setup.acl.get_tokens()[0]["Name"]
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
@pytest.mark.run(order=2)
def test_create_token(nomad_setup):
token_example = '{"Name": "Readonly token","Type": "client","Policies": ["readonly"],"Global": false}'
json_token = json.loads(token_example)
created_token = nomad_setup.acl.create_token(json_token)
assert "Readonly token" in created_token["Name"]
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
@pytest.mark.run(order=3)
def test_list_all_tokens(nomad_setup):
tokens = nomad_setup.acl.get_tokens()
assert isinstance(tokens, list)
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
@pytest.mark.run(order=4)
def test_update_token(nomad_setup):
token_example = '{"Name": "CreatedForUpdate","Type": "client","Policies": ["readonly"],"Global": false}'
json_token = json.loads(token_example)
created_token = nomad_setup.acl.create_token(json_token)
token_update = (
'{"AccessorID":"'
+ created_token["AccessorID"]
+ '","Name": "Updated" ,"Type": "client","Policies": ["readonly"]}'
)
json_token_update = json.loads(token_update)
update_token = nomad_setup.acl.update_token(id_=created_token["AccessorID"], token=json_token_update)
assert "Updated" in update_token["Name"]
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
@pytest.mark.run(order=5)
def test_get_token(nomad_setup):
token_example = '{"Name": "GetToken","Type": "client","Policies": ["readonly"],"Global": false}'
json_token = json.loads(token_example)
created_token = nomad_setup.acl.create_token(json_token)
get_token = nomad_setup.acl.get_token(created_token["AccessorID"])
assert "GetToken" in created_token["Name"]
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
@pytest.mark.run(order=6)
def test_delete_token(nomad_setup):
token_example = '{"Name": "DeleteToken","Type": "client","Policies": ["readonly"],"Global": false}'
json_token = json.loads(token_example)
created_token = nomad_setup.acl.create_token(json_token)
assert "DeleteToken" in created_token["Name"]
nomad_setup.acl.delete_token(created_token["AccessorID"])
assert False == any("DeleteToken" in x for x in nomad_setup.acl.get_tokens())
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
def test_get_self_token(nomad_setup):
current_token = nomad_setup.acl.get_self_token()
assert nomad_setup.get_token() in current_token["SecretID"]
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
def test_get_policies(nomad_setup):
policies = nomad_setup.acl.get_policies()
assert isinstance(policies, list)
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
def test_create_policy(nomad_setup):
policy_example = '{ "Name": "my-policy", "Description": "This is a great policy", "Rules": "" }'
json_policy = json.loads(policy_example)
nomad_setup.acl.create_policy(id_="my-policy", policy=json_policy)
assert False == any("my-policy" in x for x in nomad_setup.acl.get_policies())
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
def test_get_policy(nomad_setup):
policy = nomad_setup.acl.get_policy("my-policy")
assert "This is a great policy" in policy["Description"]
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
def test_update_policy(nomad_setup):
policy_update = '{"Name": "my-policy","Description": "Updated","Rules": ""}'
json_policy_update = json.loads(policy_update)
nomad_setup.acl.update_policy(id_="my-policy", policy=json_policy_update)
assert False == any("Updated" in x for x in nomad_setup.acl.get_policies())
@pytest.mark.skipif(
    tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 7, 0), reason="Nomad ACL not supported"
)
def test_delete_policy(nomad_setup):
nomad_setup.acl.delete_policy(id_="my-policy")
assert False == any("my-policy" in x for x in nomad_setup.acl.get_policies())
|
553accc84214b74a7826e5d1f0c7a84f18171e6b
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/pytorch/source/caffe2/python/_import_c_extension.py
|
aca9e52af29293f09ccd079768372b9dce8c3972
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,042
|
py
|
_import_c_extension.py
|
## @package _import_c_extension
# Module caffe2.python._import_c_extension
import atexit
import logging
import sys
from caffe2.python import extension_loader
# We will first try to load the gpu-enabled caffe2. If it fails, we will then
# attempt to load the cpu version. The cpu backend is the minimum required, so
# if that still fails, we will exit loudly.
with extension_loader.DlopenGuard():
has_hip_support = False
has_gpu_support = False
try:
from caffe2.python.caffe2_pybind11_state_gpu import * # noqa
if num_cuda_devices(): # noqa
has_gpu_support = True
except ImportError as gpu_e:
logging.info('Failed to import cuda module: {}'.format(gpu_e))
try:
RTLD_LAZY = 1
with extension_loader.DlopenGuard(RTLD_LAZY):
from caffe2.python.caffe2_pybind11_state_hip import * # noqa
if num_hip_devices():
has_hip_support = True
logging.info('This caffe2 python run has AMD GPU support!')
except ImportError as hip_e:
logging.info('Failed to import AMD hip module: {}'.format(hip_e))
logging.warning(
'This caffe2 python run does not have GPU support. '
'Will run in CPU only mode.')
try:
from caffe2.python.caffe2_pybind11_state import * # noqa
except ImportError as cpu_e:
logging.critical(
'Cannot load caffe2.python. Error: {0}'.format(str(cpu_e)))
sys.exit(1)
# libcaffe2_python contains a global Workspace that we need to properly delete
# when exiting. Otherwise, cudart will cause segfaults sometimes.
atexit.register(on_module_exit) # noqa
# Add functionalities for the TensorCPU interface.
def _TensorCPU_shape(self):
return tuple(self._shape)
def _TensorCPU_reshape(self, shape):
return self._reshape(list(shape))
TensorCPU.shape = property(_TensorCPU_shape) # noqa
TensorCPU.reshape = _TensorCPU_reshape # noqa
|
213ebef874e6ade9be0d1378f93701ffc44fc4f9
|
860c31e414c4c280b70ec0872042d715a2d56978
|
/torch_ecg/databases/aux_data/cinc2020_aux_data.py
|
c34d789446bd2694930cd5ef357a04e77a9ea71b
|
[
"MIT"
] |
permissive
|
DeepPSP/torch_ecg
|
255e49ff436e13044a1f049141f982680e56970e
|
a40c65f4fefa83ba7d3d184072a4c05627b7e226
|
refs/heads/master
| 2023-09-01T06:47:17.153216
| 2023-08-31T18:00:47
| 2023-08-31T18:00:47
| 298,482,237
| 111
| 16
|
MIT
| 2023-08-21T11:25:07
| 2020-09-25T06:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 51,757
|
py
|
cinc2020_aux_data.py
|
"""
from 3 files of the official evaluation repo:
dx_mapping_scored.csv, dx_mapping_unscored.csv, weights.csv
"""
from io import StringIO
from numbers import Real
from typing import Dict, Optional, Sequence, Union
import numpy as np
import pandas as pd
from ...cfg import CFG
__all__ = [
"df_weights",
"df_weights_abbr",
"df_weights_fullname",
"dx_mapping_scored",
"dx_mapping_unscored",
"dx_mapping_all",
"equiv_class_dict",
"load_weights",
"get_class",
"get_class_count",
"get_class_weight",
"normalize_class",
"dx_cooccurrence_all",
"dx_cooccurrence_scored",
"get_cooccurrence",
]
df_weights = pd.read_csv(
StringIO(
""",270492004,164889003,164890007,426627000,713427006,713426002,445118002,39732003,164909002,251146004,698252002,10370003,284470004,427172004,164947007,111975006,164917005,47665007,59118001,427393009,426177001,426783006,427084000,63593006,164934002,59931005,17338001
270492004,1.0,0.3,0.3,0.5,0.4,0.5,0.45,0.45,0.325,0.375,0.45,0.425,0.4625,0.425,0.5,0.35,0.2,0.45,0.4,0.5,0.5,0.45,0.425,0.4625,0.3,0.3,0.425
164889003,0.3,1.0,0.5,0.3,0.4,0.3,0.35,0.35,0.475,0.425,0.35,0.375,0.3375,0.375,0.3,0.45,0.4,0.35,0.4,0.3,0.3,0.25,0.375,0.3375,0.5,0.5,0.375
164890007,0.3,0.5,1.0,0.3,0.4,0.3,0.35,0.35,0.475,0.425,0.35,0.375,0.3375,0.375,0.3,0.45,0.4,0.35,0.4,0.3,0.3,0.25,0.375,0.3375,0.5,0.5,0.375
426627000,0.5,0.3,0.3,1.0,0.4,0.5,0.45,0.45,0.325,0.375,0.45,0.425,0.4625,0.425,0.5,0.35,0.2,0.45,0.4,0.5,0.5,0.45,0.425,0.4625,0.3,0.3,0.425
713427006,0.4,0.4,0.4,0.4,1.0,0.4,0.45,0.45,0.425,0.475,0.45,0.475,0.4375,0.475,0.4,0.45,0.3,0.45,1.0,0.4,0.4,0.35,0.475,0.4375,0.4,0.4,0.475
713426002,0.5,0.3,0.3,0.5,0.4,1.0,0.45,0.45,0.325,0.375,0.45,0.425,0.4625,0.425,0.5,0.35,0.2,0.45,0.4,0.5,0.5,0.45,0.425,0.4625,0.3,0.3,0.425
445118002,0.45,0.35,0.35,0.45,0.45,0.45,1.0,0.5,0.375,0.425,0.5,0.475,0.4875,0.475,0.45,0.4,0.25,0.5,0.45,0.45,0.45,0.4,0.475,0.4875,0.35,0.35,0.475
39732003,0.45,0.35,0.35,0.45,0.45,0.45,0.5,1.0,0.375,0.425,0.5,0.475,0.4875,0.475,0.45,0.4,0.25,0.5,0.45,0.45,0.45,0.4,0.475,0.4875,0.35,0.35,0.475
164909002,0.325,0.475,0.475,0.325,0.425,0.325,0.375,0.375,1.0,0.45,0.375,0.4,0.3625,0.4,0.325,0.475,0.375,0.375,0.425,0.325,0.325,0.275,0.4,0.3625,0.475,0.475,0.4
251146004,0.375,0.425,0.425,0.375,0.475,0.375,0.425,0.425,0.45,1.0,0.425,0.45,0.4125,0.45,0.375,0.475,0.325,0.425,0.475,0.375,0.375,0.325,0.45,0.4125,0.425,0.425,0.45
698252002,0.45,0.35,0.35,0.45,0.45,0.45,0.5,0.5,0.375,0.425,1.0,0.475,0.4875,0.475,0.45,0.4,0.25,0.5,0.45,0.45,0.45,0.4,0.475,0.4875,0.35,0.35,0.475
10370003,0.425,0.375,0.375,0.425,0.475,0.425,0.475,0.475,0.4,0.45,0.475,1.0,0.4625,0.5,0.425,0.425,0.275,0.475,0.475,0.425,0.425,0.375,0.5,0.4625,0.375,0.375,0.5
284470004,0.4625,0.3375,0.3375,0.4625,0.4375,0.4625,0.4875,0.4875,0.3625,0.4125,0.4875,0.4625,1.0,0.4625,0.4625,0.3875,0.2375,0.4875,0.4375,0.4625,0.4625,0.4125,0.4625,1.0,0.3375,0.3375,0.4625
427172004,0.425,0.375,0.375,0.425,0.475,0.425,0.475,0.475,0.4,0.45,0.475,0.5,0.4625,1.0,0.425,0.425,0.275,0.475,0.475,0.425,0.425,0.375,0.5,0.4625,0.375,0.375,1.0
164947007,0.5,0.3,0.3,0.5,0.4,0.5,0.45,0.45,0.325,0.375,0.45,0.425,0.4625,0.425,1.0,0.35,0.2,0.45,0.4,0.5,0.5,0.45,0.425,0.4625,0.3,0.3,0.425
111975006,0.35,0.45,0.45,0.35,0.45,0.35,0.4,0.4,0.475,0.475,0.4,0.425,0.3875,0.425,0.35,1.0,0.35,0.4,0.45,0.35,0.35,0.3,0.425,0.3875,0.45,0.45,0.425
164917005,0.2,0.4,0.4,0.2,0.3,0.2,0.25,0.25,0.375,0.325,0.25,0.275,0.2375,0.275,0.2,0.35,1.0,0.25,0.3,0.2,0.2,0.15,0.275,0.2375,0.4,0.4,0.275
47665007,0.45,0.35,0.35,0.45,0.45,0.45,0.5,0.5,0.375,0.425,0.5,0.475,0.4875,0.475,0.45,0.4,0.25,1.0,0.45,0.45,0.45,0.4,0.475,0.4875,0.35,0.35,0.475
59118001,0.4,0.4,0.4,0.4,1.0,0.4,0.45,0.45,0.425,0.475,0.45,0.475,0.4375,0.475,0.4,0.45,0.3,0.45,1.0,0.4,0.4,0.35,0.475,0.4375,0.4,0.4,0.475
427393009,0.5,0.3,0.3,0.5,0.4,0.5,0.45,0.45,0.325,0.375,0.45,0.425,0.4625,0.425,0.5,0.35,0.2,0.45,0.4,1.0,0.5,0.45,0.425,0.4625,0.3,0.3,0.425
426177001,0.5,0.3,0.3,0.5,0.4,0.5,0.45,0.45,0.325,0.375,0.45,0.425,0.4625,0.425,0.5,0.35,0.2,0.45,0.4,0.5,1.0,0.45,0.425,0.4625,0.3,0.3,0.425
426783006,0.45,0.25,0.25,0.45,0.35,0.45,0.4,0.4,0.275,0.325,0.4,0.375,0.4125,0.375,0.45,0.3,0.15,0.4,0.35,0.45,0.45,1.0,0.375,0.4125,0.25,0.25,0.375
427084000,0.425,0.375,0.375,0.425,0.475,0.425,0.475,0.475,0.4,0.45,0.475,0.5,0.4625,0.5,0.425,0.425,0.275,0.475,0.475,0.425,0.425,0.375,1.0,0.4625,0.375,0.375,0.5
63593006,0.4625,0.3375,0.3375,0.4625,0.4375,0.4625,0.4875,0.4875,0.3625,0.4125,0.4875,0.4625,1.0,0.4625,0.4625,0.3875,0.2375,0.4875,0.4375,0.4625,0.4625,0.4125,0.4625,1.0,0.3375,0.3375,0.4625
164934002,0.3,0.5,0.5,0.3,0.4,0.3,0.35,0.35,0.475,0.425,0.35,0.375,0.3375,0.375,0.3,0.45,0.4,0.35,0.4,0.3,0.3,0.25,0.375,0.3375,1.0,0.5,0.375
59931005,0.3,0.5,0.5,0.3,0.4,0.3,0.35,0.35,0.475,0.425,0.35,0.375,0.3375,0.375,0.3,0.45,0.4,0.35,0.4,0.3,0.3,0.25,0.375,0.3375,0.5,1.0,0.375
17338001,0.425,0.375,0.375,0.425,0.475,0.425,0.475,0.475,0.4,0.45,0.475,0.5,0.4625,1.0,0.425,0.425,0.275,0.475,0.475,0.425,0.425,0.375,0.5,0.4625,0.375,0.375,1.0"""
),
index_col=0,
)
df_weights.index = df_weights.index.map(str)
dx_mapping_scored = pd.read_csv(
StringIO(
"""Dx,SNOMED CT Code,Abbreviation,CPSC,CPSC-Extra,StPetersburg,PTB,PTB-XL,Georgia,Total,Notes
1st degree av block,270492004,IAVB,722,106,0,0,797,769,2394,
atrial fibrillation,164889003,AF,1221,153,2,15,1514,570,3475,
atrial flutter,164890007,AFL,0,54,0,1,73,186,314,
bradycardia,426627000,Brady,0,271,11,0,0,6,288,
complete right bundle branch block,713427006,CRBBB,0,113,0,0,542,28,683,We score 713427006 and 59118001 as the same diagnosis.
incomplete right bundle branch block,713426002,IRBBB,0,86,0,0,1118,407,1611,
left anterior fascicular block,445118002,LAnFB,0,0,0,0,1626,180,1806,
left axis deviation,39732003,LAD,0,0,0,0,5146,940,6086,
left bundle branch block,164909002,LBBB,236,38,0,0,536,231,1041,
low qrs voltages,251146004,LQRSV,0,0,0,0,182,374,556,
nonspecific intraventricular conduction disorder,698252002,NSIVCB,0,4,1,0,789,203,997,
pacing rhythm,10370003,PR,0,3,0,0,296,0,299,
premature atrial contraction,284470004,PAC,616,73,3,0,398,639,1729,We score 284470004 and 63593006 as the same diagnosis.
premature ventricular contractions,427172004,PVC,0,188,0,0,0,0,188,We score 427172004 and 17338001 as the same diagnosis.
prolonged pr interval,164947007,LPR,0,0,0,0,340,0,340,
prolonged qt interval,111975006,LQT,0,4,0,0,118,1391,1513,
qwave abnormal,164917005,QAb,0,1,0,0,548,464,1013,
right axis deviation,47665007,RAD,0,1,0,0,343,83,427,
right bundle branch block,59118001,RBBB,1857,1,2,0,0,542,2402,We score 713427006 and 59118001 as the same diagnosis.
sinus arrhythmia,427393009,SA,0,11,2,0,772,455,1240,
sinus bradycardia,426177001,SB,0,45,0,0,637,1677,2359,
sinus rhythm,426783006,NSR,918,4,0,80,18092,1752,20846,
sinus tachycardia,427084000,STach,0,303,11,1,826,1261,2402,
supraventricular premature beats,63593006,SVPB,0,53,4,0,157,1,215,We score 284470004 and 63593006 as the same diagnosis.
t wave abnormal,164934002,TAb,0,22,0,0,2345,2306,4673,
t wave inversion,59931005,TInv,0,5,1,0,294,812,1112,
ventricular premature beats,17338001,VPB,0,8,0,0,0,357,365,We score 427172004 and 17338001 as the same diagnosis."""
)
)
dx_mapping_scored = dx_mapping_scored.fillna("")
dx_mapping_scored["SNOMED CT Code"] = dx_mapping_scored["SNOMED CT Code"].apply(str)
dx_mapping_unscored = pd.read_csv(
StringIO(
"""Dx,SNOMED CT Code,Abbreviation,CPSC,CPSC-Extra,StPetersburg,PTB,PTB-XL,Georgia,Total
2nd degree av block,195042002,IIAVB,0,21,0,0,14,23,58
abnormal QRS,164951009,abQRS,0,0,0,0,3389,0,3389
accelerated junctional rhythm,426664006,AJR,0,0,0,0,0,19,19
acute myocardial infarction,57054005,AMI,0,0,6,0,0,0,6
acute myocardial ischemia,413444003,AMIs,0,1,0,0,0,1,2
anterior ischemia,426434006,AnMIs,0,0,0,0,44,281,325
anterior myocardial infarction,54329005,AnMI,0,62,0,0,354,0,416
atrial bigeminy,251173003,AB,0,0,3,0,0,0,3
atrial fibrillation and flutter,195080001,AFAFL,0,39,0,0,0,2,41
atrial hypertrophy,195126007,AH,0,2,0,0,0,60,62
atrial pacing pattern,251268003,AP,0,0,0,0,0,52,52
atrial tachycardia,713422000,ATach,0,15,0,0,0,28,43
atrioventricular junctional rhythm,29320008,AVJR,0,6,0,0,0,0,6
av block,233917008,AVB,0,5,0,0,0,74,79
blocked premature atrial contraction,251170000,BPAC,0,2,3,0,0,0,5
brady tachy syndrome,74615001,BTS,0,1,1,0,0,0,2
bundle branch block,6374002,BBB,0,0,1,20,0,116,137
cardiac dysrhythmia,698247007,CD,0,0,0,16,0,0,16
chronic atrial fibrillation,426749004,CAF,0,1,0,0,0,0,1
chronic myocardial ischemia,413844008,CMI,0,161,0,0,0,0,161
complete heart block,27885002,CHB,0,27,0,0,16,8,51
congenital incomplete atrioventricular heart block,204384007,CIAHB,0,0,0,2,0,0,2
coronary heart disease,53741008,CHD,0,0,16,21,0,0,37
decreased qt interval,77867006,SQT,0,1,0,0,0,0,1
diffuse intraventricular block,82226007,DIB,0,1,0,0,0,0,1
early repolarization,428417006,ERe,0,0,0,0,0,140,140
fusion beats,13640000,FB,0,0,7,0,0,0,7
heart failure,84114007,HF,0,0,0,7,0,0,7
heart valve disorder,368009,HVD,0,0,0,6,0,0,6
high t-voltage,251259000,HTV,0,1,0,0,0,0,1
idioventricular rhythm,49260003,IR,0,0,2,0,0,0,2
incomplete left bundle branch block,251120003,ILBBB,0,42,0,0,77,86,205
indeterminate cardiac axis,251200008,ICA,0,0,0,0,156,0,156
inferior ischaemia,425419005,IIs,0,0,0,0,219,451,670
inferior ST segment depression,704997005,ISTD,0,1,0,0,0,0,1
junctional escape,426995002,JE,0,4,0,0,0,5,9
junctional premature complex,251164006,JPC,0,2,0,0,0,0,2
junctional tachycardia,426648003,JTach,0,2,0,0,0,4,6
lateral ischaemia,425623009,LIs,0,0,0,0,142,903,1045
left atrial abnormality,253352002,LAA,0,0,0,0,0,72,72
left atrial enlargement,67741000119109,LAE,0,1,0,0,427,870,1298
left atrial hypertrophy,446813000,LAH,0,40,0,0,0,0,40
left posterior fascicular block,445211001,LPFB,0,0,0,0,177,25,202
left ventricular hypertrophy,164873001,LVH,0,158,10,0,2359,1232,3759
left ventricular strain,370365005,LVS,0,1,0,0,0,0,1
mobitz type i wenckebach atrioventricular block,54016002,MoI,0,0,3,0,0,0,3
myocardial infarction,164865005,MI,0,376,9,368,5261,7,6021
myocardial ischemia,164861001,MIs,0,384,0,0,2175,0,2559
nonspecific st t abnormality,428750005,NSSTTA,0,1290,0,0,381,1883,3554
old myocardial infarction,164867002,OldMI,0,1168,0,0,0,0,1168
paired ventricular premature complexes,251182009,VPVC,0,0,23,0,0,0,23
paroxysmal atrial fibrillation,282825002,PAF,0,0,1,1,0,0,2
paroxysmal supraventricular tachycardia,67198005,PSVT,0,0,3,0,24,0,27
paroxysmal ventricular tachycardia,425856008,PVT,0,0,15,0,0,0,15
r wave abnormal,164921003,RAb,0,1,0,0,0,10,11
rapid atrial fibrillation,314208002,RAF,0,0,0,2,0,0,2
right atrial abnormality,253339007,RAAb,0,0,0,0,0,14,14
right atrial hypertrophy,446358003,RAH,0,18,0,0,99,0,117
right ventricular hypertrophy,89792004,RVH,0,20,0,0,126,86,232
s t changes,55930002,STC,0,1,0,0,770,6,777
shortened pr interval,49578007,SPRI,0,3,0,0,0,2,5
sinoatrial block,65778007,SAB,0,9,0,0,0,0,9
sinus node dysfunction,60423000,SND,0,0,2,0,0,0,2
st depression,429622005,STD,869,57,4,0,1009,38,1977
st elevation,164931005,STE,220,66,4,0,28,134,452
st interval abnormal,164930006,STIAb,0,481,2,0,0,992,1475
supraventricular bigeminy,251168009,SVB,0,0,1,0,0,0,1
supraventricular tachycardia,426761007,SVT,0,3,1,0,27,32,63
suspect arm ecg leads reversed,251139008,ALR,0,0,0,0,0,12,12
transient ischemic attack,266257000,TIA,0,0,7,0,0,0,7
u wave abnormal,164937009,UAb,0,1,0,0,0,0,1
ventricular bigeminy,11157007,VBig,0,5,9,0,82,2,98
ventricular ectopics,164884008,VEB,700,0,49,0,1154,41,1944
ventricular escape beat,75532003,VEsB,0,3,1,0,0,0,4
ventricular escape rhythm,81898007,VEsR,0,1,0,0,0,1,2
ventricular fibrillation,164896001,VF,0,10,0,25,0,3,38
ventricular flutter,111288001,VFL,0,1,0,0,0,0,1
ventricular hypertrophy,266249003,VH,0,5,0,13,30,71,119
ventricular pacing pattern,251266004,VPP,0,0,0,0,0,46,46
ventricular pre excitation,195060002,VPEx,0,6,0,0,0,2,8
ventricular tachycardia,164895002,VTach,0,1,1,10,0,0,12
ventricular trigeminy,251180001,VTrig,0,4,4,0,20,1,29
wandering atrial pacemaker,195101003,WAP,0,0,0,0,0,7,7
wolff parkinson white pattern,74390002,WPW,0,0,4,2,80,2,88"""
)
)
dx_mapping_unscored["SNOMED CT Code"] = dx_mapping_unscored["SNOMED CT Code"].apply(str)
dms = dx_mapping_scored.copy()
dms["scored"] = True
dmn = dx_mapping_unscored.copy()
dmn["Notes"] = ""
dmn["scored"] = False
dx_mapping_all = pd.concat([dms, dmn], ignore_index=True).fillna("")
df_weights_snomed = df_weights # alias
snomed_ct_code_to_abbr = CFG(
{row["SNOMED CT Code"]: row["Abbreviation"] for _, row in dx_mapping_all.iterrows()}
)
abbr_to_snomed_ct_code = CFG({v: k for k, v in snomed_ct_code_to_abbr.items()})
df_weights_abbr = df_weights.copy()
df_weights_abbr.columns = df_weights_abbr.columns.map(
lambda i: snomed_ct_code_to_abbr[i]
)
df_weights_abbr.index = df_weights_abbr.index.map(lambda i: snomed_ct_code_to_abbr[i])
snomed_ct_code_to_fullname = CFG(
{row["SNOMED CT Code"]: row["Dx"] for _, row in dx_mapping_all.iterrows()}
)
fullname_to_snomed_ct_code = CFG({v: k for k, v in snomed_ct_code_to_fullname.items()})
df_weights_fullname = df_weights.copy()
df_weights_fullname.columns = df_weights_fullname.columns.map(
lambda i: snomed_ct_code_to_fullname[i]
)
df_weights_fullname.index = df_weights_fullname.index.map(
lambda i: snomed_ct_code_to_fullname[i]
)
abbr_to_fullname = CFG(
{row["Abbreviation"]: row["Dx"] for _, row in dx_mapping_all.iterrows()}
)
fullname_to_abbr = CFG({v: k for k, v in abbr_to_fullname.items()})
equiv_class_dict = CFG(
{
"CRBBB": "RBBB",
"SVPB": "PAC",
"VPB": "PVC",
"713427006": "59118001",
"63593006": "284470004",
"17338001": "427172004",
"complete right bundle branch block": "right bundle branch block",
"supraventricular premature beats": "premature atrial contraction",
"ventricular premature beats": "premature ventricular contractions",
}
)
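# --- Illustrative note (not part of the original file): the mapping above
# carries each equivalence three ways (abbreviation, SNOMED CT code, full
# name), so e.g. equiv_class_dict.get("CRBBB") == "RBBB" and
# equiv_class_dict.get("713427006") == "59118001" collapse the same pair.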
def load_weights(
classes: Sequence[Union[int, str]] = None, return_fmt: str = "np"
) -> Union[np.ndarray, pd.DataFrame]:
"""Load the weight matrix of the `classes`.
Parameters
----------
classes : Sequence[int] or Sequence[str], optional
        The classes (abbreviations or SNOMED CT codes) whose weights are to be loaded.
        If not given, weights of all classes in `dx_mapping_scored` will be loaded.
return_fmt : {"np", "pd"}, optional
The values in the form of a 2d :class:`~numpy.ndarray`
or a :class:`~pandas.DataFrame`,
by default "np".
Returns
-------
mat : numpy.ndarray or pandas.DataFrame
The weight matrix of the `classes`.
"""
if classes:
l_nc = [normalize_class(c, ensure_scored=True) for c in classes]
assert len(set(l_nc)) == len(classes), "`classes` has duplicates!"
mat = df_weights_abbr.loc[l_nc, l_nc]
else:
mat = df_weights_abbr.copy()
classes = df_weights_abbr.columns.tolist()
if return_fmt.lower() == "np":
mat = mat.values
elif return_fmt.lower() == "pd":
# columns and indices back to the original input format
mat.columns = list(map(str, classes))
mat.index = list(map(str, classes))
else:
raise ValueError(f"format of `{return_fmt}` is not supported!")
return mat
def normalize_class(c: Union[str, int], ensure_scored: bool = False) -> str:
"""Normalize the class name to its abbr.
This function facilitates the computation of the :func:`load_weights` function.
Parameters
----------
c : str or int
Abbr. or SNOMEDCTCode of the class.
ensure_scored : bool, default False
Ensure that the class is a scored class.
If True, ``ValueError`` would be raised if `c` is not scored.
Returns
-------
nc : str
The abbr. of the class.
"""
nc = snomed_ct_code_to_abbr.get(str(c), str(c))
if ensure_scored and nc not in df_weights_abbr.columns:
raise ValueError(f"class `{c}` not among the scored classes")
return nc
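# --- Illustrative usage sketch (not part of the original file): codes and
# abbreviations normalize to the same scored class, so the two calls below
# are expected to return identical 2x2 matrices.
# normalize_class("164889003")             # -> "AF"
# load_weights(["164889003", "59118001"])  # AF and RBBB, by SNOMED CT code
# load_weights(["AF", "RBBB"])             # the same classes, by abbreviation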
def get_class(snomed_ct_code: Union[str, int]) -> Dict[str, str]:
"""Look up the abbreviation and the full name of an ECG arrhythmia,
given its SNOMEDCTCode.
Parameters
----------
snomed_ct_code : str or int
The SNOMEDCTCode of the arrhythmia.
Returns
-------
arrhythmia_class : dict
        The :class:`dict` containing ``abbr`` (the abbreviation)
and ``fullname`` (the full name of the arrhythmia).
"""
arrhythmia_class = {
"abbr": snomed_ct_code_to_abbr[str(snomed_ct_code)],
"fullname": snomed_ct_code_to_fullname[str(snomed_ct_code)],
}
return arrhythmia_class
def get_class_count(
tranches: Union[str, Sequence[str]],
exclude_classes: Optional[Sequence[str]] = None,
scored_only: bool = False,
normalize: bool = True,
threshold: Optional[Real] = 0,
fmt: str = "a",
) -> Dict[str, int]:
"""Get the number of classes in the `tranches`.
Parameters
----------
tranches : str or Sequence[str]
Tranches to count classes,
can be combinations of "A", "B", "C", "D", "E", "F".
exclude_classes : Sequence[str], optional
        Abbreviations or SNOMED CT Codes of classes to be excluded from counting.
    scored_only : bool, default False
If True, only scored classes are counted.
normalize : bool, default True
        Whether to collapse equivalent classes into one,
used only when `scored_only` is True.
threshold : numbers.Real, default 0
Minimum ratio (0-1) or absolute number (>1) of a class to be counted.
fmt : str, default "a"
Format of the names of the classes in the returned dict,
can be one of the following (case insensitive):
- "a", abbreviations
- "f", full names
- "s", SNOMED CT Code
Returns
-------
class_count : dict
key: class in the format of `fmt`.
value: count of a class in `tranches`.
"""
assert threshold >= 0
tranche_names = CFG(
{
"A": "CPSC",
"B": "CPSC-Extra",
"C": "StPetersburg",
"D": "PTB",
"E": "PTB-XL",
"F": "Georgia",
}
)
tranche_names = [tranche_names[t] for t in tranches]
_exclude_classes = [normalize_class(c) for c in (exclude_classes or [])]
df = dx_mapping_scored.copy() if scored_only else dx_mapping_all.copy()
class_count = CFG()
for _, row in df.iterrows():
key = row["Abbreviation"]
val = row[tranche_names].values.sum()
if val == 0:
continue
if key in _exclude_classes:
continue
if normalize and scored_only:
key = equiv_class_dict.get(key, key)
if key in _exclude_classes:
continue
if key in class_count.keys():
class_count[key] += val
else:
class_count[key] = val
tmp = CFG()
tot_count = sum(class_count.values())
_threshold = threshold if threshold >= 1 else threshold * tot_count
if fmt.lower() == "s":
for key, val in class_count.items():
if val < _threshold:
continue
tmp[abbr_to_snomed_ct_code[key]] = val
class_count = tmp.copy()
elif fmt.lower() == "f":
for key, val in class_count.items():
if val < _threshold:
continue
tmp[abbr_to_fullname[key]] = val
class_count = tmp.copy()
else:
class_count = {
key: val for key, val in class_count.items() if val >= _threshold
}
del tmp
return class_count
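# Usage sketch (illustrative): count scored classes in tranches A and B,
# collapsing equivalent classes and dropping any class under 1% of the total:
# >>> get_class_count("AB", scored_only=True, normalize=True, threshold=0.01, fmt="f")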
def get_class_weight(
tranches: Union[str, Sequence[str]],
exclude_classes: Optional[Sequence[str]] = None,
scored_only: bool = False,
normalize: bool = True,
    threshold: Real = 0,
fmt: str = "a",
min_weight: Real = 0.5,
) -> Dict[str, Real]:
"""Get the weight of each class in each tranche.
Parameters
----------
tranches : str or Sequence[str]
Tranches to count classes,
can be combinations of "A", "B", "C", "D", "E", "F".
exclude_classes : Sequence[str], optional
        Abbreviations or SNOMED CT Codes of classes to be excluded from counting.
    scored_only : bool, default False
If True, only scored classes are counted.
normalize : bool, default True
        Whether to collapse equivalent classes into one,
used only when `scored_only` is True.
threshold : numbers.Real, default 0
Minimum ratio (0-1) or absolute number (>1) of a class to be counted.
fmt : str, default "a"
Format of the names of the classes in the returned dict,
can be one of the following (case insensitive):
- "a", abbreviations
- "f", full names
- "s", SNOMED CT Code
min_weight : numbers.Real, default 0.5
Minimum value of the weight of all classes,
or equivalently the weight of the largest class.
    Returns
    -------
class_weight : dict
- key: class in the format of `fmt`
- value: weight of a class in `tranches`
"""
class_count = get_class_count(
tranches=tranches,
exclude_classes=exclude_classes,
scored_only=scored_only,
normalize=normalize,
threshold=threshold,
fmt=fmt,
)
class_weight = CFG(
{key: sum(class_count.values()) / val for key, val in class_count.items()}
)
class_weight = CFG(
{
key: min_weight * val / min(class_weight.values())
for key, val in class_weight.items()
}
)
return class_weight
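# Worked example of the scaling above (hypothetical counts): with counts
# {"A": 100, "B": 25}, the raw weights are N / n_c = {"A": 1.25, "B": 5.0}
# (N = 125); dividing by the smallest raw weight and multiplying by
# min_weight = 0.5 yields {"A": 0.5, "B": 2.0}, so the most frequent class
# gets exactly `min_weight` and rarer classes get proportionally more.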
dx_cooccurrence_all = pd.read_csv(
StringIO(
""",IAVB,AF,AFL,Brady,CRBBB,IRBBB,LAnFB,LAD,LBBB,LQRSV,NSIVCB,PR,PAC,PVC,LPR,LQT,QAb,RAD,RBBB,SA,SB,NSR,STach,SVPB,TAb,TInv,VPB,IIAVB,abQRS,AJR,AMI,AMIs,AnMIs,AnMI,AB,AFAFL,AH,AP,ATach,AVJR,AVB,BPAC,BTS,BBB,CD,CAF,CMI,CHB,CIAHB,CHD,SQT,DIB,ERe,FB,HF,HVD,HTV,IR,ILBBB,ICA,IIs,ISTD,JE,JPC,JTach,LIs,LAA,LAE,LAH,LPFB,LVH,LVS,MoI,MI,MIs,NSSTTA,OldMI,VPVC,PAF,PSVT,PVT,RAb,RAF,RAAb,RAH,RVH,STC,SPRI,SAB,SND,STD,STE,STIAb,SVB,SVT,ALR,TIA,UAb,VBig,VEB,VEsB,VEsR,VF,VFL,VH,VPP,VPEx,VTach,VTrig,WAP,WPW
IAVB,2394,24,7,16,85,77,148,469,158,15,92,0,77,8,125,119,61,32,84,58,251,614,89,17,223,67,43,3,177,0,0,0,22,23,0,0,3,0,4,0,0,2,1,19,0,0,6,0,0,0,0,0,7,0,0,0,0,0,30,5,29,0,0,0,0,87,6,179,2,24,202,0,0,391,121,166,25,0,0,0,0,0,0,0,7,15,18,0,0,0,38,17,88,0,2,3,0,0,4,93,0,1,0,0,13,2,0,0,1,1,0
AF,24,3475,32,4,104,139,148,528,124,36,102,4,20,19,0,102,68,70,244,2,17,37,13,4,455,110,20,6,313,2,0,0,17,32,0,16,0,3,10,4,9,0,0,11,8,1,10,4,0,3,0,0,3,0,1,0,0,0,29,12,92,0,1,0,1,103,1,7,0,40,355,0,0,606,452,330,79,0,0,1,0,0,0,0,0,10,217,0,0,0,252,10,79,0,1,2,0,0,9,215,0,0,4,0,8,9,0,1,0,1,2
AFL,7,32,314,0,6,11,5,40,3,13,13,0,7,2,1,26,13,6,12,1,4,5,11,4,69,21,5,2,12,0,0,0,11,0,0,0,0,0,0,1,65,0,0,1,1,0,2,0,0,0,0,0,4,0,0,0,0,0,3,0,23,0,0,0,0,16,0,4,0,1,28,0,0,10,7,64,4,0,0,0,0,0,0,0,0,1,11,0,0,0,4,5,14,0,7,1,0,0,0,1,0,0,7,0,0,1,0,0,0,0,0
Brady,16,4,0,288,10,15,0,0,2,1,1,0,5,14,0,1,0,0,1,0,1,0,0,5,4,2,1,2,0,0,0,0,1,1,0,0,0,0,0,0,1,0,0,0,0,0,12,9,0,4,0,0,0,0,0,0,0,0,3,0,1,0,1,0,0,0,0,0,1,0,23,0,0,16,8,111,107,3,0,0,1,0,0,0,0,0,0,0,2,0,2,9,49,0,0,0,1,0,1,10,1,0,0,0,0,0,0,0,0,0,1
CRBBB,85,104,6,10,683,54,162,219,3,0,1,0,30,8,28,1,43,74,14,13,14,364,33,14,9,4,4,2,149,1,0,0,0,6,0,1,0,0,1,1,2,0,0,0,0,0,4,5,0,0,0,0,0,0,0,0,0,0,0,8,11,0,0,0,0,5,0,30,1,86,12,0,0,291,19,43,63,0,0,0,0,0,0,0,3,56,14,0,0,0,14,3,14,0,2,0,0,0,5,40,0,0,0,0,0,0,0,0,0,0,0
IRBBB,77,139,11,15,54,1611,136,340,2,4,14,2,70,6,24,76,45,74,62,69,106,873,109,10,176,39,15,8,160,1,0,0,32,29,0,1,1,9,2,2,1,0,0,0,0,0,1,5,0,0,0,0,0,0,0,0,0,0,1,11,32,0,1,0,0,24,0,75,1,36,92,0,0,258,79,135,41,0,0,1,0,0,0,0,16,81,25,0,0,0,36,5,83,0,4,0,0,0,4,71,0,0,1,0,19,0,0,0,1,0,3
LAnFB,148,148,5,0,162,136,1806,1386,1,9,30,1,78,0,62,39,69,16,9,48,53,1241,107,21,169,42,13,1,353,0,0,1,12,44,0,0,1,1,1,0,0,0,0,3,0,0,0,1,0,0,0,0,1,0,0,0,0,0,5,14,20,0,0,0,0,37,2,73,0,3,184,0,0,718,193,66,0,0,0,0,0,0,0,0,9,25,50,0,0,0,88,5,17,0,1,0,0,0,8,139,0,0,0,0,7,2,0,0,2,0,0
LAD,469,528,40,0,219,340,1386,6086,405,52,246,7,138,0,81,134,148,0,100,208,320,4052,354,76,574,110,54,12,1630,3,0,0,37,125,0,0,0,11,6,0,1,0,0,66,0,0,0,5,0,0,0,0,4,0,0,0,0,0,50,0,50,0,0,0,1,201,6,221,0,8,683,0,0,2227,580,191,0,0,0,2,0,0,0,1,16,41,146,0,0,0,132,21,72,0,8,0,0,0,32,385,0,0,0,0,23,4,0,0,4,1,41
LBBB,158,124,3,2,3,2,1,405,1041,1,4,1,30,4,16,6,5,1,0,17,45,371,55,10,10,23,7,1,12,1,0,0,1,2,0,1,2,1,2,0,0,0,0,91,0,0,1,4,0,0,0,0,0,0,0,0,0,0,15,11,3,0,0,0,0,12,2,93,2,1,31,0,0,41,12,32,20,0,0,0,0,0,0,0,2,2,4,0,0,0,4,4,12,0,2,1,0,0,2,62,0,0,0,0,2,0,0,0,0,0,0
LQRSV,15,36,13,1,0,4,9,52,1,556,18,1,40,0,5,36,4,2,1,16,59,154,87,0,128,24,9,0,12,1,0,0,15,0,0,0,0,0,2,0,6,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,6,18,0,0,0,1,34,0,8,0,0,2,0,0,47,9,83,0,0,0,0,0,0,0,0,2,1,13,0,0,0,9,5,25,0,5,1,0,0,0,10,0,0,0,0,0,1,0,0,0,0,0
NSIVCB,92,102,13,1,1,14,30,246,4,18,997,1,39,1,23,35,83,31,2,24,70,589,56,4,160,46,8,2,186,0,0,0,8,32,0,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2,7,23,0,0,0,0,49,3,62,1,11,154,0,0,353,180,53,0,0,0,0,0,0,0,0,7,1,35,1,0,0,86,2,26,0,3,0,0,0,4,81,0,1,0,0,1,0,0,0,0,0,0
PR,0,4,0,0,0,2,1,7,1,1,1,299,1,0,0,0,0,3,0,0,2,5,0,0,0,1,0,0,2,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,2,0,0,5,2,4,1,0,0,0,0,0,0,0,0,1,1,0,0,0,2,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0
PAC,77,20,7,5,30,70,78,138,30,40,39,1,1729,14,24,96,46,4,89,16,100,351,140,2,229,77,27,2,13,0,0,0,16,9,3,0,6,1,2,0,1,3,0,8,0,0,1,2,0,3,0,0,1,1,0,0,0,0,12,8,31,0,1,0,0,70,3,63,0,4,153,0,0,120,63,188,17,1,0,1,1,0,0,1,4,6,26,0,0,0,76,11,69,0,1,0,0,0,22,61,0,0,0,0,8,1,0,0,5,1,1
PVC,8,19,2,14,8,6,0,0,4,0,1,0,14,188,0,0,0,1,0,1,4,0,15,1,0,0,2,0,0,0,0,0,0,6,0,1,0,0,2,1,2,0,0,0,0,0,5,4,0,0,0,1,0,0,0,0,0,0,3,0,0,0,2,0,1,0,0,0,2,0,10,0,0,39,14,78,49,0,0,0,0,0,0,0,2,2,0,0,3,0,3,1,12,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0
LPR,125,0,1,0,28,24,62,81,16,5,23,0,24,0,340,0,37,1,0,5,8,319,13,0,75,17,0,0,3,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,1,0,0,0,0,2,0,35,0,5,66,0,0,131,60,15,0,0,0,0,0,0,0,0,1,1,12,0,0,0,59,0,0,0,0,0,0,0,1,36,0,0,0,0,0,0,0,0,2,0,0
LQT,119,102,26,1,1,76,39,134,6,36,35,0,96,0,0,1513,112,18,13,37,104,96,75,2,521,140,68,5,13,7,0,1,61,2,0,0,13,12,3,0,9,0,0,1,0,0,0,1,0,0,0,0,4,0,0,0,0,0,22,1,101,0,1,0,1,147,11,187,0,5,231,0,0,23,11,403,0,0,0,0,0,2,0,3,3,25,9,0,0,0,15,23,936,0,5,2,0,0,0,15,0,0,2,0,18,2,0,0,1,0,0
QAb,61,68,13,0,43,45,69,148,5,4,83,0,46,0,37,112,1013,16,18,29,88,452,73,0,458,138,16,1,2,1,0,0,86,23,0,0,0,1,1,0,2,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,5,10,124,0,0,0,0,170,1,40,0,8,127,0,0,421,84,65,1,0,0,0,0,0,0,0,1,3,16,0,0,0,102,0,84,0,2,1,0,0,1,38,0,0,0,0,12,1,1,0,1,0,0
RAD,32,70,6,0,74,74,16,0,1,2,31,3,4,1,1,18,16,427,1,24,10,226,46,5,34,9,6,1,145,0,0,0,7,8,0,0,1,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,19,0,0,0,0,3,1,22,0,102,12,0,0,156,26,33,0,0,0,1,0,0,0,2,18,47,4,1,0,0,6,2,11,0,0,3,0,0,3,30,0,0,0,0,13,0,0,0,0,0,3
RBBB,84,244,12,1,14,62,9,100,0,1,2,0,89,0,0,13,18,1,2402,16,88,0,61,0,72,47,18,2,0,1,0,0,13,0,0,0,4,6,5,0,1,0,0,25,0,0,0,3,0,0,0,0,2,0,0,0,0,0,2,0,42,0,0,0,1,70,1,53,0,4,36,0,0,1,0,45,1,0,0,0,0,0,0,1,0,10,0,0,0,0,22,22,17,0,0,1,2,0,1,54,0,0,2,0,4,1,0,0,0,2,0
SA,58,2,1,0,13,69,48,208,17,16,24,0,16,1,5,37,29,24,16,1240,123,428,0,1,132,27,6,1,128,0,0,0,10,9,0,0,2,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,11,0,0,0,1,0,3,1,19,0,0,0,0,34,0,40,0,6,114,0,0,158,53,82,4,0,0,0,0,0,0,0,2,7,15,0,0,0,16,7,27,0,1,2,1,0,3,123,0,0,0,0,4,1,0,0,0,0,3
SB,251,17,4,1,14,106,53,320,45,59,70,2,100,4,8,104,88,10,88,123,2359,353,1,1,318,114,25,6,85,0,0,0,47,9,0,0,0,1,0,0,2,0,0,9,0,0,3,5,0,0,0,0,43,0,0,0,0,0,12,9,45,0,2,0,0,110,12,106,0,5,265,0,0,109,56,257,12,0,0,0,0,0,0,0,1,10,20,1,0,0,30,21,70,0,0,0,0,0,3,6,1,0,0,0,7,2,1,0,0,1,3
NSR,614,37,5,0,364,873,1241,4052,371,154,589,5,351,0,319,96,452,226,0,428,353,20846,236,36,1784,223,0,1,2629,0,0,0,22,265,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,47,134,142,0,0,0,0,87,0,343,0,107,1721,0,0,3789,1385,220,2,0,0,2,0,0,0,0,62,103,410,1,0,0,688,23,0,0,3,0,0,0,38,662,0,0,0,0,23,0,0,0,11,0,66
STach,89,13,11,0,33,109,107,354,55,87,56,0,140,15,13,75,73,46,61,0,1,236,2402,8,409,130,67,9,151,0,0,0,44,28,0,2,26,3,5,0,1,0,1,13,0,0,13,1,0,5,0,0,8,5,0,1,0,0,24,8,76,0,2,0,2,111,24,195,9,17,241,0,1,302,120,424,165,4,0,1,2,1,0,7,34,24,60,1,0,0,90,36,84,0,3,2,0,0,13,108,0,0,0,0,15,4,0,0,6,0,0
SVPB,17,4,4,5,14,10,21,76,10,0,4,0,2,1,0,2,0,5,0,1,1,36,8,215,13,0,0,0,50,0,0,0,1,5,0,2,1,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,3,0,2,0,1,0,0,3,0,4,3,3,23,0,0,65,24,25,8,2,0,3,0,0,0,0,2,3,3,0,0,0,1,0,5,1,0,0,0,0,3,11,1,0,0,0,0,0,0,1,2,0,6
TAb,223,455,69,4,9,176,169,574,10,128,160,0,229,0,75,521,458,34,72,132,318,1784,409,13,4673,413,88,11,85,10,0,0,255,37,0,2,18,14,14,0,27,0,0,2,0,0,0,6,0,0,0,0,16,0,0,0,0,0,34,21,412,0,0,0,1,692,5,226,0,12,536,0,0,348,163,418,12,0,0,1,0,2,0,2,5,22,63,0,0,0,370,30,364,0,16,3,0,0,7,184,0,0,0,0,23,5,0,0,4,3,4
TInv,67,110,21,2,4,39,42,110,23,24,46,1,77,0,17,140,138,9,47,27,114,223,130,0,413,1112,34,1,0,4,1,0,45,12,0,1,7,3,3,0,8,0,0,11,0,0,0,1,0,0,0,0,9,0,0,0,0,0,11,4,70,0,0,0,0,129,2,95,0,6,226,0,0,179,151,391,2,0,0,0,0,0,0,0,2,9,1,0,0,0,157,22,78,0,6,2,0,0,3,34,0,0,0,0,13,0,0,0,0,0,0
VPB,43,20,5,1,4,15,13,54,7,9,8,0,27,2,0,68,16,6,18,6,25,0,67,0,88,34,365,1,0,3,0,0,5,0,0,0,2,1,2,0,2,0,0,5,0,0,0,0,0,0,0,0,3,0,0,0,0,0,11,0,15,0,1,0,0,40,2,42,0,1,43,0,0,4,0,93,1,0,0,0,0,0,0,1,0,5,0,0,0,0,1,7,48,0,1,1,0,0,1,3,0,0,0,0,6,7,0,0,0,0,0
IIAVB,3,6,2,2,2,8,1,12,1,0,2,0,2,0,0,5,1,1,2,1,6,1,9,0,11,1,1,58,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,4,0,0,1,1,9,0,0,1,6,7,1,0,0,0,0,0,0,0,0,1,2,0,0,0,0,0,4,0,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0
abQRS,177,313,12,0,149,160,353,1630,12,12,186,2,13,0,3,13,2,145,0,128,85,2629,151,50,85,0,0,2,3389,0,0,0,12,158,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,19,1,26,0,0,0,0,55,0,86,0,44,383,0,0,2644,464,45,0,0,0,2,0,0,0,0,19,26,37,0,0,0,6,0,0,0,0,0,0,0,6,251,0,0,0,0,6,0,0,0,3,0,41
AJR,0,2,0,0,1,1,0,3,1,1,0,0,0,0,0,7,1,0,1,0,0,0,0,0,10,4,3,0,0,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,4,0,0,0,0,2,0,0,0,2,2,0,0,0,0,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0
AMI,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,6,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0
AMIs,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
AnMIs,22,17,11,1,0,32,12,37,1,15,8,0,16,0,0,61,86,7,13,10,47,22,44,1,255,45,5,0,12,0,0,0,325,2,0,0,1,4,2,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,27,0,0,0,1,17,2,24,0,0,32,0,0,27,6,32,0,0,0,0,0,0,0,1,0,1,0,0,0,0,3,2,40,0,0,0,0,0,0,2,0,0,0,0,5,1,0,0,0,0,0
AnMI,23,32,0,1,6,29,44,125,2,0,32,1,9,6,4,2,23,8,0,9,9,265,28,5,37,12,0,0,158,0,0,0,2,416,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,4,4,0,0,0,0,6,0,8,1,8,45,0,0,159,55,26,2,0,0,0,0,0,0,0,5,4,7,0,1,0,31,14,13,0,0,0,0,0,5,24,0,0,0,0,0,0,0,0,0,0,0
AB,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0
AFAFL,0,16,0,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,2,2,2,1,0,0,0,0,0,0,0,0,0,41,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,5,0,0,0,0,13,12,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0
AH,3,0,0,0,0,1,1,0,2,0,0,0,6,0,0,13,0,1,4,2,0,0,26,1,18,7,2,0,0,0,0,0,1,0,0,0,62,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,4,0,0,0,1,7,0,0,0,1,17,0,0,0,0,16,1,0,0,0,0,0,0,0,0,5,0,0,0,0,2,1,8,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0
AP,0,3,0,0,0,9,1,11,1,0,3,0,1,0,0,12,1,2,6,1,1,0,3,0,14,3,1,0,0,0,0,0,4,0,0,0,0,52,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,4,0,0,0,2,4,0,0,0,0,8,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,10,0,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0
ATach,4,10,0,0,1,2,1,6,2,2,2,0,2,2,0,3,1,0,5,0,0,0,5,0,14,3,2,0,0,0,0,0,2,1,0,2,0,0,43,0,1,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,5,0,0,0,0,1,1,1,0,0,3,0,0,0,0,14,5,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,4,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0
AVJR,0,4,1,0,1,2,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
AVB,0,9,65,1,2,1,0,1,0,6,0,0,1,2,0,9,2,1,1,0,2,0,1,0,27,8,2,1,0,0,0,0,1,0,0,0,0,0,1,0,79,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,11,0,0,0,0,3,0,0,0,0,12,0,0,0,0,27,2,0,0,0,0,0,0,0,0,1,0,0,1,0,1,1,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
BPAC,2,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,5,0,0,0,0,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0
BTS,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0
BBB,19,11,1,0,0,0,3,66,91,0,0,0,8,0,0,1,0,1,25,1,9,0,13,0,2,11,5,0,0,0,0,0,0,0,0,0,2,2,2,0,0,0,0,137,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,2,43,0,0,0,0,0,2,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1
CD,0,8,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,16,0,0,0,2,3,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
CAF,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
CMI,6,10,2,12,4,1,0,0,1,0,0,0,1,5,0,0,0,0,0,1,3,0,13,1,0,0,0,3,0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,161,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,65,2,22,82,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0
CHB,0,4,0,9,5,5,1,5,4,2,0,0,2,4,0,1,3,1,3,0,5,3,1,0,6,1,0,0,4,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,51,0,0,0,0,0,0,0,0,0,0,3,0,0,0,2,0,0,2,0,1,0,1,3,0,0,10,5,8,2,0,0,0,0,0,0,0,0,0,1,0,0,0,1,1,1,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0
CIAHB,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
CHD,0,3,0,4,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,5,1,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,3,0,2,3,0,0,0,0,37,0,0,0,4,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,0,0,9,0,0,0,8,0,1,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,0,0,1,0,2,0,0,2,1,0,0
SQT,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
DIB,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
ERe,7,3,4,0,0,0,1,4,0,0,1,0,1,0,0,4,0,1,2,11,43,0,8,0,16,9,3,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,140,0,0,0,0,0,0,0,2,0,0,0,0,2,0,8,0,1,35,0,0,0,0,12,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,36,3,0,0,1,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0
FB,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,4,0,0,0,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,4,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0
HF,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0
HVD,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
HTV,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
IR,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0
ILBBB,30,29,3,3,0,1,5,50,15,0,2,0,12,3,3,22,5,0,2,3,12,47,24,3,34,11,11,3,19,1,0,0,1,3,0,0,0,0,2,0,0,0,0,1,0,0,0,3,0,0,0,0,0,0,0,0,0,0,205,0,4,0,1,0,0,28,0,16,0,0,33,0,0,41,31,43,16,0,0,0,0,0,0,0,0,0,2,1,0,0,3,2,21,0,0,0,0,0,0,13,0,0,0,0,3,0,0,0,0,0,0
ICA,5,12,0,0,8,11,14,0,11,6,7,1,8,0,3,1,10,0,0,1,9,134,8,0,21,4,0,0,1,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,156,0,0,0,0,0,0,0,7,0,0,21,0,0,25,12,8,0,0,0,0,0,0,0,0,3,2,6,0,0,0,11,3,0,0,0,0,0,0,3,7,0,0,0,0,1,0,0,0,0,0,0
IIs,29,92,23,1,11,32,20,50,3,18,23,0,31,0,1,101,124,19,42,19,45,142,76,2,412,70,15,0,26,4,0,0,27,4,0,0,4,1,5,0,11,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,4,0,670,0,1,0,0,22,0,39,0,12,73,0,0,39,73,44,0,0,0,0,0,0,0,1,3,14,3,0,0,0,23,3,74,0,2,3,0,0,1,16,0,0,0,0,6,3,0,0,0,1,1
ISTD,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
JE,0,1,0,1,0,1,0,0,0,0,0,0,1,2,0,1,0,0,0,0,2,0,2,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,1,0,9,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
JPC,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
JTach,0,1,0,0,0,0,0,1,0,1,0,0,0,1,0,1,0,0,1,0,0,0,2,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
LIs,87,103,16,0,5,24,37,201,12,34,49,0,70,0,2,147,170,3,70,34,110,87,111,3,692,129,40,4,55,2,0,0,17,6,0,1,7,4,1,0,3,0,0,2,0,0,0,2,0,0,0,0,2,0,0,0,0,0,28,0,22,0,0,0,0,1045,6,97,0,1,132,0,0,89,12,73,0,0,0,0,0,0,0,1,1,8,1,0,0,0,12,9,104,0,3,2,0,0,1,22,0,0,0,0,5,3,0,0,0,1,0
LAA,6,1,0,0,0,0,2,6,2,0,3,0,3,0,0,11,1,1,1,0,12,0,24,0,5,2,2,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,72,0,0,1,13,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,5,3,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0
LAE,179,7,4,0,30,75,73,221,93,8,62,0,63,0,35,187,40,22,53,40,106,343,195,4,226,95,42,0,86,0,0,1,24,8,0,0,0,0,1,0,0,0,0,43,0,0,0,1,0,0,0,0,8,0,0,0,0,0,16,7,39,0,0,0,0,97,0,1298,0,12,189,1,0,162,97,200,0,0,0,0,0,1,0,0,16,9,19,0,0,0,52,7,156,0,1,0,0,0,1,56,0,0,0,0,34,0,0,0,0,0,0
LAH,2,0,0,1,1,1,0,0,2,0,1,0,0,2,0,0,0,0,0,0,0,0,9,3,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,40,0,5,0,0,9,3,17,21,0,0,0,0,0,0,0,3,4,0,0,0,0,1,1,3,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0
LPFB,24,40,1,0,86,36,3,8,1,0,11,0,4,0,5,5,8,102,4,6,5,107,17,3,12,6,1,1,44,2,0,0,0,8,0,0,1,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,12,0,0,0,0,1,1,12,0,202,5,0,0,79,9,10,0,0,0,0,0,0,0,0,12,29,6,0,0,0,7,0,3,0,0,1,0,0,0,11,0,0,0,0,0,1,0,0,0,0,0
LVH,202,355,28,23,12,92,184,683,31,2,154,2,153,10,66,231,127,12,36,114,265,1721,241,23,536,226,43,9,383,2,0,0,32,45,3,5,17,4,3,2,12,4,0,0,0,1,0,3,0,10,0,0,35,3,0,0,0,0,33,21,73,1,0,0,0,132,13,189,5,5,3759,1,0,691,1225,400,23,4,0,1,2,1,0,1,6,18,100,1,2,0,404,39,178,0,6,0,0,0,11,219,0,0,0,0,60,2,0,0,3,0,1
LVS,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
MoI,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0
MI,391,606,10,16,291,258,718,2227,41,47,353,5,120,39,131,23,421,156,1,158,109,3789,302,65,348,179,4,1,2644,0,0,0,27,159,0,0,0,0,0,0,0,0,0,2,0,0,65,10,0,9,0,0,0,1,0,0,0,0,41,25,39,0,0,0,0,89,0,162,9,79,691,0,0,6021,805,195,60,2,1,0,3,1,2,0,30,22,105,0,3,0,370,20,21,0,5,0,0,0,24,481,0,0,25,0,6,0,0,8,6,0,7
MIs,121,452,7,8,19,79,193,580,12,9,180,2,63,14,60,11,84,26,0,53,56,1385,120,24,163,151,0,6,464,0,0,0,6,55,0,0,0,0,0,0,0,0,0,0,0,0,2,5,0,0,0,0,0,0,0,0,0,0,31,12,73,0,0,0,1,12,0,97,3,9,1225,0,0,805,2559,183,69,0,0,0,0,0,0,0,16,6,45,0,0,0,462,2,0,0,6,0,0,0,12,212,0,0,1,0,9,0,0,0,3,0,0
NSSTTA,166,330,64,111,43,135,66,191,32,83,53,4,188,78,15,403,65,33,45,82,257,220,424,25,418,391,93,7,45,8,0,1,32,26,0,13,16,8,14,3,27,0,0,1,0,1,22,8,0,0,0,0,12,0,0,0,0,0,43,8,44,0,5,1,2,73,9,200,17,10,400,1,0,195,183,3554,212,0,0,1,0,2,0,1,11,26,32,3,2,0,150,33,277,0,12,1,0,1,3,34,1,0,1,0,17,2,4,1,2,1,0
OldMI,25,79,4,107,63,41,0,0,20,0,0,1,17,49,0,0,1,0,1,4,12,2,165,8,12,2,1,1,0,0,0,0,0,2,0,12,1,0,5,0,2,0,0,0,0,0,82,2,0,0,1,0,0,0,0,0,0,0,16,0,0,1,0,0,2,0,0,0,21,0,23,0,0,60,69,212,1168,0,0,0,0,0,0,0,11,12,0,0,0,0,20,7,23,0,1,0,0,0,1,0,0,0,1,0,2,0,1,0,2,0,0
VPVC,0,0,0,3,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,4,2,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,8,0,0,0,4,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,4,0,1,2,0,0,0,23,0,3,6,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,3,13,1,0,0,0,0,0,0,0,1,0,1
PAF,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
PSVT,0,1,0,0,0,1,0,2,0,0,0,0,1,0,0,0,0,1,0,0,0,2,1,3,1,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,3,0,27,0,0,0,0,0,0,6,0,0,1,1,0,0,0,9,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0
PVT,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,2,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,3,0,0,0,6,0,0,15,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,2,8,0,0,0,0,0,0,0,0,0,0,0
RAb,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,1,0,2,0,0,0,0,0,11,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
RAF,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
RAAb,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,3,0,2,1,0,0,0,7,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,14,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0
RAH,7,0,0,0,3,16,9,16,2,2,7,0,4,2,1,3,1,18,0,2,1,62,34,2,5,2,0,0,19,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,0,0,0,0,1,0,16,3,12,6,0,0,30,16,11,11,0,0,0,0,0,0,0,117,21,4,0,0,0,10,0,0,0,0,0,0,0,1,5,0,0,0,0,0,0,0,0,0,0,0
RVH,15,10,1,0,56,81,25,41,2,1,1,1,6,2,1,25,3,47,10,7,10,103,24,3,22,9,5,1,26,0,0,0,1,4,0,0,5,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,14,0,1,0,0,8,0,9,4,29,18,0,0,22,6,26,12,0,0,0,0,0,0,0,21,232,0,0,0,0,1,1,20,0,0,1,0,0,0,4,0,0,0,0,17,0,0,0,0,0,0
STC,18,217,11,0,14,25,50,146,4,13,35,1,26,0,12,9,16,4,0,15,20,410,60,3,63,1,0,2,37,0,0,0,0,7,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,2,6,3,0,0,0,0,1,0,19,0,6,100,0,0,105,45,32,0,0,0,6,0,0,0,0,4,0,777,0,0,0,78,3,4,0,15,0,0,0,4,57,0,0,0,0,1,0,0,0,2,0,0
SPRI,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
SAB,0,0,0,2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
SND,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
STD,38,252,4,2,14,36,88,132,4,9,86,2,76,3,59,15,102,6,22,16,30,688,90,1,370,157,1,0,6,0,0,0,3,31,0,0,2,0,0,1,1,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,3,11,23,1,0,0,2,12,1,52,1,7,404,0,0,370,462,150,20,1,0,1,0,0,0,0,10,1,78,0,0,0,1977,15,6,0,4,0,1,0,3,112,0,0,0,0,2,0,0,0,1,0,2
STE,17,10,5,9,3,5,5,21,4,5,2,1,11,1,0,23,0,2,22,7,21,23,36,0,30,22,7,0,0,0,2,0,2,14,0,0,1,1,0,0,1,0,0,0,0,0,1,1,0,0,0,0,36,0,0,0,0,1,2,3,3,0,1,0,0,9,5,7,1,0,39,0,0,20,2,33,7,1,0,0,0,1,0,0,0,1,3,0,1,0,15,452,18,0,0,0,0,0,0,4,0,0,0,0,4,1,0,0,0,0,0
STIAb,88,79,14,49,14,83,17,72,12,25,26,0,69,12,0,936,84,11,17,27,70,0,84,5,364,78,48,4,0,6,0,1,40,13,0,1,8,10,4,0,5,1,1,0,0,0,0,1,0,0,0,1,3,0,0,0,1,0,21,0,74,0,0,0,1,104,3,156,3,3,178,0,0,21,0,277,23,0,0,0,0,0,0,3,0,20,4,1,0,0,6,18,1475,0,0,2,2,0,1,7,0,0,0,0,7,2,0,0,1,0,0
SVB,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0
SVT,2,1,7,0,2,4,1,8,2,5,3,0,1,0,0,5,2,0,0,1,0,3,3,0,16,6,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,3,0,1,0,0,6,0,0,5,6,12,1,0,0,9,0,0,0,0,0,0,15,0,0,0,4,0,0,0,63,0,1,0,0,7,0,0,0,0,0,0,0,0,0,0,0
ALR,3,2,1,0,0,0,0,0,1,1,0,0,0,0,0,2,1,3,1,2,0,0,2,0,3,2,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,0,0,2,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,0,12,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
TIA,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,1,0,7,0,1,7,0,0,0,0,0,0,0,0,0,0,0
UAb,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0
VBig,4,9,0,1,5,4,8,32,2,0,4,0,22,0,1,0,1,3,1,3,3,38,13,3,7,3,1,0,6,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,3,1,0,0,0,0,1,0,1,1,0,11,0,0,24,12,3,1,3,0,0,2,0,0,0,1,0,4,0,0,0,3,0,1,0,0,0,1,0,98,48,0,0,0,0,0,0,0,0,2,0,0
VEB,93,215,1,10,40,71,139,385,62,10,81,2,61,0,36,15,38,30,54,123,6,662,108,11,184,34,3,2,251,0,4,0,2,24,1,0,0,0,1,0,0,1,1,0,0,0,0,0,0,10,0,0,0,4,0,0,0,1,13,7,16,0,0,0,0,22,0,56,0,11,219,0,1,481,212,34,0,13,0,2,8,0,0,0,5,4,57,0,0,0,112,4,7,1,7,0,7,0,48,1944,0,0,0,0,0,0,0,0,10,0,2
VEsB,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0
VEsR,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0
VF,0,4,7,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,25,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,38,0,1,0,0,1,0,0,0
VFL,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0
VH,13,8,0,0,0,19,7,23,2,0,1,0,8,0,0,18,12,13,4,4,7,23,15,0,23,13,6,0,6,0,0,0,5,0,0,2,2,2,0,0,0,0,0,0,0,0,1,0,0,2,0,0,2,0,2,0,0,0,3,1,6,0,0,0,0,5,1,34,1,0,60,0,0,6,9,17,2,0,0,0,0,0,0,0,0,17,1,0,0,0,2,4,7,0,0,0,0,0,0,0,0,0,1,0,119,0,0,0,0,0,0
VPP,2,9,1,0,0,0,2,4,0,1,0,0,1,0,0,2,1,0,1,1,2,0,4,0,5,0,7,0,0,2,0,0,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,3,0,0,0,1,2,0,0,0,0,2,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,46,0,0,0,0,0
VPEx,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0
VTach,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,12,0,0,0
VTrig,1,0,0,0,0,1,2,4,0,0,0,0,5,0,2,1,1,0,0,0,0,11,6,2,4,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,6,3,2,2,1,0,0,0,0,0,0,0,0,2,0,0,0,1,0,1,0,0,0,0,0,2,10,0,0,0,0,0,0,0,0,29,0,0
WAP,1,1,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7,0
WPW,0,2,0,1,0,3,0,41,0,0,0,0,1,0,0,0,0,3,0,3,3,66,0,6,4,0,0,0,41,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,7,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,88
"""
),
index_col=0,
)
dx_cooccurrence_scored = dx_cooccurrence_all.loc[
dx_mapping_scored.Abbreviation, dx_mapping_scored.Abbreviation
]
def get_cooccurrence(
c1: Union[str, int], c2: Union[str, int], ensure_scored: bool = False
) -> int:
"""Get the co-occurrence count between two diagnoses.
Parameters
----------
c1, c2 : str or int
The 2 classes.
ensure_scored : bool, default False
        Whether to ensure that the classes are scored classes.
        If True, ``ValueError`` would be raised if `c1` or `c2` is not scored.
Returns
-------
cooccurrence : int
Cooccurrence of class `c1` and `c2`, if they are not the same class;
otherwise the occurrence of the class `c1` (also `c2`).
"""
_c1 = normalize_class(c1, ensure_scored=ensure_scored)
_c2 = normalize_class(c2, ensure_scored=ensure_scored)
cooccurrence = dx_cooccurrence_all.loc[_c1, _c2].item()
return cooccurrence
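# Usage sketch, readable off the co-occurrence table above:
# >>> get_cooccurrence("AF", "AFL")  # off-diagonal entry
# 32
# >>> get_cooccurrence("AF", "AF")  # diagonal entry, i.e. total occurrences of AF
# 3475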
"""
dx_cooccurrence_all is obtained via the following code
>>> db_dir = "/media/cfs/wenhao71/data/cinc2020_data/"
>>> working_dir = "./working_dir"
>>> dr = CINC2020Reader(db_dir=db_dir,working_dir=working_dir)
>>> dx_cooccurrence_all = pd.DataFrame(np.zeros((len(dx_mapping_all.Abbreviation), len(dx_mapping_all.Abbreviation)),dtype=int), columns=dx_mapping_all.Abbreviation.values)
>>> dx_cooccurrence_all.index = dx_mapping_all.Abbreviation.values
>>> for tranche, l_rec in dr.all_records.items():
... for rec in l_rec:
... ann = dr.load_ann(rec)
... d = ann["diagnosis"]["diagnosis_abbr"]
... for item in d:
...             dx_cooccurrence_all.loc[item,item] += 1
...         for i in range(len(d)-1):
...             for j in range(i+1,len(d)):
...                 dx_cooccurrence_all.loc[d[i],d[j]] += 1
...                 dx_cooccurrence_all.loc[d[j],d[i]] += 1
The diagonal entries are the total occurrences of the corresponding arrhythmias in the dataset.
"""
# ===== file: /sort/python/insertion_sort.py | repo: ZoranPandovski/al-go-rithms | license: CC0-1.0 =====
"""
This is a pure python implementation of the insertion sort algorithm
For doctests run following command:
python -m doctest -v insertion_sort.py
or
python3 -m doctest -v insertion_sort.py
For manual testing run:
python insertion_sort.py
"""
from __future__ import print_function
def insertion_sort(collection):
"""Pure implementation of the insertion sort algorithm in Python
    :param collection: some mutable ordered collection of mutually
    comparable items
    :return: the same collection sorted in ascending order
Examples:
>>> insertion_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> insertion_sort([])
[]
>>> insertion_sort([-2, -5, -45])
[-45, -5, -2]
"""
for index in range(1, len(collection)):
while index > 0 and collection[index - 1] > collection[index]:
collection[index], collection[index - 1] = collection[index - 1], collection[index]
index -= 1
return collection
if __name__ == '__main__':
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
    user_input = raw_input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item) for item in user_input.split(',')]
print(insertion_sort(unsorted))
# ===== file: /tests/support/plugins/selenium.py | repo: bokeh/bokeh | license: BSD-3-Clause =====
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Define a Pytest plugin for selenium webdrivers.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import (
TYPE_CHECKING,
Callable,
Iterator,
NoReturn,
Sequence,
)
from warnings import warn
# External imports
import pytest
if TYPE_CHECKING:
import py
from _pytest import config, nodes
from selenium.webdriver.remote.webdriver import WebDriver
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'driver',
'has_no_console_errors',
'pytest_report_collectionfinish',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def pytest_report_collectionfinish(config: config.Config, startdir: py.path.local, items: Sequence[nodes.Item]) -> list[str]:
    ''' Report which selenium webdriver (and assertion mode) the tests will use.
    '''
driver_name: str = config.getoption('driver', 'chrome').lower()
asserts = "ON" if driver_name == "chrome" else "OFF"
return ["", f"Bokeh selenium tests using {driver_name!r} driver (no-console-error assertions: {asserts})"]
@pytest.fixture(scope="session")
def driver(pytestconfig: config.Config) -> Iterator[WebDriver]:
''' Select and configure a Selenium webdriver for integration tests.
'''
driver_name: str = pytestconfig.getoption('driver', 'chrome').lower()
def chrome() -> WebDriver:
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.webdriver import WebDriver as Chrome
options = Options()
options.add_argument("--headless")
options.add_argument("--no-sandbox")
options.add_argument("--window-size=1920x1080")
return Chrome(options=options)
def firefox() -> WebDriver:
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.webdriver import WebDriver as Firefox
options = Options()
options.add_argument("--headless")
options.add_argument("--window-size=1920x1080")
return Firefox(options=options)
def safari() -> WebDriver:
from selenium.webdriver.safari.webdriver import WebDriver as Safari
return Safari()
driver: WebDriver
if driver_name == "chrome":
driver = chrome()
elif driver_name == "firefox":
driver = firefox()
elif driver_name == "safari":
driver = safari()
else:
raise ValueError("expected 'chrome', 'firefox' or 'safari'")
driver.implicitly_wait(10)
yield driver
driver.quit()
@pytest.fixture(scope="session")
def has_no_console_errors(pytestconfig: config.Config) -> Callable[[WebDriver], bool | NoReturn]:
''' Provide a function to assert no browser console errors are present.
    Unfortunately logs are only accessible with the Chrome webdriver, see e.g.
https://github.com/mozilla/geckodriver/issues/284
For non-Chrome webdrivers this check always returns True.
'''
driver_name: str = pytestconfig.getoption('driver').lower()
if driver_name == "chrome":
def func(driver: WebDriver) -> bool | NoReturn:
logs = driver.get_log('browser')
severe_errors = [x for x in logs if x.get('level') == 'SEVERE']
non_network_errors = [l for l in severe_errors if l.get('type') != 'network']
if len(non_network_errors) == 0:
if len(severe_errors) != 0:
warn(f"There were severe network errors (this may or may not have affected your test): {severe_errors}")
return True
# XXX: no return should be needed with NoReturn type (type-checker bug?)
return pytest.fail(f"Console errors: {non_network_errors}")
else:
def func(driver: WebDriver) -> bool | NoReturn:
return True
return func
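# Illustrative only (a hypothetical test, not part of this plugin): a test
# module would consume the fixtures above roughly like this:
#
#     def test_page_has_no_console_errors(driver, has_no_console_errors):
#         driver.get("http://localhost:5006/some_app")  # URL is an assumption
#         assert has_no_console_errors(driver)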
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# ===== file: /tracopt/perm/tests/authz_policy.py | repo: edgewall/trac | license: BSD-3-Clause =====
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2023 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import configparser
import os
import textwrap
import unittest
from trac.core import Component, ComponentMeta, implements
from trac.config import ConfigurationError
from trac.perm import IPermissionRequestor, PermissionCache
from trac.resource import Resource
from trac.test import EnvironmentStub, Mock, makeSuite, mkdtemp
from trac.util import create_file
from trac.versioncontrol.api import Repository
from tracopt.perm.authz_policy import AuthzPolicy
class AuthzPolicyTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
class TestPermissionRequestor(Component):
implements(IPermissionRequestor)
def get_permission_actions(self):
perms = ['TEST_VIEW', 'TEST_CREATE', 'TEST_DELETE',
'TEST_MODIFY']
return [('TEST_ADMIN', perms)] + perms
cls.permission_requestors = [TestPermissionRequestor]
@classmethod
def tearDownClass(cls):
for component in cls.permission_requestors:
ComponentMeta.deregister(component)
def setUp(self):
temp_dir = mkdtemp()
self.authz_file = os.path.join(temp_dir, 'trac-authz-policy')
create_file(self.authz_file, textwrap.dedent("""\
# -*- coding: utf-8 -*-
# Unicode user names
[groups]
administrators = éat
[wiki:WikiStart]
änon = WIKI_VIEW
John = WIKI_VIEW
@administrators = WIKI_VIEW
* =
# Unicode page names
[wiki:résumé]
änon =
@administrators = WIKI_VIEW
* =
# Tickets
[ticket:43]
änon = TICKET_VIEW
@administrators =
* =
[ticket:*]
änon =
@administrators = TICKET_VIEW
* =
# Default repository
[repository:@*]
änon =
@administrators = BROWSER_VIEW, FILE_VIEW
* =
# Non-default repository
[repository:bláh@*]
änon = BROWSER_VIEW, FILE_VIEW
@administrators = BROWSER_VIEW, FILE_VIEW
* =
[milestone:milestone1]
anonymous = MILESTONE_VIEW
"""))
self.env = EnvironmentStub(enable=['trac.perm.*', AuthzPolicy] +
self.permission_requestors,
path=temp_dir,
config=[('logging', 'log_level', 'WARNING')])
self.env.config.set('trac', 'permission_policies',
'AuthzPolicy, DefaultPermissionPolicy')
self.env.config.set('authz_policy', 'authz_file', self.authz_file)
def tearDown(self):
self.env.reset_db_and_disk()
def check_permission(self, action, user, resource, perm=None):
authz_policy = AuthzPolicy(self.env)
return authz_policy.check_permission(action, user, resource, perm)
def get_repository(self, reponame):
params = {'id': 1, 'name': reponame}
return Mock(Repository, 'mock', params, self.env.log)
def get_perm(self, username, *args):
perm = PermissionCache(self.env, username)
if args:
return perm(*args)
return perm
def test_unicode_username(self):
resource = Resource('wiki', 'WikiStart')
perm = self.get_perm('anonymous')
self.assertFalse(
self.check_permission('WIKI_VIEW', 'anonymous', resource, perm))
self.assertNotIn('WIKI_VIEW', perm)
self.assertNotIn('WIKI_VIEW', perm(resource))
perm = self.get_perm('änon')
self.assertTrue(
self.check_permission('WIKI_VIEW', 'änon', resource, perm))
self.assertNotIn('WIKI_VIEW', perm)
self.assertIn('WIKI_VIEW', perm(resource))
def test_case_sensitive_username(self):
resource = Resource('wiki', 'WikiStart')
perm = self.get_perm('john')
self.assertFalse(
self.check_permission('WIKI_VIEW', 'john', resource, perm))
self.assertNotIn('WIKI_VIEW', perm)
self.assertNotIn('WIKI_VIEW', perm(resource))
perm = self.get_perm('John')
self.assertTrue(
self.check_permission('WIKI_VIEW', 'John', resource, perm))
self.assertNotIn('WIKI_VIEW', perm)
self.assertIn('WIKI_VIEW', perm(resource))
def test_unicode_resource_name(self):
resource = Resource('wiki', 'résumé')
perm = self.get_perm('anonymous')
self.assertFalse(
self.check_permission('WIKI_VIEW', 'anonymous', resource, perm))
self.assertNotIn('WIKI_VIEW', perm)
self.assertNotIn('WIKI_VIEW', perm(resource))
perm = self.get_perm('änon')
self.assertFalse(
self.check_permission('WIKI_VIEW', 'änon', resource, perm))
self.assertNotIn('WIKI_VIEW', perm)
self.assertNotIn('WIKI_VIEW', perm(resource))
perm = self.get_perm('éat')
self.assertTrue(
self.check_permission('WIKI_VIEW', 'éat', resource, perm))
self.assertNotIn('WIKI_VIEW', perm)
self.assertIn('WIKI_VIEW', perm(resource))
def test_resource_without_id(self):
perm = self.get_perm('anonymous')
self.assertNotIn('TICKET_VIEW', perm)
self.assertNotIn('TICKET_VIEW', perm('ticket'))
self.assertNotIn('TICKET_VIEW', perm('ticket', 42))
self.assertNotIn('TICKET_VIEW', perm('ticket', 43))
perm = self.get_perm('änon')
self.assertNotIn('TICKET_VIEW', perm)
self.assertNotIn('TICKET_VIEW', perm('ticket'))
self.assertNotIn('TICKET_VIEW', perm('ticket', 42))
self.assertIn('TICKET_VIEW', perm('ticket', 43))
perm = self.get_perm('éat')
self.assertNotIn('TICKET_VIEW', perm)
self.assertIn('TICKET_VIEW', perm('ticket'))
self.assertIn('TICKET_VIEW', perm('ticket', 42))
self.assertNotIn('TICKET_VIEW', perm('ticket', 43))
def test_default_repository(self):
repos = self.get_repository('')
self.assertFalse(repos.is_viewable(self.get_perm('anonymous')))
self.assertFalse(repos.is_viewable(self.get_perm('änon')))
self.assertTrue(repos.is_viewable(self.get_perm('éat')))
def test_non_default_repository(self):
repos = self.get_repository('bláh')
self.assertFalse(repos.is_viewable(self.get_perm('anonymous')))
self.assertTrue(repos.is_viewable(self.get_perm('änon')))
self.assertTrue(repos.is_viewable(self.get_perm('éat')))
def test_case_sensitive_resource(self):
resource = Resource('WIKI', 'wikistart')
self.assertIsNone(
self.check_permission('WIKI_VIEW', 'anonymous', resource))
self.assertIsNone(
self.check_permission('WIKI_VIEW', 'änon', resource))
def test_authenticated_inherits_anonymous_permission(self):
"""Metagroup authenticated inherits all permissions granted to
anonymous.
"""
resource = Resource('milestone', 'milestone1')
self.assertTrue(self.check_permission('MILESTONE_VIEW',
'anonymous', resource))
self.assertTrue(self.check_permission('MILESTONE_VIEW',
'authenticated', resource))
self.assertIn('MILESTONE_VIEW', self.get_perm('anonymous',
resource))
self.assertIn('MILESTONE_VIEW', self.get_perm('authenticated',
resource))
def test_undefined_action_is_logged(self):
"""Undefined action is logged at warning level."""
create_file(self.authz_file, textwrap.dedent("""\
[groups]
administrators = éat
[wiki:WikiStart]
änon = UNKNOWN_VIEW, TEST_CREATE, !TEST_MODIFY
[milestone:milestone1]
* = UNKNOWN_MODIFY, !TEST_VIEW
"""))
authz_policy = AuthzPolicy(self.env)
authz_policy.parse_authz()
self.assertEqual(2, len(self.env.log_messages))
self.assertIn(('WARNING',
'The action UNKNOWN_VIEW in the [wiki:WikiStart] '
'section of trac-authz-policy is not a valid action.'),
self.env.log_messages)
self.assertIn(('WARNING',
'The action UNKNOWN_MODIFY in the [milestone:milestone1] '
'section of trac-authz-policy is not a valid action.'),
self.env.log_messages)
def test_get_authz_file(self):
"""get_authz_file should resolve a relative path."""
authz_policy = AuthzPolicy(self.env)
authz_file = authz_policy.authz_file
self.assertTrue(os.path.isabs(authz_file))
def test_get_authz_file_notfound_raises(self):
"""ConfigurationError exception should be raised if file not found."""
authz_file = os.path.join(self.env.path, 'some-nonexistent-file')
self.env.config.set('authz_policy', 'authz_file', authz_file)
self.assertRaises(ConfigurationError, self.check_permission,
'WIKI_VIEW', 'änon', None, None)
def test_get_authz_file_notdefined_raises(self):
"""ConfigurationError exception should be raised if the option
`[authz_policy] authz_file` is not specified in trac.ini."""
self.env.config.remove('authz_policy', 'authz_file')
self.assertRaises(ConfigurationError, self.check_permission,
'WIKI_VIEW', 'änon', None, None)
def test_get_authz_file_empty_raises(self):
"""ConfigurationError exception should be raised if the option
`[authz_policy] authz_file` is empty."""
self.env.config.set('authz_policy', 'authz_file', '')
self.assertRaises(ConfigurationError, self.check_permission,
'WIKI_VIEW', 'änon', None, None)
def test_get_authz_file_removed_raises(self):
"""ConfigurationError exception is raised if file is removed."""
os.remove(self.authz_file)
self.assertRaises(ConfigurationError, self.check_permission,
'WIKI_VIEW', 'änon', None, None)
def test_parse_authz_empty(self):
"""Allow the file to be empty."""
create_file(self.authz_file, '')
authz_policy = AuthzPolicy(self.env)
authz_policy.parse_authz()
self.assertEqual([], authz_policy.authz.sections())
def test_parse_authz_no_settings(self):
"""Allow the file to have no settings."""
create_file(self.authz_file, textwrap.dedent("""\
# [wiki:WikiStart]
# änon = WIKI_VIEW
# * =
"""))
authz_policy = AuthzPolicy(self.env)
authz_policy.parse_authz()
self.assertEqual([], authz_policy.authz.sections())
def test_parse_authz_malformed_raises(self):
"""ConfigurationError should be raised if the file is malformed."""
create_file(self.authz_file, textwrap.dedent("""\
wiki:WikiStart]
änon = WIKI_VIEW
* =
"""))
authz_policy = AuthzPolicy(self.env)
authz_mtime = authz_policy.authz_mtime
self.assertRaises(ConfigurationError, authz_policy.parse_authz)
self.assertEqual(authz_mtime, authz_policy.authz_mtime)
def test_parse_authz_duplicated_sections_raises(self):
"""DuplicateSectionError should be raised if the file has duplicate
sections."""
create_file(self.authz_file, textwrap.dedent("""\
[wiki:WikiStart]
änon = WIKI_VIEW
[wiki:WikiStart]
änon = WIKI_VIEW
"""))
authz_policy = AuthzPolicy(self.env)
self.assertRaises(configparser.DuplicateSectionError,
authz_policy.parse_authz)
def test_parse_authz_duplicated_options_raises(self):
"""DuplicateOptionError should be raised if a section has duplicate
options."""
create_file(self.authz_file, textwrap.dedent("""\
[wiki:WikiStart]
änon = WIKI_VIEW
änon = WIKI_ADMIN
"""))
authz_policy = AuthzPolicy(self.env)
self.assertRaises(configparser.DuplicateOptionError,
authz_policy.parse_authz)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(makeSuite(AuthzPolicyTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
# ===== file: /vision/cnns/pytorch/train/lr_schedule.py | repo: graphcore/examples | license: MIT and others =====
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
from torch.optim.lr_scheduler import _LRScheduler
class WarmUpLRDecorator(_LRScheduler):
def __init__(self, lr_scheduler, optimizer, warmup_epoch, last_epoch=-1):
self.lr_scheduler = lr_scheduler
self.warmup_epoch = warmup_epoch
super(WarmUpLRDecorator, self).__init__(optimizer, last_epoch=last_epoch)
def get_lr(self):
lr = self.lr_scheduler._get_closed_form_lr()
if self.last_epoch < self.warmup_epoch:
return [e * (self.last_epoch) / float(self.warmup_epoch) for e in lr]
else:
return lr
def step(self, epoch=None):
if epoch is not None:
self.last_epoch = (
epoch - 1
) # It is going to be increased by one in the parent's step function. Need to decrease by one to revert this effect.
super(WarmUpLRDecorator, self).step()
self.lr_scheduler.step(epoch) # This has to be stepped in this order since the constructor calls step()
class PeriodicLRDecorator(_LRScheduler):
def __init__(self, lr_scheduler, optimizer, period, last_epoch=-1):
self.lr_scheduler = lr_scheduler
self.period = period
self.next_update = 0
super(PeriodicLRDecorator, self).__init__(optimizer, last_epoch=last_epoch)
def get_lr(self):
return self.lr_scheduler._get_closed_form_lr()
def step(self, epoch=None):
epoch = epoch if epoch is not None else self.last_epoch + 1
self.last_epoch = epoch
if epoch >= self.next_update:
self.next_update += self.period
self.optimizer._step_count = 1 # Initialize step as PopTorch does not call optimizer.step() explicitly
self.lr_scheduler.step()
def _get_closed_form_lr(self):
return self.lr_scheduler._get_closed_form_lr()
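# Usage sketch (an assumption, not from this repository): wrap a scheduler
# that implements `_get_closed_form_lr` (e.g. ExponentialLR) so that the
# first `warmup_epoch` epochs ramp the learning rate linearly from zero.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    base = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    scheduler = WarmUpLRDecorator(base, optimizer, warmup_epoch=5)
    for epoch in range(10):
        # ... one training epoch would run here ...
        scheduler.step()
        print(epoch, optimizer.param_groups[0]["lr"])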
# ===== file: /neural_graph_cf/utils/parser.py | repo: jrzaurin/RecoTour | license: none =====
import argparse
def parse_args():
parser = argparse.ArgumentParser(description="Run NGCF.")
parser.add_argument('--data_dir', type=str,
default='/home/ubuntu/projects/RecoTour/datasets/',
help='Input data path.')
parser.add_argument('--dataset', type=str, default='Amazon',
help='Dataset name')
parser.add_argument('--results_dir', type=str, default='results',
help='Store model path.')
parser.add_argument('--model', type=str, default='ngcf',
help='Specify the model {ngcf, bpr}.')
parser.add_argument('--adj_type', type=str, default='norm',
help='Specify the type of the adjacency (laplacian) matrix from {plain, norm, mean}.')
parser.add_argument('--pretrain', type=int, default=0,
help='0: No pretrain, -1: Pretrain with the learned embeddings, 1:Pretrain with stored models.')
parser.add_argument('--n_epochs', type=int, default=100,
help='Number of epoch.')
parser.add_argument('--optimizer', type=str, default="Adam",
help='Specify the optimizer {Adam, RAdam, AdamW}')
parser.add_argument('--reg', type=float, default=0.,
help='l2 reg.')
parser.add_argument('--lr', type=float, default=0.001,
help='Learning rate.')
parser.add_argument('--lr_scheduler', type=str, default="No",
help='Specify the lr_scheduler {ReduceLROnPlateau, CyclicLR, No (nothing)}')
parser.add_argument('--emb_dim', type=int, default=32,
help='number of embeddings.')
parser.add_argument('--layers', type=str, default='[32,32]',
help='Output sizes of every layer')
parser.add_argument('--batch_size', type=int, default=8192,
help='Batch size.')
parser.add_argument('--node_dropout', type=float, default=0.,
help='Graph Node dropout.')
parser.add_argument('--edge_dropout', type=float, default=0.,
help='Graph edge dropout.')
parser.add_argument('--mess_dropout', type=float, default=0.,
help='Message dropout.')
parser.add_argument('--n_fold', type=int, default=10,
help='number of partitions for the adjacency matrix')
parser.add_argument('--Ks', type=str, default='[20, 40, 60, 80, 100]',
help='k order of metric evaluation (e.g. NDCG@k)')
parser.add_argument('--print_every', type=int, default=1,
help='print results every N epochs')
parser.add_argument('--eval_every', type=int, default=5,
help='Evaluate every N epochs')
parser.add_argument('--test_with', type=str, default='gpu',
help='test using cpu or gpu')
parser.add_argument('--save_results', type=int, default=1,
help='Save metrics to a dataframe')
parser.add_argument('--patience', type=int, default=2,
help='Patience for early stopping. In epochs = patience*eval_every')
return parser.parse_args()
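# Usage sketch (hedged): the list-valued options are plain strings that the
# calling script is expected to parse itself, e.g.:
#
#     args = parse_args()
#     layers = eval(args.layers)  # -> [32, 32]
#     Ks = eval(args.Ks)          # -> [20, 40, 60, 80, 100]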
# ===== file: /tests/test_geo_utils.py | repo: sentinel-hub/sentinelhub-py | license: MIT =====
"""
Test for geo_utils module and correctness of geographical transformations
"""
from typing import Tuple, Union
import pytest
from sentinelhub import CRS, BBox
from sentinelhub.geo_utils import (
bbox_to_dimensions,
bbox_to_resolution,
get_image_dimension,
get_utm_crs,
pixel_to_utm,
transform_point,
utm_to_pixel,
)
BBOX_WGS84 = BBox(((111.6388, 8.6488), (111.6988, 8.6868)), CRS.WGS84)
BBOX_UTM = BBox(((570280, 956083), (576884, 960306)), CRS("32649"))
BBOX_POP_WEB = BBox(((12427574, 966457), (12434253, 970736)), CRS.POP_WEB)
BBOX_2 = BBox(((570000, 956000), (571000, 958000)), CRS("32649"))
BBOX_3 = BBox(((100, -10.5), (101, -10)), CRS.WGS84)
GEOREFERENCING_TRANSFORM = (570851.8316965176, 512, 0, 960429.6742984429, 0, -512)
@pytest.mark.parametrize(
"wgs84_coordinate, utm_crs",
[
((109.988, 9.988), CRS("32649")),
((49.889, 49.889), CRS("32639")),
((30, -15), CRS("32736")),
],
)
def test_get_utm_crs(wgs84_coordinate: Tuple[float, float], utm_crs: CRS) -> None:
assert get_utm_crs(*wgs84_coordinate) is utm_crs
@pytest.mark.parametrize(
"input_bbox, resolution, expected_dimensions",
[
(BBOX_WGS84, (512, 512), (12.8784, 8.2284)),
(BBOX_UTM, (512, 50), (12.8984, 84.46)),
(BBOX_POP_WEB, (50, 512), (131.87, 8.2284)),
(BBOX_2, (10, 10), (100, 200)),
(BBOX_3, (500, 500), (219.6, 109.58)),
],
)
def test_bbox_to_resolution(
input_bbox: BBox, resolution: Tuple[int, int], expected_dimensions: Tuple[float, float]
) -> None:
assert bbox_to_resolution(input_bbox, *resolution) == pytest.approx(expected_dimensions, rel=1e-4)
@pytest.mark.parametrize(
"input_bbox, resolution, expected_dimensions",
[
(BBOX_WGS84, 10, (659, 421)),
(BBOX_UTM, 10, (660, 422)),
(BBOX_POP_WEB, (20, 50), (330, 84)),
(BBOX_2, (20, 10), (50, 200)),
(BBOX_3, (100, 50), (1098, 1096)),
],
)
def test_bbox_to_dimensions(
    input_bbox: BBox, resolution: Union[float, Tuple[float, float]], expected_dimensions: Tuple[int, int]
) -> None:
assert bbox_to_dimensions(input_bbox, resolution) == expected_dimensions
@pytest.mark.parametrize(
"input_bbox, height, width",
[
(BBOX_WGS84, 715, 1119),
(BBOX_UTM, 715, 1118),
(BBOX_POP_WEB, 715, 1119),
(BBOX_2, 10, 5),
(BBOX_3, 15, 30),
],
)
def test_get_image_dimensions(input_bbox: BBox, height: int, width: int) -> None:
assert get_image_dimension(input_bbox, height=height) == width
assert get_image_dimension(input_bbox, width=width) == height
@pytest.mark.parametrize("input_bbox", [BBOX_WGS84, BBOX_UTM, BBOX_POP_WEB])
@pytest.mark.parametrize("expected_bbox", [BBOX_WGS84, BBOX_UTM, BBOX_POP_WEB])
def test_bbox_transform(input_bbox: BBox, expected_bbox: BBox) -> None:
test_bbox = input_bbox.transform(expected_bbox.crs)
assert tuple(test_bbox) == pytest.approx(tuple(expected_bbox), rel=1e-4)
assert test_bbox.crs is expected_bbox.crs
@pytest.mark.parametrize(
"point, source_crs, target_crs, target_point",
[
((111.644, 8.655), CRS.WGS84, CRS.POP_WEB, (12428153.23, 967155.41)),
((360000.0, 4635040.0), CRS.UTM_31N, CRS.WGS84, (1.313392213, 41.854888581)),
((360000.0, 4635040.0), CRS.UTM_31N, CRS.UTM_30N, (858072.82713, 4642667.30545)),
((1475000.0, 5100000.0), CRS(2193), CRS.WGS84, (171.43450808, -44.24250942)),
((543569.807, 6062625.7678), CRS(3346), CRS.UTM_35N, (350231.496834, 6063682.846723)),
],
)
def test_transform_point(
point: Tuple[float, float], source_crs: CRS, target_crs: CRS, target_point: Tuple[float, float]
) -> None:
new_point = transform_point(point, source_crs, target_crs)
assert new_point == pytest.approx(target_point, rel=1e-8)
assert transform_point(new_point, target_crs, source_crs) == pytest.approx(point, rel=1e-8)
@pytest.mark.parametrize(
"coordinate, expected_pixel",
[
((570851, 960429), (0, 0)),
((577006, 960429), (0, 12)),
((572351, 958770), (3, 3)),
((570851, 956770), (7, 0)),
((577006, 956770), (7, 12)),
],
)
def test_utm_to_pixel(coordinate: Tuple[float, float], expected_pixel: Tuple[int, int]) -> None:
assert utm_to_pixel(*coordinate, GEOREFERENCING_TRANSFORM) == expected_pixel
@pytest.mark.parametrize(
"pixel, expected_coordinate",
[
((0, 0), (570851, 960429)),
((0, 12), (576995, 960429)),
((3, 3), (572387, 958893)),
((7, 0), (570851, 956845)),
((7, 12), (576995, 956845)),
],
)
def test_pixel_to_utm(pixel: Tuple[int, int], expected_coordinate: Tuple[float, float]) -> None:
assert pixel_to_utm(*pixel, GEOREFERENCING_TRANSFORM) == pytest.approx(expected_coordinate, abs=1)
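# Hypothetical reference sketch (not part of the original tests): the GDAL-style
# geotransform above maps a (row, col) pixel to UTM coordinates as
# x = gt[0] + col * gt[1] and y = gt[3] + row * gt[5].
def _pixel_to_utm_reference(row: int, col: int, gt: Tuple[float, ...]) -> Tuple[float, float]:
    return gt[0] + col * gt[1], gt[3] + row * gt[5]
assert _pixel_to_utm_reference(0, 0, GEOREFERENCING_TRANSFORM) == pytest.approx((570851, 960429), abs=1)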
|
fd110552377c6ff5657037fa4207040c506f05ee
|
6ffd23679939f59f0a09c9507a126ba056b239d7
|
/imperative/python/megengine/utils/profiler.py
|
1b1f7e5745f178e484ef1c823b368327ab5345e4
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
MegEngine/MegEngine
|
74c1c9b6022c858962caf7f27e6f65220739999f
|
66b79160d35b2710c00befede0c3fd729109e474
|
refs/heads/master
| 2023-08-23T20:01:32.476848
| 2023-08-01T07:12:01
| 2023-08-11T06:04:12
| 248,175,118
| 5,697
| 585
|
Apache-2.0
| 2023-07-19T05:11:07
| 2020-03-18T08:21:58
|
C++
|
UTF-8
|
Python
| false
| false
| 10,216
|
py
|
profiler.py
|
# -*- coding: utf-8 -*-
import json
import os
import re
from contextlib import ContextDecorator, contextmanager
from functools import wraps
from typing import List
from weakref import WeakSet
from .. import _atexit
from ..core._imperative_rt.core2 import Tensor as raw_tensor
from ..core._imperative_rt.core2 import (
cupti_available,
disable_cupti,
enable_cupti,
full_sync,
pop_scope,
pop_scope_with_type,
push_scope,
push_scope_with_type,
set_python_backtrace_enabled,
start_profile,
stop_profile,
stop_step,
sync,
)
from ..logger import get_logger
_running_profiler = None
_living_profilers = WeakSet()
class Profiler(ContextDecorator):
r"""Profile graph execution in imperative mode.
Args:
path: default path prefix for profiler to dump.
with_backtrace: Whether to record backtrace information for ops.
        with_scopes: Whether to push extra scopes to record the module/functional hierarchy. Enabling this option will slow down your program execution.
Examples:
.. code-block::
import megengine as mge
import megengine.module as M
from megengine.utils.profiler import Profiler
# With Learnable Parameters
profiler = Profiler()
for iter in range(0, 10):
                # Only the profile record of the last iter will be saved
with profiler:
# your code here
# Then open the profile file in chrome timeline window
"""
CHROME_TIMELINE = "chrome_timeline.json"
valid_options = {
"sample_rate": 0,
"profile_device": 1,
"num_tensor_watch": 10,
"enable_cupti": 0,
}
valid_formats = {"chrome_timeline.json", "memory_flow.svg"}
def __init__(
self,
path: str = "profile",
format: str = "chrome_timeline.json",
formats: List[str] = None,
with_backtrace: bool = False,
with_scopes: bool = False,
**kwargs
) -> None:
if not formats:
formats = [format]
        assert not isinstance(formats, str), "formats expects a list, got str"
for format in formats:
assert format in Profiler.valid_formats, "unsupported format {}".format(
format
)
self._path = path
self._formats = formats
self._options = {}
for opt, optval in Profiler.valid_options.items():
self._options[opt] = int(kwargs.pop(opt, optval))
self._pid = "<PID>"
self._dump_callback = None
self._api_patcher = None
self._with_scopes = with_scopes
if self._options.get("enable_cupti", 0):
if cupti_available():
enable_cupti()
else:
get_logger().warning("CuPTI unavailable")
self.with_backtrace = with_backtrace
@property
def path(self):
if len(self._formats) == 0:
format = "<FORMAT>"
elif len(self._formats) == 1:
format = self._formats[0]
else:
format = "{" + ",".join(self._formats) + "}"
return self.format_path(self._path, self._pid, format)
@property
def directory(self):
return self._path
@property
def _patcher(self):
        if self._api_patcher is not None:
return self._api_patcher
from ..traced_module.module_tracer import Patcher, module_tracer
from ..module import Module
def wrap_tensormethod_and_functional(origin_fn):
def get_tensormeth_name(obj, func):
tp = obj if isinstance(obj, type) else type(obj)
if not issubclass(tp, raw_tensor):
return None
for cls in tp.mro():
for k, v in cls.__dict__.items():
if v == func:
return k
return None
@wraps(origin_fn)
def wrapped_fn(*args, **kwargs):
methname = (
get_tensormeth_name(args[0], wrapped_fn) if len(args) > 0 else None
)
name, scope_type = (
("tensor." + methname, "tensor_method")
if methname is not None
else (origin_fn.__name__, "functional")
)
push_scope_with_type(name, scope_type)
rst = origin_fn(*args, **kwargs)
pop_scope_with_type(name, scope_type)
return rst
return wrapped_fn
def wrap_module_call(origin_fn):
@wraps(origin_fn)
def wrapped_fn(*args, **kwargs):
is_builtin_module = module_tracer.is_builtin(type(args[0]))
if not is_builtin_module:
return origin_fn(*args, **kwargs)
name, scope_type = type(args[0]).__name__, "module"
push_scope_with_type(name, scope_type)
rst = origin_fn(*args, **kwargs)
pop_scope_with_type(name, scope_type)
return rst
return wrapped_fn
self._api_patcher = Patcher(wrap_tensormethod_and_functional)
self._api_patcher.patch_method(Module, "__call__", wrap_module_call)
return self._api_patcher
@property
def formats(self):
return list(self._formats)
def start(self):
global _running_profiler
assert _running_profiler is None
_running_profiler = self
self._pid = os.getpid()
start_profile(self._options)
self._origin_enable_bt = set_python_backtrace_enabled(self.with_backtrace)
return self
def stop(self):
global _running_profiler
assert _running_profiler is self
_running_profiler = None
full_sync()
self._dump_callback = stop_profile()
self._pid = os.getpid()
_living_profilers.add(self)
set_python_backtrace_enabled(self._origin_enable_bt)
def step(self):
global _running_profiler
assert _running_profiler is not None
stop_step()
return self
def dump(self):
if self._dump_callback is not None:
if not os.path.exists(self._path):
os.makedirs(self._path)
if not os.path.isdir(self._path):
get_logger().warning(
"{} is not a directory, cannot write profiling results".format(
self._path
)
)
return
for format in self._formats:
path = self.format_path(self._path, self._pid, format)
get_logger().info("process {} generating {}".format(self._pid, format))
self._dump_callback(path, format)
get_logger().info("profiling results written to {}".format(path))
if os.path.getsize(path) > 64 * 1024 * 1024:
get_logger().warning(
"profiling results too large, maybe you are profiling multi iters,"
"consider attach profiler in each iter separately"
)
self._dump_callback = None
_living_profilers.remove(self)
def format_path(self, path, pid, format):
return os.path.join(path, "{}.{}".format(pid, format))
def __enter__(self):
self.start()
if self._with_scopes:
self._patcher.__enter__()
def __exit__(self, val, tp, trace):
self.stop()
if self._with_scopes and self._api_patcher is not None:
self._api_patcher.__exit__(val, tp, trace)
self._api_patcher = None
def __call__(self, func):
func = super().__call__(func)
func.__profiler__ = self
return func
def __del__(self):
if self._options.get("enable_cupti", 0):
if cupti_available():
disable_cupti()
self.dump()
@contextmanager
def scope(name):
push_scope(name)
yield
pop_scope(name)
def profile(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return Profiler()(args[0])
return Profiler(*args, **kwargs)
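# Hypothetical usage sketch of the dual-form decorator above (not part of the
# original file):
#
#     @profile                                    # bare form, default Profiler()
#     def train_step(data): ...
#
#     @profile(path="prof", with_backtrace=True)  # parametrized form
#     def eval_step(data): ...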
def merge_trace_events(directory: str):
names = filter(
lambda x: re.match(r"\d+\.chrome_timeline\.json", x), os.listdir(directory)
)
def load_trace_events(name):
with open(os.path.join(directory, name), "r", encoding="utf-8") as f:
return json.load(f)
def find_metadata(content):
if isinstance(content, dict):
assert "traceEvents" in content
content = content["traceEvents"]
if len(content) == 0:
return None
assert content[0]["name"] == "Metadata"
return content[0]["args"]
contents = list(map(load_trace_events, names))
metadata_list = list(map(find_metadata, contents))
min_local_time = min(
map(lambda x: x["localTime"], filter(lambda x: x is not None, metadata_list))
)
events = []
for content, metadata in zip(contents, metadata_list):
local_events = content["traceEvents"]
if len(local_events) == 0:
continue
local_time = metadata["localTime"]
time_shift = local_time - min_local_time
for event in local_events:
if "ts" in event:
event["ts"] = int(event["ts"] + time_shift)
events.extend(filter(lambda x: x["name"] != "Metadata", local_events))
result = {
"traceEvents": events,
}
path = os.path.join(directory, "merge.chrome_timeline.json")
with open(path, "w") as f:
json.dump(result, f, ensure_ascii=False, separators=(",", ":"))
get_logger().info("profiling results written to {}".format(path))
def is_profiling():
return _running_profiler is not None
def _stop_current_profiler():
global _running_profiler
if _running_profiler is not None:
_running_profiler.stop()
living_profilers = [*_living_profilers]
for profiler in living_profilers:
profiler.dump()
_atexit(_stop_current_profiler)
|
29a3d032db5b7c790415368ec0d0a9c8e7372edd
|
b2d06ad8145cbfe92835d62899f004dc207ad1b5
|
/bot/reviewbot/utils/log.py
|
f28a0d647b23fa2ec66769cadb3a977d56112e33
|
[
"MIT"
] |
permissive
|
reviewboard/ReviewBot
|
8027a9eb308b8c01f6d47e0372f543beff655014
|
b59b566e127b5ef1b08f3189f1aa0194b7437d94
|
refs/heads/master
| 2023-06-10T00:25:11.506154
| 2023-05-31T22:36:34
| 2023-05-31T22:36:34
| 3,355,797
| 110
| 26
|
MIT
| 2020-11-05T08:56:37
| 2012-02-04T22:20:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,447
|
py
|
log.py
|
"""Utility functions for loggers.
Version Added:
3.0
"""
from __future__ import unicode_literals
from celery.utils.log import (get_logger as _get_logger,
get_task_logger as _get_task_logger)
def get_logger(name, is_task_logger=True):
"""Return a logger with the given name.
The logger will by default be constructed as a task logger. This will
ensure it contains additional information on the current task name and task
    ID, if running in a task. If executed outside of a task, the task name and
ID will be replaced with ``???`` by Celery.
Task logging should be turned off only when we know for sure that the
code isn't going to be running in a task.
Version Added:
3.0
Args:
name (unicode):
The name shown in the log line. This is expected to be a module
name.
is_task_logger (bool, optional):
Whether to construct a task logger.
Returns:
logging.Logger:
The new (or existing) logger for the given name.
"""
if is_task_logger:
return _get_task_logger(name)
return _get_logger(name)
def get_root_logger():
"""Return a root logger for Review Bot.
This will use "Review Bot" as the logger name.
Version Added:
3.0
Returns:
logging.Logger:
The root logger.
"""
return get_logger('Review Bot', is_task_logger=False)
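# Hypothetical usage sketch (not part of the original file; assumes Celery
# logging is configured):
#
#     log = get_logger(__name__)   # task-aware: name/ID show as "???" outside tasks
#     log.info('Running tool')
#     get_root_logger().info('Review Bot worker started')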
|
38f9e38ab0bf68fe59e45c3c1a1cdb22ee8a54b4
|
e9712ad09a7e852b1f97e321469792b2fe01f5d2
|
/tests/test_server.py
|
76df757b3f22e0b0551a5c88c83ce28a6e4fd0d1
|
[
"MIT"
] |
permissive
|
gluk-w/django-grpc
|
f05677d86e186deeba481a56837b19f0e738b213
|
5cb7385ea176ae13c5cd91b0d3c164233c34e993
|
refs/heads/master
| 2023-07-20T11:42:02.272514
| 2023-05-10T14:24:21
| 2023-05-10T14:24:21
| 166,471,167
| 215
| 32
|
MIT
| 2023-07-05T21:30:45
| 2019-01-18T20:53:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
test_server.py
|
import os
import threading
from random import randint
from time import sleep
from django.core.management import call_command
from django_grpc_testtools.executor import TestGRPCServer
from tests.helpers import call_hello_method
def start_server(**params):
"""
    Starts a gRPC server in a separate thread using the "grpcserver" management command with the given parameters
:return: connection string
"""
def _grpc_server_async(options):
call_command("grpcserver", **options)
port = 50000 + randint(0, 10000)
params["port"] = port
# Start grpc server
srv = threading.Thread(
target=_grpc_server_async, args=[params]
)
srv.start()
sleep(5)
return "localhost:%s" % port
def test_management_command(grpc_server):
"""
Start gRPC server using management command and make sure it works
"""
assert call_hello_method(grpc_server, 'Django GRPC') == 'Hello, Django GRPC!'
def test_management_command_with_autoreload():
manage_py = os.path.join(os.path.dirname(os.path.abspath(__file__)), "manage.py")
server = TestGRPCServer(manage_py, {'--autoreload': ''})
server.start()
assert call_hello_method(server.addr(), 'Autoreload') == 'Hello, Autoreload!'
server.stop()
|
3887d045d357cd8bdf9c3f220dfa1466d5844210
|
53940f2aaf1537bb0c701f3963225aae5edc56e0
|
/src/deepqmc/wf/baseline/baseline.py
|
16fb2aba35c1405bce3af8a8885fb187ebc9c628
|
[
"MIT"
] |
permissive
|
deepqmc/deepqmc
|
d44d32c7ed528bc2c0a32e0c8a7f1d3ce70ad007
|
bf297a34c0304f9deb3a5ad704ddd3a8a3d7eea0
|
refs/heads/master
| 2023-08-16T16:30:54.227839
| 2023-08-02T13:24:22
| 2023-08-08T09:18:02
| 226,350,919
| 313
| 64
|
MIT
| 2023-09-14T18:06:13
| 2019-12-06T14:50:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,151
|
py
|
baseline.py
|
from typing import Sequence
import haiku as hk
import jax.numpy as jnp
from jax.nn import one_hot
from ...physics import pairwise_diffs
from ..base import WaveFunction
from .gto import GTOBasis
from .pyscfext import confs_from_mc, pyscf_from_mol
__all__ = ['Baseline']
class Baseline(WaveFunction):
r"""Represent an (MC-)SCF wave function, used as baseline."""
def __init__(
self,
mol,
n_determinants,
centers,
shells,
mo_coeffs,
confs,
conf_coeffs,
):
super().__init__(mol)
self.basis = GTOBasis(centers, shells)
conf_coeffs = conf_coeffs[:, :n_determinants]
self.mo_coeffs = hk.get_parameter(
'mo_coeffs', mo_coeffs.shape, init=lambda s, d: mo_coeffs
)
self.conf_coeffs = hk.get_parameter(
'conf_coeffs', conf_coeffs.shape, init=lambda s, d: conf_coeffs
)
self.confs = confs[:, :n_determinants]
def __call__(self, phys_conf):
mol_idx = phys_conf.mol_idx
diffs = pairwise_diffs(phys_conf.r, phys_conf.R)
n_el = diffs.shape[-3]
aos = self.basis(diffs)
mos = jnp.einsum('...mo,...em->...eo', self.mo_coeffs[mol_idx], aos)
mos = mos[:, self.confs[mol_idx]].swapaxes(-2, -3)
# ci coefficients are included in the orbitals of the respective determinant
factors = (jnp.abs(self.conf_coeffs[mol_idx]) ** (1 / n_el))[:, None] * (
one_hot(0, n_el)[None, :] * jnp.sign(self.conf_coeffs[mol_idx])[:, None]
+ (1 - one_hot(0, n_el)[None, :])
)
return mos * factors[:, None, :]
@classmethod
def from_mol(cls, mols, *, basis='6-31G', cas=None, **pyscf_kwargs):
r"""Create input to the constructor from a :class:`~deepqmc.Molecule`.
Args:
            mols (~deepqmc.Molecule): the molecule or a sequence of molecules to
consider.
basis (str): the name of a Gaussian basis set.
            cas (Tuple[int,int]): optional, the active space specification for CAS-SCF.
"""
mols = mols if isinstance(mols, Sequence) else [mols]
mo_coeffs, confs, conf_coeffs = [], [], []
for mol in mols:
mol_pyscf, (mf, mc) = pyscf_from_mol(mol, basis, cas, **pyscf_kwargs)
centers, shells = GTOBasis.from_pyscf(mol_pyscf)
mo_coeff = jnp.asarray(mc.mo_coeff if mc else mf.mo_coeff)
ao_overlap = jnp.asarray(mf.mol.intor('int1e_ovlp_cart'))
mo_coeff *= jnp.sqrt(jnp.diag(ao_overlap))[:, None]
conf_coeff, conf = (
([1], [sum([list(range(n_el)) for n_el in (mol.n_up, mol.n_down)], [])])
if mc is None
else zip(*confs_from_mc(mc))
)
mo_coeffs.append(mo_coeff)
confs.append(jnp.array(conf))
conf_coeffs.append(jnp.array(conf_coeff))
return {
'centers': centers,
'shells': shells,
'mo_coeffs': jnp.stack(mo_coeffs),
'confs': jnp.stack(confs),
'conf_coeffs': jnp.stack(conf_coeffs),
}
|
d04a36e14b3c4f190470e430b3b9fc31f8bff39a
|
6c8305ea1df9687df1c0d2b0ace56733516c6322
|
/readthedocs/telemetry/collectors.py
|
4b32644654e636e284291e8009e83a024cc10bb7
|
[
"MIT"
] |
permissive
|
readthedocs/readthedocs.org
|
9806083aa744c2308267919480a692e1e003e45d
|
bf88ce6d1085d922322a5fadce63a22c5544c830
|
refs/heads/main
| 2023-09-05T20:22:34.281891
| 2023-09-05T12:41:52
| 2023-09-05T12:41:52
| 841,835
| 2,894
| 1,509
|
MIT
| 2023-09-14T20:36:00
| 2010-08-16T19:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 12,382
|
py
|
collectors.py
|
"""Data collectors."""
import json
import os
import dparse
import structlog
from readthedocs.config.models import PythonInstallRequirements
from readthedocs.core.utils.filesystem import safe_open
log = structlog.get_logger(__name__)
class BuildDataCollector:
"""
Build data collector.
    Collect data from a running build.
"""
def __init__(self, environment):
self.environment = environment
self.build = self.environment.build
self.project = self.environment.project
self.version = self.environment.version
self.config = self.environment.config
self.checkout_path = self.project.checkout_path(self.version.slug)
log.bind(
build_id=self.build["id"],
project_slug=self.project.slug,
version_slug=self.version.slug,
)
@staticmethod
def _safe_json_loads(content, default=None):
def lowercase(d):
"""Convert all dictionary keys to lowercase."""
return {k.lower(): i for k, i in d.items()}
# pylint: disable=broad-except
try:
# Use ``object_hook`` parameter to lowercase all the keys of the dictionary.
# This helps us to have our data normalized and improve queries.
return json.loads(content, object_hook=lowercase)
except Exception:
log.info(
"Error while loading JSON content.",
exc_info=True,
)
return default
def run(self, *args, **kwargs):
build_cmd = self.environment.run(*args, record=False, demux=True, **kwargs)
return build_cmd.exit_code, build_cmd.output, build_cmd.error
def collect(self):
"""
        Collect all relevant data from the running build.
Data that can be extracted from the database (project/organization)
isn't collected here.
"""
# NOTE: we could run each command inside a try/except block to have a
# more granular protection and be able to save data from those commands
# that didn't fail. Otherwise, if one command fails, all the data for
# this Build is lost.
data = {}
data["config"] = {"user": self.config.source_config}
data["os"] = self._get_operating_system()
data["python"] = self._get_python_version()
user_apt_packages, all_apt_packages = self._get_apt_packages()
conda_packages = (
self._get_all_conda_packages() if self.config.is_using_conda else {}
)
data["packages"] = {
"pip": {
"user": self._get_user_pip_packages(),
"all": self._get_all_pip_packages(),
},
"conda": {
"all": conda_packages,
},
"apt": {
"user": user_apt_packages,
"all": all_apt_packages,
},
}
data["doctool"] = self._get_doctool()
return data
def _get_doctool_name(self):
if self.version.is_sphinx_type:
return "sphinx"
if self.version.is_mkdocs_type:
return "mkdocs"
return "generic"
def _get_doctool(self):
data = {
"name": self._get_doctool_name(),
"extensions": [],
"html_theme": "",
}
if self._get_doctool_name() != "sphinx":
return data
# The project does not define a `conf.py` or does not have one
if not self.config.sphinx or not self.config.sphinx.configuration:
return data
conf_py_dir = os.path.join(
self.checkout_path,
os.path.dirname(self.config.sphinx.configuration),
)
filepath = os.path.join(conf_py_dir, "_build", "json", "telemetry.json")
if os.path.exists(filepath):
with safe_open(filepath, "r") as json_file:
content = json_file.read()
data.update(self._safe_json_loads(content, {}))
return data
def _get_all_conda_packages(self):
"""
Get all the packages installed by the user using conda.
This includes top level and transitive dependencies.
The output of ``conda list`` is in the form of::
[
{
"base_url": "https://conda.anaconda.org/conda-forge",
"build_number": 0,
"build_string": "py_0",
"channel": "conda-forge",
"dist_name": "alabaster-0.7.12-py_0",
"name": "alabaster",
"platform": "noarch",
"version": "0.7.12"
},
{
"base_url": "https://conda.anaconda.org/conda-forge",
"build_number": 0,
"build_string": "pyh9f0ad1d_0",
"channel": "conda-forge",
"dist_name": "asn1crypto-1.4.0-pyh9f0ad1d_0",
"name": "asn1crypto",
"platform": "noarch",
"version": "1.4.0"
}
]
"""
code, stdout, _ = self.run(
"conda", "list", "--json", "--name", self.version.slug
)
if code == 0 and stdout:
packages = self._safe_json_loads(stdout, [])
packages = [
{
"name": package["name"],
"channel": package["channel"],
"version": package["version"],
}
for package in packages
]
return packages
return []
def _get_user_pip_packages(self):
"""
Get all the packages to be installed defined by the user.
It parses all the requirements files specified in the config file by
the user (python.install.requirements) using ``dparse`` --a 3rd party
package.
If the version of the package is explicit (==) it saves that particular
version. Otherwise, if it's not defined, it saves ``undefined`` and if
        it's a non-deterministic operation (like >=, <= or ~=) it saves
``unknown`` in the version.
"""
results = []
# pylint: disable=too-many-nested-blocks
for install in self.config.python.install:
if isinstance(install, PythonInstallRequirements):
if install.requirements:
cmd = ["cat", install.requirements]
_, stdout, _ = self.run(*cmd, cwd=self.checkout_path)
df = dparse.parse(
stdout, file_type=dparse.filetypes.requirements_txt
).serialize()
dependencies = df.get("dependencies", [])
for requirement in dependencies:
name = requirement.get("name", "").lower()
if not name:
continue
# If the user defines a specific version in the
                        # requirements file, we save it. Otherwise, we don't
# because we don't know which version will be
# installed.
version = "undefined"
specs = str(requirement.get("specs", ""))
if specs:
if specs.startswith("=="):
version = specs.replace("==", "", 1)
else:
version = "unknown"
results.append(
{
"name": name,
"version": version,
}
)
return results
def _get_all_pip_packages(self):
"""
Get all the packages installed by pip.
This includes top level and transitive dependencies.
The output of ``pip list`` is in the form of::
[
{
"name": "requests-mock",
"version": "1.8.0"
},
{
"name": "requests-toolbelt",
"version": "0.9.1"
},
{
"name": "rstcheck",
"version": "3.3.1"
},
{
"name": "selectolax",
"version": "0.2.10"
},
{
"name": "slumber",
"version": "0.7.1"
}
]
"""
cmd = [
"python",
"-m",
"pip",
"list",
"--pre",
"--local",
"--format",
"json",
]
code, stdout, _ = self.run(*cmd)
if code == 0 and stdout:
return self._safe_json_loads(stdout, [])
return []
def _get_operating_system(self):
"""
Get the current operating system.
The output of ``lsb_release --description`` is in the form of::
Description: Ubuntu 20.04.3 LTS
"""
code, stdout, _ = self.run("lsb_release", "--description")
stdout = stdout.strip()
if code == 0 and stdout:
parts = stdout.split("\t")
if len(parts) == 2:
return parts[1]
return ""
def _get_apt_packages(self):
"""
Get the list of installed apt packages (global and from the user).
The current source of user installed packages is the config file,
but we have only the name, so we take the version from the list of all
installed packages.
"""
all_apt_packages = self._get_all_apt_packages()
all_apt_packages_dict = {
package["name"]: package["version"] for package in all_apt_packages
}
user_apt_packages = self._get_user_apt_packages()
for package in user_apt_packages:
package["version"] = all_apt_packages_dict.get(package["name"], "")
return user_apt_packages, all_apt_packages
def _get_all_apt_packages(self):
"""
Get all installed apt packages and their versions.
        The output of ``dpkg-query --show`` is in the form of::
adduser 3.116ubuntu1
apt 1.6.14
base-files 10.1ubuntu2.11
base-passwd 3.5.44
bash 4.4.18-2ubuntu1.2
bsdutils 1:2.31.1-0.4ubuntu3.7
bzip2 1.0.6-8.1ubuntu0.2
coreutils 8.28-1ubuntu1
dash 0.5.8-2.10
debconf 1.5.66ubuntu1
debianutils 4.8.4
diffutils 1:3.6-1
dpkg 1.19.0.5ubuntu2.3
e2fsprogs 1.44.1-1ubuntu1.3
fdisk 2.31.1-0.4ubuntu3.7
findutils 4.6.0+git+20170828-2
gcc-8-base 8.4.0-1ubuntu1~18.04
gpgv 2.2.4-1ubuntu1.4
grep 3.1-2build1
gzip 1.6-5ubuntu1.2
hostname 3.20
"""
code, stdout, _ = self.run(
"dpkg-query", "--showformat", "${package} ${version}\\n", "--show"
)
stdout = stdout.strip()
packages = []
if code != 0 or not stdout:
return packages
for line in stdout.split("\n"):
parts = line.split()
if len(parts) == 2:
package, version = parts
packages.append(
{
"name": package.lower(),
"version": version,
}
)
return packages
def _get_user_apt_packages(self):
return [
{"name": package.lower(), "version": ""}
for package in self.config.build.apt_packages
]
def _get_python_version(self):
"""
Get the python version currently used.
The output of ``python --version`` is in the form of::
Python 3.8.12
"""
code, stdout, _ = self.run("python", "--version")
stdout = stdout.strip()
if code == 0 and stdout:
parts = stdout.split()
if len(parts) == 2:
return parts[1]
return ""
|
67ee592f1515c857734aaa7ba9529834083ed30d
|
83b8b30ebb633eecd29ca0a7a20cc43a293c9333
|
/tests/micropython/heapalloc_exc_compressed_emg_exc.py
|
86ade07862a1c52e3bd1f0fb30d046fa555b4230
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
adafruit/circuitpython
|
430ec895149d1eb814b505db39b4977a35ee88a7
|
506dca71b0cbb7af749bb51f86b01021db5483b3
|
refs/heads/main
| 2023-08-21T16:30:46.781068
| 2023-08-20T00:39:44
| 2023-08-20T00:39:44
| 66,166,069
| 3,806
| 1,560
|
MIT
| 2023-09-14T19:23:51
| 2016-08-20T20:10:40
|
C
|
UTF-8
|
Python
| false
| false
| 753
|
py
|
heapalloc_exc_compressed_emg_exc.py
|
import micropython
# Does the full test from heapalloc_exc_compressed.py but while the heap is
# locked (this can only work when the emergency exception buf is enabled).
# Some ports need to allocate heap for the emergency exception buffer.
try:
micropython.alloc_emergency_exception_buf(256)
except AttributeError:
pass
a = set()
def test():
micropython.heap_lock()
try:
name()
except NameError as e:
print(type(e).__name__, e)
try:
a.pop()
except KeyError as e:
print(type(e).__name__, e)
try:
name()
except NameError as e:
print(e.args[0])
try:
a.pop()
except KeyError as e:
print(e.args[0])
micropython.heap_unlock()
test()
|
1ced929d9717f6c27bb8dee06978b6011c332de3
|
05169e203974411667ab947298a74575b8a179e0
|
/packages/jet_bridge_base/jet_bridge_base/views/api.py
|
0947a7b078cc60a0178adce86898d9d092888a95
|
[
"MIT"
] |
permissive
|
jet-admin/jet-bridge
|
f6b563e1801985063483ddb02e9e1c3301dc0612
|
c53d30fb308eed5822083eaf71f641c4098610cc
|
refs/heads/master
| 2023-09-01T14:31:42.261427
| 2023-08-24T13:54:34
| 2023-08-24T13:54:34
| 163,167,532
| 1,564
| 166
|
MIT
| 2023-03-18T03:20:04
| 2018-12-26T10:27:33
|
Python
|
UTF-8
|
Python
| false
| false
| 541
|
py
|
api.py
|
from jet_bridge_base.configuration import configuration
from jet_bridge_base.responses.json import JSONResponse
from jet_bridge_base.store import store
from jet_bridge_base.views.base.api import BaseAPIView
class ApiView(BaseAPIView):
def get(self, request, *args, **kwargs):
return JSONResponse({
'version': configuration.get_version(),
'type': configuration.get_type(),
'store_available': store.is_ok(),
'media_url_template': configuration.media_url('{}', request)
})
|
f99e43e1237e4c8a1e42978b550ac8e4b55de9fe
|
2c1dc7049d820d2b75811a6c0479bd34eb84ad87
|
/test/selenium/smoke/Login_and_Accounts.py
|
2b3aee420e8331763d5322f0ea7430afeff67dd4
|
[
"GPL-2.0-only",
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
apache/cloudstack
|
3775c9171022dfaf91d655bd166149e36f4caa41
|
819dd7b75c1b61ae444c45476f5834dbfb9094d0
|
refs/heads/main
| 2023-08-30T15:05:36.976909
| 2023-08-30T09:29:16
| 2023-08-30T09:29:16
| 9,759,448
| 1,468
| 1,232
|
Apache-2.0
| 2023-09-14T16:57:46
| 2013-04-29T22:27:12
|
Java
|
UTF-8
|
Python
| false
| false
| 8,127
|
py
|
Login_and_Accounts.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys, os
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib'))
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import unittest, time
import Global_Locators
import initialize
class login(unittest.TestCase):
def setUp(self):
MS_URL = initialize.getMSip()
self.driver = initialize.getOrCreateWebdriver()
self.base_url = "http://"+ MS_URL +":8080/" # Your management Server IP goes here
self.verificationErrors = []
def test_login(self):
        # Here we will clear the text boxes for Username and Password and fill them with actual login data.
# After that we will click Login (Submit button)
driver = self.driver
driver.maximize_window()
driver.get(self.base_url + "client/")
driver.find_element_by_css_selector(Global_Locators.login_username_css).clear()
driver.find_element_by_css_selector(Global_Locators.login_username_css).send_keys("admin")
driver.find_element_by_css_selector(Global_Locators.login_password_css).clear()
driver.find_element_by_css_selector(Global_Locators.login_password_css).send_keys("password")
driver.find_element_by_css_selector(Global_Locators.login_submit_css).click()
time.sleep(5)
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException: return False
return True
def tearDown(self):
self.assertEqual([], self.verificationErrors)
################################################################################################################################################
class logout(unittest.TestCase):
def setUp(self):
self.driver = initialize.getOrCreateWebdriver()
self.driver.implicitly_wait(100)
self.verificationErrors = []
def test_logout(self):
        # Here we will clear the text boxes for Username and Password and fill them with actual login data.
# After that we will click Login (Submit button)
driver = self.driver
driver.find_element_by_xpath("//div[@id='navigation']/ul/li").click()
driver.find_element_by_css_selector("div.icon.options").click()
driver.find_element_by_link_text("Logout").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException: return False
return True
def tearDown(self):
self.assertEqual([], self.verificationErrors)
################################################################################################################################################
class login_test(unittest.TestCase):
def setUp(self):
self.driver = initialize.getOrCreateWebdriver()
self.verificationErrors = []
def test_logintest(self):
        # Here we will clear the text boxes for Username and Password and fill them with actual login data.
# After that we will click Login (Submit button)
driver = self.driver
driver.find_element_by_css_selector(Global_Locators.login_username_css).clear()
driver.find_element_by_css_selector(Global_Locators.login_username_css).send_keys("test")
driver.find_element_by_css_selector(Global_Locators.login_password_css).clear()
driver.find_element_by_css_selector(Global_Locators.login_password_css).send_keys("password")
driver.find_element_by_css_selector(Global_Locators.login_submit_css).click()
time.sleep(5)
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException: return False
return True
def tearDown(self):
self.assertEqual([], self.verificationErrors)
################################################################################################################################################
class createAcc(unittest.TestCase):
def setUp(self):
self.driver = initialize.getOrCreateWebdriver()
self.verificationErrors = []
def test_createacc(self):
driver = self.driver
self.driver.implicitly_wait(100)
driver.find_element_by_xpath("//div[@id='navigation']/ul/li[8]/span[2]").click()
driver.find_element_by_xpath("//div[3]/span").click()
driver.find_element_by_id("label_username").clear()
driver.find_element_by_id("label_username").send_keys("test")
driver.find_element_by_id("password").clear()
driver.find_element_by_id("password").send_keys("password")
driver.find_element_by_id("label_confirm_password").clear()
driver.find_element_by_id("label_confirm_password").send_keys("password")
driver.find_element_by_id("label_email").clear()
driver.find_element_by_id("label_email").send_keys("test@citrix.com")
driver.find_element_by_id("label_first_name").clear()
driver.find_element_by_id("label_first_name").send_keys("test")
driver.find_element_by_id("label_last_name").clear()
driver.find_element_by_id("label_last_name").send_keys("test")
driver.find_element_by_id("label_domain").click()
Select(driver.find_element_by_id("label_type")).select_by_visible_text("Admin")
Select(driver.find_element_by_id("label_timezone")).select_by_visible_text("[UTC-08:00] Pacific Standard Time")
driver.find_element_by_xpath("//button[@type='button']").click()
# Go to Dashboard
driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click()
time.sleep(30)
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException: return False
return True
def tearDown(self):
self.assertEqual([], self.verificationErrors)
################################################################################################################################################
class tearAcc(unittest.TestCase):
def setUp(self):
self.driver = initialize.getOrCreateWebdriver()
self.verificationErrors = []
def test_tearacc(self):
driver = self.driver
driver.find_element_by_css_selector("li.navigation-item.accounts").click()
driver.find_element_by_css_selector("tr.odd > td.name.first").click()
driver.find_element_by_css_selector("a[alt=\"Delete account\"] > span.icon").click()
driver.find_element_by_xpath("(//button[@type='button'])[2]").click()
# Go to Dashboard
driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click()
time.sleep(30)
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException: return False
return True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
################################################################################################################################################
|
ff631c99521e48c015b5bd951e08be2b0f874c77
|
60cffd76b5d2f499ed845116c7af0794093af041
|
/tests/conftest.py
|
8630b0179e43147c411984377d3f4a1016ea45cb
|
[
"MIT"
] |
permissive
|
jrxFive/python-nomad
|
2ed5a1dc6c7520c6acb63c1876a57aef0d36925d
|
41df8692ed3fca69ff92cf5e806ad969fe0e0ffa
|
refs/heads/master
| 2023-08-28T17:53:12.371524
| 2023-06-02T21:59:17
| 2023-06-02T21:59:17
| 58,209,578
| 135
| 92
|
MIT
| 2023-08-02T09:59:40
| 2016-05-06T13:34:56
|
Python
|
UTF-8
|
Python
| false
| false
| 420
|
py
|
conftest.py
|
import nomad
import pytest
import tests.common as common
@pytest.fixture
def nomad_setup():
n = nomad.Nomad(host=common.IP, port=common.NOMAD_PORT, verify=False, token=common.NOMAD_TOKEN)
return n
@pytest.fixture
def nomad_setup_with_namespace():
n = nomad.Nomad(
host=common.IP, port=common.NOMAD_PORT, verify=False, token=common.NOMAD_TOKEN, namespace=common.NOMAD_NAMESPACE
)
return n
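# Hypothetical example test (not part of the original file); pytest injects the
# fixture above by its name:
def test_client_is_configured(nomad_setup):
    assert nomad_setup is not None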
|
a5af35d598c71be4930f49a40bda389a5fcdf3ab
|
1180c0bfe29959d95f3c131e6e839950e528d4ee
|
/30/hobojoe1848/troubleshooting.py
|
b2b89afdece744a837465cf2ef97d6844aab8c4a
|
[] |
no_license
|
pybites/challenges
|
e3e461accd8e7f890aee8007ba5070086ef983fc
|
02b77652d0901e6e06cb9b1e7cb3e59c675445c2
|
refs/heads/community
| 2023-08-20T18:19:02.982214
| 2022-11-17T09:23:31
| 2022-11-17T09:23:31
| 78,264,928
| 764
| 3,115
| null | 2023-07-21T05:58:19
| 2017-01-07T07:17:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,337
|
py
|
troubleshooting.py
|
#!python3
#An app to provide random troubleshooting steps. Probably right half the time!
import time
import random
STEPS_LIST = ('SHUFFLE THE DIMMS',
'POWER DRAIN THE HOST',
'STRESS TEST THE HOST',
'RUN CPU AND MEMORY TESTS SEPARATELY',
'RUN MEMTEST',
'RTFM',
'RETURN TO MINIMUM CONFIG',
'REPLACE EVERY DIMM, EVERY CPU AND THE MOTHERBOARD',
'CALL AN EXORCIST')
def intro_message():
print('Team Awesome proudly presents...\n')
time.sleep(2)
print('THE TROUBLESHOOTING WHEEL OF MISFORTUNE'.center(80, '*') + '\n')
time.sleep(2)
print('Having trouble fixing a host?\n')
time.sleep(2)
print("With just one fantabulous spin of THE TROUBLESHOOTING WHEEL OF MISFORTUNE (tm), we'll have you on your way!")
time.sleep(4)
def spin():
input('\nGive the wheel a spin! (Hit Enter)')
print('Spinning...')
time.sleep(2)
    print('\n' + random.choice(STEPS_LIST))
print('\nNow get out there and fix that mofo!\n')
def main():
intro_message()
while True:
spin()
answer = input('Want another spin? (N to quit) ')
if answer.upper() == 'N':
break
if __name__ == "__main__":
main()
|
a2ce1397e356dcbf5b6c4fb079513de4e0f494c7
|
c0bfd93cd7f26a271268e504959256f1e02c6806
|
/components/protocomm/python/session_pb2.py
|
a30e794c79627360ac39e70df4b508c25ef40f1a
|
[
"Apache-2.0"
] |
permissive
|
espressif/ESP8266_RTOS_SDK
|
606f396e92d2675d9854f0fabd88587fbbbaf267
|
af0cdc36fa2600033d0a09301c754008cf1503c1
|
refs/heads/master
| 2023-08-24T22:40:15.373553
| 2023-05-06T02:04:24
| 2023-05-06T02:04:24
| 27,584,181
| 3,163
| 1,749
|
Apache-2.0
| 2023-08-09T10:48:13
| 2014-12-05T09:27:12
|
C
|
UTF-8
|
Python
| false
| true
| 4,393
|
py
|
session_pb2.py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: session.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import sec0_pb2 as sec0__pb2
import sec1_pb2 as sec1__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='session.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\rsession.proto\x1a\nsec0.proto\x1a\nsec1.proto\"v\n\x0bSessionData\x12\"\n\x07sec_ver\x18\x02 \x01(\x0e\x32\x11.SecSchemeVersion\x12\x1c\n\x04sec0\x18\n \x01(\x0b\x32\x0c.Sec0PayloadH\x00\x12\x1c\n\x04sec1\x18\x0b \x01(\x0b\x32\x0c.Sec1PayloadH\x00\x42\x07\n\x05proto*2\n\x10SecSchemeVersion\x12\x0e\n\nSecScheme0\x10\x00\x12\x0e\n\nSecScheme1\x10\x01\x62\x06proto3')
,
dependencies=[sec0__pb2.DESCRIPTOR,sec1__pb2.DESCRIPTOR,])
_SECSCHEMEVERSION = _descriptor.EnumDescriptor(
name='SecSchemeVersion',
full_name='SecSchemeVersion',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SecScheme0', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SecScheme1', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=161,
serialized_end=211,
)
_sym_db.RegisterEnumDescriptor(_SECSCHEMEVERSION)
SecSchemeVersion = enum_type_wrapper.EnumTypeWrapper(_SECSCHEMEVERSION)
SecScheme0 = 0
SecScheme1 = 1
_SESSIONDATA = _descriptor.Descriptor(
name='SessionData',
full_name='SessionData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sec_ver', full_name='SessionData.sec_ver', index=0,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sec0', full_name='SessionData.sec0', index=1,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sec1', full_name='SessionData.sec1', index=2,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='proto', full_name='SessionData.proto',
index=0, containing_type=None, fields=[]),
],
serialized_start=41,
serialized_end=159,
)
_SESSIONDATA.fields_by_name['sec_ver'].enum_type = _SECSCHEMEVERSION
_SESSIONDATA.fields_by_name['sec0'].message_type = sec0__pb2._SEC0PAYLOAD
_SESSIONDATA.fields_by_name['sec1'].message_type = sec1__pb2._SEC1PAYLOAD
_SESSIONDATA.oneofs_by_name['proto'].fields.append(
_SESSIONDATA.fields_by_name['sec0'])
_SESSIONDATA.fields_by_name['sec0'].containing_oneof = _SESSIONDATA.oneofs_by_name['proto']
_SESSIONDATA.oneofs_by_name['proto'].fields.append(
_SESSIONDATA.fields_by_name['sec1'])
_SESSIONDATA.fields_by_name['sec1'].containing_oneof = _SESSIONDATA.oneofs_by_name['proto']
DESCRIPTOR.message_types_by_name['SessionData'] = _SESSIONDATA
DESCRIPTOR.enum_types_by_name['SecSchemeVersion'] = _SECSCHEMEVERSION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SessionData = _reflection.GeneratedProtocolMessageType('SessionData', (_message.Message,), dict(
DESCRIPTOR = _SESSIONDATA,
__module__ = 'session_pb2'
# @@protoc_insertion_point(class_scope:SessionData)
))
_sym_db.RegisterMessage(SessionData)
# @@protoc_insertion_point(module_scope)
|
ba08e6ca84f4d0ace9908dbfa91c5ca9976c5af0
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/7_graph/连通分量/无向图的双连通分量/1568. 使陆地分离的最少天数-割点.py
|
b23a151c93a5c06c98fd30786c2e3e4715d602ef
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,584
|
py
|
1568. 使陆地分离的最少天数-割点.py
|
# There are 3 possible answers: 0, 1 or 2. First count the islands (flood
# fill / union-find): if the count is not exactly 1, the answer is 0. If there
# is exactly one island, run Tarjan's algorithm to look for a cut vertex:
# if one exists the answer is 1, otherwise it is 2.
from copy import deepcopy
from typing import List
def findCutVertices(n: int, graph: List[List[int]]) -> List[bool]:
"""Tarjan 算法求无向图的割点
Args:
        n (int): number of vertices
        graph (List[List[int]]): adjacency list
Returns:
        List[bool]: whether each vertex is a cut vertex
"""
def dfs(cur: int, pre: int) -> int:
nonlocal dfsId
dfsId += 1
dfsOrder[cur] = dfsId
curLow = dfsId
childCount = 0
for next in graph[cur]:
if dfsOrder[next] == 0:
childCount += 1
nextLow = dfs(next, cur)
if nextLow >= dfsOrder[cur]:
isCut[cur] = True
if nextLow < curLow:
curLow = nextLow
elif next != pre and dfsOrder[next] < curLow:
curLow = dfsOrder[next]
        if pre == -1 and childCount == 1:  # special case: a DFS root with a single child does not increase the number of components when removed, so it is not a cut vertex
isCut[cur] = False
return curLow
isCut = [False] * n
    dfsOrder = [0] * n  # values start from 1
dfsId = 0
for i, order in enumerate(dfsOrder):
if order == 0:
dfs(i, -1)
return isCut
class Solution:
def minDays(self, grid: List[List[int]]) -> int:
        # special cases
oneCount = sum(row.count(1) for row in grid)
if oneCount == 0:
return 0
elif oneCount == 1:
return 1
        # the case where the number of connected components is not 1
gridCopy = deepcopy(grid)
part = self.floodFill(gridCopy)
if part != 1:
return 0
        # use Tarjan's algorithm to look for a cut vertex
ROW, COL = len(grid), len(grid[0])
adjList = [[] for _ in range(ROW * COL)]
for i in range(ROW):
for j in range(COL):
if grid[i][j] == 1:
cur = i * COL + j
if i - 1 >= 0 and grid[i - 1][j] == 1:
next = (i - 1) * COL + j
adjList[cur].append(next)
adjList[next].append(cur)
if j + 1 < COL and grid[i][j + 1] == 1:
next = i * COL + j + 1
adjList[cur].append(next)
adjList[next].append(cur)
isCut = findCutVertices(ROW * COL, adjList)
if any(isCut):
return 1
else:
return 2
def floodFill(self, grid: List[List[int]]) -> int:
def dfs(r: int, c: int) -> None:
if grid[r][c] == 0:
return
grid[r][c] = 0
for dr, dc in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
nr, nc = r + dr, c + dc
if 0 <= nr < row and 0 <= nc < col and grid[nr][nc] == 1:
dfs(nr, nc)
res = 0
row, col = len(grid), len(grid[0])
for r in range(row):
for c in range(col):
if grid[r][c] == 1:
res += 1
dfs(r, c)
return res
assert Solution().minDays([[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]) == 2
assert (
Solution().minDays(grid=[[0, 1, 0, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 0]])
== 1
)
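# Hypothetical standalone check (not part of the original file): in the path
# graph 0-1-2, the middle vertex is the only cut vertex.
assert findCutVertices(3, [[1], [0, 2], [1]]) == [False, True, False]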
|
06a46686405c0979d52e6930ce679ea03b43034e
|
b097bc2fba0629d592d76d9c7649d9a62789afb6
|
/register.py
|
832dfdedbff5e101b557e6bf310618e3fdac402e
|
[
"MIT"
] |
permissive
|
cwacek/python-jsonschema-objects
|
d91bb4d6c69fffcc24f45f33e05174a627293449
|
2a5aeb29a953cff3d4fd82f19c0d46342af1da36
|
refs/heads/master
| 2023-09-01T11:49:43.767991
| 2023-08-17T01:37:45
| 2023-08-17T01:37:45
| 18,216,839
| 356
| 113
|
MIT
| 2023-09-14T01:56:32
| 2014-03-28T15:29:21
|
Python
|
UTF-8
|
Python
| false
| false
| 298
|
py
|
register.py
|
import pandoc
import os
def markdown_to_rst(src):
pandoc.core.PANDOC_PATH = "/usr/local/bin/pandoc"
if not os.path.exists(pandoc.core.PANDOC_PATH):
raise Exception("Pandoc not available")
doc = pandoc.Document()
doc.markdown = open("README.md").read()
return doc.rst
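# Hypothetical usage sketch (not part of the original file; assumes pandoc is
# installed at the path above):
#
#     long_description = markdown_to_rst("README.md")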
|
44ca9aebb9ac5c0f7828c066794a758508ec7abc
|
d4412fbe37540e2c4cbe59ed6503d3661ccb7d9c
|
/colossalai/tensor/dist_spec_mgr.py
|
c968050de49d45d32d4c701f75183274ae743194
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
hpcaitech/ColossalAI
|
a082ed08a3807b53c49d1f86835b9808590d9042
|
c7b60f75470f067d1342705708810a660eabd684
|
refs/heads/main
| 2023-09-01T04:13:13.834565
| 2023-08-30T15:07:21
| 2023-08-30T15:07:21
| 422,274,596
| 32,044
| 4,084
|
Apache-2.0
| 2023-09-14T15:19:54
| 2021-10-28T16:19:44
|
Python
|
UTF-8
|
Python
| false
| false
| 8,688
|
py
|
dist_spec_mgr.py
|
from contextlib import contextmanager
import torch
import torch.distributed as dist
# from colossalai.nn.layer.utils import divide
from numpy import prod
from colossalai.tensor.distspec import DistPlacementPattern, _DistSpec
from colossalai.tensor.process_group import ProcessGroup
# TODO(jiaruifang) circle import, move the divide to colossalai.commons.
# colossalai.tensor shall not import any submodule from colossal.nn
def divide(numerator, denominator):
"""Only allow exact division.
Args:
numerator (int): Numerator of the division.
denominator (int): Denominator of the division.
Returns:
int: the result of exact division.
"""
assert denominator != 0, 'denominator can not be zero'
assert numerator % denominator == 0, \
'{} is not divisible by {}'.format(numerator, denominator)
return numerator // denominator
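# Hypothetical quick check (not part of the original file): divide(8, 4)
# returns 2, while divide(7, 2) raises an AssertionError because the division
# is not exact.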
class TransformDistSpec(torch.autograd.Function):
@staticmethod
def forward(ctx, tensor, old_dist_spec, dist_spec, pg, forward_trans_func, backward_trans_func):
ctx.old_dist_spec = old_dist_spec
ctx.dist_spec = dist_spec
ctx.backward_trans_func = backward_trans_func
ctx.pg = pg
return forward_trans_func(tensor, old_dist_spec, dist_spec, pg)
@staticmethod
def backward(ctx, grad_outputs):
return ctx.backward_trans_func(grad_outputs, ctx.dist_spec, ctx.old_dist_spec,
ctx.pg), None, None, None, None, None
class DistSpecManager:
_use_autograd_function: bool = True
@staticmethod
def _sanity_check(old_dist_spec: _DistSpec, dist_spec: _DistSpec) -> None:
pass
@staticmethod
def _shard_as(tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec,
pg: ProcessGroup) -> torch.Tensor:
"""_shard_as: shard the tensor w.r.t a distributed specification.
Assuming the tensor passed in is a global (replicated) tensor.
Args:
tensor (torch.Tensor): a global (replicated) tensor before shard
dist_spec (_DistSpec): the distributed spec. to be sharded as.
pg (ProcessGroup): the process group of the corresponding colotensor
Returns:
torch.Tensor: a torch tensor after sharded.
"""
assert old_dist_spec.placement.value == 'r', f"The old_dist_spec of DistSpecManager._shard_as must be REPLICATE!"
DistSpecManager._sanity_check(old_dist_spec, dist_spec)
chunk = tensor
idx = pg.tp_local_rank()
num_parts = prod(dist_spec.num_partitions)
for i, dim in enumerate(dist_spec.dims):
num_parts //= dist_spec.num_partitions[i]
chunk_size = divide(tensor.size(dim), dist_spec.num_partitions[i])
chunk = chunk.narrow(dim, idx // num_parts * chunk_size, chunk_size)
idx %= num_parts
return chunk.clone().detach().contiguous()
@staticmethod
def _gather(tensor: torch.Tensor, old_dist_spec: _DistSpec, pg: ProcessGroup) -> torch.Tensor:
"""_gather gather sharded tensors to a replicated one.
Args:
tensor (torch.Tensor): a shared torch tensor
old_dist_spec (_DistSpec): the distributed spec. of the tensor.
Returns:
torch.Tensor: a replicated tensor.
"""
assert old_dist_spec.placement.value == 's', f"The old_dist_spec of DistSpecManager._gather must be SHARD!"
is_cpu_tensor = False
if tensor.device.type == 'cpu':
            # PyTorch versions lower than 1.11 do not support gathering a CPU tensor.
            # Therefore, we transfer the tensor to GPU before gathering.
saved_dev = tensor.device
tensor.data = tensor.data.cuda()
is_cpu_tensor = True
buffer = [torch.empty_like(tensor) for _ in range(pg.tp_world_size())]
assert tensor.device.type == 'cuda'
dist.all_gather(buffer, tensor, group=pg.tp_process_group())
for i in range(len(old_dist_spec.dims) - 1, -1, -1):
new_buffer = []
dim = old_dist_spec.dims[i]
num_parts = old_dist_spec.num_partitions[i]
for start in range(0, len(buffer), num_parts):
new_buffer.append(torch.cat(buffer[start:start + num_parts], dim))
buffer = new_buffer
assert len(buffer) == 1
if is_cpu_tensor:
buffer[0].data = buffer[0].data.to(saved_dev)
return buffer[0]
@staticmethod
def _all_to_all(tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec,
pg: ProcessGroup) -> torch.Tensor:
world_size = pg.tp_world_size()
if world_size == 1:
return tensor
assert tensor.device.type == "cuda", \
"Currently, only CUDA Tensor with NCCL backend is supported for the requested AlltoAll " \
f"collective function, however, we got {tensor.device.type} device"
gather_dim = old_dist_spec.dims[0]
scatter_dim = dist_spec.dims[0]
shapes = list(tensor.shape)
scattered_dim_size = shapes[scatter_dim] // world_size
gathered_dim_size = shapes[gather_dim] * world_size
shapes[scatter_dim] = scattered_dim_size
scatter_list = [t.contiguous() for t in torch.tensor_split(tensor, world_size, scatter_dim)]
gather_list = [torch.empty(*shapes, dtype=tensor.dtype, device=tensor.device) for _ in range(world_size)]
dist.all_to_all(gather_list, scatter_list, group=pg.tp_process_group())
output_ = torch.cat(gather_list, dim=gather_dim).contiguous()
assert output_.shape[scatter_dim] == scattered_dim_size and output_.shape[gather_dim] == gathered_dim_size
return output_
@staticmethod
def _r2r(tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec, pg: ProcessGroup) -> torch.Tensor:
DistSpecManager._sanity_check(old_dist_spec, dist_spec)
return tensor
@staticmethod
def _r2s(tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec, pg: ProcessGroup) -> torch.Tensor:
DistSpecManager._sanity_check(old_dist_spec, dist_spec)
return DistSpecManager._shard_as(tensor, old_dist_spec, dist_spec, pg)
@staticmethod
def _s2r(tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec, pg: ProcessGroup) -> torch.Tensor:
DistSpecManager._sanity_check(old_dist_spec, dist_spec)
return DistSpecManager._gather(tensor, old_dist_spec, pg)
@staticmethod
def _s2s(tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec, pg: ProcessGroup) -> torch.Tensor:
DistSpecManager._sanity_check(old_dist_spec, dist_spec)
if old_dist_spec == dist_spec:
return tensor
if len(old_dist_spec.dims) == 1 and len(dist_spec.dims) == 1:
# use all-to-all to save memory
return DistSpecManager._all_to_all(tensor, old_dist_spec, dist_spec, pg)
tensor = DistSpecManager._gather(tensor, old_dist_spec, pg)
return DistSpecManager._shard_as(tensor, old_dist_spec, dist_spec, pg)
@staticmethod
def handle_trans_spec(tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec,
pg: ProcessGroup) -> torch.Tensor:
assert isinstance(old_dist_spec, _DistSpec), f"{type(old_dist_spec)} should be _DistSpec"
assert isinstance(dist_spec, _DistSpec), f"{type(dist_spec)} should be _DistSpec"
trans_func_key = (old_dist_spec.placement, dist_spec.placement)
trans_funcs = {
(DistPlacementPattern.REPLICATE, DistPlacementPattern.REPLICATE): DistSpecManager._r2r,
(DistPlacementPattern.REPLICATE, DistPlacementPattern.SHARD): DistSpecManager._r2s,
(DistPlacementPattern.SHARD, DistPlacementPattern.REPLICATE): DistSpecManager._s2r,
(DistPlacementPattern.SHARD, DistPlacementPattern.SHARD): DistSpecManager._s2s
}
forward_trans_handle = trans_funcs[trans_func_key]
if not DistSpecManager._use_autograd_function:
return forward_trans_handle(tensor, old_dist_spec, dist_spec, pg)
backward_trans_handle = trans_funcs[(dist_spec.placement, old_dist_spec.placement)]
return TransformDistSpec.apply(tensor, old_dist_spec, dist_spec, pg, forward_trans_handle,
backward_trans_handle)
@staticmethod
@contextmanager
def no_grad():
try:
DistSpecManager._use_autograd_function = False
yield
finally:
DistSpecManager._use_autograd_function = True
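# --- Illustrative usage sketch (not part of the original file) ---
# Dispatching a layout transform through handle_trans_spec. The
# ReplicaSpec()/ShardSpec(...) constructors and the ProcessGroup `pg`
# below are assumptions modeled on ColossalAI-style APIs, not confirmed
# by this file.
#
# import torch
# replica_spec = ReplicaSpec()                       # placement: REPLICATE
# shard_spec = ShardSpec([0], [pg.tp_world_size()])  # shard dim 0 across the TP group
# t = torch.randn(8, 4, device="cuda")
# # REPLICATE -> SHARD dispatches to _r2s; SHARD -> REPLICATE to _s2r
# t_shard = DistSpecManager.handle_trans_spec(t, replica_spec, shard_spec, pg)
# with DistSpecManager.no_grad():  # bypass the autograd-aware TransformDistSpec path
#     t_full = DistSpecManager.handle_trans_spec(t_shard, shard_spec, replica_spec, pg)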
---
blob_id: 0e8ff39e611de7474402f4e6b810e79eb010cc3c
directory_id: 2a1b8a671aceda6bc446f8ce26400aa84fa444a6
path: /Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM.py
content_id: 350e69cda15c180896cad0dc7a47c14aa9d2c9a9
detected_licenses: ["MIT"]
license_type: permissive
repo_name: demisto/content
snapshot_id: 6d4722d46f0ff0beea2748e9f7de585bf91a78b4
revision_id: 890def5a0e0ae8d6eaa538148249ddbc851dbb6b
branch_name: refs/heads/master
visit_date: 2023-09-04T00:02:25.618032
revision_date: 2023-09-03T21:56:22
committer_date: 2023-09-03T21:56:22
github_id: 60525392
star_events_count: 1023
fork_events_count: 1921
gha_license_id: MIT
gha_event_created_at: 2023-09-14T20:55:24
gha_created_at: 2016-06-06T12:17:02
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 17826
extension: py
filename: Akamai_SIEM.py
content:
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
""" IMPORTS """
# Std imports
from datetime import datetime, timezone
from base64 import b64decode
# 3-rd party imports
from typing import Dict, Tuple, Union, Optional, List, Any, Sequence
import urllib.parse
import urllib3
from akamai.edgegrid import EdgeGridAuth
# Local imports
from CommonServerUserPython import *
"""GLOBALS/PARAMS
Attributes:
INTEGRATION_NAME:
Name of the integration as shown in the integration UI, for example: Microsoft Graph User.
INTEGRATION_COMMAND_NAME:
Command names should be written in all lower-case letters,
and each word separated with a hyphen, for example: msgraph-user.
INTEGRATION_CONTEXT_NAME:
Context output names should be written in camel case, for example: MSGraphUser.
"""
INTEGRATION_NAME = 'Akamai SIEM'
INTEGRATION_COMMAND_NAME = 'akamai-siem'
INTEGRATION_CONTEXT_NAME = 'Akamai'
# Disable insecure warnings
urllib3.disable_warnings()
class Client(BaseClient):
def get_events(self, config_ids: str, offset: Optional[str] = '', limit: Optional[Union[str, int]] = None,
from_epoch: Optional[str] = '', to_epoch: Optional[str] = '') \
-> Tuple[List[Any], Any]:
"""
        Get security events from the Akamai WAF service (https://developer.akamai.com/api/cloud_security/siem/v1.html).
        Note that the response is returned as text containing multiple JSON objects, one per line.
Allowed query parameters combinations:
1. offset - Since a prior request.
2. offset, limit - Since a prior request, limited.
3. from - Since a point in time.
4. from, limit - Since a point in time, limited.
5. from, to - Over a range of time.
6. from, to, limit - Over a range of time, limited.
Args:
config_ids: Unique identifier for each security configuration. To report on more than one configuration, separate
integer identifiers with semicolons, e.g. 12892;29182;82912.
offset: This token denotes the last message. If specified, this operation fetches only security events that have
occurred from offset. This is a required parameter for offset mode and you can’t use it in time-based
requests.
limit: Defines the approximate maximum number of security events each fetch returns, in both offset and
time-based modes. The default limit is 10000. Expect requests to return a slightly higher number of
security events than you set in the limit parameter, because data is stored in different buckets.
from_epoch: The start of a specified time range, expressed in Unix epoch seconds.
This is a required parameter to get time-based results for a set period, and you can’t use it in
offset mode.
to_epoch: The end of a specified time range, expressed in Unix epoch seconds. You can’t use this parameter in
offset mode and it’s an optional parameter in time-based mode. If omitted, the value defaults to the
current time.
Returns:
            Multiple json objects as list of dictionaries, and the offset for the next pagination
"""
params = {
'offset': offset,
'limit': limit,
'to': to_epoch,
'from': from_epoch,
}
raw_response: str = self._http_request(method='GET',
url_suffix=f'/{config_ids}',
params=assign_params(**params),
resp_type='text')
        events: List = []
        if '{ "total": 0' not in raw_response:
            # every line of the response body is one JSON event; the last two
            # lines (a trailing context object and an empty line) are dropped
            events = [json.loads(event) for event in raw_response.split('\n')[:-2]]
            # advance the offset to the start time of the newest event
            new_offset = str(max([int(event.get('httpMessage', {}).get('start')) for event in events]))
else:
new_offset = str(from_epoch)
return events, new_offset
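    # --- Illustrative usage sketch (not part of the original file) ---
    # The allowed query-parameter combinations from the docstring above,
    # written out as calls; `client` and the config id are placeholders.
    #
    # events, offset = client.get_events('12892', offset='abc123')             # 1. offset
    # events, offset = client.get_events('12892', offset='abc123', limit=100)  # 2. offset, limit
    # events, offset = client.get_events('12892', from_epoch='1488816442')     # 3. from
    # events, offset = client.get_events('12892', from_epoch='1488816442',
    #                                    to_epoch='1488816502', limit=100)     # 6. from, to, limit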
'''HELPER FUNCTIONS'''
def date_format_converter(from_format: str, date_before: str, readable_format: str = '%Y-%m-%dT%H:%M:%SZ%Z') -> str:
"""
    Convert a date between epoch time and a readable format (%Y-%m-%dT%H:%M:%SZ by default)
Args:
from_format: format to convert from.
        date_before: date before conversion, in epoch time or %Y-%m-%dT%H:%M:%SZ format
readable_format: readable format by default %Y-%m-%dT%H:%M:%SZ
Examples:
>>> date_format_converter(from_format='epoch', date_before='1576570098')
'2019-12-17T08:08:18Z'
>>> date_format_converter(from_format='epoch', date_before='1576570098', readable_format='%Y-%m-%d %H:%M:%S')
'2019-12-17 08:08:18'
>>> date_format_converter(from_format='readable', date_before='2019-12-17T08:08:18Z')
'1576570098'
    Returns:
        Converted date as a string
"""
converted_date: Union[str, int] = ''
if from_format == 'epoch':
converted_date = datetime.utcfromtimestamp(int(date_before)).strftime(readable_format)
elif from_format == 'readable':
date_before += 'UTC'
converted_date = int(datetime.strptime(date_before, readable_format).replace(tzinfo=timezone.utc).timestamp())
return str(converted_date)
def decode_message(msg: str) -> Sequence[Optional[str]]:
"""
    Follow these steps for data members that appear within the event’s attackData section:
        1. If the member name is prefixed with `rule`, URL-decode the value.
        2. The result is a series of base64-encoded chunks delimited by semicolons.
        3. Split the value at semicolon (;) characters.
        4. base64-decode each chunk of the split data.
    For example, such a value might decode to the sequence alert, alert, deny (see the examples below).
Args:
        msg: Message to decode
Returns:
Decoded message as array
Examples:
>>> decode_message(msg='ZGVueQ%3d%3d')
['deny']
>>> decode_message(msg='Q3VzdG9tX1JlZ0VYX1J1bGU%3d%3bTm8gQWNjZXB0IEhlYWRlciBBTkQgTm8gVXNlciBBZ2VudCBIZWFkZXI%3d')
['Custom_RegEX_Rule', 'No Accept Header AND No User Agent Header']
"""
readable_msg = []
translated_msg = urllib.parse.unquote(msg).split(';')
for word in translated_msg:
word = b64decode(word.encode('utf8')).decode('utf8')
if word:
readable_msg.append(word)
return readable_msg
def events_to_ec(raw_response: List) -> Tuple[List, List, List]:
"""
    Convert raw response events to entry context
    Args:
        raw_response: events as list from raw response
    Returns:
        events as entry context, IP indicators, and events for human-readable output
"""
events_ec: List[Dict] = []
ip_ec: List[Dict] = []
events_human_readable: List[Dict] = []
for event in raw_response:
events_ec.append(
{
"AttackData": assign_params(
ConfigID=event.get('attackData', {}).get('configId'),
PolicyID=event.get('attackData', {}).get('policyId'),
ClientIP=event.get('attackData', {}).get('clientIP'),
Rules=decode_message(event.get('attackData', {}).get('rules')),
RuleMessages=decode_message(event.get('attackData', {}).get('ruleMessages')),
RuleTags=decode_message(event.get('attackData', {}).get('ruleTags')),
RuleData=decode_message(event.get('attackData', {}).get('ruleData')),
RuleSelectors=decode_message(event.get('attackData', {}).get('ruleSelectors')),
RuleActions=decode_message(event.get('attackData', {}).get('ruleActions'))
),
"HttpMessage": assign_params(
RequestId=event.get('httpMessage', {}).get('requestId'),
Start=event.get('httpMessage', {}).get('start'),
Protocol=event.get('httpMessage', {}).get('protocol'),
Method=event.get('httpMessage', {}).get('method'),
Host=event.get('httpMessage', {}).get('host'),
Port=event.get('httpMessage', {}).get('port'),
Path=event.get('httpMessage', {}).get('path'),
RequestHeaders=event.get('httpMessage', {}).get('requestHeaders'),
Status=event.get('httpMessage', {}).get('status'),
Bytes=event.get('httpMessage', {}).get('bytes'),
ResponseHeaders=event.get('httpMessage', {}).get('responseHeaders')
),
"Geo": assign_params(
Continent=event.get('geo', {}).get('continent'),
Country=event.get('geo', {}).get('country'),
City=event.get('geo', {}).get('city'),
RegionCode=event.get('geo', {}).get('regionCode'),
Asn=event.get('geo', {}).get('asn')
)
}
)
ip_ec.append(assign_params(
Address=event.get('attackData', {}).get('clientIP'),
ASN=event.get('geo', {}).get('asn'),
Geo={
"Country": event.get('geo', {}).get('country')
}
))
events_human_readable.append(assign_params(**{
'Attacking IP': event.get('attackData', {}).get('clientIP'),
"Config ID": event.get('attackData', {}).get('configId'),
"Policy ID": event.get('attackData', {}).get('policyId'),
"Rules": decode_message(event.get('attackData', {}).get('rules')),
"Rule messages": decode_message(event.get('attackData', {}).get('ruleMessages')),
"Rule actions": decode_message(event.get('attackData', {}).get('ruleActions')),
            'Date occurred': date_format_converter(from_format='epoch',
date_before=event.get('httpMessage', {}).get('start')),
"Location": {
'Country': event.get('geo', {}).get('country'),
'City': event.get('geo', {}).get('city')
}
}))
return events_ec, ip_ec, events_human_readable
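# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hypothetical event showing how events_to_ec maps raw fields
# into entry context, IP indicators, and human-readable rows. All values
# below are made up for illustration.
#
# sample_event = {
#     'attackData': {'configId': '12892', 'policyId': 'pol_1', 'clientIP': '203.0.113.7',
#                    'rules': '', 'ruleMessages': '', 'ruleTags': '', 'ruleData': '',
#                    'ruleSelectors': '', 'ruleActions': 'ZGVueQ%3d%3d'},  # decodes to ['deny']
#     'httpMessage': {'requestId': '1', 'start': '1576570098'},
#     'geo': {'country': 'US', 'city': 'Los Angeles', 'asn': 64500},
# }
# events_ec, ip_ec, rows = events_to_ec([sample_event])
# assert ip_ec[0]['Address'] == '203.0.113.7'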
''' COMMANDS '''
@logger
def test_module_command(client: Client) -> Tuple[None, None, str]:
"""Performs a basic GET request to check if the API is reachable and authentication is successful.
Args:
client: Client object with request
Returns:
'ok' if test successful.
Raises:
DemistoException: If test failed.
"""
# Test on the following date Monday, 6 March 2017 16:07:22
events, offset = client.get_events(config_ids=demisto.params().get('configIds'),
from_epoch='1488816442',
limit='1')
if isinstance(events, list):
return None, None, 'ok'
raise DemistoException(f'Test module failed, {events}')
@logger
def fetch_incidents_command(
client: Client,
fetch_time: str,
fetch_limit: Union[str, int],
config_ids: str,
last_run: Optional[str] = None) -> Tuple[List[Dict[str, Any]], Dict]:
"""Uses to fetch incidents into Demisto
Documentation: https://github.com/demisto/content/tree/master/docs/fetching_incidents
Args:
client: Client object with request
fetch_time: From when to fetch if first time, e.g. `3 days`
fetch_limit: limit of incidents in a fetch
config_ids: security configuration ids to fetch, e.g. `51000;56080`
        last_run: Time of the last fetch from the previous run, if any.
Returns:
incidents, new last_run
"""
raw_response: Optional[List] = []
if not last_run:
last_run, _ = parse_date_range(date_range=fetch_time, date_format='%s')
raw_response, offset = client.get_events(config_ids=config_ids, from_epoch=last_run, limit=fetch_limit)
incidents = []
if raw_response:
for event in raw_response:
incidents.append({
'name': f"{INTEGRATION_NAME}: {event.get('attackData').get('configId')}",
'occurred': date_format_converter(from_format='epoch',
date_before=event.get('httpMessage', {}).get('start')),
'rawJSON': json.dumps(event)
})
return incidents, {'lastRun': offset}
def get_events_command(client: Client, config_ids: str, offset: Optional[str] = None, limit: Optional[str] = None,
from_epoch: Optional[str] = None, to_epoch: Optional[str] = None, time_stamp: Optional[str] = None) \
-> Tuple[object, dict, Union[List, Dict]]:
"""
Get security events from Akamai WAF service
Allowed query parameters combinations:
1. offset - Since a prior request.
2. offset, limit - Since a prior request, limited.
3. from - Since a point in time.
4. from, limit - Since a point in time, limited.
5. from, to - Over a range of time.
6. from, to, limit - Over a range of time, limited.
Args:
client: Client object
config_ids: Unique identifier for each security configuration. To report on more than one configuration, separate
integer identifiers with semicolons, e.g. 12892;29182;82912.
offset: This token denotes the last message. If specified, this operation fetches only security events that have
occurred from offset. This is a required parameter for offset mode and you can’t use it in time-based requests.
limit: Defines the approximate maximum number of security events each fetch returns, in both offset and
time-based modes. The default limit is 10000. Expect requests to return a slightly higher number of
security events than you set in the limit parameter, because data is stored in different buckets.
from_epoch: The start of a specified time range, expressed in Unix epoch seconds.
This is a required parameter to get time-based results for a set time_stamp, and you can’t use it in
offset mode.
to_epoch: The end of a specified time range, expressed in Unix epoch seconds. You can’t use this parameter in
offset mode and it’s an optional parameter in time-based mode. If omitted, the value defaults to the
current time.
        time_stamp: timestamp (<number> <time unit>), e.g., 12 hours or 7 days of events
Returns:
Human readable, entry context, raw response
"""
if time_stamp:
from_epoch, to_epoch = parse_date_range(date_range=time_stamp,
date_format="%s")
raw_response, offset = client.get_events(config_ids=config_ids,
offset=offset,
limit=limit,
from_epoch=from_epoch,
to_epoch=to_epoch)
if raw_response:
events_ec, ip_ec, events_human_readable = events_to_ec(raw_response)
entry_context = {
"Akamai.SIEM(val.HttpMessage.RequestId && val.HttpMessage.RequestId == obj.HttpMessage.RequestId)": events_ec,
outputPaths.get('ip'): ip_ec
}
title = f'{INTEGRATION_NAME} - Attacks data'
human_readable = tableToMarkdown(name=title,
t=events_human_readable,
removeNull=True)
return human_readable, entry_context, raw_response
else:
return f'{INTEGRATION_NAME} - Could not find any results for given query', {}, {}
''' COMMANDS MANAGER / SWITCH PANEL '''
def main():
params = demisto.params()
client = Client(
base_url=urljoin(params.get('host'), '/siem/v1/configs'),
verify=not params.get('insecure', False),
proxy=params.get('proxy'),
auth=EdgeGridAuth(
client_token=params.get('clienttoken_creds', {}).get('password') or params.get('clientToken'),
access_token=params.get('accesstoken_creds', {}).get('password') or params.get('accessToken'),
client_secret=params.get('clientsecret_creds', {}).get('password') or params.get('clientSecret'),
)
)
commands = {
"test-module": test_module_command,
f"{INTEGRATION_COMMAND_NAME}-get-events": get_events_command
}
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
if command == 'fetch-incidents':
incidents, new_last_run = fetch_incidents_command(client,
fetch_time=params.get('fetchTime'),
fetch_limit=params.get('fetchLimit'),
config_ids=params.get('configIds'),
last_run=demisto.getLastRun().get('lastRun'))
demisto.incidents(incidents)
demisto.setLastRun(new_last_run)
else:
human_readable, entry_context, raw_response = commands[command](client, **demisto.args())
return_outputs(human_readable, entry_context, raw_response)
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
return_error(err_msg, error=e)
if __name__ == 'builtins':
main()
---
blob_id: 4ed601181531ce34aa8e8d35495e8f5ad7a83991
directory_id: 0e48483bd01170a8f4ca864de9b75bad50f55a94
path: /hazelcast/serialization/portable/classdef.py
content_id: 04347803ca6a36b242b328ac194a39ae9e59b345
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: hazelcast/hazelcast-python-client
snapshot_id: ed01e55165961ecb148a498ae3dd36503b64e93e
revision_id: 1723cc040c328ebc0d5ab44396c2f55bad7d9075
branch_name: refs/heads/master
visit_date: 2023-09-04T02:29:29.753280
revision_date: 2023-08-14T09:13:03
committer_date: 2023-08-14T09:13:03
github_id: 47321016
star_events_count: 112
fork_events_count: 73
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-11T17:59:53
gha_created_at: 2015-12-03T09:14:23
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 26041
extension: py
filename: classdef.py
content:
import typing
from hazelcast.errors import HazelcastSerializationError
class FieldType:
PORTABLE = 0
BYTE = 1
BOOLEAN = 2
CHAR = 3
SHORT = 4
INT = 5
LONG = 6
FLOAT = 7
DOUBLE = 8
UTF = 9 # Defined for backward compatibility.
STRING = 9
PORTABLE_ARRAY = 10
BYTE_ARRAY = 11
BOOLEAN_ARRAY = 12
CHAR_ARRAY = 13
SHORT_ARRAY = 14
INT_ARRAY = 15
LONG_ARRAY = 16
FLOAT_ARRAY = 17
DOUBLE_ARRAY = 18
UTF_ARRAY = 19 # Defined for backward compatibility.
STRING_ARRAY = 19
DECIMAL = 20
DECIMAL_ARRAY = 21
TIME = 22
TIME_ARRAY = 23
DATE = 24
DATE_ARRAY = 25
TIMESTAMP = 26
TIMESTAMP_ARRAY = 27
TIMESTAMP_WITH_TIMEZONE = 28
TIMESTAMP_WITH_TIMEZONE_ARRAY = 29
class FieldDefinition:
def __init__(
self,
index: int,
field_name: str,
field_type: int,
version: int,
factory_id: int = 0,
class_id: int = 0,
):
self.index = index
self.field_name = field_name
self.field_type = field_type
self.version = version
self.factory_id = factory_id
self.class_id = class_id
def __eq__(self, other):
        return (
            isinstance(other, FieldDefinition)
            and self.index == other.index
            and self.field_name == other.field_name
            and self.field_type == other.field_type
            and self.version == other.version
            and self.factory_id == other.factory_id
            and self.class_id == other.class_id
        )
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "FieldDefinition(ix=%s, name=%s, type=%s, version=%s, fid=%s, cid=%s)" % (
self.index,
self.field_name,
self.field_type,
self.version,
self.factory_id,
self.class_id,
)
class ClassDefinition:
def __init__(self, factory_id: int, class_id: int, version: int):
self.factory_id = factory_id
self.class_id = class_id
self.version = version
self.field_defs: typing.Dict[str, FieldDefinition] = {}
def add_field_def(self, field_def):
self.field_defs[field_def.field_name] = field_def
def get_field(
self, field_name_or_index: typing.Union[int, str]
) -> typing.Optional[FieldDefinition]:
if isinstance(field_name_or_index, int):
index = field_name_or_index
count = self.get_field_count()
if 0 <= index < count:
for field in self.field_defs.values():
if field.index == index:
return field
            raise IndexError("Index is out of bounds. Index: %s, size: %s" % (index, count))
else:
return self.field_defs.get(field_name_or_index, None)
def has_field(self, field_name: str) -> bool:
return field_name in self.field_defs
def get_field_names(self) -> typing.List[str]:
return list(self.field_defs.keys())
def get_field_type(self, field_name: str) -> int:
fd = self.get_field(field_name)
if fd:
return fd.field_type
raise ValueError("Unknown field: %s" % field_name)
def get_field_class_id(self, field_name: str) -> int:
fd = self.get_field(field_name)
if fd:
return fd.class_id
raise ValueError("Unknown field: %s" % field_name)
def get_field_count(self) -> int:
return len(self.field_defs)
def set_version_if_not_set(self, version):
if self.version < 0:
self.version = version
def __eq__(self, other):
return (
isinstance(other, ClassDefinition)
and self.factory_id == other.factory_id
and self.class_id == other.class_id
and self.version == other.version
and self.field_defs == other.field_defs
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "fid:%s, cid:%s, v:%s, fields:%s" % (
self.factory_id,
self.class_id,
self.version,
self.field_defs,
)
def __hash__(self):
return hash((self.factory_id, self.class_id, self.version))
class ClassDefinitionBuilder:
"""Builder class to construct :class:`ClassDefinition` of
:class:`hazelcast.serialization.api.Portable` objects.
One must make sure that the order of the fields added to this
builder is consistent across all the usages of the Portable
object such as the write order of the fields of the
:func:`Portable.write_portable <hazelcast.serialization.api.Portable.write_portable>`
method.
"""
def __init__(self, factory_id: int, class_id: int, version: int = 0):
self.factory_id = factory_id
self.class_id = class_id
self.version = version
self._index = 0
self._done = False
self._field_defs: typing.List[FieldDefinition] = []
self._field_names: typing.Set[str] = set()
def add_portable_field(
self, field_name: str, class_def: ClassDefinition
) -> "ClassDefinitionBuilder":
"""Adds the field with the Portable type to the
class definition.
Args:
field_name: Name of the field to add.
class_def: Class definition of the nested Portable.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
if class_def.class_id is None or class_def.class_id == 0:
raise ValueError("Portable class id cannot be zero!")
self._add_field_by_type(
field_name,
FieldType.PORTABLE,
class_def.version,
class_def.factory_id,
class_def.class_id,
)
return self
def add_byte_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the byte type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.BYTE, self.version)
return self
def add_boolean_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the boolean type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.BOOLEAN, self.version)
return self
def add_char_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the char type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.CHAR, self.version)
return self
def add_short_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the short type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.SHORT, self.version)
return self
def add_int_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the int type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.INT, self.version)
return self
def add_long_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the long type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.LONG, self.version)
return self
def add_float_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the float type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.FLOAT, self.version)
return self
def add_double_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the double type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.DOUBLE, self.version)
return self
def add_string_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the string type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.STRING, self.version)
return self
def add_utf_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the string type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
.. deprecated:: 4.1
This method is deprecated and will be removed in the
next major version. Use :func:`add_string_field` instead.
"""
return self.add_string_field(field_name)
def add_decimal_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the decimal type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.DECIMAL, self.version)
return self
def add_time_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the time type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.TIME, self.version)
return self
def add_date_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the date type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.DATE, self.version)
return self
def add_timestamp_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the timestamp type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.TIMESTAMP, self.version)
return self
def add_timestamp_with_timezone_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the timestamp with timezone type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.TIMESTAMP_WITH_TIMEZONE, self.version)
return self
def add_portable_array_field(
self, field_name: str, class_def: ClassDefinition
) -> "ClassDefinitionBuilder":
"""Adds the field with the Portable array type to the
class definition.
Args:
field_name: Name of the field to add.
class_def: Class definition of the nested Portable.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
if class_def.class_id is None or class_def.class_id == 0:
raise ValueError("Portable class id cannot be zero!")
self._add_field_by_type(
field_name,
FieldType.PORTABLE_ARRAY,
class_def.version,
class_def.factory_id,
class_def.class_id,
)
return self
def add_byte_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the byte array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.BYTE_ARRAY, self.version)
return self
def add_boolean_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the boolean array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.BOOLEAN_ARRAY, self.version)
return self
def add_char_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the char array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.CHAR_ARRAY, self.version)
return self
def add_short_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the short array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.SHORT_ARRAY, self.version)
return self
def add_int_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the int array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.INT_ARRAY, self.version)
return self
def add_long_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the long array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.LONG_ARRAY, self.version)
return self
def add_float_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the float array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.FLOAT_ARRAY, self.version)
return self
def add_double_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the double array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.DOUBLE_ARRAY, self.version)
return self
def add_string_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the string array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.STRING_ARRAY, self.version)
return self
def add_utf_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the string array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
.. deprecated:: 4.1
This method is deprecated and will be removed in the
next major version. Use :func:`add_string_array_field` instead.
"""
return self.add_string_array_field(field_name)
def add_decimal_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the decimal array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.DECIMAL_ARRAY, self.version)
return self
def add_time_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the time array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.TIME_ARRAY, self.version)
return self
def add_date_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the date array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.DATE_ARRAY, self.version)
return self
def add_timestamp_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the timestamp array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.TIMESTAMP_ARRAY, self.version)
return self
def add_timestamp_with_timezone_array_field(self, field_name: str) -> "ClassDefinitionBuilder":
"""Adds the field with the timestamp with timezone array type to the
class definition.
Args:
field_name: Name of the field to add.
Returns:
Itself for chaining.
Raises:
HazelcastSerializationError: If this method is called
after :func:`build` or a field with the same
name is already registered.
"""
self._add_field_by_type(field_name, FieldType.TIMESTAMP_WITH_TIMEZONE_ARRAY, self.version)
return self
def add_field_def(self, field_def):
"""
Warnings:
This method is not intended for public usage.
It might be removed from the public API on the
next major version.
"""
if self._index != field_def.index:
raise ValueError("Invalid field index")
self._check(field_def.field_name)
self._index += 1
self._field_defs.append(field_def)
return self
def build(self) -> ClassDefinition:
"""Builds and returns the class definition."""
self._done = True
cd = ClassDefinition(self.factory_id, self.class_id, self.version)
for field_def in self._field_defs:
cd.add_field_def(field_def)
return cd
def _add_field_by_type(self, field_name, field_type, version, factory_id=0, class_id=0):
self._check(field_name)
fd = FieldDefinition(self._index, field_name, field_type, version, factory_id, class_id)
self._field_defs.append(fd)
self._index += 1
    def _check(self, field_name):
        if self._done:
            raise HazelcastSerializationError(
                "ClassDefinition is already built for %s" % self.class_id
            )
        if field_name in self._field_names:
            raise HazelcastSerializationError("Field with the name %s already exists" % field_name)
        self._field_names.add(field_name)
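# --- Illustrative usage sketch (not part of the original file) ---
# Building a ClassDefinition with the builder above. The factory and
# class ids are placeholders; the order of the add_*_field calls must
# match the write order in the corresponding Portable.write_portable.
#
# builder = ClassDefinitionBuilder(factory_id=1, class_id=2, version=0)
# class_def = (
#     builder.add_string_field("name")
#     .add_int_field("age")
#     .add_boolean_array_field("flags")
#     .build()
# )
# assert class_def.get_field_type("age") == FieldType.INT
# assert class_def.get_field_count() == 3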
---
blob_id: 850b5939049b680c1a2e1ae4f365aa0e64a0b017
directory_id: a173777f4ba02c1e683d75810fa6932487ba42cc
path: /2022/Hack-A-Sat-Quals-2022/power-point/control_antenna.py
content_id: 581a080765da8a8d7452e0b9a3fa0e0eb9f74c8c
detected_licenses: []
license_type: no_license
repo_name: perfectblue/ctf-writeups
snapshot_id: ba9454ef06e1004253f004154fba6ae00d88ca09
revision_id: 3f2a8a2c2598d700f33cb3f39ceb515e2ba46312
branch_name: refs/heads/master
visit_date: 2023-06-25T19:28:05.222110
revision_date: 2022-12-11T04:55:13
committer_date: 2022-12-11T04:55:13
github_id: 133306580
star_events_count: 606
fork_events_count: 75
gha_license_id: null
gha_event_created_at: 2023-01-20T22:38:17
gha_created_at: 2018-05-14T04:53:27
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1481
extension: py
filename: control_antenna.py
content:
from braindead import *
from braindead.util import xor
import numpy as np
import random
import time
log.enable()
args = Args()
args.parse()
r = io.connect(('power_point.satellitesabove.me', 5100))
r.sla('Ticket please', 'ticket{foxtrot582956papa3:GDEMCSCtCnLdL8Q6itFKoSOVKQkepfca9xcpAK-m_wUb7Wx-CgVi216Bnr92UTfC8A}')
cmd_server = list(r.rla('Antenna pointing TCP server accepts commands at ').decode().strip().split(':'))
cmd_server[1] = int(cmd_server[1])
iq_server = list(r.rla('Sample TCP server will provide samples at ').decode().strip().split(':'))
iq_server[1] = int(iq_server[1])
time.sleep(1)
iq_r = io.connect(iq_server)
r.ru('Sample Server: Client connected')
time.sleep(1)
cmd_r = io.connect(cmd_server)
r.ru('Press Enter')
fout = open('sig.cf32', 'wb')
az, el = 10, 20
old_power = 500
old = az, el
d_az = 0
d_el = 0
for i in range(400):
#for az in np.linspace(8, 12, 5):
#for el in np.linspace(18, 22, 5):
    # propose a new pointing: random jitter plus 80% of the last improving step (momentum)
    az += random.uniform(-0.2, 0.2) + d_az*0.8
    el += random.uniform(-0.2, 0.2) + d_el*0.8
cmd_r.sl(f'{az},{el}')
buf = iq_r.recvn(1024 *8)
fout.write(buf)
    sig = np.frombuffer(buf, dtype=np.complex64)
    power = np.mean(np.abs(sig))  # mean |IQ| amplitude as the signal-strength metric
    if power < old_power:
        # signal got worse: revert to the previous pointing (momentum unchanged)
        az, el = old
    else:
        # signal improved: record this step as the new momentum and accept the state
        d_az = az - old[0]
        d_el = el - old[1]
        old = az, el
        old_power = power
print(az, el, np.mean(np.abs(sig)))
r.ru('Sending 1024')
print(i*1024/10/8*2, 'bytes done', az, el)
fout.close()
io.interactive(r)
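# --- Illustrative sketch (not part of the original script) ---
# The pointing loop above is a stochastic hill climb with momentum on
# received signal power. One update step, distilled with placeholder names:
#
# def step(az, el, d_az, d_el, old, old_power, power):
#     if power < old_power:                    # worse signal: revert the move
#         az, el = old
#     else:                                    # better signal: keep the step as momentum
#         d_az, d_el = az - old[0], el - old[1]
#         old, old_power = (az, el), power
#     return az, el, d_az, d_el, old, old_power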
---
blob_id: 084b5f85b3b0cb3f66f0f44b794e46d9be9f81b7
directory_id: c66c784c2f01dbf529513469dcf0b79851da0beb
path: /scripts/test_examples.py
content_id: 70f322045b20605dc614afd45cbc0b613ab01e13
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: pynvme/pynvme
snapshot_id: 346af8a4106b2081bd30cb5766355b7894879d7c
revision_id: 2ff6d77fa8a00bc2b04d35f60affe14412941e40
branch_name: refs/heads/master
visit_date: 2022-09-21T16:28:56.882795
revision_date: 2022-09-12T02:37:43
committer_date: 2022-09-12T02:37:43
github_id: 163970764
star_events_count: 144
fork_events_count: 54
gha_license_id: BSD-3-Clause
gha_event_created_at: 2021-01-29T03:35:08
gha_created_at: 2019-01-03T12:33:41
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 28403
extension: py
filename: test_examples.py
content:
#
# BSD LICENSE
#
# Copyright (c) Crane Chu <cranechu@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -*- coding: utf-8 -*-
import time
import pytest
import logging
import nvme as d
# intuitive, spec, qpair, vscode, debug, cmdlog, assert
def test_hello_world(nvme0, nvme0n1, qpair):
# prepare data buffer and IO queue
read_buf = d.Buffer()
write_buf = d.Buffer()
write_buf[10:21] = b'hello world'
# send write and read command
def write_cb(cdw0, status1): # command callback function
nvme0n1.read(qpair, read_buf, 0, 1)
nvme0n1.write(qpair, write_buf, 0, 1, cb=write_cb)
# wait commands complete and verify data
assert read_buf[10:21] != b'hello world'
qpair.waitdone(2)
assert read_buf[10:21] == b'hello world'
# access PCIe/NVMe registers, identify data, pythonic
def test_registers_and_identify_data(pcie, nvme0, nvme0n1):
logging.info("0x%x, 0x%x" % (pcie[0], pcie.register(0, 2)))
logging.info("0x%08x, 0x%08x" % (nvme0[0], nvme0[4]))
logging.info("model name: %s" % nvme0.id_data(63, 24, str))
logging.info("vid: 0x%x" % nvme0.id_data(1, 0))
logging.info("namespace size: %d" % nvme0n1.id_data(7, 0))
# Controller, sanitize, default parameters, getlogpage
def test_sanitize(nvme0: d.Controller, buf):
if nvme0.id_data(331, 328) == 0:
pytest.skip("sanitize operation is not supported")
#import PySimpleGUI as sg
logging.info("supported sanitize operation: %d" % nvme0.id_data(331, 328))
#sg.OneLineProgressMeter('sanitize progress', 0, 100, 'progress', orientation='h')
nvme0n1 = d.Namespace(nvme0, 1, 128*1000*1000//4)
nvme0.sanitize().waitdone() # sanitize clears namespace
# check sanitize status in log page
nvme0.getlogpage(0x81, buf, 20).waitdone()
while buf.data(3, 2) & 0x7 != 1: # sanitize operation is not completed
time.sleep(1)
nvme0.getlogpage(0x81, buf, 20).waitdone()
progress = buf.data(1, 0)*100//0xffff
#sg.OneLineProgressMeter('sanitize progress', progress, 100, 'progress', orientation='h')
logging.info("%d%%" % progress)
nvme0n1.close()
# simple ioworker, complicated io_size, python function call, CI
def test_ioworker_simplified(nvme0, nvme0n1: d.Namespace, qpair):
nvme0n1.ioworker(time=1).start().close()
nvme0n1.ioworker(io_size=[1, 2, 3, 7, 8, 16], time=1).start().close()
nvme0n1.ioworker(op_percentage={2:10, 1:20, 0:30, 9:40}, time=1).start().close()
test_hello_world(nvme0, nvme0n1, qpair)
# ioworker with admin commands, multiprocessing, log, cmdlog, pythonic
def subprocess_trim(pciaddr, seconds):
pcie = d.Pcie(pciaddr)
nvme0 = d.Controller(pcie, True)
nvme0n1 = d.Namespace(nvme0)
q = d.Qpair(nvme0, 8)
buf = d.Buffer(4096)
buf.set_dsm_range(0, 8, 8)
# send trim commands
start = time.time()
while time.time()-start < seconds:
nvme0n1.dsm(q, buf, 1).waitdone()
q.delete()
nvme0n1.close()
pcie.close()
def test_ioworker_with_temperature_and_trim(nvme0, nvme0n1):
test_seconds = 10
# start trim process
import multiprocessing
mp = multiprocessing.get_context("spawn")
p = mp.Process(target = subprocess_trim,
args = (nvme0.addr, test_seconds))
p.start()
# start read/write ioworker and admin commands
smart_log = d.Buffer(512, "smart log")
with nvme0n1.ioworker(io_size=256,
lba_random=False,
read_percentage=0,
time=test_seconds):
for i in range(15):
time.sleep(1)
nvme0.getlogpage(0x02, smart_log, 512).waitdone()
ktemp = smart_log.data(2, 1)
from pytemperature import k2c
logging.info("temperature: %0.2f degreeC" % k2c(ktemp))
# wait trim process complete
p.join()
# multiple ioworkers, PCIe, TCP, CPU, performance, ioworker return values
def test_multiple_controllers_and_namespaces(pciaddr):
# address list of the devices to test
    addr_list = ['01:00.0', '02:00.0', '03:00.0', '04:00.0']
    addr_list = [pciaddr, ]  # override: test only the DUT provided by the fixture
test_seconds = 10
# create all controllers and namespace
pcie_list = [d.Pcie(a) for a in addr_list]
nvme_list = [d.Controller(p) for p in pcie_list]
ns_list = [d.Namespace(n) for n in nvme_list]
# create two ioworkers on each namespace
ioworkers = []
for ns in ns_list:
w = ns.ioworker(io_size=8, lba_align=8,
region_start=0, region_end=256*1024*8, # 1GB space
lba_random=False, qdepth=64,
read_percentage=100, time=test_seconds).start()
ioworkers.append(w)
w = ns.ioworker(io_size=8, lba_align=16,
region_start=256*1024*8, region_end=2*256*1024*8,
lba_random=True, qdepth=256,
read_percentage=0, time=test_seconds).start()
ioworkers.append(w)
# collect test results
io_total = 0
for w in ioworkers:
r = w.close()
io_total += (r.io_count_read+r.io_count_nonread)
logging.info("total throughput: %d IOPS" % (io_total/test_seconds))
for n in ns_list:
n.close()
for p in pcie_list:
p.close()
# PCIe, different of power states and resets
def test_power_and_reset(pcie, nvme0, subsystem):
pcie.aspm = 2 # ASPM L1
pcie.power_state = 3 # PCI PM D3hot
pcie.aspm = 0
pcie.power_state = 0
nvme0.reset() # controller reset: CC.EN
nvme0.getfeatures(7).waitdone()
pcie.reset() # PCIe reset: hot reset, TS1, TS2
nvme0.reset() # reset controller after pcie reset
nvme0.getfeatures(7).waitdone()
pcie.flr() # PCIe function level reset
nvme0.reset() # reset controller after pcie reset
nvme0.getfeatures(7).waitdone()
subsystem.reset() # NVMe subsystem reset: NSSR
nvme0.reset() # controller reset: CC.EN
nvme0.getfeatures(7).waitdone()
subsystem.power_cycle(10) # power cycle NVMe device: cold reset
nvme0.reset() # controller reset: CC.EN
nvme0.getfeatures(7).waitdone()
subsystem.poweroff()
subsystem.poweron()
nvme0.reset() # controller reset: CC.EN
nvme0.getfeatures(7).waitdone()
# test parameters, leverage innovations in python community
@pytest.mark.parametrize("io_count", [1, 9])
@pytest.mark.parametrize("lba_count", [1, 8])
@pytest.mark.parametrize("lba_offset", [0, 8])
def test_different_io_size_and_count(nvme0, nvme0n1, qpair,
lba_offset, lba_count, io_count):
    # allocate all DMA buffers for IO commands
bufs = []
for i in range(io_count):
bufs.append(d.Buffer(lba_count*512))
# send and reap all IO command dwords
for i in range(io_count):
nvme0n1.read(qpair, bufs[i], lba_offset, lba_count)
qpair.waitdone(io_count)
# IO commands, fused operations, generic commands
def test_fused_operations(nvme0, nvme0n1):
# create qpair and buffer for IO commands
q = d.Qpair(nvme0, 10)
b = d.Buffer()
# separate compare and write commands
nvme0n1.write(q, b, 8).waitdone()
nvme0n1.compare(q, b, 8).waitdone()
# implement fused compare and write operations with generic commands
# Controller.send_cmd() sends admin commands,
# and Namespace.send_cmd() here sends IO commands.
nvme0n1.send_cmd(5|(1<<8), q, b, 1, 8, 0, 0)
nvme0n1.send_cmd(1|(1<<9), q, b, 1, 8, 0, 0)
q.waitdone(2)
q.delete()
# protocol tests on queue, buffer, PRP, and doorbells
from psd import IOCQ, IOSQ, PRP, PRPList, SQE, CQE
def test_send_cmd_2sq_1cq(nvme0):
# 2 SQ share one CQ
cq = IOCQ(nvme0, 1, 10, PRP())
sq1 = IOSQ(nvme0, 1, 10, PRP(), cqid=1)
sq2 = IOSQ(nvme0, 2, 16, PRP(), cqid=1)
# write lba0, 16K data organized by PRPList
write_cmd = SQE(1, 1) # write to namespace 1
write_cmd.prp1 = PRP() # PRP1 is a 4K page
prp_list = PRPList() # PRPList contains 3 pages
prp_list[0] = PRP()
prp_list[1] = PRP()
prp_list[2] = PRP()
write_cmd.prp2 = prp_list # PRP2 points to the PRPList
write_cmd[10] = 0 # starting LBA
write_cmd[12] = 31 # LBA count: 32, 16K, 4 pages
write_cmd.cid = 123 # verify cid later
# send write commands in both SQ
sq1[0] = write_cmd # fill command dwords in SQ1
write_cmd.cid = 567 # verify cid later
sq2[0] = write_cmd # fill command dwords in SQ2
sq2.tail = 1 # ring doorbell of SQ2 first
time.sleep(0.1) # delay to ring SQ1,
sq1.tail = 1 # so command in SQ2 should complete first
# wait for 2 command completions
while CQE(cq[1]).p == 0: pass
# check first cpl
cqe = CQE(cq[0])
assert cqe.sqid == 2
assert cqe.sqhd == 1
assert cqe.cid == 567
# check second cpl
cqe = CQE(cq[1])
assert cqe.sqid == 1
assert cqe.sqhd == 1
assert cqe.cid == 123
# update cq head doorbell to device
cq.head = 2
# delete all queues
sq1.delete()
sq2.delete()
cq.delete()
def test_sanitize_operations_basic(nvme0, nvme0n1): #L8
if nvme0.id_data(331, 328) == 0: #L9
pytest.skip("sanitize operation is not supported") #L10
logging.info("supported sanitize operation: %d" % nvme0.id_data(331, 328))
nvme0.sanitize().waitdone() #L13
# check sanitize status in log page
buf = d.Buffer(4096) #L16
with pytest.warns(UserWarning, match="AER notification is triggered"):
nvme0.getlogpage(0x81, buf, 20).waitdone() #L17
while buf.data(3, 2) & 0x7 != 1: #L18
time.sleep(1)
nvme0.getlogpage(0x81, buf, 20).waitdone() #L20
progress = buf.data(1, 0)*100//0xffff
logging.info("%d%%" % progress)
def test_buffer_read_write(nvme0, nvme0n1):
buf = d.Buffer(512, 'ascii table') #L2
logging.info("physical address of buffer: 0x%lx" % buf.phys_addr) #L3
for i in range(512):
buf[i] = i%256 #L6
print(buf.dump(128)) #L7
buf = d.Buffer(512, 'random', pvalue=100, ptype=0xbeef) #L15
print(buf.dump())
buf = d.Buffer(512, 'random', pvalue=100, ptype=0xbeef) #L17
print(buf.dump())
qpair = d.Qpair(nvme0, 10)
nvme0n1.write(qpair, buf, 0).waitdone()
nvme0n1.read(qpair, buf, 0).waitdone()
print(buf.dump())
qpair.delete()
@pytest.fixture()
def ncqa(nvme0):
num_of_queue = 0
def test_greater_id(cdw0, status):
nonlocal num_of_queue
num_of_queue = 1+(cdw0&0xffff)
nvme0.getfeatures(7, cb=test_greater_id).waitdone()
logging.info("number of queue: %d" % num_of_queue)
return num_of_queue
def test_create_qpairs(nvme0, nvme0n1, buf, ncqa):
qpair = d.Qpair(nvme0, 1024)
nvme0n1.read(qpair, buf, 0)
qpair.waitdone()
nvme0n1.read(qpair, buf, 0, 8).waitdone()
ql = []
for i in range(ncqa-1):
ql.append(d.Qpair(nvme0, 8))
with pytest.raises(d.QpairCreationError):
ql.append(d.Qpair(nvme0, 8))
with pytest.warns(UserWarning, match="ioworker host ERROR -1: "):
nvme0n1.ioworker(io_size=8, time=1000).start().close()
qpair.delete()
nvme0n1.ioworker(io_size=8, time=1).start().close()
for q in ql:
q.delete()
def test_namespace_multiple(pciaddr, buf):
# create all controllers and namespace
addr_list = [pciaddr, ] # add more DUT BDF here
pcie_list = [d.Pcie(a) for a in addr_list]
for p in pcie_list:
nvmex = d.Controller(p)
qpair = d.Qpair(nvmex, 8)
nvmexn1 = d.Namespace(nvmex)
        # check if the write uncorrectable command is supported
wuecc_support = nvmex.id_data(521, 520) & 0x2
if wuecc_support != 0:
nvmexn1.write_uncorrectable(qpair, 0, 8).waitdone()
with pytest.warns(UserWarning, match="ERROR status: 02/81"):
nvmexn1.read(qpair, buf, 0, 8).waitdone()
nvmexn1.write(qpair, buf, 0, 8).waitdone()
def this_read_cb(dword0, status1):
assert status1>>1 == 0
nvmexn1.write_uncorrectable(qpair, 0, 8)
nvmexn1.read(qpair, buf, 0, 8, cb=this_read_cb).waitdone(2)
def another_read_cb(dword0, status1):
logging.info("dword0: 0x%08x" % dword0)
logging.info("phase bit: %d" % (status1&1))
logging.info("dnr: %d" % ((status1>>15)&1))
logging.info("more: %d" % ((status1>>14)&1))
logging.info("sct: 0x%x" % ((status1>>9)&0x7))
logging.info("sc: 0x%x" % ((status1>>1)&0xff))
with pytest.warns(UserWarning, match="ERROR status: 02/81"):
nvmexn1.read(qpair, buf, 0, 8, cb=another_read_cb).waitdone()
qpair.delete()
nvmexn1.close()
p.close()
@pytest.mark.parametrize("qcount", [1, 1, 2, 4])
def test_ioworker_iops_multiple_queue(nvme0n1, qcount):
nvme0n1.format(512)
l = []
io_total = 0
for i in range(qcount):
a = nvme0n1.ioworker(io_size=8, lba_align=8,
region_start=0, region_end=256*1024*8, # 1GB space
lba_random=False, qdepth=16,
read_percentage=100, time=10).start()
l.append(a)
for a in l:
r = a.close()
io_total += r.io_count_read
logging.info("Q %d IOPS: %.3fK" % (qcount, io_total/10000))
@pytest.mark.parametrize("iops", [100, 10*1000, 1000*1000])
def test_ioworker_fixed_iops(nvme0n1, iops):
output_io_per_second = []
nvme0n1.ioworker(io_size=8,
lba_random=True,
read_percentage=100,
iops=iops,
output_io_per_second=output_io_per_second,
time=10).start().close()
logging.info(output_io_per_second)
def test_dsm_trim(nvme0: d.Controller, nvme0n1: d.Namespace, qpair: d.Qpair):
trimbuf = d.Buffer(4096)
# DUT info
logging.info("model number: %s" % nvme0.id_data(63, 24, str))
logging.info("firmware revision: %s" % nvme0.id_data(71, 64, str))
# single range
start_lba = 0
lba_count = 8*1024
trimbuf.set_dsm_range(0, start_lba, lba_count)
nvme0n1.dsm(qpair, trimbuf, 1, attribute=0x4).waitdone()
# multiple range
lba_count = lba_count//256
for i in range(256):
trimbuf.set_dsm_range(i, start_lba+i*lba_count, lba_count)
nvme0n1.dsm(qpair, trimbuf, 256).waitdone()
def test_ioworker_performance(nvme0n1):
import matplotlib.pyplot as plt
output_io_per_second = []
percentile_latency = dict.fromkeys([90, 99, 99.9, 99.99, 99.999])
nvme0n1.ioworker(io_size=8,
lba_random=True,
read_percentage=100,
output_io_per_second=output_io_per_second,
output_percentile_latency=percentile_latency,
time=10).start().close()
logging.info(output_io_per_second)
logging.info(percentile_latency)
X = []
Y = []
for _, k in enumerate(percentile_latency):
X.append(k)
Y.append(percentile_latency[k])
plt.plot(X, Y)
plt.xscale('log')
plt.yscale('log')
#plt.show()
def test_ioworker_jedec_enterprise_workload(nvme0n1):
distribution = [1000]*5 + [200]*15 + [25]*80
iosz_distribution = {1: 4,
2: 1,
3: 1,
4: 1,
5: 1,
6: 1,
7: 1,
8: 67,
16: 10,
32: 7,
64: 3,
128: 3}
nvme0n1.ioworker(io_size=iosz_distribution,
distribution = distribution,
lba_random=True,
read_percentage=0,
ptype=0xbeef, pvalue=100,
time=10).start().close()
def test_power_on_off(nvme0):
def poweron():
logging.info("poweron")
pass
def poweroff():
logging.info("poweroff")
pass
subsystem = d.Subsystem(nvme0, poweron, poweroff)
subsystem = d.Subsystem(nvme0)
subsystem.poweroff()
subsystem.poweron()
nvme0.reset()
def test_init_nvme_customerized(pcie):
def nvme_init(nvme0):
logging.info("user defined nvme init")
nvme0[0x14] = 0
while not (nvme0[0x1c]&0x1) == 0: pass
# 3. set admin queue registers
nvme0.init_adminq()
# 4. set register cc
nvme0[0x14] = 0x00460000
# 5. enable cc.en
nvme0[0x14] = 0x00460001
# 6. wait csts.rdy to 1
while not (nvme0[0x1c]&0x1) == 1: pass
# 7. identify controller
nvme0.identify(d.Buffer(4096)).waitdone()
# 8. create and identify all namespace
nvme0.init_ns()
# 9. set/get num of queues, 2 IO queues
nvme0.setfeatures(0x7, cdw11=0x00010001).waitdone()
nvme0.init_queues(nvme0.getfeatures(0x7).waitdone())
# 10. send out all aer
aerl = nvme0.id_data(259)+1
for i in range(aerl):
nvme0.aer()
# 1. set pcie registers
pcie.aspm = 0
# 2. disable cc.en and wait csts.rdy to 0
nvme0 = d.Controller(pcie, nvme_init_func=nvme_init)
# test with ioworker
nvme0n1 = d.Namespace(nvme0)
qpair = d.Qpair(nvme0, 10)
nvme0n1.ioworker(time=1).start().close()
qpair2 = d.Qpair(nvme0, 10)
with pytest.raises(d.QpairCreationError):
qpair3 = d.Qpair(nvme0, 10)
qpair.delete()
qpair2.delete()
nvme0n1.close()
def test_ioworker_op_dict_trim(nvme0n1):
cmdlog_list = [None]*10000
op_percentage = {2: 40, 9: 30, 1: 30}
nvme0n1.ioworker(io_size=8,
io_count=10000,
op_percentage=op_percentage,
output_cmdlog_list=cmdlog_list).start().close()
op_log = [c[2] for c in cmdlog_list]
for op in (2, 9, 1):
logging.info("occurance of %d: %d" % (op, op_log.count(op)))
def test_ioworker_io_sequence_read_write_trim_flush_uncorr(nvme0n1):
cmd_seq = [(000000, 1, 0, 8), #L2
(200000, 2, 3, 1),
(400000, 1, 2, 1),
(600000, 9, 1, 1),
(800000, 4, 0, 8),
(1000000, 0, 0, 0)]
cmdlog_list = [None]*len(cmd_seq) #L8
r = nvme0n1.ioworker(io_sequence=cmd_seq, #L10
output_cmdlog_list=cmdlog_list).start().close()
assert r.mseconds > 1000 #L13
assert cmdlog_list[-1][2] == 0 #L14
assert cmdlog_list[-2][2] == 4
assert cmdlog_list[-3][2] == 9
assert cmdlog_list[-4][2] == 1
assert cmdlog_list[-5][2] == 2
assert cmdlog_list[-6][2] == 1
def test_aer_with_multiple_sanitize(nvme0, nvme0n1, buf): #L8
if nvme0.id_data(331, 328) == 0: #L9
pytest.skip("sanitize operation is not supported") #L10
logging.info("supported sanitize operation: %d" % nvme0.id_data(331, 328))
for i in range(3):
nvme0.sanitize().waitdone() #L13
# check sanitize status in log page
with pytest.warns(UserWarning, match="AER notification is triggered"):
nvme0.getlogpage(0x81, buf, 20).waitdone() #L17
while buf.data(3, 2) & 0x7 != 1: #L18
time.sleep(1)
nvme0.getlogpage(0x81, buf, 20).waitdone() #L20
progress = buf.data(1, 0)*100//0xffff
logging.info("%d%%" % progress)
def test_verify_partial_namespace(nvme0):
region_end=1024*1024*1024//512 # 1GB space
nvme0n1 = d.Namespace(nvme0, 1, region_end)
assert True == nvme0n1.verify_enable(True)
nvme0n1.format()
nvme0n1.ioworker(io_size=8,
lba_random=True,
region_end=region_end,
read_percentage=50,
time=30).start().close()
nvme0n1.close()


def test_jsonrpc_list_qpairs(pciaddr):
    import json
    import socket

    # create the jsonrpc client
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect('/var/tmp/pynvme.sock')

    def jsonrpc_call(sock, method, params=[]):
        # create and send the command
        req = {}
        req['id'] = 1234567890
        req['jsonrpc'] = '2.0'
        req['method'] = method
        req['params'] = params
        sock.sendall(json.dumps(req).encode('ascii'))

        # receive the result
        resp = json.loads(sock.recv(4096).decode('ascii'))
        assert resp['id'] == 1234567890
        assert resp['jsonrpc'] == '2.0'
        return resp['result']

    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 0

    # create controller and admin queue
    pcie = d.Pcie(pciaddr)
    nvme0 = d.Controller(pcie)
    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 1
    assert result[0]['qid']-1 == 0

    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 1
    assert result[0]['qid']-1 == 0

    q1 = d.Qpair(nvme0, 8)
    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 2
    assert result[0]['qid']-1 == 0
    assert result[1]['qid']-1 == 1

    q2 = d.Qpair(nvme0, 8)
    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 3
    assert result[0]['qid']-1 == 0
    assert result[1]['qid']-1 == 1
    assert result[2]['qid']-1 == 2

    q1.delete()
    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 2
    assert result[0]['qid']-1 == 0
    assert result[1]['qid']-1 == 2

    q2.delete()
    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 1
    assert result[0]['qid']-1 == 0

    pcie.close()
    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 0


def test_powercycle_with_qpair(nvme0, nvme0n1, buf, subsystem):
    qpair = d.Qpair(nvme0, 16)
    nvme0n1.write(qpair, buf, 0).waitdone()
    nvme0n1.read(qpair, buf, 0).waitdone()

    # delete qpair before power cycle, and then reset controller, recreate qpair
    qpair.delete()
    subsystem.power_cycle(10)
    nvme0.reset()

    qpair = d.Qpair(nvme0, 16)
    nvme0n1.read(qpair, buf, 0).waitdone()
    qpair.delete()


def test_reset_time(pcie):
    def nvme_init(nvme0):
        logging.info("user defined nvme init")
        nvme0[0x14] = 0
        while not (nvme0[0x1c]&0x1) == 0: pass
        logging.info(time.time())

        # 3. set admin queue registers
        nvme0.init_adminq()
        logging.info(time.time())

        # 5. enable cc.en
        nvme0[0x14] = 0x00460001

        # 6. wait csts.rdy to 1
        while not (nvme0[0x1c]&0x1) == 1: pass
        logging.info(time.time())

        # 7. identify controller
        nvme0.identify(d.Buffer(4096)).waitdone()
        logging.info(time.time())
        nvme0.setfeatures(0x7, cdw11=0x00ff00ff).waitdone()
        nvme0.init_queues(nvme0.getfeatures(0x7).waitdone())

    logging.info("1: nvme init")
    logging.info(time.time())
    nvme0 = d.Controller(pcie, nvme_init_func=nvme_init)
    subsystem = d.Subsystem(nvme0)
    qpair = d.Qpair(nvme0, 10)
    qpair2 = d.Qpair(nvme0, 10)
    qpair3 = d.Qpair(nvme0, 10)
    qpair.delete()
    qpair2.delete()
    qpair3.delete()

    logging.info("2: nvme reset")
    logging.info(time.time())
    nvme0.reset()

    logging.info("3: power cycle")
    subsystem.poweroff()
    logging.info(time.time())
    subsystem.poweron()
    nvme0.reset()
@pytest.mark.parametrize("ps", range(1, 5, 1))
def test_power_state_transition(pcie, nvme0, nvme0n1, qpair, buf, ps):
# for accurate sleep delay
import ctypes
libc = ctypes.CDLL('libc.so.6')
# write data to LBA 0x5a
nvme0n1.write(qpair, buf, 0x5a).waitdone()
# enable ASPM and get original power state
pcie.aspm = 2
orig_ps = nvme0.getfeatures(0x2).waitdone()
# test with delay 1us-1ms
for i in range(1000):
# fix on power state 0 before test
nvme0.setfeatures(0x2, cdw11=0).waitdone()
libc.usleep(1000)
# change power status
nvme0.setfeatures(0x2, cdw11=ps)
libc.usleep(i)
# read lba 0x5a and verify data
nvme0n1.read(qpair, buf, 0x5a).waitdone()
assert buf[0] == 0x5a
# consume the cpl of setfeatures above
nvme0.waitdone() # for setfeautres above
# recover to original power state
pcie.aspm = 0
nvme0.setfeatures(0x2, cdw11=orig_ps).waitdone()
@pytest.mark.parametrize("nsid", [0, 1, 0xffffffff])
def test_getlogpage_nsid(nvme0, buf, nsid):
logging.info("model name: %s, nsid %d" % (nvme0.id_data(63, 24, str), nsid))
nvme0.getlogpage(0xCA, buf, 512, nsid=nsid).waitdone()
nvme0.getlogpage(0x02, buf, 512, nsid=nsid).waitdone()


def test_ioworker_with_temperature(nvme0, nvme0n1, buf):
    with nvme0n1.ioworker(io_size=256,
                          time=30,
                          op_percentage={0: 10,   # flush
                                         2: 60,   # read
                                         9: 30}), \
         nvme0n1.ioworker(io_size=8,
                          time=30,
                          op_percentage={0: 10,   # flush
                                         9: 10,   # trim
                                         1: 80}):  # write
        for i in range(40):
            time.sleep(1)
            nvme0.getlogpage(0x02, buf, 512).waitdone()
            ktemp = buf.data(2, 1)

            from pytemperature import k2c
            logging.info("temperature: %0.2f degreeC" % k2c(ktemp))


def test_ioworker_jedec_enterprise_workload_512(nvme0n1):
    distribution = [1000]*5 + [200]*15 + [25]*80
    iosz_distribution = {1: 4,
                         2: 1,
                         3: 1,
                         4: 1,
                         5: 1,
                         6: 1,
                         7: 1,
                         8: 67,
                         16: 10,
                         32: 7,
                         64: 3,
                         128: 3}
    output_percentile_latency = dict.fromkeys([99, 99.99, 99.9999])

    nvme0n1.ioworker(io_size=iosz_distribution,
                     lba_random=True,
                     qdepth=128,
                     distribution=distribution,
                     read_percentage=0,
                     ptype=0xbeef, pvalue=100,
                     time=30,
                     output_percentile_latency=output_percentile_latency,
                     ).start().close()
    logging.info(output_percentile_latency)
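
# Editorial note (an assumption, not part of the original file): the JEDEC
# JESD219 enterprise workload skews accesses so that roughly 50% of I/O
# targets the first 5% of the LBA space, 30% the next 15%, and 20% the
# remaining 80%. The `distribution` list above encodes this as 100 buckets
# whose weights sum to 10000, which appears to be pynvme's convention:
assert sum([1000]*5 + [200]*15 + [25]*80) == 10000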


# ===========================================================================
# Source: sodafoundation/delfin (license: Apache-2.0)
# File:   delfin/tests/unit/drivers/hpe/hpe_msa/test_constans.py
# ===========================================================================

LIST_CONTROLLERS = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show controllers">
<COMP G="0" P="1"/>
<OBJECT basetype="controllers" name="controllers">
<PROPERTY name="durable-id" >controller_a</PROPERTY>
<PROPERTY name="controller-id">A</PROPERTY>
<PROPERTY name="serial-number">7CE539M591</PROPERTY>
<PROPERTY name="cache-memory-size">4096</PROPERTY>
<PROPERTY name="system-memory-size">6144</PROPERTY>
<PROPERTY name="sc-fw">GLS210R04-01</PROPERTY>
<PROPERTY name="sc-cpu-type">Gladden</PROPERTY>
<PROPERTY name="cpu_count">1</PROPERTY>
<PROPERTY name="health">OK</PROPERTY>
<PROPERTY name="position">Top</PROPERTY>
</OBJECT>
<COMP G="0" P="13"/>
<OBJECT basetype="controllers" name="controllers">
<PROPERTY name="durable-id">controller_b</PROPERTY>
<PROPERTY name="controller-id">B</PROPERTY>
<PROPERTY name="serial-number">7CE539M591</PROPERTY>
<PROPERTY name="cache-memory-size">4096</PROPERTY>
<PROPERTY name="system-memory-size">6144</PROPERTY>
<PROPERTY name="sc-fw">GLS210R04-01</PROPERTY>
<PROPERTY name="sc-cpu-type">Gladden</PROPERTY>
<PROPERTY name="cpu_count">1</PROPERTY>
<PROPERTY name="health">OK</PROPERTY>
<PROPERTY name="position">Bottom</PROPERTY>
</OBJECT>
</RESPONSE>
"""
LIST_SYSTEM = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show system">
<COMP G="0" P="1"/>
<OBJECT basetype="system" >
<PROPERTY name="system-name">msa2040</PROPERTY>
<PROPERTY name="midplane-serial-number">00C0FF26DCB0</PROPERTY>
<PROPERTY name="system-location">Uninitialized Location</PROPERTY>
<PROPERTY name="vendor-name">HP</PROPERTY>
<PROPERTY name="product-id">MSA 2040 SAN</PROPERTY>
<PROPERTY name="product-brand" >MSA Storage</PROPERTY>
<PROPERTY name="health">OK</PROPERTY>
</OBJECT>
</RESPONSE>
"""
LIST_VISION = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show version">
<COMP G="0" P="1"/>
<OBJECT basetype="versions">
<PROPERTY name="bundle-version">GL210R004</PROPERTY>
</OBJECT>
</RESPONSE>
"""
LIST_PORTS = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show ports">
<COMP G="0" P="1"/>
<OBJECT basetype="port" name="ports" oid="1" format="rows">
<PROPERTY name="durable-id">hostport_A1</PROPERTY>
<PROPERTY name="port">A1</PROPERTY>
<PROPERTY name="configured-speed">8Gb</PROPERTY>
<PROPERTY name="port-type">FC</PROPERTY>
<PROPERTY name="target-id">207000c0ff26dcb0</PROPERTY>
<PROPERTY name="health">N/A</PROPERTY>
</OBJECT>
<COMP G="1" P="2"/>
<OBJECT basetype="fc-port" name="port-details" oid="2" format="rows">
<PROPERTY name="sfp-supported-speeds">4G,8G</PROPERTY>
</OBJECT>
<COMP G="0" P="3"/>
<OBJECT basetype="port" name="ports" oid="3" format="rows">
<PROPERTY name="durable-id">hostport_A2</PROPERTY>
<PROPERTY name="port">A2</PROPERTY>
<PROPERTY name="target-id">217000c0ff26dcb0</PROPERTY>
<PROPERTY name="port-type">FC</PROPERTY>
<PROPERTY name="configured-speed">8Gb</PROPERTY>
<PROPERTY name="health">N/A</PROPERTY>
</OBJECT>
<COMP G="3" P="4"/>
<OBJECT basetype="fc-port" name="port-details" oid="4" format="rows">
<PROPERTY name="sfp-supported-speeds">4G,8G</PROPERTY>
</OBJECT>
<COMP G="0" P="5"/>
<OBJECT basetype="port" name="ports" oid="5" format="rows">
<PROPERTY name="durable-id">hostport_A3</PROPERTY>
<PROPERTY name="port">A3</PROPERTY>
<PROPERTY name="port-type">iSCSI</PROPERTY>
<PROPERTY name="health">N/A</PROPERTY>
</OBJECT>
<COMP G="5" P="6"/>
<OBJECT basetype="iscsi-port" name="port-details" oid="6" format="pairs">
<PROPERTY name="ip-address">0.0.0.0</PROPERTY>
<PROPERTY name="mac-address">00:C0:FF:35:BD:64</PROPERTY>
</OBJECT>
<COMP G="0" P="7"/>
<OBJECT basetype="port" name="ports" oid="7" format="rows">
<PROPERTY name="durable-id" >hostport_A4</PROPERTY>
<PROPERTY name="port">A4</PROPERTY>
<PROPERTY name="port-type">iSCSI</PROPERTY>
<PROPERTY name="configured-speed">Auto</PROPERTY>
<PROPERTY name="health">N/A</PROPERTY>
</OBJECT>
<COMP G="7" P="8"/>
<OBJECT basetype="iscsi-port" name="port-details" oid="8" format="pairs">
<PROPERTY name="ip-address">0.0.0.0</PROPERTY>
<PROPERTY name="mac-address">00:C0:FF:35:BD:65</PROPERTY>
</OBJECT>
<COMP G="0" P="9"/>
<OBJECT basetype="port" name="ports" oid="9" format="rows">
<PROPERTY name="durable-id">hostport_B1</PROPERTY>
<PROPERTY name="port">B1</PROPERTY>
<PROPERTY name="target-id">247000c0ff26dcb0</PROPERTY>
<PROPERTY name="port-type">FC</PROPERTY>
<PROPERTY name="configured-speed">8Gb</PROPERTY>
<PROPERTY name="health">N/A</PROPERTY>
</OBJECT>
<COMP G="9" P="10"/>
<OBJECT basetype="fc-port" name="port-details" oid="10" format="rows">
<PROPERTY name="sfp-supported-speeds">4G,8G</PROPERTY>
</OBJECT>
<COMP G="0" P="11"/>
<OBJECT basetype="port" name="ports" oid="11" format="rows">
<PROPERTY name="durable-id">hostport_B2</PROPERTY>
<PROPERTY name="port">B2</PROPERTY>
<PROPERTY name="port-type">FC</PROPERTY>
<PROPERTY name="target-id">257000c0ff26dcb0</PROPERTY>
<PROPERTY name="configured-speed">8Gb</PROPERTY>
<PROPERTY name="health">N/A</PROPERTY>
</OBJECT>
<COMP G="11" P="12"/>
<OBJECT basetype="fc-port" name="port-details" oid="12" format="rows">
<PROPERTY name="sfp-supported-speeds">4G,8G</PROPERTY>
</OBJECT>
<COMP G="0" P="13"/>
<OBJECT basetype="port" name="ports" oid="13" format="rows">
<PROPERTY name="durable-id">hostport_B3</PROPERTY>
<PROPERTY name="port">B3</PROPERTY>
<PROPERTY name="port-type">iSCSI</PROPERTY>
<PROPERTY name="configured-speed">Auto</PROPERTY>
<PROPERTY name="health">N/A</PROPERTY>
</OBJECT>
<COMP G="13" P="14"/>
<OBJECT basetype="iscsi-port" name="port-details" oid="14" format="pairs">
<PROPERTY name="ip-address">0.0.0.0</PROPERTY>
<PROPERTY name="mac-address">00:C0:FF:35:BA:BC</PROPERTY>
</OBJECT>
<COMP G="0" P="15"/>
<OBJECT basetype="port" name="ports" oid="15" format="rows">
<PROPERTY name="durable-id">hostport_B4</PROPERTY>
<PROPERTY name="port">B4</PROPERTY>
<PROPERTY name="port-type">iSCSI</PROPERTY>
<PROPERTY name="configured-speed">Auto</PROPERTY>
<PROPERTY name="health">N/A</PROPERTY>
</OBJECT>
<COMP G="15" P="16"/>
<OBJECT basetype="iscsi-port" name="port-details" oid="16" format="pairs">
<PROPERTY name="ip-address">0.0.0.0</PROPERTY>
<PROPERTY name="mac-address">00:C0:FF:35:BA:BD</PROPERTY>
</OBJECT>
</RESPONSE>
"""
LIST_POOLS = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show pools">
<COMP G="0" P="1"/>
<OBJECT basetype="pools" name="pools" oid="1" format="rows">
<PROPERTY name="name">A</PROPERTY>
<PROPERTY name="serial-number">00c0ff26c4ea0000d980546101000000</PROPERTY>
<PROPERTY name="total-size">1196.8GB</PROPERTY>
<PROPERTY name="total-avail">1196.8GB</PROPERTY>
<PROPERTY name="health">OK</PROPERTY>
</OBJECT>
</RESPONSE>
"""
LIST_VOLUMES = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show volumes">
<COMP G="0" P="1"/>
<OBJECT basetype="volumes" name="volume" format="rows">
<PROPERTY name="durable-id">V1</PROPERTY>
<PROPERTY name="volume-name">Vol0001</PROPERTY>
<PROPERTY name="size">99.9GB</PROPERTY>
<PROPERTY name="allocated-size">0B</PROPERTY>
<PROPERTY name="total-size">99.9GB</PROPERTY>
<PROPERTY name="blocks">195305472</PROPERTY>
<PROPERTY name="health">OK</PROPERTY>
<PROPERTY name="wwn">600C0FF00026C4EAFA80546101000000</PROPERTY>
<PROPERTY name="volume-type">base</PROPERTY>
</OBJECT>
<COMP G="0" P="2"/>
<OBJECT basetype="volumes" name="volume" format="rows">
<PROPERTY name="durable-id">V2</PROPERTY>
<PROPERTY name="volume-name">Vol0002</PROPERTY>
<PROPERTY name="allocated-size">0B</PROPERTY>
<PROPERTY name="total-size">99.9GB</PROPERTY>
<PROPERTY name="blocks">195305472</PROPERTY>
<PROPERTY name="health">OK</PROPERTY>
<PROPERTY name="wwn">600C0FF00026C4EA0A81546101000000</PROPERTY>
<PROPERTY name="volume-type">base</PROPERTY>
</OBJECT>
</RESPONSE>
"""
LIST_DISKS = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show disks">
<COMP G="0" P="1"/>
<OBJECT basetype="drives" name="drive" oid="1" format="rows">
<PROPERTY name="durable-id">disk_01.01</PROPERTY>
<PROPERTY name="location" >1.1</PROPERTY>
<PROPERTY name="port">0</PROPERTY>
<PROPERTY name="serial-number">6SL9CD560000N51404EF</PROPERTY>
<PROPERTY name="vendor">SEAGATE</PROPERTY>
<PROPERTY name="model">ST3600057SS</PROPERTY>
<PROPERTY name="description">SAS</PROPERTY>
<PROPERTY name="type">SAS</PROPERTY>
<PROPERTY name="rpm" >15</PROPERTY>
<PROPERTY name="size">600.1GB</PROPERTY>
<PROPERTY name="health">OK</PROPERTY>
<PROPERTY name="disk-group">dgA01</PROPERTY>
</OBJECT>
<COMP G="0" P="2"/>
<OBJECT basetype="drives" name="drive" oid="2" format="rows">
<PROPERTY name="durable-id">disk_01.02</PROPERTY>
<PROPERTY name="location">1.2</PROPERTY>
<PROPERTY name="serial-number">6SL7X4RE0000B42601SF</PROPERTY>
<PROPERTY name="vendor">SEAGATE</PROPERTY>
<PROPERTY name="model">ST3600057SS</PROPERTY>
<PROPERTY name="description">SAS</PROPERTY>
<PROPERTY name="type">SAS</PROPERTY>
<PROPERTY name="rpm">15</PROPERTY>
<PROPERTY name="size">600.1GB</PROPERTY>
<PROPERTY name="health">OK</PROPERTY>
<PROPERTY name="disk-group">dgA01</PROPERTY>
</OBJECT>
<COMP G="0" P="3"/>
<OBJECT basetype="drives" name="drive" oid="3" format="rows">
<PROPERTY name="durable-id">disk_01.03</PROPERTY>
<PROPERTY name="location">1.3</PROPERTY>
<PROPERTY name="serial-number">6SL9QR5T0000N52120SK</PROPERTY>
<PROPERTY name="vendor">SEAGATE</PROPERTY>
<PROPERTY name="description">SAS</PROPERTY>
<PROPERTY name="model">ST3600057SS</PROPERTY>
<PROPERTY name="rpm">15</PROPERTY>
<PROPERTY name="size">600.1GB</PROPERTY>
<PROPERTY name="health">OK</PROPERTY>
<PROPERTY name="disk-group">dgA01</PROPERTY>
</OBJECT>
<COMP G="0" P="4"/>
<OBJECT basetype="drives" name="drive" oid="4" format="rows">
<PROPERTY name="durable-id">disk_01.04</PROPERTY>
<PROPERTY name="port">0</PROPERTY>
<PROPERTY name="location">1.4</PROPERTY>
<PROPERTY name="description">SAS</PROPERTY>
<PROPERTY name="serial-number">3SL0WT7G00009051YBTF</PROPERTY>
<PROPERTY name="vendor">SEAGATE</PROPERTY>
<PROPERTY name="model">ST3600057SS</PROPERTY>
<PROPERTY name="rpm" >15</PROPERTY>
<PROPERTY name="size">600.1GB</PROPERTY>
<PROPERTY name="health">OK</PROPERTY>
<PROPERTY name="disk-group">dgA01</PROPERTY>
</OBJECT>
</RESPONSE>
"""
LIST_ERROR = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show events error">
<COMP G="0" P="13"/>
<OBJECT basetype="events" name="event" oid="1" format="packed">
<PROPERTY name="time-stamp">2021-11-12 08:16:20</PROPERTY>
<PROPERTY name="time-stamp-numeric" >1636704980</PROPERTY>
<PROPERTY name="event-code">557</PROPERTY>
<PROPERTY name="event-id" >A891</PROPERTY>
<PROPERTY name="model">MSA 2040 SAN</PROPERTY>
<PROPERTY name="serial-number">00C0FF26C236</PROPERTY>
<PROPERTY name="controller" >A</PROPERTY>
<PROPERTY name="controller-numeric">1</PROPERTY>
<PROPERTY name="severity">ERROR</PROPERTY>
<PROPERTY name="severity-numeric">2</PROPERTY>
<PROPERTY name="message" >An Enclosure Management Processor(EMP)</PROPERTY>
<PROPERTY name="additional-information">Management</PROPERTY>
<PROPERTY name="recommended-action">Management</PROPERTY>
</OBJECT>
</RESPONSE>
"""
LIST_HOST_INITIATORS = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show host-groups">
<COMP G="2" P="3"/>
<OBJECT basetype="initiator" name="initiator" oid="3" format="rows">
<PROPERTY name="durable-id">I2</PROPERTY>
<PROPERTY name="nickname">FC-port1</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped">No</PROPERTY>
<PROPERTY name="profile">HP-UX</PROPERTY>
<PROPERTY name="host-bus-type">FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" key="true">21000024ff3dfed1</PROPERTY>
<PROPERTY name="host-id">NOHOST</PROPERTY>
<PROPERTY name="host-key" >HU</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b">0</PROPERTY>
</OBJECT>
<COMP G="5" P="6"/>
<OBJECT basetype="initiator" name="initiator" oid="6" format="rows">
<PROPERTY name="durable-id">I1</PROPERTY>
<PROPERTY name="nickname">FC-port2</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped">Yes</PROPERTY>
<PROPERTY name="profile">HP-UX</PROPERTY>
<PROPERTY name="host-bus-type">FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" key="true">10000090fa13870e</PROPERTY>
<PROPERTY name="host-id">00c0ff26c2360000e2399f6101010000</PROPERTY>
<PROPERTY name="host-key" >H1</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b">0</PROPERTY>
</OBJECT>
<COMP G="5" P="7"/>
<OBJECT basetype="initiator" name="initiator" oid="7" format="rows">
<PROPERTY name="durable-id">I0</PROPERTY>
<PROPERTY name="nickname">FC-port3</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped">Yes</PROPERTY>
<PROPERTY name="profile">HP-UX</PROPERTY>
<PROPERTY name="host-bus-type">FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" key="true">10000090fa13870f</PROPERTY>
<PROPERTY name="host-id">00c0ff26c2360000e2399f6101010000</PROPERTY>
<PROPERTY name="host-key" >H1</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b">0</PROPERTY>
</OBJECT>
<COMP G="9" P="10"/>
<OBJECT basetype="initiator" name="initiator" oid="10" format="rows">
<PROPERTY name="durable-id">I6</PROPERTY>
<PROPERTY name="nickname">rac01_01</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped">Yes</PROPERTY>
<PROPERTY name="profile">Standard</PROPERTY>
<PROPERTY name="host-bus-type">FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" key="true">500143801875548e</PROPERTY>
<PROPERTY name="host-id">00c0ff26c4ea0000057f245b01010000</PROPERTY>
<PROPERTY name="host-key" >H4</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b">0</PROPERTY>
</OBJECT>
<COMP G="9" P="11"/>
<OBJECT basetype="initiator" name="initiator" oid="11" format="rows">
<PROPERTY name="durable-id">I5</PROPERTY>
<PROPERTY name="nickname">rac01_02</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped">Yes</PROPERTY>
<PROPERTY name="profile">Standard</PROPERTY>
<PROPERTY name="host-bus-type">FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" key="true">5001438012097ed6</PROPERTY>
<PROPERTY name="host-id">00c0ff26c4ea0000057f245b01010000</PROPERTY>
<PROPERTY name="host-key" >H4</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b">0</PROPERTY>
</OBJECT>
<COMP G="12" P="13"/>
<OBJECT basetype="initiator" name="initiator" oid="13" format="rows">
<PROPERTY name="durable-id">I3</PROPERTY>
<PROPERTY name="nickname">rac02_01</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped">Yes</PROPERTY>
<PROPERTY name="profile">Standard</PROPERTY>
<PROPERTY name="host-bus-type">FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" key="true">50014380029ceb58</PROPERTY>
<PROPERTY name="host-id">00c0ff26c4ea0000f77f245b01010000</PROPERTY>
<PROPERTY name="host-key" >H3</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b">0</PROPERTY>
</OBJECT>
<COMP G="12" P="14"/>
<OBJECT basetype="initiator" name="initiator" oid="14" format="rows">
<PROPERTY name="durable-id">I4</PROPERTY>
<PROPERTY name="nickname">rac02_02</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped">No</PROPERTY>
<PROPERTY name="profile">Standard</PROPERTY>
<PROPERTY name="host-bus-type">FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" key="true">500143801209031c</PROPERTY>
<PROPERTY name="host-id">00c0ff26c4ea0000f77f245b01010000</PROPERTY>
<PROPERTY name="host-key" >H3</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b">0</PROPERTY>
</OBJECT>
<COMP G="15" P="16"/>
<OBJECT basetype="initiator" name="initiator" oid="16" format="rows">
<PROPERTY name="durable-id">I2</PROPERTY>
<PROPERTY name="nickname">FC-port1</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped">No</PROPERTY>
<PROPERTY name="profile">HP-UX</PROPERTY>
<PROPERTY name="host-bus-type">FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" key="true">21000024ff3dfed1</PROPERTY>
<PROPERTY name="host-id">NOHOST</PROPERTY>
<PROPERTY name="host-key" >HU</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b">0</PROPERTY>
</OBJECT>
</RESPONSE>
"""
LIST_HOST_GROUPS = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show host-groups">
<COMP G="0" P="1"/>
<OBJECT basetype="host-group" name="host-group" oid="1" format="rows">
<PROPERTY name="durable-id" >HGU</PROPERTY>
<PROPERTY name="name">-ungrouped-</PROPERTY>
<PROPERTY name="serial-number" >UNGROUPEDHOSTS</PROPERTY>
<PROPERTY name="member-count" >0</PROPERTY>
</OBJECT>
<COMP G="1" P="2"/> <OBJECT basetype="host" name="host" oid="2" format="rows">
<PROPERTY name="durable-id">HU</PROPERTY>
<PROPERTY name="name">-nohost-</PROPERTY>
<PROPERTY name="serial-number">NOHOST</PROPERTY>
<PROPERTY name="member-count" >0</PROPERTY>
<PROPERTY name="host-group">UNGROUPEDHOSTS</PROPERTY>
<PROPERTY name="group-key" >HGU</PROPERTY>
</OBJECT>
<COMP G="2" P="3"/>
<OBJECT basetype="initiator" name="initiator" oid="3" format="rows">
<PROPERTY name="durable-id" >I2</PROPERTY>
<PROPERTY name="nickname" >FC-port1</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped" >No</PROPERTY>
<PROPERTY name="profile" >HP-UX</PROPERTY>
<PROPERTY name="host-bus-type" >FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" >21000024ff3dfed1</PROPERTY>
<PROPERTY name="host-id" >NOHOST</PROPERTY>
<PROPERTY name="host-key" >HU</PROPERTY>
<PROPERTY name="host-port-bits-a" >0</PROPERTY>
<PROPERTY name="host-port-bits-b" >0</PROPERTY>
</OBJECT>
<COMP G="0" P="4"/>
<OBJECT basetype="host-group" name="host-group" oid="4" format="rows">
<PROPERTY name="durable-id" >HG0</PROPERTY>
<PROPERTY name="name">HostGroup1</PROPERTY>
<PROPERTY name="serial-number" >00c0ff26c2360000223a9f6101010000</PROPERTY>
<PROPERTY name="member-count">1</PROPERTY>
</OBJECT>
<COMP G="4" P="5"/> <OBJECT basetype="host" name="host" oid="5" format="rows">
<PROPERTY name="durable-id" >H1</PROPERTY>
<PROPERTY name="name">Host1</PROPERTY>
<PROPERTY name="serial-number" >00c0ff26c2360000e2399f6101010000</PROPERTY>
<PROPERTY name="member-count">2</PROPERTY>
<PROPERTY name="host-group">00c0ff26c2360000223a9f6101010000</PROPERTY>
<PROPERTY name="group-key">HG0</PROPERTY>
</OBJECT>
<COMP G="5" P="6"/>
<OBJECT basetype="initiator" name="initiator" oid="6" format="rows">
<PROPERTY name="durable-id" >I1</PROPERTY>
<PROPERTY name="nickname" >FC-port2</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped" >Yes</PROPERTY>
<PROPERTY name="profile" >HP-UX</PROPERTY>
<PROPERTY name="host-bus-type" >FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" >10000090fa13870e</PROPERTY>
<PROPERTY name="host-id" >00c0ff26c2360000e2399f6101010000</PROPERTY>
<PROPERTY name="host-key" >H1</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b" >0</PROPERTY>
</OBJECT>
<COMP G="5" P="7"/>
<OBJECT basetype="initiator" name="initiator" oid="7" format="rows">
<PROPERTY name="durable-id" >I0</PROPERTY>
<PROPERTY name="nickname" >FC-port3</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped" >Yes</PROPERTY>
<PROPERTY name="profile" >HP-UX</PROPERTY>
<PROPERTY name="host-bus-type" >FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" >10000090fa13870f</PROPERTY>
<PROPERTY name="host-id" >00c0ff26c2360000e2399f6101010000</PROPERTY>
<PROPERTY name="host-key" >H1</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b" >0</PROPERTY>
</OBJECT>
<COMP G="0" P="8"/>
<OBJECT basetype="host-group" name="host-group" oid="8" format="rows">
<PROPERTY name="durable-id" >HG2</PROPERTY>
<PROPERTY name="name" >rac</PROPERTY>
<PROPERTY name="serial-number">00c0ff26c4ea00008c81245b01010000</PROPERTY>
<PROPERTY name="member-count" >2</PROPERTY>
</OBJECT>
<COMP G="8" P="9"/> <OBJECT basetype="host" name="host" oid="9" format="rows">
<PROPERTY name="durable-id" >H4</PROPERTY>
<PROPERTY name="name" >rac01</PROPERTY>
<PROPERTY name="serial-number">00c0ff26c4ea0000057f245b01010000</PROPERTY>
<PROPERTY name="member-count" >2</PROPERTY>
<PROPERTY name="host-group" >00c0ff26c4ea00008c81245b01010000</PROPERTY>
<PROPERTY name="group-key">HG2</PROPERTY>
</OBJECT>
<COMP G="9" P="10"/>
<OBJECT basetype="initiator" name="initiator" oid="10" format="rows">
<PROPERTY name="durable-id" >I6</PROPERTY>
<PROPERTY name="nickname" >rac01_01</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped" >Yes</PROPERTY>
<PROPERTY name="profile" >Standard</PROPERTY>
<PROPERTY name="host-bus-type" >FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" >500143801875548e</PROPERTY>
<PROPERTY name="host-id" >00c0ff26c4ea0000057f245b01010000</PROPERTY>
<PROPERTY name="host-key" >H4</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b" >0</PROPERTY>
</OBJECT>
<COMP G="9" P="11"/>
<OBJECT basetype="initiator" name="initiator" oid="11" format="rows">
<PROPERTY name="durable-id" >I5</PROPERTY>
<PROPERTY name="nickname" >rac01_02</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped" >Yes</PROPERTY>
<PROPERTY name="profile" >Standard</PROPERTY>
<PROPERTY name="host-bus-type" >FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" >5001438012097ed6</PROPERTY>
<PROPERTY name="host-id" >00c0ff26c4ea0000057f245b01010000</PROPERTY>
<PROPERTY name="host-key" >H4</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b" >0</PROPERTY>
</OBJECT>
<COMP G="8" P="12"/>
<OBJECT basetype="host" name="host" oid="12" format="rows">
<PROPERTY name="durable-id" >H3</PROPERTY>
<PROPERTY name="name" >rac02</PROPERTY>
<PROPERTY name="serial-number">00c0ff26c4ea0000f77f245b01010000</PROPERTY>
<PROPERTY name="member-count" >2</PROPERTY>
<PROPERTY name="host-group">00c0ff26c4ea00008c81245b01010000</PROPERTY>
<PROPERTY name="group-key">HG2</PROPERTY>
</OBJECT>
<COMP G="12" P="13"/>
<OBJECT basetype="initiator" name="initiator" oid="13" format="rows">
<PROPERTY name="durable-id" >I3</PROPERTY>
<PROPERTY name="nickname" >rac02_01</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped" >Yes</PROPERTY>
<PROPERTY name="profile" >Standard</PROPERTY>
<PROPERTY name="host-bus-type" >FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" >50014380029ceb58</PROPERTY>
<PROPERTY name="host-id" >00c0ff26c4ea0000f77f245b01010000</PROPERTY>
<PROPERTY name="host-key" >H3</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b" >0</PROPERTY>
</OBJECT>
<COMP G="12" P="14"/>
<OBJECT basetype="initiator" name="initiator" oid="14" format="rows">
<PROPERTY name="durable-id" >I4</PROPERTY>
<PROPERTY name="nickname" >rac02_02</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped" >No</PROPERTY>
<PROPERTY name="profile" >Standard</PROPERTY>
<PROPERTY name="host-bus-type" >FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" >500143801209031c</PROPERTY>
<PROPERTY name="host-id" >00c0ff26c4ea0000f77f245b01010000</PROPERTY>
<PROPERTY name="host-key" >H3</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b" >0</PROPERTY>
</OBJECT>
<COMP G="0" P="15"/>
<OBJECT basetype="host" name="host" oid="15" format="rows">
<PROPERTY name="durable-id" >HU</PROPERTY>
<PROPERTY name="name" >-nohost-</PROPERTY>
<PROPERTY name="serial-number">NOHOST</PROPERTY>
<PROPERTY name="member-count" >0</PROPERTY>
<PROPERTY name="host-group" >UNGROUPEDHOSTS</PROPERTY>
<PROPERTY name="group-key">HGU</PROPERTY>
</OBJECT>
<COMP G="15" P="16"/>
<OBJECT basetype="initiator" name="initiator" oid="16" format="rows">
<PROPERTY name="durable-id" >I2</PROPERTY>
<PROPERTY name="nickname" >FC-port1</PROPERTY>
<PROPERTY name="discovered">No</PROPERTY>
<PROPERTY name="mapped" >No</PROPERTY>
<PROPERTY name="profile" >HP-UX</PROPERTY>
<PROPERTY name="host-bus-type" >FC</PROPERTY>
<PROPERTY name="host-bus-type-numeric">6</PROPERTY>
<PROPERTY name="id" >21000024ff3dfed1</PROPERTY>
<PROPERTY name="host-id" >NOHOST</PROPERTY>
<PROPERTY name="host-key" >HU</PROPERTY>
<PROPERTY name="host-port-bits-a">0</PROPERTY>
<PROPERTY name="host-port-bits-b" >0</PROPERTY>
</OBJECT>
</RESPONSE>
"""
LIST_HOST = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show host-groups">
<COMP G="1" P="2"/> <OBJECT basetype="host" name="host" oid="2" format="rows">
<PROPERTY name="durable-id">HU</PROPERTY>
<PROPERTY name="name">-nohost-</PROPERTY>
<PROPERTY name="serial-number">NOHOST</PROPERTY>
<PROPERTY name="member-count">0</PROPERTY>
<PROPERTY name="host-group">UNGROUPEDHOSTS</PROPERTY>
<PROPERTY name="group-key">HGU</PROPERTY>
</OBJECT>
<COMP G="4" P="5"/> <OBJECT basetype="host" name="host" oid="5" format="rows">
<PROPERTY name="durable-id">H1</PROPERTY>
<PROPERTY name="name">Host1</PROPERTY>
<PROPERTY name="serial-number">00c0ff26c2360000e2399f6101010000</PROPERTY>
<PROPERTY name="member-count">2</PROPERTY>
<PROPERTY name="host-group">00c0ff26c2360000223a9f6101010000</PROPERTY>
<PROPERTY name="group-key">HG0</PROPERTY>
</OBJECT>
<COMP G="8" P="9"/> <OBJECT basetype="host" name="host" oid="9" format="rows">
<PROPERTY name="durable-id">H4</PROPERTY>
<PROPERTY name="name">rac01</PROPERTY>
<PROPERTY name="serial-number">00c0ff26c4ea0000057f245b01010000</PROPERTY>
<PROPERTY name="member-count">2</PROPERTY>
<PROPERTY name="host-group">00c0ff26c4ea00008c81245b01010000</PROPERTY>
<PROPERTY name="group-key">HG2</PROPERTY>
</OBJECT>
<COMP G="8" P="12"/>
<OBJECT basetype="host" name="host" oid="12" format="rows">
<PROPERTY name="durable-id">H3</PROPERTY>
<PROPERTY name="name">rac02</PROPERTY>
<PROPERTY name="serial-number">00c0ff26c4ea0000f77f245b01010000</PROPERTY>
<PROPERTY name="member-count">2</PROPERTY>
<PROPERTY name="host-group">00c0ff26c4ea00008c81245b01010000</PROPERTY>
<PROPERTY name="group-key">HG2</PROPERTY>
</OBJECT>
<COMP G="0" P="15"/><OBJECT basetype="host" name="host" oid="15" format="rows">
<PROPERTY name="durable-id">HU</PROPERTY>
<PROPERTY name="name">-nohost-</PROPERTY>
<PROPERTY name="serial-number">NOHOST</PROPERTY>
<PROPERTY name="member-count">0</PROPERTY>
<PROPERTY name="host-group">UNGROUPEDHOSTS</PROPERTY>
<PROPERTY name="group-key">HGU</PROPERTY>
</OBJECT>
</RESPONSE>
"""
LIST_VOLUME_GROUPS = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show volume-groups">
<COMP G="0" P="4"/>
<OBJECT basetype="volume-groups" name="volume-groups" oid="4" format="rows">
<PROPERTY name="durable-id">VG6</PROPERTY>
<PROPERTY name="group-name" >VGroup1</PROPERTY>
<PROPERTY name="serial-number">00c0ff26c4ea0000ab2b9f6101000000</PROPERTY>
<PROPERTY name="type" >Volume</PROPERTY>
<PROPERTY name="type-numeric">3672</PROPERTY>
<PROPERTY name="member-count">2</PROPERTY>
</OBJECT>
<COMP G="4" P="5"/>
<OBJECT basetype="volumes" name="volume" oid="5" format="rows">
<PROPERTY name="durable-id">V0</PROPERTY>
<PROPERTY name="virtual-disk-name" >A</PROPERTY>
<PROPERTY name="storage-pool-name">A</PROPERTY>
<PROPERTY name="volume-name">Vol0001</PROPERTY>
<PROPERTY name="size" >100.9GB</PROPERTY>
<PROPERTY name="size-numeric" >197255168</PROPERTY>
<PROPERTY name="total-size">100.9GB</PROPERTY>
<PROPERTY name="total-size-numeric" >197255168</PROPERTY>
<PROPERTY name="allocated-size">0B</PROPERTY>
<PROPERTY name="allocated-size-numeric">0</PROPERTY>
<PROPERTY name="storage-type" >Virtual</PROPERTY>
<PROPERTY name="storage-type-numeric" >1</PROPERTY>
<PROPERTY name="preferred-owner">A</PROPERTY>
<PROPERTY name="preferred-owner-numeric">1</PROPERTY>
<PROPERTY name="owner" >A</PROPERTY>
<PROPERTY name="owner-numeric">1</PROPERTY>
<PROPERTY name="serial-number" >00c0ff26c4ea0000fa80546101000000</PROPERTY>
<PROPERTY name="write-policy" >write-back</PROPERTY>
<PROPERTY name="write-policy-numeric">1</PROPERTY>
<PROPERTY name="cache-optimization" >standard</PROPERTY>
<PROPERTY name="cache-optimization-numeric" >0</PROPERTY>
<PROPERTY name="read-ahead-size" >Adaptive</PROPERTY>
<PROPERTY name="read-ahead-size-numeric" >-1</PROPERTY>
<PROPERTY name="volume-type" >base</PROPERTY>
<PROPERTY name="volume-type-numeric">15</PROPERTY>
<PROPERTY name="volume-class" >standard</PROPERTY>
<PROPERTY name="volume-class-numeric" >0</PROPERTY>
<PROPERTY name="profile-preference" >Standard</PROPERTY>
<PROPERTY name="profile-preference-numeric">0</PROPERTY>
<PROPERTY name="snapshot" >No</PROPERTY>
<PROPERTY name="volume-qualifier">N/A</PROPERTY>
<PROPERTY name="volume-qualifier-numeric" >0</PROPERTY>
<PROPERTY name="blocks" >197255168</PROPERTY>
<PROPERTY name="capabilities">dmse</PROPERTY>
<PROPERTY name="volume-parent"></PROPERTY>
<PROPERTY name="snap-pool"></PROPERTY>
<PROPERTY name="replication-set" ></PROPERTY>
<PROPERTY name="attributes" ></PROPERTY>
<PROPERTY name="wwn" >600C0FF00026C4EAFA80546101000000</PROPERTY>
<PROPERTY name="progress">0%</PROPERTY>
<PROPERTY name="progress-numeric">0</PROPERTY>
<PROPERTY name="container-name">A</PROPERTY>
<PROPERTY name="allowed-storage-tiers-numeric" >7</PROPERTY>
<PROPERTY name="threshold-percent-of-pool" >10.00 %</PROPERTY>
<PROPERTY name="reserved-size-in-pages" >0</PROPERTY>
<PROPERTY name="allocate-reserved-pages-first">Enabled</PROPERTY>
<PROPERTY name="health">OK</PROPERTY>
<PROPERTY name="health-numeric">0</PROPERTY>
<PROPERTY name="health-reason"></PROPERTY>
<PROPERTY name="health-recommendation" ></PROPERTY>
<PROPERTY name="volume-group">00c0ff26c4ea0000ab2b9f6101000000</PROPERTY>
<PROPERTY name="group-key" >VG6</PROPERTY>
</OBJECT>
<COMP G="4" P="6"/>
<OBJECT basetype="volumes" name="volume" oid="6" format="rows">
<PROPERTY name="durable-id">V1</PROPERTY>
<PROPERTY name="virtual-disk-name" >A</PROPERTY>
<PROPERTY name="storage-pool-name">A</PROPERTY>
<PROPERTY name="volume-name">Vol0002</PROPERTY>
<PROPERTY name="size" >99.9GB</PROPERTY>
<PROPERTY name="size-numeric" >195305472</PROPERTY>
<PROPERTY name="total-size">99.9GB</PROPERTY>
<PROPERTY name="total-size-numeric" >195305472</PROPERTY>
<PROPERTY name="allocated-size">0B</PROPERTY>
<PROPERTY name="allocated-size-numeric">0</PROPERTY>
<PROPERTY name="storage-type" >Virtual</PROPERTY>
<PROPERTY name="storage-type-numeric" >1</PROPERTY>
<PROPERTY name="preferred-owner">A</PROPERTY>
<PROPERTY name="preferred-owner-numeric">1</PROPERTY>
<PROPERTY name="owner" >A</PROPERTY>
<PROPERTY name="owner-numeric">1</PROPERTY>
<PROPERTY name="serial-number" >00c0ff26c4ea00000a81546101000000</PROPERTY>
<PROPERTY name="write-policy" >write-back</PROPERTY>
<PROPERTY name="write-policy-numeric">1</PROPERTY>
<PROPERTY name="cache-optimization" >standard</PROPERTY>
<PROPERTY name="cache-optimization-numeric" >0</PROPERTY>
<PROPERTY name="read-ahead-size" >Adaptive</PROPERTY>
<PROPERTY name="read-ahead-size-numeric" >-1</PROPERTY>
<PROPERTY name="volume-type" >base</PROPERTY>
<PROPERTY name="volume-type-numeric">15</PROPERTY>
<PROPERTY name="volume-class" >standard</PROPERTY>
<PROPERTY name="volume-class-numeric" >0</PROPERTY>
<PROPERTY name="profile-preference" >Standard</PROPERTY>
<PROPERTY name="profile-preference-numeric">0</PROPERTY>
<PROPERTY name="snapshot" >No</PROPERTY>
<PROPERTY name="volume-qualifier">N/A</PROPERTY>
<PROPERTY name="volume-qualifier-numeric" >0</PROPERTY>
<PROPERTY name="blocks" >195305472</PROPERTY>
<PROPERTY name="capabilities">dmse</PROPERTY>
<PROPERTY name="volume-parent"></PROPERTY>
<PROPERTY name="snap-pool"></PROPERTY>
<PROPERTY name="replication-set" ></PROPERTY>
<PROPERTY name="attributes" ></PROPERTY>
<PROPERTY name="wwn" >600C0FF00026C4EA0A81546101000000</PROPERTY>
<PROPERTY name="progress">0%</PROPERTY>
<PROPERTY name="progress-numeric">0</PROPERTY>
<PROPERTY name="container-name">A</PROPERTY>
<PROPERTY name="allowed-storage-tiers-numeric" >7</PROPERTY>
<PROPERTY name="threshold-percent-of-pool" >10.00 %</PROPERTY>
<PROPERTY name="reserved-size-in-pages" >0</PROPERTY>
<PROPERTY name="allocate-reserved-pages-first">Enabled</PROPERTY>
<PROPERTY name="health">OK</PROPERTY>
<PROPERTY name="health-numeric">0</PROPERTY>
<PROPERTY name="health-reason"></PROPERTY>
<PROPERTY name="health-recommendation" ></PROPERTY>
<PROPERTY name="volume-group">00c0ff26c4ea0000ab2b9f6101000000</PROPERTY>
<PROPERTY name="group-key" >VG6</PROPERTY>
</OBJECT>
</RESPONSE>
"""
LIST_MAPS_ALL = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RESPONSE VERSION="L100" REQUEST="show maps">
<COMP G="0" P="1"/>
<OBJECT basetype="volume-group-view">
<PROPERTY name="durable-id">VG5</PROPERTY>
<PROPERTY name="serial-number">00c0ff26c4ea0000e22b9f6101000000</PROPERTY>
<PROPERTY name="group-name">VGroup2.*</PROPERTY>
</OBJECT>
<COMP G="1" P="2"/>
<OBJECT basetype="volume-group-view-mappings" >
<PROPERTY name="durable-id" >VG5_I3</PROPERTY>
<PROPERTY name="parent-id">VG5</PROPERTY>
<PROPERTY name="mapped-id">I3</PROPERTY>
<PROPERTY name="ports">1,2</PROPERTY>
<PROPERTY name="access">read-write</PROPERTY>
<PROPERTY name="access-numeric">3</PROPERTY>
<PROPERTY name="initiator-id">50014380029ceb58</PROPERTY>
<PROPERTY name="nickname">rac02_01</PROPERTY>
<PROPERTY name="host-profile">Standard</PROPERTY>
</OBJECT>
<COMP G="2" P="3"/>
<OBJECT basetype="volume-group-view-mappings-luns">
<PROPERTY name="volume-name">Vol0003</PROPERTY>
<PROPERTY name="volume-serial">00c0ff26c4ea000082537a6101000000</PROPERTY>
<PROPERTY name="lun">0</PROPERTY>
</OBJECT>
<COMP G="2" P="4"/>
<OBJECT basetype="volume-group-view-mappings-luns">
<PROPERTY name="volume-name" >Vol0004</PROPERTY>
<PROPERTY name="volume-serial" >00c0ff26c4ea000085537a6101000000</PROPERTY>
<PROPERTY name="lun">2</PROPERTY>
</OBJECT>
<COMP G="1" P="5"/>
<OBJECT basetype="volume-view">
<PROPERTY name="durable-id">V3</PROPERTY>
<PROPERTY name="volume-serial" >00c0ff26c4ea000085537a6101000000</PROPERTY>
<PROPERTY name="volume-name">Vol0004</PROPERTY>
</OBJECT>
<COMP G="5" P="6"/>
<OBJECT basetype="volume-view-mappings" >
<PROPERTY name="durable-id" >V3_I0</PROPERTY>
<PROPERTY name="parent-id">V3</PROPERTY>
<PROPERTY name="mapped-id" >I0</PROPERTY>
<PROPERTY name="ports">3,4</PROPERTY>
<PROPERTY name="lun" >0</PROPERTY>
<PROPERTY name="access">read-write</PROPERTY>
<PROPERTY name="access-numeric">3</PROPERTY>
<PROPERTY name="identifier" >10000090fa13870f</PROPERTY>
<PROPERTY name="nickname" >FC-port3</PROPERTY>
<PROPERTY name="host-profile">HPUX</PROPERTY>
</OBJECT>
<COMP G="0" P="7"/>
<OBJECT basetype="volume-view">
<PROPERTY name="durable-id">V0</PROPERTY>
<PROPERTY name="volume-serial">00c0ff26c4ea0000fa80546101000000</PROPERTY>
<PROPERTY name="volume-name" >Vol0001</PROPERTY>
</OBJECT>
<COMP G="7" P="8"/>
<OBJECT basetype="volume-view-mappings">
<PROPERTY name="durable-id" key="true">V0_I1</PROPERTY>
<PROPERTY name="parent-id">V0</PROPERTY>
<PROPERTY name="mapped-id">I1</PROPERTY>
<PROPERTY name="ports">1,2</PROPERTY>
<PROPERTY name="lun">0</PROPERTY>
<PROPERTY name="access">read-write</PROPERTY>
<PROPERTY name="access-numeric">3</PROPERTY>
<PROPERTY name="identifier">10000090fa13870e</PROPERTY>
<PROPERTY name="nickname">FC-port2</PROPERTY>
<PROPERTY name="host-profile">HPUX</PROPERTY>
</OBJECT>
<COMP G="0" P="9"/>
<OBJECT basetype="volume-view">
<PROPERTY name="durable-id" >V1</PROPERTY>
<PROPERTY name="volume-serial">00c0ff26c4ea00000a81546101000000</PROPERTY>
<PROPERTY name="volume-name">Vol0002</PROPERTY>
</OBJECT>
<COMP G="9" P="10"/>
<OBJECT basetype="volume-view-mappings">
<PROPERTY name="durable-id">V1_H4</PROPERTY>
<PROPERTY name="parent-id" >V1</PROPERTY>
<PROPERTY name="mapped-id">H4</PROPERTY>
<PROPERTY name="ports">1,2</PROPERTY>
<PROPERTY name="lun">0</PROPERTY>
<PROPERTY name="access">read-write</PROPERTY>
<PROPERTY name="access-numeric">3</PROPERTY>
<PROPERTY name="identifier">00c0ff26c4ea0000057f245b01010000</PROPERTY>
<PROPERTY name="nickname">rac01.*</PROPERTY>
<PROPERTY name="host-profile">Standard</PROPERTY>
</OBJECT>
</RESPONSE>
"""

error_result = [
    {
        'alert_id': 'A891',
        'alert_name': '557',
        'category': 'Fault',
        'description': 'Management',
        'location': 'An Enclosure Management Processor(EMP)',
        'match_key': 'd0317252aed04fd8b68e79d7eab08277',
        'occur_time': 1636704980000,
        'resource_type': '557',
        'sequence_number': 'A891',
        'severity': 'ERROR',
        'type': 'EquipmentAlarm'
    }
]

volume_result = [
    {
        'name': 'Vol0001',
        'storage_id': 'kkk',
        'description': 'Vol0001',
        'status': 'normal',
        'native_volume_id': 'V1',
        'native_storage_pool_id': '00c0ff26c4ea0000d980546101000000',
        'wwn': '600C0FF00026C4EAFA80546101000000',
        'type': 'base',
        'total_capacity': 107266808217,
        'free_capacit': 107266808217,
        'used_capacity': 0,
        'blocks': 195305472,
        'compressed': True,
        'deduplicated': True
    }, {
        'name': 'Vol0002',
        'storage_id': 'kkk',
        'description': 'Vol0002',
        'status': 'normal',
        'native_volume_id': 'V2',
        'native_storage_pool_id': '00c0ff26c4ea0000d980546101000000',
        'wwn': '600C0FF00026C4EA0A81546101000000',
        'type': 'base',
        'total_capacity': 107266808217,
        'free_capacit': 107266808217,
        'used_capacity': 0,
        'blocks': 195305472,
        'compressed': True,
        'deduplicated': True
    }
]

pools_result = [
    {
        'name': 'A',
        'storage_id': 'kkk',
        'native_storage_pool_id': '00c0ff26c4ea0000d980546101000000',
        'status': 'normal',
        'storage_type': 'block',
        'total_capacity': 1285054214963,
        'subscribed_capacity': 390610944,
        'used_capacity': 214533616434,
        'free_capacity': 1070520598529
    }
]

ports_result = [
    {
        'native_port_id': 'hostport_A1',
        'name': 'A1',
        'type': 'fc',
        'connection_status': 'disconnected',
        'health_status': 'abnormal',
        'location': 'A1_FC',
        'storage_id': 'kkk',
        'speed': 8589934592.0,
        'max_speed': 8589934592.0,
        'mac_address': None,
        'ipv4': None,
        'wwn': '207000c0ff26dcb0'
    }, {
        'native_port_id': 'hostport_A2',
        'name': 'A2',
        'type': 'fc',
        'connection_status': 'disconnected',
        'health_status': 'abnormal',
        'location': 'A2_FC',
        'storage_id': 'kkk',
        'speed': 8589934592.0,
        'max_speed': 8589934592.0,
        'mac_address': None,
        'ipv4': None,
        'wwn': '217000c0ff26dcb0'
    }, {
        'native_port_id': 'hostport_A3',
        'name': 'A3',
        'type': 'eth',
        'connection_status': 'disconnected',
        'health_status': 'abnormal',
        'location': 'A3_ISCSI',
        'storage_id': 'kkk',
        'speed': 0,
        'max_speed': 0,
        'mac_address': '00:C0:FF:35:BD:64',
        'ipv4': '0.0.0.0',
        'wwn': None
    }, {
        'native_port_id': 'hostport_A4',
        'name': 'A4',
        'type': 'eth',
        'connection_status': 'disconnected',
        'health_status': 'abnormal',
        'location': 'A4_ISCSI',
        'storage_id': 'kkk',
        'speed': 0,
        'max_speed': 0,
        'mac_address': '00:C0:FF:35:BD:65',
        'ipv4': '0.0.0.0',
        'wwn': None
    }, {
        'native_port_id': 'hostport_B1',
        'name': 'B1',
        'type': 'fc',
        'connection_status': 'disconnected',
        'health_status': 'abnormal',
        'location': 'B1_FC',
        'storage_id': 'kkk',
        'speed': 8589934592.0,
        'max_speed': 8589934592.0,
        'mac_address': None,
        'ipv4': None,
        'wwn': '247000c0ff26dcb0'
    }, {
        'native_port_id': 'hostport_B2',
        'name': 'B2',
        'type': 'fc',
        'connection_status': 'disconnected',
        'health_status': 'abnormal',
        'location': 'B2_FC',
        'storage_id': 'kkk',
        'speed': 8589934592.0,
        'max_speed': 8589934592.0,
        'mac_address': None,
        'ipv4': None,
        'wwn': '257000c0ff26dcb0'
    }, {
        'native_port_id': 'hostport_B3',
        'name': 'B3',
        'type': 'eth',
        'connection_status': 'disconnected',
        'health_status': 'abnormal',
        'location': 'B3_ISCSI',
        'storage_id': 'kkk',
        'speed': 0,
        'max_speed': 0,
        'mac_address': '00:C0:FF:35:BA:BC',
        'ipv4': '0.0.0.0',
        'wwn': None
    }, {
        'native_port_id': 'hostport_B4',
        'name': 'B4',
        'type': 'eth',
        'connection_status': 'disconnected',
        'health_status': 'abnormal',
        'location': 'B4_ISCSI',
        'storage_id': 'kkk',
        'speed': 0,
        'max_speed': 0,
        'mac_address': '00:C0:FF:35:BA:BD',
        'ipv4': '0.0.0.0',
        'wwn': None
    }
]

disks_result = [
    {
        'native_disk_id': '1.1',
        'name': '1.1',
        'physical_type': 'sas',
        'status': 'normal',
        'storage_id': 'kkk',
        'native_disk_group_id': 'dgA01',
        'serial_number': '6SL9CD560000N51404EF',
        'manufacturer': 'SEAGATE',
        'model': 'ST3600057SS',
        'speed': 15000,
        'capacity': 644352468582,
        'health_score': 'normal'
    }, {
        'native_disk_id': '1.2',
        'name': '1.2',
        'physical_type': 'sas',
        'status': 'normal',
        'storage_id': 'kkk',
        'native_disk_group_id': 'dgA01',
        'serial_number': '6SL7X4RE0000B42601SF',
        'manufacturer': 'SEAGATE',
        'model': 'ST3600057SS',
        'speed': 15000,
        'capacity': 644352468582,
        'health_score': 'normal'
    }, {
        'native_disk_id': '1.3',
        'name': '1.3',
        'physical_type': 'sas',
        'status': 'normal',
        'storage_id': 'kkk',
        'native_disk_group_id': 'dgA01',
        'serial_number': '6SL9QR5T0000N52120SK',
        'manufacturer': 'SEAGATE',
        'model': 'ST3600057SS',
        'speed': 15000,
        'capacity': 644352468582,
        'health_score': 'normal'
    }, {
        'native_disk_id': '1.4',
        'name': '1.4',
        'physical_type': 'sas',
        'status': 'normal',
        'storage_id': 'kkk',
        'native_disk_group_id': 'dgA01',
        'serial_number': '3SL0WT7G00009051YBTF',
        'manufacturer': 'SEAGATE',
        'model': 'ST3600057SS',
        'speed': 15000,
        'capacity': 644352468582,
        'health_score': 'normal'
    }
]

system_info = {
    'name': 'msa2040',
    'vendor': 'HPE',
    'model': 'MSA 2040 SAN',
    'status': 'normal',
    'serial_number': '00C0FF26DCB0',
    'firmware_version': 'GL210R004',
    'location': 'Uninitialized Location',
    'raw_capacity': 2577409874328,
    'total_capacity': 1285054214963,
    'used_capacity': 214533616434,
    'free_capacity': 1070520598529
}

controller_result = [
    {
        'native_controller_id': 'A',
        'name': 'controller_a',
        'storage_id': 'kkk',
        'status': 'normal',
        'location': 'Top',
        'soft_version': 'GLS210R04-01',
        'cpu_info': 'Gladden',
        'cpu_count': 1,
        'memory_size': 6442450944
    },
    {
        'native_controller_id': 'B',
        'name': 'controller_b',
        'storage_id': 'kkk',
        'status': 'normal',
        'location': 'Bottom',
        'soft_version': 'GLS210R04-01',
        'cpu_info': 'Gladden',
        'cpu_count': 1,
        'memory_size': 6442450944
    }
]

list_storage_host_initiators = [
    {
        'name': 'FC-port1',
        'type': 'fc',
        'alias': 'I2',
        'storage_id': 'kkk',
        'native_storage_host_initiator_id': 'I2',
        'wwn': '21000024ff3dfed1',
        'status': 'online',
        'native_storage_host_id': 'NOHOST'
    },
    {
        'name': 'FC-port2',
        'type': 'fc',
        'alias': 'I1',
        'storage_id': 'kkk',
        'native_storage_host_initiator_id': 'I1',
        'wwn': '10000090fa13870e',
        'status': 'online',
        'native_storage_host_id': '00c0ff26c2360000e2399f6101010000'
    },
    {
        'name': 'FC-port3',
        'type': 'fc',
        'alias': 'I0',
        'storage_id': 'kkk',
        'native_storage_host_initiator_id': 'I0',
        'wwn': '10000090fa13870f',
        'status': 'online',
        'native_storage_host_id': '00c0ff26c2360000e2399f6101010000'
    },
    {
        'name': 'rac01_01',
        'type': 'fc',
        'alias': 'I6',
        'storage_id': 'kkk',
        'native_storage_host_initiator_id': 'I6',
        'wwn': '500143801875548e',
        'status': 'online',
        'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000'
    },
    {
        'name': 'rac01_02',
        'type': 'fc',
        'alias': 'I5',
        'storage_id': 'kkk',
        'native_storage_host_initiator_id': 'I5',
        'wwn': '5001438012097ed6',
        'status': 'online',
        'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000'
    },
    {
        'name': 'rac02_01',
        'type': 'fc',
        'alias': 'I3',
        'storage_id': 'kkk',
        'native_storage_host_initiator_id': 'I3',
        'wwn': '50014380029ceb58',
        'status': 'online',
        'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000'
    },
    {
        'name': 'rac02_02',
        'type': 'fc',
        'alias': 'I4',
        'storage_id': 'kkk',
        'native_storage_host_initiator_id': 'I4',
        'wwn': '500143801209031c',
        'status': 'online',
        'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000'
    },
    {
        'name': 'FC-port1',
        'type': 'fc',
        'alias': 'I2',
        'storage_id': 'kkk',
        'native_storage_host_initiator_id': 'I2',
        'wwn': '21000024ff3dfed1',
        'status': 'online',
        'native_storage_host_id': 'NOHOST'
    }
]

list_storage_hosts = [
    {
        'name': 'Host1',
        'description': 'H1',
        'storage_id': 'kkk',
        'native_storage_host_id': '00c0ff26c2360000e2399f6101010000',
        'os_type': 'HP-UX',
        'status': 'normal'
    },
    {
        'name': 'rac01',
        'description': 'H4',
        'storage_id': 'kkk',
        'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000',
        'os_type': 'HP-UX',
        'status': 'normal'
    },
    {
        'name': 'rac02',
        'description': 'H3',
        'storage_id': 'kkk',
        'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000',
        'os_type': 'HP-UX',
        'status': 'normal'
    }
]

list_storage_host_groups = {
    'storage_host_groups': [
        {
            'name': 'HostGroup1',
            'description': 'HG0',
            'storage_id': 'kkk',
            'native_storage_host_group_id': '00c0ff26c2360000223a9f6101010000',
            'storage_hosts': '00c0ff26c2360000e2399f6101010000'
        }, {
            'name': 'rac',
            'description': 'HG2',
            'storage_id': 'kkk',
            'native_storage_host_group_id': '00c0ff26c4ea00008c81245b01010000',
            'storage_hosts': '00c0ff26c4ea0000057f245b01010000,'
                             '00c0ff26c4ea0000f77f245b01010000'
        }
    ],
    'storage_host_grp_host_rels': [
        {
            'storage_id': 'kkk',
            'native_storage_host_group_id': '00c0ff26c2360000223a9f6101010000',
            'native_storage_host_id': '00c0ff26c2360000e2399f6101010000'
        },
        {
            'storage_id': 'kkk',
            'native_storage_host_group_id': '00c0ff26c4ea00008c81245b01010000',
            'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000'
        },
        {
            'storage_id': 'kkk',
            'native_storage_host_group_id': '00c0ff26c4ea00008c81245b01010000',
            'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000'
        }
    ]
}

list_volume_groups = {
    'volume_groups': [
        {
            'name': 'VGroup1',
            'description': 'VG6',
            'storage_id': 'kkk',
            'native_volume_group_id': 'VG6',
            'volumes': 'V0,V1'
        }
    ],
    'vol_grp_vol_rels': [
        {
            'storage_id': 'kkk',
            'native_volume_group_id': 'VG6',
            'native_volume_id': 'V0'
        },
        {
            'storage_id': 'kkk',
            'native_volume_group_id': 'VG6',
            'native_volume_id': 'V1'
        }
    ]
}

list_masking_views = [
    {
        'name': 'FC-port3',
        'description': 'FC-port3',
        'storage_id': 'kkk',
        'native_masking_view_id': 'V3_I0V3',
        'native_port_group_id': 'port_group_A3B3A4B4',
        'native_volume_id': 'V3',
        'native_storage_host_id': '00c0ff26c2360000e2399f6101010000'
    },
    {
        'name': 'FC-port2',
        'description': 'FC-port2',
        'storage_id': 'kkk',
        'native_masking_view_id': 'V0_I1V0',
        'native_port_group_id': 'port_group_A1B1A2B2',
        'native_volume_id': 'V0',
        'native_storage_host_id': '00c0ff26c2360000e2399f6101010000'
    },
    {
        'name': 'rac01.*',
        'description': 'rac01.*',
        'storage_id': 'kkk',
        'native_masking_view_id': 'V1_H4V1',
        'native_port_group_id': 'port_group_A1B1A2B2',
        'native_volume_id': 'V1',
        'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000'
    },
    {
        'name': 'rac02_01',
        'description': 'rac02_01',
        'storage_id': 'kkk',
        'native_masking_view_id': 'VG5_I3VG5',
        'native_port_group_id': 'port_group_A1B1A2B2',
        'native_volume_group_id': 'VG5',
        'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000'
    }
]
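
# ---------------------------------------------------------------------------
# Illustrative sketch (an editorial addition, not part of the original file):
# the LIST_* constants above mimic XML returned by the HPE MSA CLI, so a unit
# test would typically parse them and pick out PROPERTY values by name. The
# hypothetical helper below shows that pattern with the standard library.
# ---------------------------------------------------------------------------
import xml.etree.ElementTree as ET


def _parse_properties(xml_text):
    """Return one {property-name: text} dict per top-level OBJECT element."""
    root = ET.fromstring(xml_text.strip())
    return [
        {
            prop.get('name'): (prop.text or '').strip()
            for prop in obj.findall('PROPERTY')
        }
        for obj in root.findall('OBJECT')
    ]

# e.g. _parse_properties(LIST_CONTROLLERS)[0]['durable-id'] == 'controller_a'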


# ===========================================================================
# Source: dry-python/returns (license: BSD-2-Clause)
# File:   returns/_internal/futures/_future_result.py
# ===========================================================================

from typing import TYPE_CHECKING, Any, Awaitable, Callable, TypeVar
from returns.io import IO, IOResult
from returns.primitives.hkt import Kind2, dekind
from returns.result import Failure, Result, Success
if TYPE_CHECKING:
from returns.future import Future, FutureResult # noqa: F401
_ValueType = TypeVar('_ValueType', covariant=True)
_NewValueType = TypeVar('_NewValueType')
_ErrorType = TypeVar('_ErrorType', covariant=True)
_NewErrorType = TypeVar('_NewErrorType')
async def async_swap(
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_ErrorType, _ValueType]:
"""Swaps value and error types in ``Result``."""
return (await inner_value).swap()
async def async_map(
function: Callable[[_ValueType], _NewValueType],
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
"""Async maps a function over a value."""
return (await inner_value).map(function)
async def async_apply(
container:
'FutureResult[Callable[[_ValueType], _NewValueType], _ErrorType]',
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
"""Async maps a function over a value."""
return (await inner_value).apply((await container)._inner_value)
async def async_bind(
function: Callable[
[_ValueType],
Kind2['FutureResult', _NewValueType, _ErrorType],
],
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
"""Async binds a container over a value."""
container = await inner_value
if isinstance(container, Success):
return (await dekind(function(container.unwrap())))._inner_value
return container # type: ignore[return-value]
async def async_bind_awaitable(
function: Callable[[_ValueType], Awaitable[_NewValueType]],
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
"""Async binds a coroutine over a value."""
container = await inner_value
if isinstance(container, Success):
return Result.from_value(await function(container.unwrap()))
return container # type: ignore[return-value]
async def async_bind_async(
function: Callable[
[_ValueType],
Awaitable[Kind2['FutureResult', _NewValueType, _ErrorType]],
],
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
"""Async binds a coroutine with container over a value."""
container = await inner_value
if isinstance(container, Success):
return await dekind(await function(container.unwrap()))._inner_value
return container # type: ignore[return-value]
async def async_bind_result(
function: Callable[[_ValueType], Result[_NewValueType, _ErrorType]],
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
"""Async binds a container returning ``Result`` over a value."""
return (await inner_value).bind(function)
async def async_bind_ioresult(
function: Callable[[_ValueType], IOResult[_NewValueType, _ErrorType]],
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
"""Async binds a container returning ``IOResult`` over a value."""
container = await inner_value
if isinstance(container, Success):
return function(container.unwrap())._inner_value
return container # type: ignore[return-value]
async def async_bind_io(
function: Callable[[_ValueType], IO[_NewValueType]],
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
"""Async binds a container returning ``IO`` over a value."""
container = await inner_value
if isinstance(container, Success):
return Success(function(container.unwrap())._inner_value)
return container # type: ignore[return-value]
async def async_bind_future(
function: Callable[[_ValueType], 'Future[_NewValueType]'],
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
"""Async binds a container returning ``IO`` over a value."""
container = await inner_value
if isinstance(container, Success):
return await async_from_success(function(container.unwrap()))
return container # type: ignore[return-value]
async def async_bind_async_future(
function: Callable[[_ValueType], Awaitable['Future[_NewValueType]']],
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
"""Async binds a container returning ``IO`` over a value."""
container = await inner_value
if isinstance(container, Success):
return await async_from_success(await function(container.unwrap()))
return container # type: ignore[return-value]
async def async_alt(
function: Callable[[_ErrorType], _NewErrorType],
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_ValueType, _NewErrorType]:
"""Async alts a function over a value."""
container = await inner_value
if isinstance(container, Success):
return container
return Failure(function(container.failure()))
async def async_lash(
function: Callable[
[_ErrorType],
Kind2['FutureResult', _ValueType, _NewErrorType],
],
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_ValueType, _NewErrorType]:
"""Async lashes a function returning a container over a value."""
container = await inner_value
if isinstance(container, Success):
return container
return (await dekind(function(container.failure())))._inner_value
async def async_from_success(
container: 'Future[_NewValueType]',
) -> Result[_NewValueType, Any]:
"""Async success unit factory."""
return Success((await container)._inner_value)
async def async_from_failure(
container: 'Future[_NewErrorType]',
) -> Result[Any, _NewErrorType]:
"""Async failure unit factory."""
return Failure((await container)._inner_value)
async def async_compose_result(
function: Callable[
[Result[_ValueType, _ErrorType]],
Kind2['FutureResult', _NewValueType, _ErrorType],
],
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
"""Async composes ``Result`` based function."""
return (await dekind(function(await inner_value)))._inner_value
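All of the helpers above share one railway shape: await the wrapped ``Result``, apply the function only on the ``Success`` branch, and let a ``Failure`` flow through untouched. A minimal self-contained sketch of that shape (the ``Success``/``Failure`` classes here are simplified stand-ins, not the real ``returns`` types):

import asyncio

class Success:
    def __init__(self, value):
        self._inner_value = value

    def bind(self, function):
        return function(self._inner_value)  # continue the happy path

class Failure:
    def __init__(self, error):
        self._inner_value = error

    def bind(self, function):
        return self  # short-circuit: errors pass through unchanged

async def async_bind_sketch(function, inner_value):
    # Mirrors the helpers above: await the container first, then bind.
    return (await inner_value).bind(function)

async def _demo():
    async def wrapped():
        return Success(2)
    result = await async_bind_sketch(lambda x: Success(x * 10), wrapped())
    print(result._inner_value)  # 20

asyncio.run(_demo())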
|
ce58e8e109e19e8b00835381b8377bf02159c998
|
286aed455c543eb139e23b7e72b55ba58e25fd03
|
/spot-setup/migration/migrate_old_flow_data.py
|
3b5543bf68c050c70ad7b14aa99ac09f30973394
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/incubator-spot
|
8f9efa981636c766abf666fa62acbe2c4e6d9302
|
63169ba29443813b064f525a4c322e2673a7bc2a
|
refs/heads/master
| 2023-09-04T11:32:12.115539
| 2023-04-21T11:59:13
| 2023-04-21T11:59:13
| 69,328,650
| 380
| 255
|
Apache-2.0
| 2023-04-21T11:59:14
| 2016-09-27T07:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 11,020
|
py
|
migrate_old_flow_data.py
|
#!/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import subprocess
import fnmatch
import re
import pandas as pd
import datetime
from utilities import util
old_oa_path = sys.argv[1]
staging_db = sys.argv[2]
hdfs_staging_path = sys.argv[3]
dest_db = sys.argv[4]
impala_daemon = sys.argv[5]
# Execution example:
#./migrate_old_flow_data.py '/home/spotuser/incubator-spot_old/spot-oa' 'spot_migration' '/user/spotuser/spot_migration/' 'migrated' 'node01'
def main():
log = util.get_logger('SPOT.MIGRATE.FLOW')
cur_path = os.path.dirname(os.path.realpath(__file__))
new_spot_path = os.path.split(os.path.split(cur_path)[0])[0]
new_oa_path = '{0}/spot-oa'.format(new_spot_path)
log.info('New Spot OA path: {0}'.format(new_oa_path))
old_spot_path = os.path.split(old_oa_path)[0]
log.info("Creating HDFS paths for Impala tables")
util.create_hdfs_folder('{0}/flow/scores'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/flow/chords'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/flow/edge'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/flow/summary'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/flow/storyboard'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/flow/threat_investigation'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/flow/timeline'.format(hdfs_staging_path),log)
util.execute_cmd('hdfs dfs -setfacl -R -m user:impala:rwx {0}'.format(hdfs_staging_path),log)
log.info("Creating Staging tables in Impala")
util.execute_cmd('impala-shell -i {0} --var=hpath={1} --var=dbname={2} -c -f create_flow_migration_tables.hql'.format(impala_daemon, hdfs_staging_path, staging_db),log)
## Flow Ingest Summary
log.info('Processing Flow Ingest Summary')
ing_sum_path='{0}/data/flow/ingest_summary/'.format(old_oa_path)
pattern='is_??????.csv'
staging_table_name = 'flow_ingest_summary_tmp'
dest_table_name = 'flow_ingest_summary'
if os.path.exists(ing_sum_path):
for file in fnmatch.filter(os.listdir(ing_sum_path), pattern):
log.info('Processing file: {0}'.format(file))
filepath='{0}{1}'.format(ing_sum_path, file)
df = pd.read_csv(filepath)
s = df.iloc[:,0]
l_dates = list(s.unique())
l_dates = map(lambda x: x[0:10].strip(), l_dates)
            l_dates = filter(lambda x: re.match(r'\d{4}[-/]\d{2}[-/]\d{1}', x), l_dates)
s_dates = set(l_dates)
for date_str in s_dates:
dt = datetime.datetime.strptime(date_str, '%Y-%m-%d')
log.info('Processing day: {0} {1} {2} {3}'.format(date_str, dt.year, dt.month, dt.day))
records = df[df['date'].str.contains(date_str)]
filename = "ingest_summary_{0}{1}{2}.csv".format(dt.year, dt.month, dt.day)
records.to_csv(filename, index=False)
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT tdate, total FROM {5}.{6}".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
os.remove(filename)
## Iterating days
days_path='{0}/data/flow/'.format(old_oa_path)
if os.path.exists(days_path):
for day_folder in fnmatch.filter(os.listdir(days_path), '2*'):
            print(day_folder)
dt = datetime.datetime.strptime(day_folder, '%Y%m%d')
log.info('Processing day: {0} {1} {2} {3}'.format(day_folder, dt.year, dt.month, dt.day))
full_day_path = '{0}{1}'.format(days_path,day_folder)
## Flow Scores and Flow Threat Investigation
filename = '{0}/flow_scores.csv'.format(full_day_path)
if os.path.isfile(filename):
log.info("Processing Flow Scores")
staging_table_name = 'flow_scores_tmp'
dest_table_name = 'flow_scores'
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT tstart,srcip,dstip,sport,dport,proto,ipkt,ibyt,opkt,obyt,score,rank,srcIpInternal,destIpInternal,srcGeo,dstGeo,srcDomain,dstDomain,srcIP_rep,dstIP_rep FROM {5}.{6}".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
log.info("Processing Flow Threat Investigation")
staging_table_name = 'flow_scores_tmp'
dest_table_name = 'flow_threat_investigation'
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT tstart,srcip,dstip,sport,dport,sev FROM {5}.{6} WHERE sev > 0;".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
# Flow Chords
log.info("Processing Flow Chords")
staging_table_name = 'flow_chords_tmp'
dest_table_name = 'flow_chords'
for file in fnmatch.filter(os.listdir(full_day_path), 'chord*.tsv'):
ip = re.findall("chord-(\S+).tsv", file)[0]
ip = ip.replace('_', '.')
log.info("Processing File: {0} with IP:{1}".format(file, ip))
filename = '{0}/{1}'.format(full_day_path, file)
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT '{5}', srcip, dstip, ibyt, ipkt FROM {6}.{7};".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, ip, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
## Flow Edge
log.info("Processing Flow Edge")
staging_table_name = 'flow_edge_tmp'
dest_table_name = 'flow_edge'
pattern = 'edge*.tsv'
edge_files = fnmatch.filter(os.listdir(full_day_path), pattern)
for file in edge_files:
                parts = (re.findall(r"edge-(\S+).tsv", file)[0]).split('-')
hh = int(parts[2])
mn = int(parts[3])
log.info("Processing File: {0} with HH: {1} and MN: {2}".format(file, hh, mn))
filename = '{0}/{1}'.format(full_day_path, file)
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT tstart, srcip, dstip, sport, dport, proto, flags, tos, ibyt, ipkt, input, output, rip, obyt, opkt, {5}, {6} FROM {7}.{8} WHERE srcip is not NULL;".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, hh, mn, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
##flow_storyboard
log.info("Processing Flow Storyboard")
staging_table_name = 'flow_storyboard_tmp'
dest_table_name = 'flow_storyboard'
filename = '{0}/threats.csv'.format(full_day_path)
if os.path.isfile(filename):
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT ip_threat, title, text FROM {5}.{6};".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
##flow_timeline
log.info("Processing Flow Timeline")
staging_table_name = 'flow_timeline_tmp'
dest_table_name = 'flow_timeline'
for file in fnmatch.filter(os.listdir(full_day_path), 'sbdet*.tsv'):
ip = re.findall("sbdet-(\S+).tsv", file)[0]
log.info("Processing File: {0} with IP:{1}".format(file, ip))
filename = '{0}/{1}'.format(full_day_path, file)
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT '{5}', tstart, tend, srcip, dstip, proto, sport, dport, ipkt, ibyt FROM {6}.{7};".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, ip, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
log.info("Dropping staging tables")
util.execute_cmd('impala-shell -i {0} --var=dbname={1} -c -f drop_flow_migration_tables.hql'.format(impala_daemon, staging_db),log)
log.info("Removing staging tables' path in HDFS")
util.execute_cmd('hadoop fs -rm -r {0}/flow/'.format(hdfs_staging_path),log)
log.info("Moving CSV data to backup folder")
util.execute_cmd('mkdir {0}/data/backup/'.format(old_oa_path),log)
util.execute_cmd('cp -r {0}/data/flow/ {0}/data/backup/'.format(old_oa_path),log)
util.execute_cmd('rm -r {0}/data/flow/'.format(old_oa_path),log)
log.info("Invalidating metadata in Impala to refresh tables content")
util.execute_cmd('impala-shell -i {0} -q "INVALIDATE METADATA;"'.format(impala_daemon),log)
log.info("Creating ipynb template structure and copying advanced mode and threat investigation ipynb templates for each pre-existing day in the new Spot location")
ipynb_pipeline_path = '{0}/ipynb/flow/'.format(old_oa_path)
if os.path.exists(ipynb_pipeline_path):
for folder in os.listdir(ipynb_pipeline_path):
log.info("Creating ipynb flow folders in new Spot locaiton: {0}".format(folder))
util.execute_cmd('mkdir -p {0}/ipynb/flow/{1}/'.format(new_oa_path, folder),log)
log.info("Copying advanced mode ipynb template")
util.execute_cmd('cp {0}/oa/flow/ipynb_templates/Advanced_Mode_master.ipynb {0}/ipynb/flow/{1}/Advanced_Mode.ipynb'.format(new_oa_path, folder),log)
log.info("Copying threat investigation ipynb template")
util.execute_cmd('cp {0}/oa/flow/ipynb_templates/Threat_Investigation_master.ipynb {0}/ipynb/flow/{1}/Threat_Investigation.ipynb'.format(new_oa_path, folder),log)
if __name__=='__main__':
main()
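The ingest-summary section of this script splits one monthly CSV into per-day files so that each day can be loaded into its own (y, m, d) partition. A minimal sketch of that split in isolation, assuming an input shaped like the real ``is_??????.csv`` files (the file name below is illustrative; the 'date' column matches the real script):

import pandas as pd

df = pd.read_csv('is_201609.csv')  # one month of rows with a 'date' column
for date_str, records in df.groupby(df['date'].str[:10].str.strip()):
    # One output file per calendar day, ready for a per-partition LOAD DATA.
    records.to_csv('ingest_summary_{0}.csv'.format(date_str.replace('-', '')), index=False)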
|
ab9223d47aa0e2e2bca30664f09532fd4070cc13
|
882aadb4d342155851987bf3dd7f5c294f0cb07b
|
/ESP32/python/palettes/glowbow.py
|
ae41371d15a13896f5715098a0fbef91ce38e584
|
[] |
no_license
|
danjulio/lepton
|
fdaaed370111b452fd30697094d6c4760375e6b8
|
8d4dc4cdc3a6a765af514226835cd7c700cd4cf2
|
refs/heads/master
| 2022-11-12T07:12:02.529182
| 2022-11-06T20:02:00
| 2022-11-06T20:02:00
| 140,652,391
| 162
| 38
| null | 2022-08-08T05:36:33
| 2018-07-12T02:49:33
|
C
|
UTF-8
|
Python
| false
| false
| 5,764
|
py
|
glowbow.py
|
"""
"Glowbow" colormap for 8-bit LEP indexed data
Copyright 2020 Dan Julio, converted to python by Todd LaWall (bitreaper)
This file is part of firecam.
firecam is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
firecam is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with firecam. If not, see <https://www.gnu.org/licenses/>.
"""
glowbow_palette = [
[16, 16, 16],
[19, 17, 18],
[22, 16, 16],
[25, 17, 18],
[28, 17, 19],
[31, 17, 20],
[34, 17, 19],
[36, 18, 20],
[39, 18, 19],
[43, 19, 21],
[45, 18, 21],
[48, 20, 21],
[52, 19, 22],
[54, 20, 23],
[58, 20, 23],
[63, 21, 23],
[68, 21, 25],
[70, 21, 26],
[73, 22, 27],
[75, 22, 26],
[79, 22, 27],
[81, 22, 28],
[84, 23, 27],
[87, 22, 28],
[91, 24, 30],
[96, 23, 30],
[102, 24, 33],
[104, 25, 32],
[108, 25, 33],
[110, 25, 34],
[117, 25, 34],
[120, 27, 34],
[122, 27, 35],
[127, 28, 35],
[129, 27, 35],
[132, 29, 37],
[135, 27, 37],
[138, 29, 38],
[141, 29, 39],
[143, 29, 40],
[147, 29, 41],
[150, 31, 41],
[152, 30, 41],
[155, 29, 42],
[158, 30, 41],
[165, 31, 44],
[167, 32, 43],
[170, 32, 44],
[175, 33, 45],
[177, 33, 46],
[178, 32, 46],
[182, 32, 45],
[186, 33, 47],
[188, 34, 48],
[190, 34, 47],
[194, 34, 48],
[195, 35, 49],
[195, 35, 47],
[197, 38, 48],
[196, 39, 46],
[198, 39, 45],
[199, 41, 44],
[200, 42, 43],
[201, 43, 43],
[200, 44, 41],
[201, 45, 42],
[203, 46, 41],
[204, 47, 42],
[204, 47, 40],
[205, 49, 40],
[205, 49, 38],
[206, 52, 38],
[207, 52, 36],
[208, 53, 37],
[209, 54, 36],
[210, 55, 36],
[210, 58, 35],
[211, 59, 34],
[212, 60, 33],
[213, 60, 33],
[214, 61, 33],
[213, 62, 31],
[215, 64, 33],
[215, 64, 31],
[216, 66, 30],
[218, 66, 30],
[218, 66, 30],
[218, 68, 29],
[219, 70, 28],
[220, 69, 28],
[221, 72, 26],
[223, 73, 26],
[222, 74, 24],
[223, 75, 25],
[224, 76, 24],
[225, 78, 23],
[225, 78, 22],
[226, 79, 23],
[227, 80, 22],
[227, 81, 20],
[228, 82, 21],
[229, 83, 20],
[230, 83, 18],
[231, 86, 19],
[231, 86, 17],
[232, 87, 16],
[233, 88, 17],
[234, 90, 16],
[235, 91, 14],
[235, 91, 14],
[236, 93, 13],
[237, 94, 12],
[236, 96, 13],
[237, 97, 13],
[237, 99, 14],
[237, 101, 13],
[236, 103, 12],
[236, 105, 13],
[237, 106, 12],
[236, 108, 11],
[236, 112, 12],
[237, 113, 13],
[236, 115, 12],
[236, 117, 13],
[235, 119, 12],
[236, 122, 12],
[237, 123, 13],
[237, 125, 13],
[236, 127, 12],
[236, 129, 13],
[237, 130, 12],
[236, 132, 13],
[236, 134, 12],
[237, 135, 12],
[237, 137, 13],
[237, 142, 12],
[236, 144, 13],
[236, 146, 12],
[237, 147, 13],
[237, 149, 12],
[236, 151, 13],
[237, 152, 13],
[237, 154, 12],
[236, 156, 13],
[236, 158, 12],
[236, 160, 11],
[235, 161, 12],
[236, 163, 12],
[236, 165, 13],
[235, 167, 12],
[236, 170, 12],
[236, 172, 11],
[235, 173, 12],
[236, 176, 12],
[235, 179, 12],
[236, 180, 13],
[236, 182, 12],
[236, 184, 11],
[237, 185, 12],
[236, 187, 11],
[236, 188, 12],
[235, 190, 12],
[236, 191, 13],
[235, 194, 12],
[236, 196, 11],
[235, 199, 11],
[236, 201, 11],
[235, 202, 12],
[235, 204, 14],
[236, 204, 19],
[236, 205, 23],
[236, 204, 27],
[235, 206, 30],
[236, 206, 34],
[236, 207, 37],
[236, 207, 41],
[235, 208, 45],
[236, 208, 48],
[236, 209, 52],
[236, 209, 56],
[235, 211, 65],
[236, 212, 68],
[235, 212, 71],
[236, 212, 74],
[234, 212, 78],
[235, 213, 82],
[235, 214, 87],
[236, 214, 91],
[236, 215, 94],
[235, 217, 97],
[235, 216, 100],
[235, 217, 105],
[235, 216, 108],
[234, 218, 111],
[235, 218, 116],
[235, 219, 122],
[235, 220, 127],
[236, 220, 131],
[235, 221, 134],
[235, 221, 138],
[235, 222, 142],
[235, 221, 146],
[234, 222, 148],
[235, 223, 153],
[235, 224, 157],
[235, 225, 160],
[236, 225, 165],
[234, 225, 168],
[235, 226, 171],
[235, 225, 175],
[236, 227, 182],
[235, 228, 187],
[234, 228, 190],
[234, 229, 195],
[235, 230, 197],
[236, 230, 202],
[234, 230, 205],
[235, 231, 208],
[235, 232, 213],
[235, 231, 216],
[234, 232, 219],
[234, 234, 224],
[235, 234, 228],
[235, 235, 235],
[16, 16, 16],
[17, 17, 17],
[18, 18, 18],
[19, 19, 19],
[20, 20, 20],
[21, 21, 21],
[22, 22, 22],
[23, 23, 23],
[24, 24, 24],
[25, 25, 25],
[26, 26, 26],
[27, 27, 27],
[28, 28, 28],
[29, 29, 29],
[30, 30, 30],
[31, 31, 31],
[32, 32, 32],
[33, 33, 33],
[34, 34, 34],
[35, 35, 35],
[36, 36, 36],
[37, 37, 37],
[38, 38, 38],
[39, 39, 39],
[40, 40, 40],
[41, 41, 41],
[42, 42, 42],
[43, 43, 43],
[44, 44, 44],
[44, 44, 44],
[45, 45, 45],
[46, 46, 46],
]
|
6fa79823498f51bb70dc80960d2cffa4234ec9e6
|
83544ef94ce2c1a05b6028ae2ce58ef8acfb6fa8
|
/pmca/platform/backend/senser.py
|
521f5a2e879c0f7c6d479633d8733d0231c3e12d
|
[
"MIT"
] |
permissive
|
ma1co/Sony-PMCA-RE
|
9ae44c5b09580d62e860c3acff24bd1fac28a31e
|
a82f5baaa8e9c3d9f28f94699e860fb2e48cc8e0
|
refs/heads/master
| 2023-08-07T07:54:13.763912
| 2022-08-18T12:46:04
| 2022-08-18T12:46:04
| 35,510,548
| 1,788
| 228
|
MIT
| 2022-11-05T06:45:01
| 2015-05-12T20:18:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,554
|
py
|
senser.py
|
import select
from . import *
class SenserPlatformBackend(ShellPlatformBackend, FilePlatformBackend, MemoryPlatformBackend, BackupPlatformBackend):
def __init__(self, dev):
self.dev = dev
def start(self):
self.dev.readHasp()
def interactiveShell(self, conn):
self.dev.setTerminalEnable(False)
self.dev.setTerminalEnable(True)
print('Terminal activated. Press <CTRL+C> to exit.')
self.transferSenserTerminal(conn)
self.dev.setTerminalEnable(False)
def writeFile(self, path, f):
self.dev.writeFile(path, f.read())
def readFile(self, path, f, sizeCb=None):
self.dev.readFile(path, f)
def readMemory(self, offset, size, f):
self.dev.readMemory(offset, size, f)
def readBackup(self, id):
return self.dev.readBackup(id)
def writeBackup(self, id, data):
self.dev.writeBackup(id, data)
def syncBackup(self):
self.dev.saveBackup(0)
def getBackupStatus(self):
return self.dev.getBackupPresetDataStatus()
def getBackupData(self):
return self.dev.getBackupPresetData(True)
def setBackupData(self, data):
self.dev.setBackupPresetData(2, data)
def setBackupProtection(self, enable):
self.dev.setBackupId1(enable)
def transferSenserTerminal(self, conn):
if not conn:
return
try:
while True:
ready = select.select([conn], [conn], [], 0)
if ready[1]:
d = self.dev.dev.readTerminal()
conn.sendall(d)
if ready[0]:
d = conn.recv(0x40)
if d == b'':
break
self.dev.dev.writeTerminal(d)
except (ConnectionError, KeyboardInterrupt):
pass
conn.close()
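The ``transferSenserTerminal`` pump above uses ``select.select`` to learn when the connection is readable or writable; note that the timeout of 0 makes it busy-poll, so a small positive timeout would trade a little latency for much less CPU. A minimal self-contained sketch of the same readiness check against a local socket pair (a stand-in for the proxied connection):

import select
import socket

a, b = socket.socketpair()  # stand-in for the client connection
b.sendall(b'hello')
readable, writable, _ = select.select([a], [a], [], 1.0)
if readable:
    print(a.recv(64))  # b'hello'
if writable:
    a.sendall(b'ack')  # safe to write without blocking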
|
3cd15a76348e4a7d05e90aaa321490c0a15a3c2f
|
1e2d71aeb7ae51108e93f339ea9d5bac9d6866b5
|
/algo-dev-demo/scikit-learn-demo/train.py
|
89b3cf12113e44444f445c9d56b6dc2eaaf82da5
|
[] |
no_license
|
algorithmiaio/sample-apps
|
04ed3593175984f5f64982197ba969aba1f64f80
|
a5c90698c09c61febcd03d922d47dc437a1f4cdc
|
refs/heads/develop
| 2023-07-26T20:38:44.263881
| 2021-05-06T17:56:10
| 2021-05-06T17:56:10
| 51,100,969
| 212
| 141
| null | 2023-07-06T22:20:07
| 2016-02-04T19:49:13
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 363
|
py
|
train.py
|
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from joblib import dump
data = load_boston()
X_train, X_test, y_train, y_test = train_test_split(data.data, data.target)
clf = LinearRegression()
clf.fit(X_train, y_train)
dump(clf, "data/boston-regression.joblib")
|
818d8f2af7442a9e80a9f5960b37c07047fa5e5f
|
f2c773e7ccdd60caf5a7c062305cfcd14d11beec
|
/AR_Scripts_1.74/Animation/AR_BakePLA.py
|
0fd6ecaceee579723357416ee025378404ec06d2
|
[] |
no_license
|
aturtur/cinema4d-scripts
|
4ccfbc3403326a79076d9bcf001189cd5427f46a
|
a87fc6c835db5d205f8428cc67ccd30fdd4b4d4b
|
refs/heads/master
| 2023-07-03T13:34:58.735879
| 2023-06-19T09:57:22
| 2023-06-19T09:57:22
| 63,731,563
| 316
| 49
| null | 2022-04-24T02:31:17
| 2016-07-19T22:15:05
|
Python
|
UTF-8
|
Python
| false
| false
| 13,879
|
py
|
AR_BakePLA.py
|
"""
AR_BakePLA
Author: Arttu Rautio (aturtur)
Website: http://aturtur.com/
Name-US: AR_BakePLA
Version: 1.1.0
Description-US: Quickly bakes an object to Point Level Animation (PLA)
To bake splines, bake them first to alembic and then use this script to bake the alembic file to PLA.
It's important that 'Intermediate Points' is set to 'Uniform'!
Written for Maxon Cinema 4D 2023.1.0
Python version 3.9.1
Change log:
1.1.0 (18.11.2022) - Parallel processing, bakes multiple cameras in one go. Progress bar
1.0.3 (05.03.2022) - Removed User Data Xpresso Object Node hack
1.0.2 (10.10.2021) - Updated to R25
1.0.1 (27.10.2020) - Fixed setTime bug
"""
# Libraries
import c4d
from c4d import utils as u
# Global variables
suffix = "_baked"
# Functions
def MakeEditable(op):
    if op is None: # Nothing to convert
        return None
    if op.GetType() in [5100, 5101]: # Already a polygon or spline object
        return op.GetClone()
    clone = op.GetClone() # Get clone
    clone.SetMg(op.GetMg()) # Set global matrix
    doc.InsertObject(clone) # Insert clone to document
    bc = c4d.BaseContainer() # Initialize Base Container
    makeEditable = c4d.MCOMMAND_MAKEEDITABLE # Mcommand 'Make Editable'
    res = c4d.utils.SendModelingCommand(makeEditable, [clone], 0, bc, doc) # Make editable
    if res:
        return res[0] # Return the now-editable object
    return None # Command failed
def DisableDynamics(objects):
for obj in objects: # Iterate through objects
theObj = obj[2] # Baked object
tags = theObj.GetTags() # Get objects tags
for t in tags: # Iterate through tags
if t.GetType() == 180000102: # If dynamics tag
t[c4d.RIGID_BODY_ENABLED] = False # Disable dynamics
if t.GetType() == 100004020: # If cloth tag
t[c4d.CLOTH_USE] = False # Disable cloth
if t.GetType() == 1018068: # If spline dynamics tag
t[c4d.EXPRESSION_ENABLE] = False # Disable spline dynamics
def DummyObject(obj, doc):
dummyObject = MakeEditable(obj) # Get clone from original object
"""
if dummyObject.GetType() == 5101: # If spline object
rpc = len(dummyObject.GetAllPoints())
dummyObject.ResizeObject(rpc, -1) # Set new point count
"""
RemoveTags(dummyObject) # Remove tags of the object
# Clean
if dummyObject.GetCTracks() != None:
for cTrack in dummyObject.GetCTracks(): cTrack.Remove() # Remove unnecessary tracks
ResetPSR(dummyObject) # Reset PSR
children = dummyObject.GetChildren() # Remove children
for c in children:
c.Remove()
dummyObject.SetName("Dummy "+obj.GetName()) # Set name
doc.InsertObject(dummyObject) # Insert dummyObject to document
MoveToLast(dummyObject, doc) # Move new Object in the object hierarchy
xpressoTag = c4d.BaseTag(c4d.Texpresso) # Initialize a xpresso tag
dummyObject.InsertTag(xpressoTag)
prioritydata = c4d.PriorityData() # Initialize a priority data
prioritydata.SetPriorityValue(c4d.PRIORITYVALUE_MODE, c4d.CYCLE_GENERATORS) # Set priority to 'Generators'
prioritydata.SetPriorityValue(c4d.PRIORITYVALUE_PRIORITY, 449) # Set priority value to last possible value
prioritydata.SetPriorityValue(c4d.PRIORITYVALUE_CAMERADEPENDENT, False) # Set Object dependent to false
xpressoTag[c4d.EXPRESSION_PRIORITY] = prioritydata # Set priority data
#link1 = CreateUserDataLink(dummyObject, "Source", obj) # Create user data link
#link2 = CreateUserDataLink(dummyObject, "Dummy", dummyObject) # Create user data link
nodemaster = xpressoTag.GetNodeMaster() # Get node master
# Create nodes
objectNodeA = nodemaster.CreateNode(nodemaster.GetRoot(), 400001000, None, x=0, y=100) # Create object node
objectNodeB = nodemaster.CreateNode(nodemaster.GetRoot(), 400001000, None, x=300, y=0) # Create 2nd object node
pythonNode = nodemaster.CreateNode(nodemaster.GetRoot(), 1022471, None, x=100, y=250) # Create python node
pointNodeA = nodemaster.CreateNode(nodemaster.GetRoot(), 400001112, None, x=300, y=150) # Create point node
pointNodeB = nodemaster.CreateNode(nodemaster.GetRoot(), 400001112, None, x=600, y=150) # Create 2nd point node
iterationNode = nodemaster.CreateNode(nodemaster.GetRoot(), 400001131, None, x=350, y=300) # Create iteration node
# Modify python node
pythonNode.RemoveUnusedPorts() # Remove default ports
pyInPort = pythonNode.AddPort(c4d.GV_PORT_INPUT, 4013, c4d.GV_PORT_FLAG_IS_VISIBLE) # Add input link port
pyInPort.SetName("Input1") # Set port's name
pyOutPortA = pythonNode.AddPort(c4d.GV_PORT_OUTPUT, 4012, c4d.GV_PORT_FLAG_IS_VISIBLE) # Add output link port
pyOutPortA.SetName("Output1") # Set port's name
pyOutPortB = pythonNode.AddPort(c4d.GV_PORT_OUTPUT, 4000, c4d.GV_PORT_FLAG_IS_VISIBLE) # Add output integer port
pyOutPortB.SetName("Output2") # Set port's name
pythonNode[c4d.GV_PYTHON_CODE] = ("import c4d\n"
"def main():\n"
"\tglobal Output1\n"
"\tglobal Output2\n"
"\tif Input1.GetType() not in [5100, 5101]:\n"
"\t\tcache = Input1.GetCache()\n"
"\t\tpntCnt = len(cache.GetAllPoints())\n"
"\telif Input1.GetType() == 5100:\n" # Polygon object
"\t\tcache = Input1\n"
"\t\tpntCnt = len(cache.GetAllPoints())\n"
"\telif Input1.GetType() == 5101:\n" # Spline object
"\t\tcache = Input1.GetRealSpline()\n"
"\t\tpntCnt = len(cache.GetAllPoints())\n"
"\tOutput1 = cache\n"
"\tOutput2 = int(pntCnt)-1") # Python node's code
# Modify object node A
objectNodeA[c4d.GV_OBJECT_OBJECT_ID] = obj
objPortA = objectNodeA.AddPort(c4d.GV_PORT_OUTPUT, 40000002, c4d.GV_PORT_FLAG_IS_VISIBLE)
#objectNodeA[c4d.GV_OBJECT_OBJECT_ID] = dummyObject
#objPortA = objectNodeA.AddPort(c4d.GV_PORT_OUTPUT, # Add 'user data link' output port to node
#c4d.DescID(c4d.DescLevel(c4d.ID_USERDATA, c4d.DTYPE_SUBCONTAINER, 0),c4d.DescLevel(1)), message=True)
# Modify object node B
objectNodeB[c4d.GV_OBJECT_OBJECT_ID] = dummyObject
objPortB = objectNodeB.AddPort(c4d.GV_PORT_OUTPUT, 40000002, c4d.GV_PORT_FLAG_IS_VISIBLE)
#objectNodeB[c4d.GV_OBJECT_OBJECT_ID] = dummyObject
#objPortB = objectNodeB.AddPort(c4d.GV_PORT_OUTPUT, # Add 'user data link' output port to node
#c4d.DescID(c4d.DescLevel(c4d.ID_USERDATA, c4d.DTYPE_SUBCONTAINER, 0),c4d.DescLevel(2)), message=True)
# Modify point nodes
pointNodeA[c4d.GV_POINT_USE_DEFORMED] = True
pointNodeB[c4d.GV_POINT_USE_DEFORMED] = True
pointNodeB.AddPort(c4d.GV_PORT_INPUT, 2002, c4d.GV_PORT_FLAG_IS_VISIBLE) # Add input point position port
# Connecting ports
objPortA.Connect(pyInPort)
objPortB.Connect(pointNodeB.GetInPort(0))
pyOutPortA.Connect(pointNodeA.GetInPort(0))
pyOutPortB.Connect(iterationNode.GetInPort(1))
iterationNode.GetOutPort(0).Connect(pointNodeA.GetInPort(1))
iterationNode.GetOutPort(0).Connect(pointNodeB.GetInPort(1))
pointNodeA.GetOutPort(1).Connect(pointNodeB.GetInPort(2))
c4d.modules.graphview.RedrawMaster(nodemaster) # Refresh xpresso
return dummyObject
def MoveToLast(obj, doc):
items = doc.GetObjects() # Get top level items from the document
last = items[-1] # The Last item in the hierarchy
obj.InsertAfter(last) # Move object after the last item
def MoveToFirst(obj, doc):
items = doc.GetObjects() # Get top level items from the document
first = items[0] # The first item in the hierarchy
obj.InsertBefore(first) # Move object before the first item
def CopyTags(objects):
for obj in objects: # Iterate through objects
source = obj[0] # Source object
target = obj[2] # Bake object
        hiddenTags = [c4d.PointTag, c4d.PolygonTag] # Tag types that you don't want to delete
tags = source.GetTags() # Get objects tags
for t in reversed(tags): # Iterate through tags
if type(t) not in hiddenTags:
d = t.GetClone() # Duplicate the tag
target.InsertTag(d) # Copy tag
def RemoveDummys(objects):
for obj in objects: # Iterate through objects
dummy = obj[1] # Dummy object
dummy.Remove() # Delete dummy object
def ResetPSR(op):
op[c4d.ID_BASEOBJECT_REL_POSITION,c4d.VECTOR_X] = 0
op[c4d.ID_BASEOBJECT_REL_POSITION,c4d.VECTOR_Y] = 0
op[c4d.ID_BASEOBJECT_REL_POSITION,c4d.VECTOR_Z] = 0
op[c4d.ID_BASEOBJECT_REL_SCALE,c4d.VECTOR_X] = 1
op[c4d.ID_BASEOBJECT_REL_SCALE,c4d.VECTOR_Y] = 1
op[c4d.ID_BASEOBJECT_REL_SCALE,c4d.VECTOR_Z] = 1
op[c4d.ID_BASEOBJECT_REL_ROTATION,c4d.VECTOR_X] = 0
op[c4d.ID_BASEOBJECT_REL_ROTATION,c4d.VECTOR_Y] = 0
op[c4d.ID_BASEOBJECT_REL_ROTATION,c4d.VECTOR_Z] = 0
def CreateUserDataLink(obj, name, link, parentGroup=None, shortname=None):
""" Create user data link """
if obj is None: return False # If there is no object stop the function
if shortname is None: shortname = name # Short name is name
bc = c4d.GetCustomDatatypeDefault(c4d.DTYPE_BASELISTLINK) # Initialize user data
bc[c4d.DESC_NAME] = name # Set user data name
bc[c4d.DESC_SHORT_NAME] = shortname # Set userdata short name
bc[c4d.DESC_DEFAULT] = link # Set default value
bc[c4d.DESC_ANIMATE] = c4d.DESC_ANIMATE_OFF # Disable animation option
bc[c4d.DESC_SHADERLINKFLAG] = True
if parentGroup is not None: # If there is parent group
bc[c4d.DESC_PARENTGROUP] = parentGroup # Set parent group
element = obj.AddUserData(bc) # Add user data
obj[element] = link # Set user data value
return element # Return user data field
def SetCurrentFrame(frame, doc):
""" Changes editor's current frame to """
doc.SetTime(c4d.BaseTime(frame,doc.GetFps())) # Set current time to given frame
doc.ExecutePasses(None, True, True, True, 0) # Animate the current frame of the document
c4d.GeSyncMessage(c4d.EVMSG_TIMECHANGED) # Send a synchronous event message that time has changed
return
def RemoveTags(obj):
""" Removes tags of the object """
    hiddenTags = [c4d.PointTag, c4d.PolygonTag] # Tag types that you don't want to delete
tags = obj.GetTags() # Get tags
for t in tags: # Iterate through tags
if type(t) not in hiddenTags: # If not protected tag type
t.Remove() # Remove tag
def Bake(objects):
""" Bake function """
doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document
fps = doc.GetFps() # Get Frame Rate
startFrame = doc.GetLoopMinTime().GetFrame(fps) # Get first frame of Preview Range
endFrame = doc.GetLoopMaxTime().GetFrame(fps) # Get last frame of Preview Range
desc = c4d.DescID(c4d.DescLevel(c4d.CTpla, c4d.CTpla, 0))
for i in range(startFrame, endFrame+1): # Iterate through Preview Range
#
progress = u.RangeMap(i, 0, endFrame+1, 0, 100, True)
c4d.StatusSetText("Baking frame %s of %s" % (i,endFrame+1))
c4d.StatusSetBar(progress)
#c4d.DrawViews(c4d.DRAWFLAGS_ONLY_ACTIVE_VIEW|c4d.DRAWFLAGS_NO_THREAD|c4d.DRAWFLAGS_STATICBREAK) # Updates the viewport during the script runs -> slows down potential baking speed a lot!
#
for obj in objects: # Iterate through objects
source = obj[1] # Dummy object
target = obj[2] # Bake object
PLAtrack = target.FindCTrack(desc) # Try to find CTrack
if not PLAtrack: # If CTrack does not exists
PLAtrack = c4d.CTrack(target, desc) # Initialize a PLA track
target.InsertTrackSorted(PLAtrack) # Insert PLA track to the object
curve = PLAtrack.GetCurve() # Get Curve of the CTrack
SetCurrentFrame(i, doc) # Set current frame
frame = doc.GetTime().GetFrame(fps) # Get current frame
points = source.GetAllPoints()
currentTime = c4d.BaseTime(frame, fps) # Get current time
key = curve.AddKey(currentTime)["key"]
target.SetAllPoints(points)
target.Message(c4d.MSG_UPDATE)
PLAtrack.FillKey(doc, target, key)
def main():
""" The first function to run """
doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document
selected = doc.GetActiveObjects(0) # Get selected objects
doc.StartUndo() # Start recording undos
#bakedObjects = [] # Initialize a list for collecting baked objects
objects = [] # Initialize a list for objects
for s in selected: # Iterate through objects
dummyObj = DummyObject(s, doc) # Dummy object
bakeObj = dummyObj.GetClone() # Bake object
name = s.GetName() # Get object's name
bakeObj.SetName(name+suffix) # Set baked object's name
bakeObj.InsertAfter(dummyObj) # Insert object to document
doc.AddUndo(c4d.UNDOTYPE_NEW, bakeObj) # Add undo command for creating a new object
doc.ExecutePasses(None, True, True, True, 0) # Animate the current frame of the document
RemoveTags(bakeObj) # Remove tags of the object
objects.append([s, dummyObj, bakeObj]) # Put object array to objects list
Bake(objects) # Bake the object
CopyTags(objects) # Restore tags
DisableDynamics(objects) # Disable dynamics tags
RemoveDummys(objects) # Remove dummy objects
for x in reversed(objects):
MoveToFirst(x[2], doc) # Sort
doc.EndUndo() # Stop recording undos
c4d.EventAdd() # Refresh Cinema 4D
c4d.StatusClear() # Clear status
# Execute main()
if __name__=='__main__':
main()
|
16e6023e0a0c7f9182dcefb9d0538476a941645c
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/data/tests/test_size_estimation.py
|
660c1e97738c73e7d965a95b23fd0c87f3c23877
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 7,129
|
py
|
test_size_estimation.py
|
import os
import uuid
import pytest
import ray
from ray.data._internal.arrow_block import ArrowBlockBuilder
from ray.tests.conftest import * # noqa
SMALL_VALUE = "a" * 100
LARGE_VALUE = "a" * 10000
ARROW_SMALL_VALUE = {"value": "a" * 100}
ARROW_LARGE_VALUE = {"value": "a" * 10000}
def assert_close(actual, expected, tolerance=0.3):
print("assert_close", actual, expected)
assert abs(actual - expected) / expected < tolerance, (actual, expected)
def test_arrow_size(ray_start_regular_shared):
b = ArrowBlockBuilder()
assert b.get_estimated_memory_usage() == 0
b.add(ARROW_SMALL_VALUE)
assert_close(b.get_estimated_memory_usage(), 118)
b.add(ARROW_SMALL_VALUE)
assert_close(b.get_estimated_memory_usage(), 236)
for _ in range(8):
b.add(ARROW_SMALL_VALUE)
assert_close(b.get_estimated_memory_usage(), 1180)
for _ in range(90):
b.add(ARROW_SMALL_VALUE)
assert_close(b.get_estimated_memory_usage(), 11800)
for _ in range(900):
b.add(ARROW_SMALL_VALUE)
assert_close(b.get_estimated_memory_usage(), 118000)
assert b.build().num_rows == 1000
def test_arrow_size_diff_values(ray_start_regular_shared):
b = ArrowBlockBuilder()
assert b.get_estimated_memory_usage() == 0
b.add(ARROW_LARGE_VALUE)
assert b._num_compactions == 0
assert_close(b.get_estimated_memory_usage(), 10019)
b.add(ARROW_LARGE_VALUE)
assert b._num_compactions == 0
assert_close(b.get_estimated_memory_usage(), 20038)
for _ in range(10):
b.add(ARROW_SMALL_VALUE)
assert_close(b.get_estimated_memory_usage(), 25178)
for _ in range(100):
b.add(ARROW_SMALL_VALUE)
assert b._num_compactions == 0
assert_close(b.get_estimated_memory_usage(), 35394)
for _ in range(13000):
b.add(ARROW_LARGE_VALUE)
assert_close(b.get_estimated_memory_usage(), 130131680)
assert b._num_compactions == 2
for _ in range(4000):
b.add(ARROW_LARGE_VALUE)
assert_close(b.get_estimated_memory_usage(), 170129189)
assert b._num_compactions == 3
assert b.build().num_rows == 17112
def test_arrow_size_add_block(ray_start_regular_shared):
b = ArrowBlockBuilder()
for _ in range(2000):
b.add(ARROW_LARGE_VALUE)
block = b.build()
b2 = ArrowBlockBuilder()
for _ in range(5):
b2.add_block(block)
assert b2._num_compactions == 0
assert_close(b2.get_estimated_memory_usage(), 100040020)
assert b2.build().num_rows == 10000
def test_split_read_csv(ray_start_regular_shared, tmp_path):
ctx = ray.data.context.DataContext.get_current()
def gen(name):
path = os.path.join(tmp_path, name)
ray.data.range(1000, parallelism=1).map(
lambda _: {"out": LARGE_VALUE}
).write_csv(path)
return ray.data.read_csv(path, parallelism=1)
# 20MiB
ctx.target_max_block_size = 20_000_000
ds1 = gen("out1")
assert ds1._block_num_rows() == [1000]
# 3MiB
ctx.target_max_block_size = 3_000_000
ds2 = gen("out2")
nrow = ds2._block_num_rows()
assert 3 < len(nrow) < 5, nrow
for x in nrow[:-1]:
assert 200 < x < 400, (x, nrow)
# 1MiB
ctx.target_max_block_size = 1_000_000
ds3 = gen("out3")
nrow = ds3._block_num_rows()
assert 8 < len(nrow) < 12, nrow
for x in nrow[:-1]:
assert 80 < x < 120, (x, nrow)
# Disabled.
# Setting infinite block size effectively disables block splitting.
ctx.target_max_block_size = float("inf")
ds4 = gen("out4")
assert ds4._block_num_rows() == [1000]
def test_split_read_parquet(ray_start_regular_shared, tmp_path):
ctx = ray.data.context.DataContext.get_current()
def gen(name):
path = os.path.join(tmp_path, name)
ds = (
ray.data.range(200000, parallelism=1)
.map(lambda _: {"out": uuid.uuid4().hex})
.materialize()
)
# Fully execute the operations prior to write, because with
# parallelism=1, there is only one task; so the write operator
# will only write to one file, even though there are multiple
# blocks created by block splitting.
ds.write_parquet(path)
return ray.data.read_parquet(path, parallelism=1)
# 20MiB
ctx.target_max_block_size = 20_000_000
ds1 = gen("out1")
assert ds1._block_num_rows() == [200000]
# 3MiB
ctx.target_max_block_size = 3_000_000
ds2 = gen("out2")
nrow = ds2._block_num_rows()
assert 2 < len(nrow) < 5, nrow
for x in nrow[:-1]:
assert 50000 < x < 95000, (x, nrow)
# 1MiB
ctx.target_max_block_size = 1_000_000
ds3 = gen("out3")
nrow = ds3._block_num_rows()
assert 6 < len(nrow) < 12, nrow
for x in nrow[:-1]:
assert 20000 < x < 35000, (x, nrow)
@pytest.mark.parametrize("use_actors", [False, True])
def test_split_map(shutdown_only, use_actors):
ray.shutdown()
ray.init(num_cpus=3)
kwargs = {}
if use_actors:
kwargs = {"compute": ray.data.ActorPoolStrategy()}
# Arrow block
ctx = ray.data.context.DataContext.get_current()
    ctx.target_max_block_size = 20_000_000
ds2 = ray.data.range(1000, parallelism=1).map(lambda _: ARROW_LARGE_VALUE, **kwargs)
nblocks = len(ds2.map(lambda x: x, **kwargs).get_internal_block_refs())
assert nblocks == 1, nblocks
ctx.target_max_block_size = 2_000_000
nblocks = len(ds2.map(lambda x: x, **kwargs).get_internal_block_refs())
assert 4 < nblocks < 7 or use_actors, nblocks
# Disabled.
# Setting infinite block size effectively disables block splitting.
ctx.target_max_block_size = float("inf")
ds3 = ray.data.range(1000, parallelism=1).map(lambda _: ARROW_LARGE_VALUE, **kwargs)
nblocks = len(ds3.map(lambda x: x, **kwargs).get_internal_block_refs())
assert nblocks == 1, nblocks
def test_split_flat_map(ray_start_regular_shared):
ctx = ray.data.context.DataContext.get_current()
    ctx.target_max_block_size = 20_000_000
    # Arrow block
ds2 = ray.data.range(1000, parallelism=1).map(lambda _: ARROW_LARGE_VALUE)
nblocks = len(ds2.flat_map(lambda x: [x]).get_internal_block_refs())
assert nblocks == 1, nblocks
ctx.target_max_block_size = 2_000_000
nblocks = len(ds2.flat_map(lambda x: [x]).get_internal_block_refs())
assert 4 < nblocks < 7, nblocks
def test_split_map_batches(ray_start_regular_shared):
ctx = ray.data.context.DataContext.get_current()
    ctx.target_max_block_size = 20_000_000
    # Arrow block
ds2 = ray.data.range(1000, parallelism=1).map(lambda _: ARROW_LARGE_VALUE)
nblocks = len(ds2.map_batches(lambda x: x, batch_size=1).get_internal_block_refs())
assert nblocks == 1, nblocks
ctx.target_max_block_size = 2_000_000
nblocks = len(ds2.map_batches(lambda x: x, batch_size=16).get_internal_block_refs())
assert 4 < nblocks < 7, nblocks
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
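All of these tests steer block splitting through ``DataContext.target_max_block_size``. A minimal usage sketch of the same knob outside the test harness (assumes a local Ray runtime of roughly the version these tests target; the row payload mirrors ``ARROW_LARGE_VALUE``):

import ray

ctx = ray.data.context.DataContext.get_current()
ctx.target_max_block_size = 1_000_000  # aim for ~1 MB blocks

ds = ray.data.range(1000, parallelism=1).map(lambda _: {"value": "a" * 10000})
# With ~10 MB of row data and a 1 MB target, expect on the order of 10 blocks.
print(len(ds.materialize().get_internal_block_refs()))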
|
b87c4119f05cbcbb71b1bbc5e52bd442f78ec1fe
|
634f578ec3016e8aad06a57c948377f63fc36b16
|
/python/ch13/solns/bratu_jacobian.py
|
aee43b9b673b4fc97ed1b93819aa086c9a03366e
|
[
"MIT"
] |
permissive
|
bueler/p4pdes
|
ecad37430c3a0bf350d786d7d0e284abdb446317
|
098e000a0eea4f96e074fa437752086a1ab15207
|
refs/heads/master
| 2023-07-31T17:30:43.546800
| 2023-07-18T21:45:11
| 2023-07-18T21:45:11
| 24,288,784
| 179
| 66
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,271
|
py
|
bratu_jacobian.py
|
from firedrake import *
from firedrake.petsc import PETSc
withJ = False  # optionally provide the Jacobian to the solver
mesh = UnitSquareMesh(10, 10)
V = FunctionSpace(mesh, "CG", 1)
x, y = SpatialCoordinate(mesh)
frhs = Function(V).interpolate(2.0*(x*(1.0-x) + y*(1.0-y)) + exp(x*(1.0-x)*y*(1.0-y)))
u = Function(V)
v = TestFunction(V)
F = (inner(grad(u), grad(v)) + exp(u) * v - frhs * v) * dx
bc = DirichletBC(V, Constant(0.0), (1, 2, 3, 4))
sp = {'snes_type': 'newtonls',
'snes_linesearch_type': 'basic',
'snes_monitor': None,
'ksp_type': 'preonly',
'pc_type': 'lu'}
if withJ:
# this just (partly) exposes what the internals do, generating a Jacobian
# by symbolic differentiation in UFL
w = TrialFunction(V)
J = derivative(F, u, du=w)
solve(F == 0, u, solver_parameters=sp, bcs=bc, J=J)
else:
solve(F == 0, u, solver_parameters=sp, bcs=bc)
uexact = Function(V).interpolate(x*(1.0-x)*y*(1.0-y)) # for comparison
u.rename('u (numerical soln)')
uexact.rename('uexact (exact soln)')
File("bratu.pvd").write(u,uexact)
diffu = Function(V).interpolate(u - uexact)
error_L2 = sqrt(assemble(dot(diffu, diffu) * dx))
PETSc.Sys.Print('done on mesh with %d nodes: |u-uexact|_2 = %.3e' \
% (len(u.dat.data), error_L2))
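For reference, the Jacobian that ``derivative(F, u, du=w)`` constructs symbolically is the Gateaux derivative of the residual; differentiating the ``exp(u)`` term is the only non-trivial step:

F(u; v) = \int_\Omega \left( \nabla u \cdot \nabla v + e^{u} v - f_{\mathrm{rhs}}\, v \right) dx,
\qquad
J(u; w, v) = \int_\Omega \left( \nabla w \cdot \nabla v + e^{u}\, w\, v \right) dx .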
|
2cba9d2d3bd6afa7e2f49dab388bd656f379446a
|
8fd5b4350fa83624f47d806ca4a60531ca99c973
|
/tools/update_build_version.py
|
c3fad9767749170fbce960097d2b3785b342fee7
|
[
"Apache-2.0"
] |
permissive
|
google/amber
|
7bde68c048ba2b136623b17c2156afbe27f2b2b8
|
6408a26c479042304ce523e3ff99a9302796f650
|
refs/heads/main
| 2023-08-26T09:06:11.238025
| 2023-06-27T17:39:00
| 2023-06-27T17:39:00
| 157,452,897
| 196
| 75
|
Apache-2.0
| 2023-09-02T12:32:42
| 2018-11-13T22:00:36
|
C++
|
UTF-8
|
Python
| false
| false
| 2,340
|
py
|
update_build_version.py
|
#!/usr/bin/env python
# Copyright 2018 The Amber Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generates build-versions.h in the src/ directory.
#
# Args: <output_dir> <amber-dir> <third_party-dir>
from __future__ import print_function
import datetime
import os.path
import re
import subprocess
import sys
import time
OUTFILE = 'src/build-versions.h'
def command_output(cmd, directory):
p = subprocess.Popen(cmd,
cwd=directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
raise RuntimeError('Failed to run {} in {}\nstdout: {}\nstderr: {}'.format(cmd, directory, stdout, stderr))
return stdout
def describe(directory):
if not os.path.exists(directory):
return "-"
return command_output(
['git', 'log', '-1', '--format=%h'], directory).rstrip().decode()
def get_version_string(project, directory):
name = project.upper().replace('-', '_')
return "#define {}_VERSION \"{}\"".format(name, describe(directory))
def main():
if len(sys.argv) != 4:
print('usage: {} <outdir> <amber-dir> <third_party>'.format(
sys.argv[0]))
sys.exit(1)
outdir = sys.argv[1]
srcdir = sys.argv[3]
projects = ['spirv-tools', 'spirv-headers', 'glslang', 'shaderc']
new_content = get_version_string('amber', sys.argv[2]) + "\n"
new_content = new_content + ''.join([
'{}\n'.format(get_version_string(p, os.path.join(srcdir, p)))
for p in projects
])
file = outdir + "/" + OUTFILE
if os.path.isfile(file):
with open(file, 'r') as f:
if new_content == f.read():
return
with open(file, 'w') as f:
f.write(new_content)
if __name__ == '__main__':
main()
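For illustration, the generated ``src/build-versions.h`` is a flat list of one ``#define`` per project; with only the amber checkout present, it would look roughly like this (the hash is a made-up placeholder, and missing directories collapse to ``-`` via ``describe``):

#define AMBER_VERSION "7bde68c"
#define SPIRV_TOOLS_VERSION "-"
#define SPIRV_HEADERS_VERSION "-"
#define GLSLANG_VERSION "-"
#define SHADERC_VERSION "-"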
|
37cbb41034769612584cec229129a74a4d46b474
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/python_modules/dagster/dagster/_core/storage/noop_compute_log_manager.py
|
686f75e418b058a71e616d5b4c1ec84d1f988f97
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
noop_compute_log_manager.py
|
from contextlib import contextmanager
from typing import IO, Any, Generator, Mapping, Optional, Sequence
from typing_extensions import Self
import dagster._check as check
from dagster._core.storage.captured_log_manager import (
CapturedLogContext,
CapturedLogData,
CapturedLogManager,
CapturedLogMetadata,
CapturedLogSubscription,
)
from dagster._serdes import ConfigurableClass, ConfigurableClassData
from .compute_log_manager import (
MAX_BYTES_FILE_READ,
ComputeIOType,
ComputeLogFileData,
ComputeLogManager,
)
class NoOpComputeLogManager(CapturedLogManager, ComputeLogManager, ConfigurableClass):
"""When enabled for a Dagster instance, stdout and stderr will not be available for any step."""
def __init__(self, inst_data: Optional[ConfigurableClassData] = None):
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {}
@classmethod
def from_config_value(
cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]
) -> Self:
return NoOpComputeLogManager(inst_data=inst_data, **config_value)
def enabled(self, _dagster_run, _step_key):
return False
def _watch_logs(self, dagster_run, step_key=None):
pass
def get_local_path(self, run_id: str, key: str, io_type: ComputeIOType) -> str:
raise NotImplementedError()
def is_watch_completed(self, run_id, key):
return True
def on_watch_start(self, dagster_run, step_key):
pass
def on_watch_finish(self, dagster_run, step_key):
pass
def download_url(self, run_id, key, io_type):
return None
def read_logs_file(self, run_id, key, io_type, cursor=0, max_bytes=MAX_BYTES_FILE_READ):
return ComputeLogFileData(
path=f"{key}.{io_type}", data=None, cursor=0, size=0, download_url=None
)
def on_subscribe(self, subscription):
pass
def on_unsubscribe(self, subscription):
pass
@contextmanager
def capture_logs(self, log_key: Sequence[str]) -> Generator[CapturedLogContext, None, None]:
yield CapturedLogContext(log_key=log_key)
def is_capture_complete(self, log_key: Sequence[str]):
return True
@contextmanager
def open_log_stream(
self, log_key: Sequence[str], io_type: ComputeIOType
) -> Generator[Optional[IO], None, None]:
yield None
def get_log_data(
self,
log_key: Sequence[str],
cursor: Optional[str] = None,
max_bytes: Optional[int] = None,
) -> CapturedLogData:
return CapturedLogData(log_key=log_key)
def get_log_metadata(self, log_key: Sequence[str]) -> CapturedLogMetadata:
return CapturedLogMetadata()
def delete_logs(
self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None
):
pass
def subscribe(
self, log_key: Sequence[str], cursor: Optional[str] = None
) -> CapturedLogSubscription:
return CapturedLogSubscription(self, log_key, cursor)
def unsubscribe(self, subscription: CapturedLogSubscription):
pass
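A minimal usage sketch of the no-op manager's contract; it takes no required config, so it can be constructed directly (the arguments passed below are placeholders, since every method ignores them):

from dagster._core.storage.noop_compute_log_manager import NoOpComputeLogManager

manager = NoOpComputeLogManager()
print(manager.enabled(None, None))             # False: capture is disabled
print(manager.is_capture_complete(["a_run"]))  # True: there is nothing to wait for
print(manager.download_url("run_id", "key", None))  # None: no logs to link to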
|
74de5c4e4b6c1bed0d288119a054eb6c995fee67
|
8358c8d86600703663eb8a8f30493c20704cf586
|
/xcube/webapi/viewer/context.py
|
036b66400aee51c8aebfe4cc461d4431e4c942f1
|
[
"MIT"
] |
permissive
|
dcs4cop/xcube
|
612ffeb416dfee4e6a32677a719eab1a26aee990
|
a5a4da14bdc2dba80e0dd7d89b221fb30d148b77
|
refs/heads/master
| 2023-08-17T06:36:57.207806
| 2023-08-08T15:16:09
| 2023-08-08T15:16:09
| 130,693,090
| 149
| 21
|
MIT
| 2023-09-14T07:38:55
| 2018-04-23T12:27:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,846
|
py
|
context.py
|
# The MIT License (MIT)
# Copyright (c) 2022 by the xcube team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from functools import cached_property
from typing import Mapping, Optional
import fsspec
from xcube.webapi.common.context import ResourcesContext
class ViewerContext(ResourcesContext):
"""Context for xcube Viewer API."""
@cached_property
def config_items(self) -> Optional[Mapping[str, bytes]]:
if self.config_path is None:
return None
return fsspec.get_mapper(self.config_path)
@cached_property
def config_path(self) -> Optional[str]:
if "Viewer" not in self.config:
return None
return self.get_config_path(
self.config["Viewer"].get("Configuration", {}),
"'Configuration' item of 'Viewer'"
)
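``fsspec.get_mapper`` turns any fsspec-style URL into a dict-like view whose values are raw bytes, which is why ``config_items`` can be typed as ``Mapping[str, bytes]``. A small standalone sketch (the path is illustrative and assumed to exist):

import fsspec

mapper = fsspec.get_mapper("file:///tmp/viewer-config")
# Keys are file names relative to the root; values are file contents as bytes.
for key in list(mapper)[:5]:
    print(key, len(mapper[key]), "bytes")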
|
7189ad0f267f2cc76c5bcaa7995cc6e7df7a59b2
|
e22eeb5256e17a96a98b3ff25433aec2d641cd2c
|
/openstack/tests/unit/network/v2/test_network.py
|
17ea946e5438d707c26dc33c91fcc872980f691b
|
[
"Apache-2.0"
] |
permissive
|
openstack/openstacksdk
|
b4b95fd7869653feea5a3b783e9a5c588235c039
|
d474eb84c605c429bb9cccb166cabbdd1654d73c
|
refs/heads/master
| 2023-09-03T22:50:03.398512
| 2023-07-27T14:09:35
| 2023-08-29T16:28:46
| 16,223,378
| 124
| 130
|
Apache-2.0
| 2023-09-06T02:52:47
| 2014-01-25T02:48:00
|
Python
|
UTF-8
|
Python
| false
| false
| 5,446
|
py
|
test_network.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network.v2 import network
from openstack.tests.unit import base
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'admin_state_up': True,
'availability_zone_hints': ['1', '2'],
'availability_zones': ['3'],
'created_at': '2016-03-09T12:14:57.233772',
'description': '4',
'dns_domain': '5',
'id': IDENTIFIER,
'ipv4_address_scope': '6',
'ipv6_address_scope': '7',
'is_default': False,
'mtu': 8,
'name': '9',
'port_security_enabled': True,
'project_id': '10',
'provider:network_type': '11',
'provider:physical_network': '12',
'provider:segmentation_id': '13',
'qos_policy_id': '14',
'revision_number': 15,
'router:external': True,
'segments': '16',
'shared': True,
'status': '17',
'subnets': ['18', '19'],
'updated_at': '2016-07-09T12:14:57.233772',
'vlan_transparent': False,
}
class TestNetwork(base.TestCase):
def test_basic(self):
sot = network.Network()
self.assertEqual('network', sot.resource_key)
self.assertEqual('networks', sot.resources_key)
self.assertEqual('/networks', sot.base_path)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_commit)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = network.Network(**EXAMPLE)
self.assertTrue(sot.is_admin_state_up)
self.assertEqual(
EXAMPLE['availability_zone_hints'], sot.availability_zone_hints
)
self.assertEqual(EXAMPLE['availability_zones'], sot.availability_zones)
self.assertEqual(EXAMPLE['created_at'], sot.created_at)
self.assertEqual(EXAMPLE['description'], sot.description)
self.assertEqual(EXAMPLE['dns_domain'], sot.dns_domain)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertEqual(
EXAMPLE['ipv4_address_scope'], sot.ipv4_address_scope_id
)
self.assertEqual(
EXAMPLE['ipv6_address_scope'], sot.ipv6_address_scope_id
)
self.assertFalse(sot.is_default)
self.assertEqual(EXAMPLE['mtu'], sot.mtu)
self.assertEqual(EXAMPLE['name'], sot.name)
self.assertTrue(sot.is_port_security_enabled)
self.assertEqual(EXAMPLE['project_id'], sot.project_id)
self.assertEqual(
EXAMPLE['provider:network_type'], sot.provider_network_type
)
self.assertEqual(
EXAMPLE['provider:physical_network'], sot.provider_physical_network
)
self.assertEqual(
EXAMPLE['provider:segmentation_id'], sot.provider_segmentation_id
)
self.assertEqual(EXAMPLE['qos_policy_id'], sot.qos_policy_id)
self.assertEqual(EXAMPLE['revision_number'], sot.revision_number)
self.assertTrue(sot.is_router_external)
self.assertEqual(EXAMPLE['segments'], sot.segments)
self.assertTrue(sot.is_shared)
self.assertEqual(EXAMPLE['status'], sot.status)
self.assertEqual(EXAMPLE['subnets'], sot.subnet_ids)
self.assertEqual(EXAMPLE['updated_at'], sot.updated_at)
self.assertEqual(EXAMPLE['vlan_transparent'], sot.is_vlan_transparent)
self.assertDictEqual(
{
'limit': 'limit',
'marker': 'marker',
'description': 'description',
'name': 'name',
'project_id': 'project_id',
'status': 'status',
'ipv4_address_scope_id': 'ipv4_address_scope',
'ipv6_address_scope_id': 'ipv6_address_scope',
'is_admin_state_up': 'admin_state_up',
'is_port_security_enabled': 'port_security_enabled',
'is_router_external': 'router:external',
'is_shared': 'shared',
'provider_network_type': 'provider:network_type',
'provider_physical_network': 'provider:physical_network',
'provider_segmentation_id': 'provider:segmentation_id',
'tags': 'tags',
'any_tags': 'tags-any',
'not_tags': 'not-tags',
'not_any_tags': 'not-tags-any',
},
sot._query_mapping._mapping,
)
class TestDHCPAgentHostingNetwork(base.TestCase):
def test_basic(self):
net = network.DHCPAgentHostingNetwork()
self.assertEqual('network', net.resource_key)
self.assertEqual('networks', net.resources_key)
self.assertEqual('/agents/%(agent_id)s/dhcp-networks', net.base_path)
self.assertEqual('dhcp-network', net.resource_name)
self.assertFalse(net.allow_create)
self.assertTrue(net.allow_fetch)
self.assertFalse(net.allow_commit)
self.assertFalse(net.allow_delete)
self.assertTrue(net.allow_list)
|
091ce0eb18224a558399b79975835be75937b260
|
0b134572e3ac3903ebb44df6d4138cbab9d3327c
|
/app/grandchallenge/algorithms/migrations/0013_auto_20210830_1128.py
|
de9a03a8b40c4edfab2a2a3443a8ca359aeffb36
|
[
"Apache-2.0"
] |
permissive
|
comic/grand-challenge.org
|
660de3bafaf8f4560317f1dfd9ae9585ec272896
|
dac25f93b395974b32ba2a8a5f9e19b84b49e09d
|
refs/heads/main
| 2023-09-01T15:57:14.790244
| 2023-08-31T14:23:04
| 2023-08-31T14:23:04
| 4,557,968
| 135
| 53
|
Apache-2.0
| 2023-09-14T13:41:03
| 2012-06-05T09:26:39
|
Python
|
UTF-8
|
Python
| false
| false
| 438
|
py
|
0013_auto_20210830_1128.py
|
# Generated by Django 3.1.13 on 2021-08-30 11:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("algorithms", "0012_auto_20210728_1019")]
operations = [
migrations.AlterModelOptions(
name="job",
options={
"ordering": ("created",),
"permissions": [("view_logs", "Can view the jobs logs")],
},
)
]
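``AlterModelOptions`` only rewrites migration state (the ordering plus the extra ``view_logs`` permission); the corresponding ``auth.Permission`` row is created by Django's ``post_migrate`` hook. Once migrated, the permission is checked like any other, for example (illustrative lookup inside a configured project):

from django.contrib.auth import get_user_model

user = get_user_model().objects.get(username="reviewer")  # illustrative user
if user.has_perm("algorithms.view_logs"):  # "app_label.codename"
    print("can view job logs")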
|
19fb881e6f1797708317ee4693df6cf74a252807
|
74dbaccf7da7038876aa3f0c48f7a6b909b9fba6
|
/models/Team.py
|
8ae36d826061bc290b9e1610dd906d2dd67f242e
|
[
"Apache-2.0"
] |
permissive
|
moloch--/RootTheBox
|
8308ac4e9c03cda28b399e526de02079853a1fb1
|
de44dd6ef86dd5b97524d0e438d0441922099930
|
refs/heads/master
| 2023-08-28T04:58:48.197975
| 2023-07-17T00:20:50
| 2023-07-17T00:20:50
| 3,685,111
| 804
| 327
|
Apache-2.0
| 2023-06-22T01:40:27
| 2012-03-11T06:30:48
|
Python
|
UTF-8
|
Python
| false
| false
| 12,342
|
py
|
Team.py
|
# -*- coding: utf-8 -*-
"""
Created on Mar 12, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=no-member
import xml.etree.cElementTree as ET
import os
import imghdr
import io
from uuid import uuid4
from datetime import datetime
from sqlalchemy import Column, desc
from sqlalchemy.orm import relationship, backref
from sqlalchemy.types import Integer, Unicode, String
from models import dbsession
from models.BaseModels import DatabaseObject
from models.User import User
from models.GameHistory import GameHistory
from models.Relationships import (
team_to_box,
team_to_item,
team_to_flag,
team_to_game_level,
team_to_source_code,
team_to_hint,
)
from libs.BotManager import BotManager
from libs.XSSImageCheck import is_xss_image, get_new_avatar
from libs.XSSImageCheck import MAX_AVATAR_SIZE, MIN_AVATAR_SIZE, IMG_FORMATS
from libs.ValidationError import ValidationError
from tornado.options import options
from PIL import Image
from resizeimage import resizeimage
from random import randint
from libs.StringCoding import encode
from builtins import str

class Team(DatabaseObject):
"""Team definition"""
uuid = Column(String(36), unique=True, nullable=False, default=lambda: str(uuid4()))
_name = Column(Unicode(24), unique=True, nullable=False)
_motto = Column(Unicode(32))
_avatar = Column(String(64))
_notes = Column(Unicode(512))
_code = Column(
"code", String(16), unique=True, default=lambda: str(uuid4().hex)[:16]
)
files = relationship(
"FileUpload",
backref=backref("team", lazy="select"),
cascade="all,delete,delete-orphan",
)
pastes = relationship(
"PasteBin",
backref=backref("team", lazy="select"),
cascade="all,delete,delete-orphan",
)
money = Column(Integer, default=options.starting_team_money, nullable=False)
members = relationship(
"User",
backref=backref("team", lazy="select"),
cascade="all,delete,delete-orphan",
)
flags = relationship(
"Flag", secondary=team_to_flag, backref=backref("team", lazy="select")
)
boxes = relationship(
"Box", secondary=team_to_box, back_populates="teams", lazy="select"
)
items = relationship(
"MarketItem", secondary=team_to_item, backref=backref("team", lazy="joined")
)
purchased_source_code = relationship(
"SourceCode",
secondary=team_to_source_code,
backref=backref("team", lazy="select"),
)
hints = relationship(
"Hint", secondary=team_to_hint, backref=backref("team", lazy="select")
)
game_levels = relationship(
"GameLevel", secondary=team_to_game_level, back_populates="teams", lazy="select"
)
game_history = relationship(
"GameHistory",
backref=backref("team", lazy="select"),
cascade="all,delete,delete-orphan",
)
@classmethod
def all(cls):
"""Returns a list of all objects in the database"""
return dbsession.query(cls).all()
@classmethod
def by_id(cls, _id):
"""Returns a the object with id of _id"""
return dbsession.query(cls).filter_by(id=_id).first()
@classmethod
def by_uuid(cls, _uuid):
"""Return and object based on a uuid"""
return dbsession.query(cls).filter_by(uuid=_uuid).first()
@classmethod
def by_name(cls, name):
"""Return the team object based on "team_name" """
return dbsession.query(cls).filter_by(_name=str(name)).first()
@classmethod
def by_code(cls, code):
"""Return the team object based on the _code"""
return dbsession.query(cls).filter_by(_code=code).first()
@classmethod
def ranks(cls):
"""Returns a list of unlocked objects in the database"""
ranked = []
for team in sorted(dbsession.query(cls).order_by(desc(cls.money)).all()):
if not team.locked:
ranked.append(team)
return ranked
@classmethod
def count(cls):
return dbsession.query(cls).count()
@property
def name(self):
return self._name
def get_score(self, _type):
if _type == "money":
return self.money
elif _type == "flag":
return len(self.flags)
elif _type == "hint":
return len(self.hints)
elif _type == "bot":
return self.bot_count
return 0
def set_score(self, _type, _money):
score_update = GameHistory(type=_type, value=_money)
self.game_history.append(score_update)
self.money = _money
def set_bot(self, botcount):
bot_update = GameHistory(type="bot_count", value=botcount)
self.game_history.append(bot_update)
def add_flag(self, flag):
self.flags.append(flag)
add_flag = GameHistory(type="flag_count", value=len(self.flags))
self.game_history.append(add_flag)
def get_history(self, _type=None):
history = []
for item in self.game_history:
if _type == "bots":
if item.type == "bot_count":
history.append(item.to_dict())
elif _type == "flags":
if item.type == "flag_count":
history.append(item.to_dict())
elif item.type != "flag_count" and item.type != "bot_count":
history.append(item.to_dict())
return history
@name.setter
def name(self, value):
if not 3 <= len(value) <= 24:
raise ValidationError("Team name must be 3 - 24 characters")
else:
self._name = str(value)
@property
def motto(self):
return self._motto
@motto.setter
def motto(self, value):
if 32 < len(value):
raise ValidationError("Motto must be less than 32 characters")
else:
self._motto = str(value)
@property
def notes(self):
if self._notes is None:
self._notes = ""
return self._notes
@notes.setter
def notes(self, new_notes):
if len(new_notes) > 512:
raise ValidationError("Notes must be 0 - 512 characters")
self._notes = str(new_notes)
@property
def code(self):
return self._code
@property
def locked(self):
# Hides team from scoreboard if all users are locked or no users
if len(self.members) > 0:
for user in self.members:
if not user.locked:
return False
return True
@property
def avatar(self):
if self._avatar is not None:
return self._avatar
else:
if options.teams:
avatar = get_new_avatar("team")
else:
avatar = get_new_avatar("user", True)
if not avatar.startswith("default_"):
self._avatar = avatar
dbsession.add(self)
dbsession.commit()
return avatar
@avatar.setter
def avatar(self, image_data):
if MIN_AVATAR_SIZE < len(image_data) < MAX_AVATAR_SIZE:
ext = imghdr.what("", h=image_data)
if ext in IMG_FORMATS and not is_xss_image(image_data):
try:
if self._avatar is not None and os.path.exists(
options.avatar_dir + "/upload/" + self._avatar
):
os.unlink(options.avatar_dir + "/upload/" + self._avatar)
file_path = str(
options.avatar_dir + "/upload/" + self.uuid + "." + ext
)
image = Image.open(io.BytesIO(image_data))
cover = resizeimage.resize_cover(image, [500, 250])
cover.save(file_path, image.format)
self._avatar = "upload/" + self.uuid + "." + ext
except Exception as e:
raise ValidationError(e)
else:
raise ValidationError(
"Invalid image format, avatar must be: %s"
% (", ".join(IMG_FORMATS))
)
else:
raise ValidationError(
"The image is too large must be %d - %d bytes"
% (MIN_AVATAR_SIZE, MAX_AVATAR_SIZE)
)
@property
def levels(self):
"""Sorted game_levels"""
return sorted(self.game_levels)
def last_scored(self):
for item in reversed(self.game_history):
if item.type == "flag_count":
return item.created.strftime("%s")
return datetime.now().strftime("%s")
def level_flags(self, lvl):
"""Given a level number return all flags captured for that level"""
return [flag for flag in self.flags if flag.game_level.number == lvl]
def box_flags(self, box):
"""Given a box return all flags captured for that box"""
return [flag for flag in self.flags if flag.box == box]
@property
def bot_count(self):
bot_manager = BotManager.instance()
return bot_manager.count_by_team_uuid(self.uuid)
    def file_by_file_name(self, file_name):
        """Return the file object matching file_name, or None"""
        # Filter in Python: self.files is a plain list-style relationship,
        # so a query-level filter_by is not available on it.
        matches = [f for f in self.files if f.file_name == file_name]
        return matches[0] if matches else None
def to_dict(self):
"""Use for JSON related tasks; return public data only"""
return {
"uuid": self.uuid,
"name": self.name,
"motto": self.motto,
"money": self.money,
"avatar": self.avatar,
"notes": self.notes,
}
def to_xml(self, parent):
team_elem = ET.SubElement(parent, "team")
ET.SubElement(team_elem, "name").text = self.name
ET.SubElement(team_elem, "motto").text = self.motto
ET.SubElement(team_elem, "notes").text = self.notes
users_elem = ET.SubElement(team_elem, "users")
users_elem.set("count", "%s" % str(len(self.members)))
for user in self.members:
user.to_xml(users_elem)
def __repr__(self):
return "<Team - name: %s, money: %d>" % (self.name, self.money)
def __str__(self):
return self.name
def __eq__(self, other):
return self.id == other.id
def __ne__(self, other):
return not self.__eq__(other)
def __cmp__(self, other):
"""Compare based on the config option rank_by"""
if options.rank_by.lower() != "money":
"""flags ▲, money ▲, hints ▼, time ▼"""
this, that = len(self.flags), len(other.flags)
if this == that:
this, that = self.money, other.money
if this == that:
this, that = len(other.hints), len(self.hints)
if this == that:
this, that = other.last_scored(), self.last_scored()
else:
"""money ▲, hints ▼, time ▼, flags ▲"""
this, that = self.money, other.money
if this == that:
this, that = len(other.hints), len(self.hints)
if this == that:
this, that = other.last_scored(), self.last_scored()
if this == that:
this, that = len(self.flags), len(other.flags)
if this < that:
return 1
elif this == that:
return 0
else:
return -1
def __gt__(self, other):
return self.__cmp__(other) > 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __le__(self, other):
return self.__cmp__(other) <= 0
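
# Illustrative usage (not part of the upstream model): because __cmp__
# returns 1 when self ranks *below* other, a plain sorted() call yields
# the best-ranked team first under the configured rank_by option, e.g.:
#
#   leaders = sorted(Team.all())
#   podium = [team.name for team in leaders[:3]]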

blob_id: ceb0c5c2bcb32df0d90656a98f3c00ac5ad250ce
directory_id: cfb638fee5fa2cdd3149a8ea91043e6bc0808275
path: /contrib/matlabsim/continuous_example/continuous.py
content_id: 692f09e072d99cca95ec7486e1a41bbfb21279b6
detected_licenses: ["BSD-3-Clause", "LicenseRef-scancode-warranty-disclaimer", "BSD-2-Clause"]
license_type: permissive
repo_name: tulip-control/tulip-control
snapshot_id: a23436a122dc317d39b0980c40f2da5740433ae5
revision_id: 83f993c2ae06aa8368e4bbba02bf52d68725e106
branch_name: refs/heads/main
visit_date: 2023-08-22T14:39:02.797004
revision_date: 2022-06-23T19:40:03
committer_date: 2022-06-23T19:40:03
github_id: 13,993,728
star_events_count: 107
fork_events_count: 37
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-06T17:29:38
gha_created_at: 2013-10-30T17:15:55
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,198
extension: py
filename: continuous.py
content:
#!/usr/bin/env python
#
# WARNING: This example may not yet be working. Please check again in
# the upcoming release.
#
"""
Continuous example with MATLAB export at the end. Mostly identical to
examples/robot_planning/continuous.py
"""
#
# Note: This code is commented to allow components to be extracted into
# the tutorial that is part of the users manual. Comments containing
# strings of the form @label@ are used for this purpose.
#
# @import_section@
import sys
sys.path.append('../')
import numpy as np
import tomatlab
from tulip import spec, synth, hybrid
from polytope import box2poly
from tulip.abstract import prop2part, discretize, find_controller
# @import_section_end@
visualize = False
from tulip.abstract.plot import plot_partition
# @dynamics_section@
# Problem parameters
input_bound = 1.0
uncertainty = 0.01
# Continuous state space
cont_state_space = box2poly([[0., 3.], [0., 2.]])
# Continuous dynamics
A = np.array([[1.0, 0.], [ 0., 1.0]])
B = np.array([[0.1, 0.], [ 0., 0.1]])
E = np.array([[1,0], [0,1]])
# Available control, possible disturbances
U = input_bound *np.array([[-1., 1.], [-1., 1.]])
W = uncertainty *np.array([[-1., 1.], [-1., 1.]])
# Convert to polyhedral representation
U = box2poly(U)
W = box2poly(W)
# Construct the LTI system describing the dynamics
sys_dyn = hybrid.LtiSysDyn(A, B, E, None, U, W, cont_state_space)
# @dynamics_section_end@
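# Note (illustrative): LtiSysDyn encodes the discrete-time dynamics
#   x[t+1] = A x[t] + B u[t] + E w[t]
# with the control u constrained to U, the disturbance w to W, and the
# state x to cont_state_space.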
# @partition_section@
# Define atomic propositions for relevant regions of state space
cont_props = {}
cont_props['home'] = box2poly([[0., 1.], [0., 1.]])
cont_props['lot'] = box2poly([[2., 3.], [1., 2.]])
# Compute the proposition preserving partition of the continuous state space
cont_partition = prop2part(cont_state_space, cont_props)
plot_partition(cont_partition)
# @partition_section_end@
# @discretize_section@
# Given dynamics & proposition-preserving partition, find feasible transitions
disc_params = {'closed_loop':True, 'N':8, 'min_cell_volume':0.1,
'plotit':visualize, 'conservative':False}
disc_dynamics = discretize(cont_partition, sys_dyn, **disc_params)
# @discretize_section_end@
"""Visualize transitions in continuous domain (optional)"""
plot_partition(disc_dynamics.ppp, disc_dynamics.ts,
disc_dynamics.ppp2ts)
"""Specifications"""
# Environment variables and assumptions
env_vars = {'park'}
env_init = set() # empty set
env_prog = '!park'
env_safe = set() # empty set
# System variables and requirements
sys_vars = {'X0reach'}
sys_init = {'X0reach'}
sys_prog = {'home'} # []<>home
sys_safe = {'(X(X0reach) <-> lot) || (X0reach && !park)'}
sys_prog |= {'X0reach'}
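# Note (illustrative): the safety formula makes X0reach track "the lot has
# been (re)visited since the last park request", so []<>X0reach together
# with []<>home expresses the usual "return home, and service every park
# request by visiting the lot" requirement.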
# Create the specification
specs = spec.GRSpec(env_vars, sys_vars, env_init, sys_init,
env_safe, sys_safe, env_prog, sys_prog)
specs.moore = False
specs.qinit = r'\A \E'
specs.plus_one = False
# @synthesize_section@
"""Synthesize"""
ctrl = synth.synthesize(
specs, sys=disc_dynamics.ts, ignore_sys_init=True, solver='gr1c')
# Unrealizable spec ?
if ctrl is None:
sys.exit()
# Export Simulink Model
tomatlab.export('robot_continuous.mat', ctrl, sys_dyn, disc_dynamics,
disc_params)

blob_id: 37fb9909b9ad9a9a4fdf9bea6f1a9221519aecd2
directory_id: 967968e56ec17a2ee641af84cfca669c1d16a6f1
path: /tests/nessus/schema/test_settings.py
content_id: 98f55ea08e2c030cfa27beb548ecaa7f2feef7cb
detected_licenses: ["MIT"]
license_type: permissive
repo_name: tenable/pyTenable
snapshot_id: 72108c2564682e65cba181ded6ef6a9c990ef004
revision_id: 4e31049891f55016168b14ae30d332a965523640
branch_name: refs/heads/master
visit_date: 2023-08-30T23:26:33.161062
revision_date: 2023-08-08T04:39:04
committer_date: 2023-08-08T04:39:04
github_id: 114,689,090
star_events_count: 300
fork_events_count: 211
gha_license_id: MIT
gha_event_created_at: 2023-08-08T04:39:05
gha_created_at: 2017-12-18T21:23:01
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,076
extension: py
filename: test_settings.py
content:
import pytest
from tenable.nessus.schema.settings import SettingsSchema, SettingsListSchema

def test_settings_schema():
schema = SettingsSchema()
data = {'action': 'add', 'name': 'name', 'id': 'id-val', 'value': '123'}
assert schema.dump(schema.load(data)) == data

def test_setting_list():
schema = SettingsListSchema()
setting = {'action': 'add', 'name': 'name', 'id': 'id-val', 'value': '123'}
resp = schema.dump(schema.load({'settings': [setting for _ in range(3)]}))
assert resp == {'setting.0.action': 'add',
'setting.0.name': 'name',
'setting.0.id': 'id-val',
'setting.0.value': '123',
'setting.1.action': 'add',
'setting.1.name': 'name',
'setting.1.id': 'id-val',
'setting.1.value': '123',
'setting.2.action': 'add',
'setting.2.name': 'name',
'setting.2.id': 'id-val',
'setting.2.value': '123',
}
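
# Note (illustrative): the assertion above demonstrates how
# SettingsListSchema flattens a list of settings into the dotted
# 'setting.<index>.<field>' keys that the schema is designed to emit.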

blob_id: e70aba20d56f59720ccba84c0d3ef46cf0f26875
directory_id: e3f5f41b242650b4bef68aa191a5779aedd3e02e
path: /Chapter11/config.py
content_id: 5a495165f3dfec10ec6dbd457dc565b8d8e9bfc1
detected_licenses: ["MIT"]
license_type: permissive
repo_name: PacktPublishing/Mastering-Flask-Web-Development-Second-Edition
snapshot_id: d4675c047bb51b0154958205f53c962ab4d32e4c
revision_id: c3174127b40f8af1e2ab5e614994ffed7acbc11b
branch_name: refs/heads/master
visit_date: 2023-05-11T00:23:30.213655
revision_date: 2023-01-18T09:14:14
committer_date: 2023-01-18T09:14:14
github_id: 154,667,293
star_events_count: 168
fork_events_count: 131
gha_license_id: MIT
gha_event_created_at: 2023-05-01T20:52:13
gha_created_at: 2018-10-25T12:30:58
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,522
extension: py
filename: config.py
content:
import os
from celery.schedules import crontab
basedir = os.path.abspath(os.path.dirname(__file__))

class Config(object):
SECRET_KEY = '736670cb10a600b695a55839ca3a5aa54a7d7356cdef815d2ad6e19a2031182b'
RECAPTCHA_PUBLIC_KEY = "6LdKkQQTAAAAAEH0GFj7NLg5tGicaoOus7G9Q5Uw"
RECAPTCHA_PRIVATE_KEY = '6LdKkQQTAAAAAMYroksPTJ7pWhobYb88fTAcxcYn'
POSTS_PER_PAGE = 10
TWITTER_API_KEY = "XXXX"
TWITTER_API_SECRET = "XXXX"
FACEBOOK_CLIENT_ID = "XXX"
FACEBOOK_CLIENT_SECRET = "XXXX"
CELERY_BROKER_URL = "amqp://rabbitmq:rabbitmq@localhost//"
CELERY_BACKEND_URL = "amqp://rabbitmq:rabbitmq@localhost//"
MAIL_SERVER = "smtp.gmail.com"
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USER = "@gmail.com"
MAIL_PASSWORD = "password"
MAIL_DEFAULT_SENDER = "from@flask.com"
CELERYBEAT_SCHEDULE = {
'weekly-digest': {
'task': 'blog.tasks.digest',
'schedule': crontab(day_of_week=6, hour='10')
},
}
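    # Note (illustrative): in Celery's crontab, day_of_week=6 is Saturday
    # (0 is Sunday), so the weekly digest fires Saturdays at 10:00.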

class ProdConfig(Config):
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'database.db')
CACHE_TYPE = 'redis'
CACHE_REDIS_HOST = 'localhost'
CACHE_REDIS_PORT = '6379'
CACHE_REDIS_PASSWORD = ''
CACHE_REDIS_DB = '0'

class DevConfig(Config):
DEBUG = True
DEBUG_TB_INTERCEPT_REDIRECTS = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'database.db')
CACHE_TYPE = 'simple'
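
# Illustrative usage (not part of this config module): these classes are
# meant to be loaded through Flask's standard config mechanism, e.g.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object('config.DevConfig')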