Dataset schema:

| column | dtype | range / cardinality |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 4–721 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] (nullable) | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] (nullable) | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6 – 10.2M |

---

**Record 1: tensorflow/tensorboard · directory_loader_test.py**

| field | value |
|---|---|
| blob_id | e18338428c5691fbb73bceba9368eb984f92c950 |
| directory_id | f312fcd24d94be8b32e2d1e50643b01c619aa23b |
| path | /tensorboard/backend/event_processing/directory_loader_test.py |
| content_id | 39db1d37c0a2e8127105afb4aa5794d702139e8a |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | tensorflow/tensorboard |
| snapshot_id | bf316fc5d47f78ef980dd2106c99207892a508d5 |
| revision_id | 5961c76dca0fb9bb40d146f5ce13834ac29d8ddb |
| branch_name | refs/heads/master |
| visit_date | 2023-09-03T23:59:03.264261 |
| revision_date | 2023-08-30T22:24:07 |
| committer_date | 2023-08-30T22:24:07 |
| github_id | 91,379,993 |
| star_events_count | 6,766 |
| fork_events_count | 2,063 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-14T20:55:56 |
| gha_created_at | 2017-05-15T20:08:07 |
| gha_language | TypeScript |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 10,711 |
| extension | py |
| filename | directory_loader_test.py |

content:

```python
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for directory_loader."""
import functools
import glob
import os
import shutil
from unittest import mock
import tensorflow as tf
from tensorboard.backend.event_processing import directory_loader
from tensorboard.backend.event_processing import directory_watcher
from tensorboard.backend.event_processing import event_file_loader
from tensorboard.backend.event_processing import io_wrapper
from tensorboard.util import test_util
class _TimestampedByteLoader:
"""A loader that loads timestamped bytes from a file."""
def __init__(self, path, registry=None):
self._path = path
self._registry = registry if registry is not None else []
self._registry.append(path)
self._f = open(path)
def __del__(self):
self._registry.remove(self._path)
def Load(self):
while True:
line = self._f.readline()
if not line:
return
ts, value = line.rstrip("\n").split(":")
yield float(ts), value
class DirectoryLoaderTest(tf.test.TestCase):
def setUp(self):
        # Put everything in a directory so it's easier to delete within tests.
self._directory = os.path.join(self.get_temp_dir(), "testdir")
os.mkdir(self._directory)
self._loader = directory_loader.DirectoryLoader(
self._directory, _TimestampedByteLoader
)
def _WriteToFile(self, filename, data, timestamps=None):
if timestamps is None:
timestamps = range(len(data))
self.assertEqual(len(data), len(timestamps))
path = os.path.join(self._directory, filename)
with open(path, "a") as f:
for byte, timestamp in zip(data, timestamps):
f.write("%f:%s\n" % (timestamp, byte))
def assertLoaderYields(self, values):
self.assertEqual(list(self._loader.Load()), values)
def testRaisesWithBadArguments(self):
with self.assertRaises(ValueError):
directory_loader.DirectoryLoader(None, lambda x: None)
with self.assertRaises(ValueError):
directory_loader.DirectoryLoader("dir", None)
def testEmptyDirectory(self):
self.assertLoaderYields([])
def testSingleFileLoading(self):
self._WriteToFile("a", "abc")
self.assertLoaderYields(["a", "b", "c"])
self.assertLoaderYields([])
self._WriteToFile("a", "xyz")
self.assertLoaderYields(["x", "y", "z"])
self.assertLoaderYields([])
def testMultipleFileLoading(self):
self._WriteToFile("a", "a")
self._WriteToFile("b", "b")
self.assertLoaderYields(["a", "b"])
self.assertLoaderYields([])
self._WriteToFile("a", "A")
self._WriteToFile("b", "B")
self._WriteToFile("c", "c")
# The loader should read new data from all the files.
self.assertLoaderYields(["A", "B", "c"])
self.assertLoaderYields([])
def testMultipleFileLoading_intermediateEmptyFiles(self):
self._WriteToFile("a", "a")
self._WriteToFile("b", "")
self._WriteToFile("c", "c")
self.assertLoaderYields(["a", "c"])
def testPathFilter(self):
self._loader = directory_loader.DirectoryLoader(
self._directory,
_TimestampedByteLoader,
lambda path: "tfevents" in path,
)
self._WriteToFile("skipped", "a")
self._WriteToFile("event.out.tfevents.foo.bar", "b")
self._WriteToFile("tf.event", "c")
self.assertLoaderYields(["b"])
def testActiveFilter_staticFilterBehavior(self):
"""Tests behavior of a static active_filter."""
loader_registry = []
loader_factory = functools.partial(
_TimestampedByteLoader, registry=loader_registry
)
active_filter = lambda timestamp: timestamp >= 2
self._loader = directory_loader.DirectoryLoader(
self._directory, loader_factory, active_filter=active_filter
)
def assertLoadersForPaths(paths):
paths = [os.path.join(self._directory, path) for path in paths]
self.assertEqual(loader_registry, paths)
# a: normal-looking file.
# b: file without sufficiently active data (should be marked inactive).
# c: file with timestamps in reverse order (max computed correctly).
# d: empty file (should be considered active in absence of timestamps).
self._WriteToFile("a", ["A1", "A2"], [1, 2])
self._WriteToFile("b", ["B1"], [1])
self._WriteToFile("c", ["C2", "C1", "C0"], [2, 1, 0])
self._WriteToFile("d", [], [])
self.assertLoaderYields(["A1", "A2", "B1", "C2", "C1", "C0"])
assertLoadersForPaths(["a", "c", "d"])
self._WriteToFile("a", ["A3"], [3])
self._WriteToFile("b", ["B3"], [3])
self._WriteToFile("c", ["C0"], [0])
self._WriteToFile("d", ["D3"], [3])
self.assertLoaderYields(["A3", "C0", "D3"])
assertLoadersForPaths(["a", "c", "d"])
# Check that a 0 timestamp in file C on the most recent load doesn't
# override the max timestamp of 2 seen in the earlier load.
self._WriteToFile("c", ["C4"], [4])
self.assertLoaderYields(["C4"])
assertLoadersForPaths(["a", "c", "d"])
def testActiveFilter_dynamicFilterBehavior(self):
"""Tests behavior of a dynamic active_filter."""
loader_registry = []
loader_factory = functools.partial(
_TimestampedByteLoader, registry=loader_registry
)
threshold = 0
active_filter = lambda timestamp: timestamp >= threshold
self._loader = directory_loader.DirectoryLoader(
self._directory, loader_factory, active_filter=active_filter
)
def assertLoadersForPaths(paths):
paths = [os.path.join(self._directory, path) for path in paths]
self.assertEqual(loader_registry, paths)
self._WriteToFile("a", ["A1", "A2"], [1, 2])
self._WriteToFile("b", ["B1", "B2", "B3"], [1, 2, 3])
self._WriteToFile("c", ["C1"], [1])
threshold = 2
# First load pass should leave file C marked inactive.
self.assertLoaderYields(["A1", "A2", "B1", "B2", "B3", "C1"])
assertLoadersForPaths(["a", "b"])
self._WriteToFile("a", ["A4"], [4])
self._WriteToFile("b", ["B4"], [4])
self._WriteToFile("c", ["C4"], [4])
threshold = 3
# Second load pass should mark file A as inactive (due to newly
# increased threshold) and thus skip reading data from it.
self.assertLoaderYields(["B4"])
assertLoadersForPaths(["b"])
self._WriteToFile("b", ["B5", "B6"], [5, 6])
# Simulate a third pass in which the threshold increases while
# we're processing a file, so it's still active at the start of the
# load but should be marked inactive at the end.
load_generator = self._loader.Load()
self.assertEqual("B5", next(load_generator))
threshold = 7
self.assertEqual(["B6"], list(load_generator))
assertLoadersForPaths([])
# Confirm that all loaders are now inactive.
self._WriteToFile("b", ["B7"], [7])
self.assertLoaderYields([])
def testDoesntCrashWhenCurrentFileIsDeleted(self):
# Use actual file loader so it emits the real error.
self._loader = directory_loader.DirectoryLoader(
self._directory, event_file_loader.TimestampedEventFileLoader
)
with test_util.FileWriter(
self._directory, filename_suffix=".a"
) as writer_a:
writer_a.add_test_summary("a")
events = list(self._loader.Load())
events.pop(0) # Ignore the file_version event.
self.assertEqual(1, len(events))
self.assertEqual("a", events[0].summary.value[0].tag)
os.remove(glob.glob(os.path.join(self._directory, "*.a"))[0])
with test_util.FileWriter(
self._directory, filename_suffix=".b"
) as writer_b:
writer_b.add_test_summary("b")
events = list(self._loader.Load())
events.pop(0) # Ignore the file_version event.
self.assertEqual(1, len(events))
self.assertEqual("b", events[0].summary.value[0].tag)
def testDoesntCrashWhenUpcomingFileIsDeleted(self):
# Use actual file loader so it emits the real error.
self._loader = directory_loader.DirectoryLoader(
self._directory, event_file_loader.TimestampedEventFileLoader
)
with test_util.FileWriter(
self._directory, filename_suffix=".a"
) as writer_a:
writer_a.add_test_summary("a")
with test_util.FileWriter(
self._directory, filename_suffix=".b"
) as writer_b:
writer_b.add_test_summary("b")
generator = self._loader.Load()
next(generator) # Ignore the file_version event.
event = next(generator)
self.assertEqual("a", event.summary.value[0].tag)
os.remove(glob.glob(os.path.join(self._directory, "*.b"))[0])
self.assertEmpty(list(generator))
def testRaisesDirectoryDeletedError_whenDirectoryIsDeleted(self):
self._WriteToFile("a", "a")
self.assertLoaderYields(["a"])
shutil.rmtree(self._directory)
with self.assertRaises(directory_watcher.DirectoryDeletedError):
next(self._loader.Load())
def testDoesntRaiseDirectoryDeletedError_forUnrecognizedException(self):
self._WriteToFile("a", "a")
self.assertLoaderYields(["a"])
class MyException(Exception):
pass
with mock.patch.object(
io_wrapper, "ListDirectoryAbsolute"
) as mock_listdir:
mock_listdir.side_effect = MyException
with self.assertRaises(MyException):
next(self._loader.Load())
self.assertLoaderYields([])
if __name__ == "__main__":
tf.test.main()
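```

The polling pattern these tests exercise (each `Load()` pass yields only data appended since the previous pass, across every file in the directory) can be sketched outside the test harness. A minimal sketch, assuming `tensorboard` is installed; `LineLoader` is a hypothetical stand-in for `_TimestampedByteLoader`:

```python
import os
import tempfile

from tensorboard.backend.event_processing import directory_loader


class LineLoader:
    """Hypothetical loader: yields whole lines appended to a file."""

    def __init__(self, path):
        self._f = open(path)

    def Load(self):
        while True:
            line = self._f.readline()
            if not line:
                return
            yield line.rstrip("\n")


logdir = tempfile.mkdtemp()
loader = directory_loader.DirectoryLoader(logdir, LineLoader)
with open(os.path.join(logdir, "a"), "a") as f:
    f.write("first\n")
print(list(loader.Load()))  # ['first']
with open(os.path.join(logdir, "a"), "a") as f:
    f.write("second\n")
print(list(loader.Load()))  # ['second'] -- only the newly appended line
```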

---

**Record 2: ppb/pursuedpybear · __init__.py**

| field | value |
|---|---|
| blob_id | bf939a8240b592f5771f1867b27867532d48a393 |
| directory_id | dbf613469aa8c69ba013e016de82c942508b2485 |
| path | /ppb/__init__.py |
| content_id | a3eca9cc385285c215fcb6e82f6b9ed1c10b6a4a |
| detected_licenses | ["Artistic-2.0"] |
| license_type | permissive |
| repo_name | ppb/pursuedpybear |
| snapshot_id | dcb96ae35d352a9165898872048c72c6224074b9 |
| revision_id | d067c2f7d440389fc542638d79e3244281c6e737 |
| branch_name | refs/heads/canon |
| visit_date | 2022-11-22T11:57:27.406370 |
| revision_date | 2022-02-08T02:12:45 |
| committer_date | 2022-02-08T02:12:45 |
| github_id | 54,496,479 |
| star_events_count | 247 |
| fork_events_count | 119 |
| gha_license_id | Artistic-2.0 |
| gha_event_created_at | 2022-11-12T22:36:20 |
| gha_created_at | 2016-03-22T17:46:34 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 5,598 |
| extension | py |
| filename | __init__.py |

content:

```python
"""
A python game framework.
PursuedPyBear is object oriented and event driven. Practically, this means that
most of your code will be organized into classes. Game objects in
:mod:`ppb` are :class:`Sprite` instances, which get contained in
:class:`Scenes <Scene>`. In turn, the :class:`GameEngine`
contains the scenes and :class:`Systems <System>`.
:mod:`Events <events>` are defined as simple classes and event handlers
are based on their names.
The classes, modules, and methods exported directly are the most used parts of
the library and intended to be used by users at all levels (barring
make_engine). Advanced features tend to be in their own modules and subpackages.
Exports:
* :class:`~ppb_vector.Vector`
* :class:`Scene`
* :class:`Circle`
* :class:`Image`
* :class:`Sprite`
* :class:`Square`
* :class:`Sound`
* :class:`Triangle`
* :mod:`events`
* :class:`Font`
* :class:`Text`
* :mod:`directions`
"""
import logging
import warnings
from sys import version_info
from typing import Callable
from ppb import directions
from ppb import events
from ppb_vector import Vector
from ppb.assets import Circle
from ppb.assets import Ellipse
from ppb.assets import Rectangle
from ppb.assets import Square
from ppb.assets import Triangle
from ppb.engine import GameEngine
from ppb.engine import Signal
from ppb.scenes import Scene
from ppb.sprites import RectangleSprite
from ppb.sprites import Sprite
from ppb.systems import Image
from ppb.systems import Sound
from ppb.systems import Font
from ppb.systems import Text
from ppb.utils import get_time
__all__ = (
# Shortcuts
'Vector', 'Scene', 'Circle', 'Image', 'Sprite', 'RectangleSprite',
'Square', 'Sound', 'Triangle', 'events', 'Font', 'Text', 'directions',
'Rectangle', 'Ellipse', 'Signal',
# Local stuff
'run', 'make_engine',
)
def _make_kwargs(setup, title, engine_opts):
kwargs = {
"resolution": (800, 600),
"scene_kwargs": {
"set_up": setup,
},
"window_title": title,
**engine_opts
}
return kwargs
def _validate_python_support(required_version='3.7', ppb_release='2.0',
release_date='June 2022'):
"""
Verifies Supported Python Version.
This function verifies ppb is running on a supported Python version.
:param required_version: Minimum Python Version Supported by PPB
:type required_version: str
    :param ppb_release: PPB release version in which the deprecation will occur
:type ppb_release: str
:param release_date: Estimated release month for PPB Version
:type release_date: str
"""
    # Creates (Major, Minor) version tuples for comparison
if version_info[0:2] <= tuple(map(int, required_version.split('.'))):
deprecation_message = f"PPB v{ppb_release} will no longer support "\
f"Python {version_info[0]}.{version_info[1]} " \
f"once released around {release_date}. Please " \
f"update to Python {required_version} or newer."
warnings.filterwarnings('default')
warnings.warn(deprecation_message, DeprecationWarning)
def run(setup: Callable[[Scene], None] = None, *, log_level=logging.WARNING,
starting_scene=Scene, title="PursuedPyBear", **engine_opts):
"""
Run a game.
This is the default entry point for ppb games.
Sample usage:
::
import ppb
def setup(scene):
scene.add(ppb.Sprite())
ppb.run(setup)
Alternatively:
::
import ppb
class Game(ppb.Scene):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.add(ppb.Sprite())
ppb.run(starting_scene=Game)
See the :doc:`../getting-started` guide for a more complete guide to
building games.
All parameters are optional.
:param setup: Called with the first scene to allow initialization of
your game.
:type setup: Callable[[Scene], None]
:param log_level: The logging level from :func:`logging` to send to the
console.
:param starting_scene: A scene class to use. Defaults to
:class:`~ppb.scenes.Scene`
:type starting_scene: type
:param title: The title of the rendered window.
:type title: str
:param engine_opts: Additional keyword arguments passed to the
:class:`~ppb.engine.GameEngine`.
"""
logging.basicConfig(level=log_level)
_validate_python_support()
with make_engine(setup, starting_scene=starting_scene, title=title, **engine_opts) as eng:
eng.run()
def make_engine(setup: Callable[[Scene], None] = None, *,
                starting_scene=Scene, title="PursuedPyBear",
**engine_opts):
"""
    Set up a :class:`GameEngine`.
This function exists for third party modules to use the same code paths
as :func:`run` for setting up their engine. If you want to instantiate
your own engine, you can do so directly using the
:class:`constructor <GameEngine>`.
:param setup: Called with the first scene to allow initialization of
your game.
:type setup: Callable[[Scene], None]
:param starting_scene: A scene class to use. Defaults to
:class:`~ppb.scenes.Scene`
:type starting_scene: type
:param title: The title of the rendered window.
:type title: str
:param engine_opts: Additional keyword arguments passed to the
:class:`~ppb.engine.GameEngine`
:return: A GameEngine instance.
"""
return GameEngine(starting_scene, **_make_kwargs(setup, title, engine_opts))
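```

The module's entry points combine into a complete game in a few lines. A minimal runnable sketch, assuming `ppb` and its SDL2 dependencies are installed; `Player` and its one-unit-per-second motion are invented for illustration:

```python
import ppb


class Player(ppb.Sprite):
    def on_update(self, update_event, signal):
        # Move one unit per second to the right.
        self.position += ppb.Vector(1, 0) * update_event.time_delta


def setup(scene):
    scene.add(Player())


if __name__ == "__main__":
    ppb.run(setup, title="Demo")
```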

---

**Record 3: netbox-community/netbox · model_forms.py**

| field | value |
|---|---|
| blob_id | c81ce0458c3745596e1b31555458d63a54021601 |
| directory_id | 6415c13547e6943f7b65337cbd2790c4e18723c8 |
| path | /netbox/core/forms/model_forms.py |
| content_id | 666a19e85b84cf944a466ca0958466433933d8dd |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | netbox-community/netbox |
| snapshot_id | 287254a9698270d51f57b1297118e9f01536da5a |
| revision_id | 506884bc4dc70299db3e2a7ad577dd7fd808065e |
| branch_name | refs/heads/develop |
| visit_date | 2023-08-24T09:11:46.685121 |
| revision_date | 2023-08-23T18:44:14 |
| committer_date | 2023-08-23T18:44:14 |
| github_id | 52,796,596 |
| star_events_count | 8,122 |
| fork_events_count | 1,817 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-14T18:16:01 |
| gha_created_at | 2016-02-29T14:15:46 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 3,470 |
| extension | py |
| filename | model_forms.py |

content:

```python
import copy
from django import forms
from core.forms.mixins import SyncedDataMixin
from core.models import *
from netbox.forms import NetBoxModelForm
from netbox.registry import registry
from utilities.forms import get_field_value
from utilities.forms.fields import CommentField
from utilities.forms.widgets import HTMXSelect
__all__ = (
'DataSourceForm',
'ManagedFileForm',
)
class DataSourceForm(NetBoxModelForm):
comments = CommentField()
class Meta:
model = DataSource
fields = [
'name', 'type', 'source_url', 'enabled', 'description', 'comments', 'ignore_rules', 'tags',
]
widgets = {
'type': HTMXSelect(),
'ignore_rules': forms.Textarea(
attrs={
'rows': 5,
'class': 'font-monospace',
'placeholder': '.cache\n*.txt'
}
),
}
@property
def fieldsets(self):
fieldsets = [
('Source', ('name', 'type', 'source_url', 'enabled', 'description', 'tags', 'ignore_rules')),
]
if self.backend_fields:
fieldsets.append(
('Backend Parameters', self.backend_fields)
)
return fieldsets
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Determine the selected backend type
backend_type = get_field_value(self, 'type')
backend = registry['data_backends'].get(backend_type)
# Add backend-specific form fields
self.backend_fields = []
for name, form_field in backend.parameters.items():
field_name = f'backend_{name}'
self.backend_fields.append(field_name)
self.fields[field_name] = copy.copy(form_field)
if self.instance and self.instance.parameters:
self.fields[field_name].initial = self.instance.parameters.get(name)
def save(self, *args, **kwargs):
parameters = {}
for name in self.fields:
if name.startswith('backend_'):
parameters[name[8:]] = self.cleaned_data[name]
self.instance.parameters = parameters
return super().save(*args, **kwargs)
class ManagedFileForm(SyncedDataMixin, NetBoxModelForm):
upload_file = forms.FileField(
required=False
)
fieldsets = (
('File Upload', ('upload_file',)),
('Data Source', ('data_source', 'data_file', 'auto_sync_enabled')),
)
class Meta:
model = ManagedFile
fields = ('data_source', 'data_file', 'auto_sync_enabled')
def clean(self):
super().clean()
if self.cleaned_data.get('upload_file') and self.cleaned_data.get('data_file'):
raise forms.ValidationError("Cannot upload a file and sync from an existing file")
if not self.cleaned_data.get('upload_file') and not self.cleaned_data.get('data_file'):
raise forms.ValidationError("Must upload a file or select a data file to sync")
return self.cleaned_data
def save(self, *args, **kwargs):
# If a file was uploaded, save it to disk
if self.cleaned_data['upload_file']:
self.instance.file_path = self.cleaned_data['upload_file'].name
with open(self.instance.full_path, 'wb+') as new_file:
new_file.write(self.cleaned_data['upload_file'].read())
return super().save(*args, **kwargs)
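```

The `backend_` prefix round-trip in `DataSourceForm` (dynamic fields registered under a common prefix in `__init__`, stripped back into a `parameters` dict in `save`) is a general pattern. A standalone sketch with no Django dependency; the field names are hypothetical:

```python
PREFIX = "backend_"


def collect_parameters(cleaned_data):
    """Strip PREFIX and gather the remaining names into a parameters dict."""
    return {
        name[len(PREFIX):]: value
        for name, value in cleaned_data.items()
        if name.startswith(PREFIX)
    }


cleaned = {"name": "src1", "backend_username": "u", "backend_branch": "main"}
assert collect_parameters(cleaned) == {"username": "u", "branch": "main"}
```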

---

**Record 4: lailongwei/llbc · gen_errno_code.py**

| field | value |
|---|---|
| blob_id | 7ac1b7fa895a352af9307e71ae05716c4201b8bf |
| directory_id | 8b93c6fe926241f6c95f981f7dae32414d820148 |
| path | /wrap/csllbc/csharp/script_tools/gen_errno_code.py |
| content_id | c2129f2e1ec92764bf537c3b9678c119d4cebd80 |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | lailongwei/llbc |
| snapshot_id | e0873c7a34eea6a3fff260f78f1bdbf06520363b |
| revision_id | 2b4ccb3387549f612a6df001f091680bfd9b3adb |
| branch_name | refs/heads/master |
| visit_date | 2023-08-15T08:22:03.602550 |
| revision_date | 2023-08-04T07:31:58 |
| committer_date | 2023-08-04T07:31:58 |
| github_id | 45,900,425 |
| star_events_count | 126 |
| fork_events_count | 48 |
| gha_license_id | MIT |
| gha_event_created_at | 2023-09-11T13:59:49 |
| gha_created_at | 2015-11-10T09:08:46 |
| gha_language | C++ |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 2,672 |
| extension | py |
| filename | gen_errno_code.py |

content:

```python
# -*- coding: utf-8 -*-
"""
FILE gen_errno_code.py
AUTHOR Sherlock <445498529@qq.com>
DATE 2017/03/14
VERSION 1.0
BRIEF Auto generate C# errno code.
"""
import os
from os import path as op
import shutil
import re
import hashlib
# This script file path
SCRIPT_PATH = op.abspath(op.dirname(__file__))
# LLBC root path
LLBC_ROOT_PATH = op.normpath(op.join(SCRIPT_PATH, u'../../../../'))
# CPP errno file name
CPP_ERRNO_FILE = op.join(LLBC_ROOT_PATH, u'llbc', u'include', u'llbc', u'common', u'Errno.h')
# Csharp code path
CS_ERRNO_FILE = op.join(LLBC_ROOT_PATH, u'wrap', u'csllbc', u'csharp', u'native', u'common', u'ErrnoNative.cs')
# analyse csharp file
def analyse_file(name):
    # open() raises on a missing file; it never returns a falsy object
    try:
        f = open(name)
    except IOError:
        print('file not found: ' + name)
        exit()
ls = []
spc = False
for line in f:
result = re.match(r'#define LLBC_ERROR_([^\s]*)[\s]*(.*)', line)
if not result:
            if not spc:
ls.append('\n')
spc = True
continue
if re.match(r'[^\(]*\([^\)]*\)', result.group(1)):
continue
key = result.group(1)
value = result.group(2)
result = re.match(r'\(\([^\)]*\)\(([^\)]*)\)\)', value)
if result:
value = result.group(1)
        # use a non-builtin name instead of shadowing 'str'
        entry = ' static public uint LLBC_ERROR_' + key
        size = len(key)
        if size < 24:
            entry = entry + ' ' * (24 - size - 1)
        ls.append(entry + '= ' + value + ';\n')
spc = False
f.close()
return ls
# get file md5 set
def get_file_md5_digest(name):
if not os.path.exists(name):
return 0
with open(name, 'rb') as f:
md5 = hashlib.md5()
md5.update(f.read())
digest = md5.digest()
return digest
# generate c++ head file
def generate_file(name, ls):
olddigest = get_file_md5_digest(name)
text = 'using System;\n'
text = text + '\n'
text = text + 'namespace llbc\n'
text = text + '{\n'
text = text + ' public class Errno\n'
text = text + ' {\n'
for line in ls:
text = text + line
text = text + ' }\n'
text = text + '}\n'
md5 = hashlib.md5()
md5.update(text.encode('utf-8'))
if md5.digest() == olddigest:
return 'Errno: file check finished!'
    try:
        f = open(name, 'wb')
    except IOError:
        return 'Errno: file open failure: ' + name
    # the file is opened in binary mode, so encode the text before writing
    f.write(text.encode('utf-8'))
    f.close()
    return 'Errno: generate finished!\n at: ' + name
def main():
ls = analyse_file(CPP_ERRNO_FILE)
    msg = generate_file(CS_ERRNO_FILE, ls)
    if msg != '':
        print(msg)
if __name__ == '__main__':
main()
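```

The two regular expressions in `analyse_file` (the `#define` matcher and the cast-unwrapping matcher) can be checked against a representative line. A small sketch; the sample `#define` is invented for illustration:

```python
import re

line = '#define LLBC_ERROR_SUCCESS ((int)(0))'

# First regex: split the macro name from its raw value.
m = re.match(r'#define LLBC_ERROR_([^\s]*)[\s]*(.*)', line)
assert m.group(1) == 'SUCCESS'

# Second regex: unwrap a '((type)(value))' cast down to the bare value.
value = m.group(2)
inner = re.match(r'\(\([^\)]*\)\(([^\)]*)\)\)', value)
if inner:
    value = inner.group(1)
assert value == '0'
```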

---

**Record 5: aliyun/aliyun-odps-python-sdk · test_engine.py**

| field | value |
|---|---|
| blob_id | 0c632c91812b5cc77df12ce8cdc45fdf4aec9440 |
| directory_id | 39164ede111f154b31cbb61663ea837f16f8aa4f |
| path | /odps/df/backends/sqlalchemy/tests/test_engine.py |
| content_id | d19baf8c94df20fb8542ab8846764aba7113502e |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | aliyun/aliyun-odps-python-sdk |
| snapshot_id | 217631252e6d52e75354a2a19faab5b9ff40e272 |
| revision_id | c5b897f03759b1a9851505eea3858a96d628f105 |
| branch_name | refs/heads/master |
| visit_date | 2023-08-16T22:42:12.441717 |
| revision_date | 2023-07-19T06:28:25 |
| committer_date | 2023-07-19T06:28:25 |
| github_id | 45,234,875 |
| star_events_count | 437 |
| fork_events_count | 116 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-08-03T06:45:34 |
| gha_created_at | 2015-10-30T07:07:59 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 29,398 |
| extension | py |
| filename | test_engine.py |

content:

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2022 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import itertools
import time
from collections import namedtuple
from datetime import datetime, timedelta
from functools import partial
import pytest
from .....compat import six, futures
from .....models import TableSchema
from .....tests.core import get_result, approx_list
from .....utils import to_text
from ....expr.expressions import CollectionExpr
from ....types import validate_data_type
from .... import Scalar, output_names, output_types, output, day, millisecond, agg
from ...tests.core import tn, NumGenerators
from ...context import context
from ...odpssql.types import df_schema_to_odps_schema
from ..engine import SQLAlchemyEngine, _engine_to_connections
from ..types import df_schema_to_sqlalchemy_columns
pytestmark = pytest.mark.skip
@pytest.fixture
def setup(odps):
def create_table_and_insert_data(table_name, df_schema, data, drop_first=True):
import sqlalchemy
columns = df_schema_to_sqlalchemy_columns(df_schema, engine=sql_engine)
t = sqlalchemy.Table(table_name, metadata, *columns)
conn.execute('DROP TABLE IF EXISTS %s' % table_name)
t.create()
conn.execute(t.insert(), [
dict((n, v) for n, v in zip(df_schema.names, d)) for d in data
])
return t
def gen_data(rows=None, data=None, nullable_field=None, value_range=None):
if data is None:
data = []
for _ in range(rows):
record = []
for t in schema.types:
method = getattr(NumGenerators, 'gen_random_%s' % t.name)
if t.name == 'bigint':
record.append(method(value_range=value_range))
else:
record.append(method())
data.append(record)
if nullable_field is not None:
j = schema._name_indexes[nullable_field]
for i, l in enumerate(data):
if i % 2 == 0:
data[i][j] = None
conn.execute(table.insert(), [
dict((n, v) for n, v in zip(schema.names, d)) for d in data])
return data
datatypes = lambda *types: [validate_data_type(t) for t in types]
pd_schema = TableSchema.from_lists(['name', 'id', 'fid', 'isMale', 'scale', 'birth'],
datatypes('string', 'int64', 'float64', 'boolean', 'decimal', 'datetime'))
df_schema = pd_schema
schema = df_schema_to_odps_schema(pd_schema)
df = None
expr = None
engine = SQLAlchemyEngine()
import sqlalchemy
from sqlalchemy import create_engine
sql_engine = sa_engine = create_engine('postgres://localhost/pyodps')
# setup.sql_engine = engine = create_engine('mysql://localhost/pyodps')
# setup.sql_engine = engine = create_engine('sqlite://')
conn = sa_engine.connect()
    metadata = sqlalchemy.MetaData(bind=sa_engine)
columns = df_schema_to_sqlalchemy_columns(df_schema, engine=sql_engine)
t = sqlalchemy.Table('pyodps_test_data', metadata, *columns)
metadata.create_all()
table = t
expr = CollectionExpr(_source_data=table, _schema=df_schema)
class FakeBar(object):
def update(self, *args, **kwargs):
pass
faked_bar = FakeBar()
nt = namedtuple("NT", "df_schema, schema, df, expr, engine, sql_engine, conn, "
"metadata, table, faked_bar, gen_data, create_table_and_insert_data")
try:
yield nt(df_schema, schema, df, expr, engine, sql_engine,
conn, metadata, table, faked_bar, gen_data,
create_table_and_insert_data)
finally:
[conn.close() for conn in _engine_to_connections.values()]
table.drop()
conn.close()
def test_async(odps, setup):
data = setup.gen_data(10, value_range=(-1000, 1000))
expr = setup.expr.id.sum()
future = setup.engine.execute(expr, async_=True, priority=4)
assert future.done() is False
res = future.result()
assert sum(it[1] for it in data) == res
def test_cache(odps, setup):
import sqlalchemy
data = setup.gen_data(10, value_range=(-1000, 1000))
expr = setup.expr[setup.expr.id < 10].cache()
cnt = expr.count()
dag = setup.engine.compile(expr)
assert len(dag.nodes()) == 2
res = setup.engine.execute(cnt)
assert len([it for it in data if it[1] < 10]) == res
assert context.is_cached(expr) is True
table = context.get_cached(expr)
assert isinstance(table, sqlalchemy.Table)
def test_batch(odps, setup):
if setup.sql_engine.name == 'mysql':
# TODO: mysqldb is not thread-safe, skip first
return
data = setup.gen_data(10, value_range=(-1000, 1000))
expr = setup.expr[setup.expr.id < 10].cache()
expr1 = expr.id.sum()
expr2 = expr.id.mean()
dag = setup.engine.compile([expr1, expr2])
assert len(dag.nodes()) == 3
assert sum(len(v) for v in dag._graph.values()) == 2
expect1 = sum(d[1] for d in data if d[1] < 10)
length = len([d[1] for d in data if d[1] < 10])
expect2 = (expect1 / float(length)) if length > 0 else 0.0
res = setup.engine.execute([expr1, expr2], n_parallel=2)
assert res[0] == expect1
assert pytest.approx(res[1]) == expect2
assert context.is_cached(expr) is True
# test async and timeout
expr = setup.expr[setup.expr.id < 10]
expr1 = expr.id.sum()
expr2 = expr.id.mean()
fs = setup.engine.execute([expr, expr1, expr2], n_parallel=2, async_=True, timeout=1)
assert len(fs) == 3
assert fs[1].result() == expect1
assert pytest.approx(fs[2].result()) == expect2
assert context.is_cached(expr) is True
def test_element(odps, setup):
data = setup.gen_data(5, nullable_field='name')
fields = [
setup.expr.name.isnull().rename('name1'),
setup.expr.name.notnull().rename('name2'),
setup.expr.name.fillna('test').rename('name3'),
setup.expr.id.isin([1, 2, 3]).rename('id1'),
setup.expr.id.isin(setup.expr.fid.astype('int')).rename('id2'),
setup.expr.id.notin([1, 2, 3]).rename('id3'),
setup.expr.id.notin(setup.expr.fid.astype('int')).rename('id4'),
setup.expr.id.between(setup.expr.fid, 3).rename('id5'),
setup.expr.name.fillna('test').switch('test', 'test' + setup.expr.name.fillna('test'),
'test2', 'test2' + setup.expr.name.fillna('test'),
default=setup.expr.name).rename('name4'),
setup.expr.name.fillna('test').switch('test', 1, 'test2', 2).rename('name5'),
setup.expr.id.cut(
[100, 200, 300],
labels=['xsmall', 'small', 'large', 'xlarge'],
include_under=True,
include_over=True,
).rename('id6')
]
expr = setup.expr[fields]
res = setup.engine.execute(expr)
result = get_result(res)
assert len(data) == len(result)
assert len([it for it in data if it[0] is None]) == len([it[0] for it in result if it[0]])
assert len([it[0] for it in data if it[0] is not None]) == len([it[1] for it in result if it[1]])
assert [(it[0] if it[0] is not None else 'test') for it in data] == [it[2] for it in result]
assert [(it[1] in (1, 2, 3)) for it in data] == [it[3] for it in result]
fids = [int(it[2]) for it in data]
assert [(it[1] in fids) for it in data] == [it[4] for it in result]
assert [(it[1] not in (1, 2, 3)) for it in data] == [it[5] for it in result]
assert [(it[1] not in fids) for it in data] == [it[6] for it in result]
assert [(it[2] <= it[1] <= 3) for it in data] == [it[7] for it in result]
assert [to_text('testtest' if it[0] is None else it[0]) for it in data] == [to_text(it[8]) for it in result]
assert [to_text(1 if it[0] is None else None) for it in data] == [to_text(it[9]) for it in result]
def get_val(val):
if val <= 100:
return 'xsmall'
elif 100 < val <= 200:
return 'small'
elif 200 < val <= 300:
return 'large'
else:
return 'xlarge'
assert [to_text(get_val(it[1])) for it in data] == [to_text(it[10]) for it in result]
def test_arithmetic(odps, setup):
data = setup.gen_data(5, value_range=(-1000, 1000))
fields = [
(setup.expr.id + 1).rename('id1'),
(setup.expr.fid - 1).rename('fid1'),
(setup.expr.scale * 2).rename('scale1'),
(setup.expr.scale + setup.expr.id).rename('scale2'),
(setup.expr.id / 2).rename('id2'),
(setup.expr.id ** 2).rename('id3'),
abs(setup.expr.id).rename('id4'),
(~setup.expr.id).rename('id5'),
(-setup.expr.fid).rename('fid2'),
(~setup.expr.isMale).rename('isMale1'),
(-setup.expr.isMale).rename('isMale2'),
(setup.expr.id // 2).rename('id6'),
(setup.expr.birth + day(1).rename('birth1')),
(setup.expr.birth - (setup.expr.birth - millisecond(10))).rename('birth2'),
(setup.expr.id % 2).rename('id7'),
]
expr = setup.expr[fields]
res = setup.engine.execute(expr)
result = get_result(res)
assert len(data) == len(result)
assert [it[1] + 1 for it in data] == [it[0] for it in result]
assert approx_list([it[2] - 1 for it in data], abs=.001) \
== [it[1] for it in result]
assert [it[4] * 2 for it in data] == [it[2] for it in result]
assert [it[4] + it[1] for it in data] == [it[3] for it in result]
assert approx_list([float(it[1]) / 2 for it in data], abs=.001) \
== [it[4] for it in result]
assert [int(it[1] ** 2) for it in data] == [it[5] for it in result]
assert [abs(it[1]) for it in data] == [it[6] for it in result]
assert [~it[1] for it in data] == [it[7] for it in result]
assert approx_list([-it[2] for it in data], abs=.001) \
== [it[8] for it in result]
assert [not it[3] for it in data] == [it[9] for it in result]
assert [it[1] // 2 for it in data] == [it[11] for it in result]
assert [it[5] + timedelta(days=1) for it in data] == [it[12].replace(tzinfo=None) for it in result]
assert [10] * len(data) == [it[13] for it in result]
assert [it[1] % 2 for it in data] == [it[14] for it in result]
def test_math(odps, setup):
# TODO: test sinh, cosh..., and acosh, asinh...
data = setup.gen_data(5, value_range=(1, 90))
if hasattr(math, 'expm1'):
expm1 = math.expm1
else:
expm1 = lambda x: 2 * math.exp(x / 2.0) * math.sinh(x / 2.0)
methods_to_fields = [
(math.sin, setup.expr.id.sin()),
(math.cos, setup.expr.id.cos()),
(math.tan, setup.expr.id.tan()),
(math.log, setup.expr.id.log()),
(lambda v: math.log(v, 2), setup.expr.id.log2()),
(math.log10, setup.expr.id.log10()),
(math.log1p, setup.expr.id.log1p()),
(math.exp, setup.expr.id.exp()),
(expm1, setup.expr.id.expm1()),
(math.atan, setup.expr.id.arctan()),
(math.sqrt, setup.expr.id.sqrt()),
(abs, setup.expr.id.abs()),
(math.ceil, setup.expr.id.ceil()),
(math.floor, setup.expr.id.floor()),
(math.trunc, setup.expr.id.trunc()),
(round, setup.expr.id.round()),
(lambda x: round(x, 2), setup.expr.id.round(2)),
]
fields = [it[1].rename('id'+str(i)) for i, it in enumerate(methods_to_fields)]
expr = setup.expr[fields]
res = setup.engine.execute(expr)
result = get_result(res)
for i, it in enumerate(methods_to_fields):
mt = it[0]
def method(v):
try:
return mt(v)
except ValueError:
return float('nan')
first = [method(it[1]) for it in data]
second = [it[i] for it in result]
assert len(first) == len(second)
for it1, it2 in zip(first, second):
not_valid = lambda x: \
x is None or (isinstance(x, float) and (math.isnan(x) or math.isinf(x)))
if not_valid(it1) and not_valid(it2):
continue
if isinstance(it1, float) and it1 > 1.0e15:
scale = 0.1 ** (int(math.log10(it1)) - 15)
assert pytest.approx(it1 * scale) == it2 * scale
else:
assert pytest.approx(it1) == it2
def test_string(odps, setup):
data = setup.gen_data(5)
methods_to_fields = [
(lambda s: s.capitalize(), setup.expr.name.capitalize()),
(lambda s: data[0][0] in s, setup.expr.name.contains(data[0][0], regex=False)),
(lambda s: s[0] + '|' + str(s[1]), setup.expr.name.cat(setup.expr.id.astype('string'), sep='|')),
(lambda s: s.endswith(data[0][0]), setup.expr.name.endswith(data[0][0])),
(lambda s: s.startswith(data[0][0]), setup.expr.name.startswith(data[0][0])),
(lambda s: s.replace(data[0][0], 'test'), setup.expr.name.replace(data[0][0], 'test', regex=False)),
(lambda s: s[0], setup.expr.name.get(0)),
(lambda s: len(s), setup.expr.name.len()),
(lambda s: s.ljust(10), setup.expr.name.ljust(10)),
(lambda s: s.ljust(20, '*'), setup.expr.name.ljust(20, fillchar='*')),
(lambda s: s.rjust(10), setup.expr.name.rjust(10)),
(lambda s: s.rjust(20, '*'), setup.expr.name.rjust(20, fillchar='*')),
(lambda s: s * 4, setup.expr.name.repeat(4)),
(lambda s: s[1:], setup.expr.name.slice(1)),
(lambda s: s[1: 6], setup.expr.name.slice(1, 6)),
(lambda s: s.title(), setup.expr.name.title()),
(lambda s: s.rjust(20, '0'), setup.expr.name.zfill(20)),
]
if setup.sql_engine.name == 'mysql':
methods_to_fields = methods_to_fields[:-2] + methods_to_fields[-1:]
fields = [it[1].rename('id'+str(i)) for i, it in enumerate(methods_to_fields)]
expr = setup.expr[fields]
res = setup.engine.execute(expr)
result = get_result(res)
for i, it in enumerate(methods_to_fields):
method = it[0]
if i != 2:
first = [method(it[0]) for it in data]
else:
# cat
first = [method(it) for it in data]
second = [it[i] for it in result]
assert first == second
def test_datetime(odps, setup):
data = setup.gen_data(5)
def date_value(sel):
if isinstance(sel, six.string_types):
fun = lambda v: getattr(v, sel)
else:
fun = sel
col_id = [idx for idx, col in enumerate(setup.schema.names) if col == 'birth'][0]
return [fun(row[col_id]) for row in data]
methods_to_fields = [
(partial(date_value, 'year'), setup.expr.birth.year),
(partial(date_value, 'month'), setup.expr.birth.month),
(partial(date_value, 'day'), setup.expr.birth.day),
(partial(date_value, 'hour'), setup.expr.birth.hour),
(partial(date_value, 'minute'), setup.expr.birth.minute),
(partial(date_value, 'second'), setup.expr.birth.second),
(partial(date_value, lambda d: d.isocalendar()[1]), setup.expr.birth.weekofyear),
(partial(date_value, lambda d: d.weekday()), setup.expr.birth.dayofweek),
(partial(date_value, lambda d: d.weekday()), setup.expr.birth.weekday),
(partial(date_value, lambda d: time.mktime(d.timetuple())), setup.expr.birth.unix_timestamp),
(partial(date_value, lambda d: datetime.combine(d.date(), datetime.min.time())), setup.expr.birth.date),
]
fields = [it[1].rename('birth'+str(i)) for i, it in enumerate(methods_to_fields)]
expr = setup.expr[fields]
res = setup.engine.execute(expr)
result = get_result(res)
for i, it in enumerate(methods_to_fields):
method = it[0]
first = method()
try:
import pandas as pd
def conv(v):
if isinstance(v, pd.Timestamp):
v = v.to_datetime()
if isinstance(v, datetime):
return v.replace(tzinfo=None)
return v
except ImportError:
conv = lambda v: v
second = [conv(it[i]) for it in result]
assert first == second
def test_groupby_aggregation(odps, setup):
data = [
['name1', 4, 5.3, None, None, None],
['name2', 2, 3.5, None, None, None],
['name1', 4, 4.2, None, None, None],
['name1', 3, 2.2, None, None, None],
['name1', 3, 4.1, None, None, None],
]
setup.gen_data(data=data)
expr = setup.expr.groupby(['name', 'id'])[lambda x: x.fid.min() * 2 < 8] \
.agg(setup.expr.fid.max() + 1, new_id=setup.expr.id.sum())
res = setup.engine.execute(expr)
result = get_result(res)
expected = [
['name1', 3, 5.1, 6],
['name2', 2, 4.5, 2]
]
result = sorted(result, key=lambda k: k[0])
assert approx_list(expected, abs=.001) == result
expr = setup.expr.name.value_counts()[:25]
expected = [
['name1', 4],
['name2', 1]
]
res = setup.engine.execute(expr)
result = get_result(res)
assert expected == result
expr = setup.expr.name.topk(25)
res = setup.engine.execute(expr)
result = get_result(res)
assert expected == result
expr = setup.expr.groupby('name').count()
res = setup.engine.execute(expr)
result = get_result(res)
assert [it[1:] for it in expected] == result
expected = [
['name1', 2],
['name2', 1]
]
expr = setup.expr.groupby('name').id.nunique()
res = setup.engine.execute(expr)
result = get_result(res)
assert [it[1:] for it in expected] == result
expr = setup.expr[setup.expr['id'] > 2].name.value_counts()[:25]
expected = [
['name1', 4]
]
res = setup.engine.execute(expr)
result = get_result(res)
assert expected == result
expr = setup.expr.groupby('name', Scalar(1).rename('constant')) \
.agg(id=setup.expr.id.sum())
expected = [
['name1', 1, 14],
['name2', 1, 2]
]
res = setup.engine.execute(expr)
result = get_result(res)
assert expected == result
expr = setup.expr[:1]
expr = expr.groupby('name').agg(expr.id.sum())
res = setup.engine.execute(expr)
result = get_result(res)
expected = [
['name1', 4]
]
assert expected == result
expr = setup.expr.groupby('id').name.cat(sep=',')
res = setup.engine.execute(expr)
result = get_result(res)
expected = [['name2'], ['name1,name1'], ['name1,name1']]
assert sorted(result) == sorted(expected)
def test_join_groupby(odps, setup):
data = [
['name1', 4, 5.3, None, None, None],
['name2', 2, 3.5, None, None, None],
['name1', 4, 4.2, None, None, None],
['name1', 3, 2.2, None, None, None],
['name1', 3, 4.1, None, None, None],
]
data2 = [
['name1', 4, -1],
['name2', 1, -2]
]
datatypes = lambda *types: [validate_data_type(t) for t in types]
schema2 = TableSchema.from_lists(['name', 'id2', 'id3'],
datatypes('string', 'int64', 'int64'))
table_name = tn('pyodps_test_engine_table2')
table2 = setup.create_table_and_insert_data(table_name, schema2, data2)
expr2 = CollectionExpr(_source_data=table2, _schema=schema2)
setup.gen_data(data=data)
expr = setup.expr.join(expr2, on='name')[setup.expr]
expr = expr.groupby('id').agg(expr.fid.sum())
res = setup.engine.execute(expr)
result = get_result(res)
id_idx = [idx for idx, col in enumerate(setup.expr.schema.names) if col == 'id'][0]
fid_idx = [idx for idx, col in enumerate(setup.expr.schema.names) if col == 'fid'][0]
expected = [[k, sum(v[fid_idx] for v in row)]
for k, row in itertools.groupby(sorted(data, key=lambda r: r[id_idx]), lambda r: r[id_idx])]
for it in zip(sorted(expected, key=lambda it: it[0]), sorted(result, key=lambda it: it[0])):
assert pytest.approx(it[0][0]) == it[1][0]
assert pytest.approx(it[0][1]) == it[1][1]
def test_filter_groupby(odps, setup):
data = [
['name1', 4, 5.3, None, None, None],
['name2', 2, 3.5, None, None, None],
['name1', 4, 4.2, None, None, None],
['name1', 3, 2.2, None, None, None],
['name1', 3, 4.1, None, None, None],
]
setup.gen_data(data=data)
expr = setup.expr.groupby(['name']).agg(id=setup.expr.id.max())[lambda x: x.id > 3]
res = setup.engine.execute(expr)
result = get_result(res)
assert len(result) == 1
expected = [
['name1', 4]
]
assert expected == result
def test_window_function(odps, setup):
if setup.sql_engine.name == 'mysql':
# mysql doesn't support window function
return
data = [
['name1', 4, 5.3, None, None, None],
['name2', 2, 3.5, None, None, None],
['name1', 4, 4.2, None, None, None],
['name1', 3, 2.2, None, None, None],
['name1', 3, 6.1, None, None, None],
]
setup.gen_data(data=data)
expr = setup.expr.groupby('name').id.cumsum()
res = setup.engine.execute(expr)
result = get_result(res)
expected = [[14]] * 4 + [[2]]
assert sorted(expected) == sorted(result)
expr = setup.expr.groupby('name').sort('fid').id.cummax()
res = setup.engine.execute(expr)
result = get_result(res)
expected = [[3], [4], [4], [4], [2]]
assert sorted(expected) == sorted(result)
expr = setup.expr[
setup.expr.groupby('name', 'id').sort('fid').id.cummean(),
]
res = setup.engine.execute(expr)
result = get_result(res)
expected = [
[3], [3], [4], [4], [2]
]
assert sorted(expected) == sorted(result)
expr = setup.expr.groupby('name').mutate(id2=lambda x: x.id.cumcount(),
fid2=lambda x: x.fid.cummin(sort='id'))
res = setup.engine.execute(expr['name', 'id2', 'fid2'])
result = get_result(res)
expected = [
['name1', 4, 2.2],
['name1', 4, 2.2],
['name1', 4, 2.2],
['name1', 4, 2.2],
['name2', 1, 3.5],
]
assert sorted(expected) == sorted(result)
expr = setup.expr[
setup.expr.id,
setup.expr.groupby('name').rank('id'),
setup.expr.groupby('name').dense_rank('fid', ascending=False),
setup.expr.groupby('name').row_number(sort=['id', 'fid'], ascending=[True, False]),
setup.expr.groupby('name').percent_rank('id'),
]
res = setup.engine.execute(expr)
result = get_result(res)
expected = [
[4, 3, 2, 3, float(2) / 3],
[2, 1, 1, 1, 0.0],
[4, 3, 3, 4, float(2) / 3],
[3, 1, 4, 2, float(0) / 3],
[3, 1, 1, 1, float(0) / 3]
]
for l, r in zip(sorted(expected), sorted(result)):
assert approx_list(l) == r
expr = setup.expr[
setup.expr.id,
setup.expr.groupby('name').id.lag(offset=3, default=0, sort=['id', 'fid']).rename('id2'),
setup.expr.groupby('name').id.lead(offset=1, default=-1,
sort=['id', 'fid'], ascending=[False, False]).rename('id3'),
]
res = setup.engine.execute(expr)
result = get_result(res)
expected = [
[4, 3, 4],
[2, 0, -1],
[4, 0, 3],
[3, 0, -1],
[3, 0, 3]
]
assert sorted(expected) == sorted(result)
def test_sort_distinct(odps, setup):
data = [
['name1', 4, None, None, None, None],
['name2', 2, None, None, None, None],
['name1', 4, None, None, None, None],
['name1', 3, None, None, None, None],
]
setup.gen_data(data=data)
expr = setup.expr.sort(['name', -setup.expr.id]).distinct(['name', lambda x: x.id + 1])[:50]
res = setup.engine.execute(expr)
result = get_result(res)
assert len(result) == 3
expected = [
['name1', 5],
['name1', 4],
['name2', 3]
]
assert sorted(expected) == sorted(result)
def test_join(odps, setup):
data = [
['name1', 4, 5.3, None, None, None],
['name2', 2, 3.5, None, None, None],
['name1', 4, 4.2, None, None, None],
['name1', 3, 2.2, None, None, None],
['name1', 3, 4.1, None, None, None],
]
data2 = [
['name1', 4, -1],
['name2', 1, -2]
]
datatypes = lambda *types: [validate_data_type(t) for t in types]
schema2 = TableSchema.from_lists(['name', 'id2', 'id3'],
datatypes('string', 'int64', 'int64'))
table_name = tn('pyodps_test_engine_table2')
table2 = setup.create_table_and_insert_data(table_name, schema2, data2)
expr2 = CollectionExpr(_source_data=table2, _schema=schema2)
setup.gen_data(data=data)
try:
expr = setup.expr.join(expr2)['name', 'id2']
res = setup.engine.execute(expr)
result = get_result(res)
assert len(result) == 5
expected = [
[to_text('name1'), 4],
[to_text('name2'), 1]
]
assert all(it in expected for it in result) is True
expr = setup.expr.join(expr2, on=['name', ('id', 'id2')])[setup.expr.name, expr2.id2]
res = setup.engine.execute(expr)
result = get_result(res)
assert len(result) == 2
expected = [to_text('name1'), 4]
assert all(it == expected for it in result) is True
expr = setup.expr.left_join(expr2, on=['name', ('id', 'id2')])[setup.expr.name, expr2.id2]
res = setup.engine.execute(expr)
result = get_result(res)
expected = [
['name1', 4],
['name2', None],
['name1', 4],
['name1', None],
['name1', None]
]
assert len(result) == 5
assert all(it in expected for it in result) is True
expr = setup.expr.right_join(expr2, on=['name', ('id', 'id2')])[setup.expr.name, expr2.id2]
res = setup.engine.execute(expr)
result = get_result(res)
expected = [
['name1', 4],
['name1', 4],
[None, 1],
]
assert len(result) == 3
assert all(it in expected for it in result) is True
if setup.sql_engine.name != 'mysql':
expr = setup.expr.outer_join(expr2, on=['name', ('id', 'id2')])[setup.expr.name, expr2.id2]
res = setup.engine.execute(expr)
result = get_result(res)
expected = [
['name1', 4],
['name1', 4],
['name2', None],
['name1', None],
['name1', None],
[None, 1],
]
assert len(result) == 6
assert all(it in expected for it in result) is True
grouped = setup.expr.groupby('name').agg(new_id=setup.expr.id.sum()).cache()
setup.engine.execute(setup.expr.join(grouped, on='name'))
if setup.sql_engine.name != 'mysql':
expr = setup.expr.join(expr2, on=['name', ('id', 'id2')])[
lambda x: x.groupby(Scalar(1)).sort('name').row_number(), ]
setup.engine.execute(expr)
finally:
[conn.close() for conn in _engine_to_connections.values()]
table2.drop()
def test_union(odps, setup):
data = [
['name1', 4, 5.3, None, None, None],
['name2', 2, 3.5, None, None, None],
['name1', 4, 4.2, None, None, None],
['name1', 3, 2.2, None, None, None],
['name1', 3, 4.1, None, None, None],
]
data2 = [
['name3', 5, -1],
['name4', 6, -2]
]
datatypes = lambda *types: [validate_data_type(t) for t in types]
schema2 = TableSchema.from_lists(['name', 'id2', 'id3'],
datatypes('string', 'int64', 'int64'))
table_name = tn('pyodps_test_engine_table2')
table2 = setup.create_table_and_insert_data(table_name, schema2, data2)
expr2 = CollectionExpr(_source_data=table2, _schema=schema2)
setup.gen_data(data=data)
try:
expr = setup.expr['name', 'id'].distinct().union(expr2[expr2.id2.rename('id'), 'name'])
res = setup.engine.execute(expr)
result = get_result(res)
expected = [
['name1', 4],
['name1', 3],
['name2', 2],
['name3', 5],
['name4', 6]
]
result = sorted(result)
expected = sorted(expected)
assert len(result) == len(expected)
for e, r in zip(result, expected):
assert [to_text(t) for t in e] == [to_text(t) for t in r]
finally:
[conn.close() for conn in _engine_to_connections.values()]
table2.drop()
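```

Condensed, the fixture's wiring is: build a SQLAlchemy table from a DataFrame schema, wrap it in a `CollectionExpr`, and run expressions through `SQLAlchemyEngine`. A sketch under the same assumptions the file makes (pyodps installed, an older SQLAlchemy that still accepts `MetaData(bind=...)`); absolute module paths are inferred from the file's relative imports, and in-memory SQLite stands in for PostgreSQL:

```python
import sqlalchemy
from sqlalchemy import create_engine

from odps.models import TableSchema
from odps.df.types import validate_data_type
from odps.df.expr.expressions import CollectionExpr
from odps.df.backends.sqlalchemy.engine import SQLAlchemyEngine
from odps.df.backends.sqlalchemy.types import df_schema_to_sqlalchemy_columns

schema = TableSchema.from_lists(
    ['name', 'id'], [validate_data_type(t) for t in ('string', 'int64')])

sa_engine = create_engine('sqlite://')
metadata = sqlalchemy.MetaData(bind=sa_engine)
table = sqlalchemy.Table(
    'demo', metadata,
    *df_schema_to_sqlalchemy_columns(schema, engine=sa_engine))
metadata.create_all()

# Wrap the table as a DataFrame expression and execute an aggregation.
expr = CollectionExpr(_source_data=table, _schema=schema)
res = SQLAlchemyEngine().execute(expr.id.sum())
```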

---

**Record 6: idaholab/raven · ValidationBase.py**

| field | value |
|---|---|
| blob_id | 8a98ad43a99f3ab81c1a2783b921ed05c254f856 |
| directory_id | dcd772f567ef8a8a1173a9f437cd68f211fb9362 |
| path | /ravenframework/Models/PostProcessors/ValidationBase.py |
| content_id | 87948f36cee3302e03f32b1eb54481c23e6470e3 |
| detected_licenses | ["Apache-2.0", "LicenseRef-scancode-warranty-disclaimer", "BSD-2-Clause", "BSD-3-Clause"] |
| license_type | permissive |
| repo_name | idaholab/raven |
| snapshot_id | 39cdce98ad916c638399232cdc01a9be00e200a2 |
| revision_id | 2b16e7aa3325fe84cab2477947a951414c635381 |
| branch_name | refs/heads/devel |
| visit_date | 2023-08-31T08:40:16.653099 |
| revision_date | 2023-08-29T16:21:51 |
| committer_date | 2023-08-29T16:21:51 |
| github_id | 85,989,537 |
| star_events_count | 201 |
| fork_events_count | 126 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-13T21:55:43 |
| gha_created_at | 2017-03-23T19:29:27 |
| gha_language | C++ |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 10,133 |
| extension | py |
| filename | ValidationBase.py |

content:

```python
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on March 20, 2021
@author: alfoa, wangc
  description: Validation PostProcessor base class. It is aimed to
  represent a base class for any validation techniques and processes.
"""
#External Modules---------------------------------------------------------------
import numpy as np
import copy
import time
import xarray as xr
#External Modules End-----------------------------------------------------------
#Internal Modules---------------------------------------------------------------
from .PostProcessorReadyInterface import PostProcessorReadyInterface
from ...utils import utils, mathUtils, xmlUtils
from ...utils import InputData, InputTypes
from ... import DataObjects
from ... import MetricDistributor
#Internal Modules End-----------------------------------------------------------
class ValidationBase(PostProcessorReadyInterface):
"""
Validation class. It will apply the specified validation algorithms in
the models to a dataset, each specified algorithm's output can be loaded to
dataObject.
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, specs, InputData.ParameterInput, class to use for
specifying input of cls.
"""
## This will replace the lines above
specs = super(ValidationBase, cls).getInputSpecification()
preProcessorInput = InputData.parameterInputFactory("PreProcessor", contentType=InputTypes.StringType)
preProcessorInput.addParam("class", InputTypes.StringType)
preProcessorInput.addParam("type", InputTypes.StringType)
specs.addSub(preProcessorInput)
pivotParameterInput = InputData.parameterInputFactory("pivotParameter", contentType=InputTypes.StringType)
specs.addSub(pivotParameterInput)
featuresInput = InputData.parameterInputFactory("Features", contentType=InputTypes.StringListType)
specs.addSub(featuresInput)
targetsInput = InputData.parameterInputFactory("Targets", contentType=InputTypes.StringListType)
specs.addSub(targetsInput)
metricInput = InputData.parameterInputFactory("Metric", contentType=InputTypes.StringType)
metricInput.addParam("class", InputTypes.StringType)
metricInput.addParam("type", InputTypes.StringType)
specs.addSub(metricInput)
return specs
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
super().__init__()
self.printTag = 'POSTPROCESSOR VALIDATION'
self.pivotParameter = None ## default pivotParameter for HistorySet
self._type = None ## the type of library that are used for validation, i.e. DSS
# add assembly objects (and set up pointers)
self.PreProcessor = None ## Instance of PreProcessor, default is None
self.metrics = None ## Instance of Metric, default is None
self.dataType = ['static', 'dynamic'] # the type of data can be passed in (static aka PointSet, dynamic aka HistorySet) (if both are present the validation algorithm can work for both data types)
    self.acceptableMetrics = [] # if not populated all types of metrics are accepted, otherwise list the metrics (see Probabilistic.py for an example)
self.features = None # list of feature variables
self.targets = None # list of target variables
self.pivotValues = None # pivot values (present if dynamic == True)
self.addAssemblerObject('Metric', InputData.Quantity.zero_to_infinity)
self.addAssemblerObject('PreProcessor', InputData.Quantity.zero_to_infinity)
## dataset option
self.setInputDataType('xrDataset')
    # switch to 'dict' when you are using dict-based operations
# self.setInputDataType('dict')
# If you want to keep the input meta data, please pass True, otherwise False
self.keepInputMeta(False)
def _localWhatDoINeed(self):
"""
This method is a local mirror of the general whatDoINeed method.
It is implemented by the samplers that need to request special objects
      @ In, None
@ Out, dict, dictionary of objects needed
"""
return {'internal':[(None,'jobHandler')]}
def _localGenerateAssembler(self,initDict):
"""Generates the assembler.
@ In, initDict, dict, init objects
@ Out, None
"""
self.jobHandler = initDict['internal']['jobHandler']
def _handleInput(self, paramInput):
"""
Function to handle the parsed paramInput for this class.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
super()._handleInput(paramInput)
    # this loop sets the pivot parameter (it could use paramInput.findFirst, but we want to show how to add more parameters)
for child in paramInput.subparts:
if child.getName() == 'pivotParameter':
self.pivotParameter = child.value
elif child.getName() == 'Features':
self.features = child.value
elif child.getName() == 'Targets':
self.targets = child.value
if 'static' not in self.dataType and self.pivotParameter is None:
      self.raiseAnError(IOError, "The validation algorithm '{}' is a dynamic model ONLY but no <pivotParameter> node has been provided".format(self._type))
if not self.features:
self.raiseAnError(IOError, "XML node 'Features' is required but not provided")
def initialize(self, runInfo, inputs, initDict):
"""
Method to initialize the DataMining pp.
@ In, runInfo, dict, dictionary of run info (e.g. working dir, etc)
@ In, inputs, list, list of inputs
@ In, initDict, dict, dictionary with initialization options
@ Out, None
"""
super().initialize(runInfo, inputs, initDict)
if 'PreProcessor' in self.assemblerDict:
self.PreProcessor = self.assemblerDict['PreProcessor'][0][3]
if 'Metric' in self.assemblerDict:
metrics = [metric[3] for metric in self.assemblerDict['Metric']]
self.metrics = [MetricDistributor.factory.returnInstance('MetricDistributor', metric) for metric in metrics]
if len(inputs) > 1:
      # if inputs > 1, check that the | syntax is present to understand where to get the features and targets
notStandard = [k for k in self.features + self.targets if "|" not in k]
if notStandard:
        self.raiseAnError(IOError, "# Input Datasets/DataObjects > 1! features and targets must use the syntax DataObjectName|feature to be usable! Non-standard features are: {}!".format(",".join(notStandard)))
# now lets check that the variables are in the dataobjects
if isinstance(inputs[0], DataObjects.DataSet):
do = [inp.name for inp in inputs]
if len(inputs) > 1:
allFound = [feat.split("|")[0].strip() in do for feat in self.features]
allFound += [targ.split("|")[0].strip() in do for targ in self.targets]
if not all(allFound):
self.raiseAnError(IOError, "Targets and Features are linked to DataObjects that have not been listed as inputs in the Step. Please check input!")
# check variables
for indx, dobj in enumerate(do):
variables = [var.split("|")[-1].strip() for var in (self.features + self.targets) if dobj in var]
if not utils.isASubset(variables,inputs[indx].getVars()):
          self.raiseAnError(IOError, "The variables '{}' were not found in input DataObject '{}'!".format(",".join(list(set(variables) - set(inputs[indx].getVars()))), dobj))
if self.acceptableMetrics:
acceptable = [True if metric.estimator.isInstanceString(self.acceptableMetrics) else False for metric in self.metrics]
if not all(acceptable):
notAcceptable = [self.metrics[i].estimator.interfaceKind for i, x in enumerate(acceptable) if not x]
self.raiseAnError(IOError,
"The metrics '{}' are not acceptable for validation algorithm: '{}'".format(', '.join(notAcceptable), self.name))
def _getDataFromDataDict(self, datasets, var, names=None):
"""
Utility function to retrieve the data from dataDict
@ In, datasets, list, list of datasets (data1,data2,etc.) to search from.
@ In, names, list, optional, list of datasets names (data1,data2,etc.). If not present, the search will be done on the full list.
      @ In, var, str, the variable to find (either in format dataobject|var or simply var)
      @ Out, data, tuple(numpy.ndarray, xarray.DataArray or None), the retrieved data (data, probability weights (None if not present))
"""
pw = None
if "|" in var and names is not None:
do, feat = var.split("|")
dat = datasets[do][feat]
else:
for doIndex, ds in enumerate(datasets):
if var in ds:
dat = ds[var]
break
if 'ProbabilityWeight-{}'.format(feat) in datasets[do]:
pw = datasets[do]['ProbabilityWeight-{}'.format(feat)].values
elif 'ProbabilityWeight' in datasets[do]:
pw = datasets[do]['ProbabilityWeight'].values
dim = len(dat.shape)
# (numRealizations, numHistorySteps) for MetricDistributor
dat = dat.values
if dim == 1:
# the following reshaping does not require a copy
dat.shape = (dat.shape[0], 1)
data = dat, pw
return data
# Each individual validation pp should implement their own run method.
# def run(self, input):
@staticmethod
def getDataSetName(ds):
"""
"""
datasetMeta = ds.attrs['DataSet'].getRoot()
name = xmlUtils.findPath(datasetMeta, 'general/datasetName').text
return name
|
b17055afd64655cf5fbb2f544747ba6761cfc10e
|
683ea88855eee3d09a8d9cc23aeda232b3257b66
|
/doc/conf.py
|
1a9fb0bb8c7c2076f251c7398c03eac1cdc81d92
|
[
"MIT"
] |
permissive
|
scott-griffiths/bitstring
|
edf63e8a7c9f2f318c1225d1ae8cd09a55d64730
|
f7b16b4879f7b328321648b62174cce966714a39
|
refs/heads/main
| 2023-08-18T21:35:13.514898
| 2023-08-17T20:50:58
| 2023-08-17T20:50:58
| 33,791,767
| 400
| 86
|
MIT
| 2023-08-29T16:48:37
| 2015-04-11T20:49:04
|
Python
|
UTF-8
|
Python
| false
| false
| 982
|
py
|
conf.py
|
# Configuration file for the Sphinx documentation builder.
#
import os
import time
import datetime
year = datetime.datetime.utcfromtimestamp(
int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
).year
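# Illustrative note: the copyright year can be pinned for reproducible builds by
# exporting SOURCE_DATE_EPOCH first, e.g. SOURCE_DATE_EPOCH=1577836800 sphinx-build . _build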
project = 'bitstring'
copyright = f'2006 - {year}, Scott Griffiths'
author = 'Scott Griffiths'
release = '4.1'
extensions = []
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
root_doc = 'index'
add_function_parentheses = False
add_module_names = False
html_show_sphinx = False
html_static_path = ['_static']
html_css_files = ["custom.css"]
html_theme = 'piccolo_theme'
html_theme_options = {
# "banner_text": "New major version released. Requires Python 3.7 or later - see release notes for full details.",
# "banner_hiding": "permanent",
"show_theme_credit": False,
"globaltoc_maxdepth": 2,
"source_url": 'https://github.com/scott-griffiths/bitstring/',
}
html_logo = './bitstring_logo_small_white.png'
|
b2214a3a16f6d92bfe277477c565c959dca9e604
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/coghq/DistributedCogKart.py
|
0ab2b165fd85222947b9d5376a38794035b17017
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 17,454
|
py
|
DistributedCogKart.py
|
import math
from panda3d.core import CollisionSphere, CollisionNode, Vec3, Point3, deg2Rad
from direct.interval.IntervalGlobal import Sequence, Func, Parallel, ActorInterval, Wait, LerpHprInterval, ProjectileInterval, LerpPosInterval
from direct.directnotify import DirectNotifyGlobal
from toontown.building import ElevatorConstants
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.safezone import DistributedGolfKart
from toontown.building import DistributedElevatorExt
from toontown.hood import ZoneUtil
from toontown.distributed import DelayDelete
from direct.showbase import PythonUtil
from toontown.building import BoardingGroupShow
class DistributedCogKart(DistributedElevatorExt.DistributedElevatorExt):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCogKart')
JumpOutOffsets = ((6.5, -2, -0.025),
(-6.5, -2, -0.025),
(3.75, 5, -0.025),
(-3.75, 5, -0.025))
def __init__(self, cr):
DistributedElevatorExt.DistributedElevatorExt.__init__(self, cr)
self.type = ElevatorConstants.ELEVATOR_COUNTRY_CLUB
self.kartModelPath = 'phase_12/models/bossbotHQ/Coggolf_cart3.bam'
self.leftDoor = None
self.rightDoor = None
self.fillSlotTrack = None
return
def generate(self):
DistributedElevatorExt.DistributedElevatorExt.generate(self)
self.loader = self.cr.playGame.hood.loader
if self.loader:
self.notify.debug('Loader has been loaded')
self.notify.debug(str(self.loader))
else:
self.notify.debug('Loader has not been loaded')
self.golfKart = render.attachNewNode('golfKartNode')
self.kart = loader.loadModel(self.kartModelPath)
self.kart.setPos(0, 0, 0)
self.kart.setScale(1)
self.kart.reparentTo(self.golfKart)
self.golfKart.reparentTo(self.loader.geom)
self.wheels = self.kart.findAllMatches('**/wheelNode*')
self.numWheels = self.wheels.getNumPaths()
def announceGenerate(self):
DistributedElevatorExt.DistributedElevatorExt.announceGenerate(self)
angle = self.startingHpr[0]
angle -= 90
radAngle = deg2Rad(angle)
unitVec = Vec3(math.cos(radAngle), math.sin(radAngle), 0)
unitVec *= 45.0
self.endPos = self.startingPos + unitVec
self.endPos.setZ(0.5)
dist = Vec3(self.endPos - self.enteringPos).length()
wheelAngle = dist / (4.8 * 1.4 * math.pi) * 360
self.kartEnterAnimateInterval = Parallel(LerpHprInterval(self.wheels[0], 5.0, Vec3(self.wheels[0].getH(), wheelAngle, self.wheels[0].getR())), LerpHprInterval(self.wheels[1], 5.0, Vec3(self.wheels[1].getH(), wheelAngle, self.wheels[1].getR())), LerpHprInterval(self.wheels[2], 5.0, Vec3(self.wheels[2].getH(), wheelAngle, self.wheels[2].getR())), LerpHprInterval(self.wheels[3], 5.0, Vec3(self.wheels[3].getH(), wheelAngle, self.wheels[3].getR())), name='CogKartAnimate')
trolleyExitTrack1 = Parallel(LerpPosInterval(self.golfKart, 5.0, self.endPos), self.kartEnterAnimateInterval, name='CogKartExitTrack')
self.trolleyExitTrack = Sequence(trolleyExitTrack1)
self.trolleyEnterTrack = Sequence(LerpPosInterval(self.golfKart, 5.0, self.startingPos, startPos=self.enteringPos))
self.closeDoors = Sequence(self.trolleyExitTrack, Func(self.onDoorCloseFinish))
self.openDoors = Sequence(self.trolleyEnterTrack)
def delete(self):
DistributedElevatorExt.DistributedElevatorExt.delete(self)
if hasattr(self, 'elevatorFSM'):
del self.elevatorFSM
def setBldgDoId(self, bldgDoId):
self.bldg = None
self.setupElevatorKart()
return
def setupElevatorKart(self):
collisionRadius = ElevatorConstants.ElevatorData[self.type]['collRadius']
self.elevatorSphere = CollisionSphere(0, 0, 0, collisionRadius)
self.elevatorSphere.setTangible(1)
self.elevatorSphereNode = CollisionNode(self.uniqueName('elevatorSphere'))
self.elevatorSphereNode.setIntoCollideMask(ToontownGlobals.WallBitmask)
self.elevatorSphereNode.addSolid(self.elevatorSphere)
self.elevatorSphereNodePath = self.getElevatorModel().attachNewNode(self.elevatorSphereNode)
self.elevatorSphereNodePath.hide()
self.elevatorSphereNodePath.reparentTo(self.getElevatorModel())
self.elevatorSphereNodePath.stash()
self.boardedAvIds = {}
self.finishSetup()
def setColor(self, r, g, b):
pass
def getElevatorModel(self):
return self.golfKart
def enterWaitEmpty(self, ts):
DistributedElevatorExt.DistributedElevatorExt.enterWaitEmpty(self, ts)
def exitWaitEmpty(self):
DistributedElevatorExt.DistributedElevatorExt.exitWaitEmpty(self)
def forceDoorsOpen(self):
pass
def forceDoorsClosed(self):
pass
def setPosHpr(self, x, y, z, h, p, r):
self.startingPos = Vec3(x, y, z)
self.enteringPos = Vec3(x, y, z - 10)
self.startingHpr = Vec3(h, 0, 0)
self.golfKart.setPosHpr(x, y, z, h, 0, 0)
def enterClosing(self, ts):
if self.localToonOnBoard:
elevator = self.getPlaceElevator()
if elevator:
elevator.fsm.request('elevatorClosing')
self.closeDoors.start(ts)
def enterClosed(self, ts):
self.forceDoorsClosed()
self.kartDoorsClosed(self.getZoneId())
def kartDoorsClosed(self, zoneId):
if self.localToonOnBoard:
hoodId = ZoneUtil.getHoodId(zoneId)
doneStatus = {'loader': 'suitInterior',
'where': 'suitInterior',
'hoodId': hoodId,
'zoneId': zoneId,
'shardId': None}
elevator = self.elevatorFSM
del self.elevatorFSM
elevator.signalDone(doneStatus)
return
def setCountryClubInteriorZone(self, zoneId):
if self.localToonOnBoard:
hoodId = self.cr.playGame.hood.hoodId
countryClubId = self.countryClubId
if bboard.has('countryClubIdOverride'):
countryClubId = bboard.get('countryClubIdOverride')
doneStatus = {'loader': 'cogHQLoader',
'where': 'countryClubInterior',
'how': 'teleportIn',
'zoneId': zoneId,
             'countryClubId': countryClubId,
'hoodId': hoodId}
self.cr.playGame.getPlace().elevator.signalDone(doneStatus)
def setCountryClubInteriorZoneForce(self, zoneId):
place = self.cr.playGame.getPlace()
if place:
place.fsm.request('elevator', [self, 1])
hoodId = self.cr.playGame.hood.hoodId
countryClubId = self.countryClubId
if bboard.has('countryClubIdOverride'):
countryClubId = bboard.get('countryClubIdOverride')
doneStatus = {'loader': 'cogHQLoader',
'where': 'countryClubInterior',
'how': 'teleportIn',
'zoneId': zoneId,
             'countryClubId': countryClubId,
'hoodId': hoodId}
if hasattr(place, 'elevator') and place.elevator:
place.elevator.signalDone(doneStatus)
else:
self.notify.warning("setMintInteriorZoneForce: Couldn't find playGame.getPlace().elevator, zoneId: %s" % zoneId)
else:
self.notify.warning("setCountryClubInteriorZoneForce: Couldn't find playGame.getPlace(), zoneId: %s" % zoneId)
def setCountryClubId(self, countryClubId):
self.countryClubId = countryClubId
def getZoneId(self):
return 0
def fillSlot(self, index, avId, wantBoardingShow = 0):
self.notify.debug('%s.fillSlot(%s, %s, ... %s)' % (self.doId,
index,
avId,
globalClock.getRealTime()))
request = self.toonRequests.get(index)
if request:
self.cr.relatedObjectMgr.abortRequest(request)
del self.toonRequests[index]
if avId == 0:
pass
elif avId not in self.cr.doId2do:
func = PythonUtil.Functor(self.gotToon, index, avId)
self.toonRequests[index] = self.cr.relatedObjectMgr.requestObjects([avId], allCallback=func)
elif not self.isSetup:
self.deferredSlots.append((index, avId, wantBoardingShow))
else:
if avId == base.localAvatar.getDoId():
place = base.cr.playGame.getPlace()
if not place:
return
elevator = self.getPlaceElevator()
                if elevator is None:
place.fsm.request('elevator')
elevator = self.getPlaceElevator()
if not elevator:
return
self.localToonOnBoard = 1
if hasattr(localAvatar, 'boardingParty') and localAvatar.boardingParty:
localAvatar.boardingParty.forceCleanupInviteePanel()
localAvatar.boardingParty.forceCleanupInviterPanels()
if hasattr(base.localAvatar, 'elevatorNotifier'):
base.localAvatar.elevatorNotifier.cleanup()
cameraTrack = Sequence()
cameraTrack.append(Func(elevator.fsm.request, 'boarding', [self.getElevatorModel()]))
cameraTrack.append(Func(elevator.fsm.request, 'boarded'))
toon = self.cr.doId2do[avId]
toon.stopSmooth()
toon.wrtReparentTo(self.golfKart)
sitStartDuration = toon.getDuration('sit-start')
jumpTrack = self.generateToonJumpTrack(toon, index)
track = Sequence(jumpTrack, Func(toon.setAnimState, 'Sit', 1.0), Func(self.clearToonTrack, avId), name=toon.uniqueName('fillElevator'), autoPause=1)
if wantBoardingShow:
boardingTrack, boardingTrackType = self.getBoardingTrack(toon, index, True)
track = Sequence(boardingTrack, track)
if avId == base.localAvatar.getDoId():
cameraWaitTime = 2.5
if boardingTrackType == BoardingGroupShow.TRACK_TYPE_RUN:
cameraWaitTime = 0.5
cameraTrack = Sequence(Wait(cameraWaitTime), cameraTrack)
if self.canHideBoardingQuitBtn(avId):
track = Sequence(Func(localAvatar.boardingParty.groupPanel.disableQuitButton), track)
if avId == base.localAvatar.getDoId():
track = Parallel(cameraTrack, track)
track.delayDelete = DelayDelete.DelayDelete(toon, 'CogKart.fillSlot')
self.storeToonTrack(avId, track)
track.start()
self.fillSlotTrack = track
self.boardedAvIds[avId] = None
return
def generateToonJumpTrack(self, av, seatIndex):
av.pose('sit', 47)
hipOffset = av.getHipsParts()[2].getPos(av)
def getToonJumpTrack(av, seatIndex):
def getJumpDest(av = av, node = self.golfKart):
dest = Point3(0, 0, 0)
if hasattr(self, 'golfKart') and self.golfKart:
dest = Vec3(self.golfKart.getPos(av.getParent()))
seatNode = self.golfKart.find('**/seat' + str(seatIndex + 1))
dest += seatNode.getPos(self.golfKart)
dna = av.getStyle()
dest -= hipOffset
if seatIndex < 2:
dest.setY(dest.getY() + 2 * hipOffset.getY())
dest.setZ(dest.getZ() + 0.1)
else:
                    self.notify.warning('getJumpDest: invalid golfKart, returning (0,0,0)')
return dest
def getJumpHpr(av = av, node = self.golfKart):
hpr = Point3(0, 0, 0)
if hasattr(self, 'golfKart') and self.golfKart:
hpr = self.golfKart.getHpr(av.getParent())
if seatIndex < 2:
hpr.setX(hpr.getX() + 180)
else:
hpr.setX(hpr.getX())
angle = PythonUtil.fitDestAngle2Src(av.getH(), hpr.getX())
hpr.setX(angle)
else:
self.notify.warning('getJumpHpr invalid golfKart, returning (0,0,0)')
return hpr
toonJumpTrack = Parallel(ActorInterval(av, 'jump'), Sequence(Wait(0.43), Parallel(LerpHprInterval(av, hpr=getJumpHpr, duration=0.9), ProjectileInterval(av, endPos=getJumpDest, duration=0.9))))
return toonJumpTrack
def getToonSitTrack(av):
toonSitTrack = Sequence(ActorInterval(av, 'sit-start'), Func(av.loop, 'sit'))
return toonSitTrack
toonJumpTrack = getToonJumpTrack(av, seatIndex)
toonSitTrack = getToonSitTrack(av)
jumpTrack = Sequence(Parallel(toonJumpTrack, Sequence(Wait(1), toonSitTrack)))
return jumpTrack
def emptySlot(self, index, avId, bailFlag, timestamp, timeSent = 0):
if self.fillSlotTrack:
self.fillSlotTrack.finish()
self.fillSlotTrack = None
if avId == 0:
pass
elif not self.isSetup:
newSlots = []
for slot in self.deferredSlots:
if slot[0] != index:
newSlots.append(slot)
self.deferredSlots = newSlots
elif avId in self.cr.doId2do:
if bailFlag == 1 and hasattr(self, 'clockNode'):
if timestamp < self.countdownTime and timestamp >= 0:
self.countdown(self.countdownTime - timestamp)
else:
self.countdown(self.countdownTime)
toon = self.cr.doId2do[avId]
toon.stopSmooth()
sitStartDuration = toon.getDuration('sit-start')
jumpOutTrack = self.generateToonReverseJumpTrack(toon, index)
track = Sequence(jumpOutTrack, Func(self.notifyToonOffElevator, toon), Func(self.clearToonTrack, avId), name=toon.uniqueName('emptyElevator'), autoPause=1)
if self.canHideBoardingQuitBtn(avId):
track.append(Func(localAvatar.boardingParty.groupPanel.enableQuitButton))
track.append(Func(localAvatar.boardingParty.enableGoButton))
track.delayDelete = DelayDelete.DelayDelete(toon, 'CogKart.emptySlot')
self.storeToonTrack(toon.doId, track)
track.start()
if avId == base.localAvatar.getDoId():
messenger.send('exitElevator')
if avId in self.boardedAvIds:
del self.boardedAvIds[avId]
else:
self.notify.warning('toon: ' + str(avId) + " doesn't exist, and" + ' cannot exit the elevator!')
return
def generateToonReverseJumpTrack(self, av, seatIndex):
self.notify.debug('av.getH() = %s' % av.getH())
def getToonJumpTrack(av, destNode):
def getJumpDest(av = av, node = destNode):
dest = node.getPos(av.getParent())
dest += Vec3(*self.JumpOutOffsets[seatIndex])
return dest
def getJumpHpr(av = av, node = destNode):
hpr = node.getHpr(av.getParent())
hpr.setX(hpr.getX() + 180)
angle = PythonUtil.fitDestAngle2Src(av.getH(), hpr.getX())
hpr.setX(angle)
return hpr
toonJumpTrack = Parallel(ActorInterval(av, 'jump'), Sequence(Wait(0.1), Parallel(ProjectileInterval(av, endPos=getJumpDest, duration=0.9))))
return toonJumpTrack
toonJumpTrack = getToonJumpTrack(av, self.golfKart)
jumpTrack = Sequence(toonJumpTrack, Func(av.loop, 'neutral'), Func(av.wrtReparentTo, render))
return jumpTrack
def startCountdownClock(self, countdownTime, ts):
DistributedElevatorExt.DistributedElevatorExt.startCountdownClock(self, countdownTime, ts)
self.clock.setH(self.clock.getH() + 180)
def rejectBoard(self, avId, reason = 0):
print('rejectBoard %s' % reason)
if hasattr(base.localAvatar, 'elevatorNotifier'):
if reason == ElevatorConstants.REJECT_SHUFFLE:
base.localAvatar.elevatorNotifier.showMe(TTLocalizer.ElevatorHoppedOff)
elif reason == ElevatorConstants.REJECT_MINLAFF:
base.localAvatar.elevatorNotifier.showMe(TTLocalizer.KartMinLaff % self.minLaff)
elif reason == ElevatorConstants.REJECT_PROMOTION:
base.localAvatar.elevatorNotifier.showMe(TTLocalizer.BossElevatorRejectMessage)
elif reason == ElevatorConstants.REJECT_NOT_YET_AVAILABLE:
base.localAvatar.elevatorNotifier.showMe(TTLocalizer.NotYetAvailable)
doneStatus = {'where': 'reject'}
elevator = self.getPlaceElevator()
if elevator:
elevator.signalDone(doneStatus)
def getDestName(self):
if self.countryClubId == ToontownGlobals.BossbotCountryClubIntA:
return TTLocalizer.ElevatorBossBotCourse0
elif self.countryClubId == ToontownGlobals.BossbotCountryClubIntB:
return TTLocalizer.ElevatorBossBotCourse1
elif self.countryClubId == ToontownGlobals.BossbotCountryClubIntC:
return TTLocalizer.ElevatorBossBotCourse2
|
5e42922ccf2eedd4e1f80ed1c1f3651caff0edbc
|
a133a7c64f6e08def0f936898466990d1fd1b31f
|
/atomate/vasp/analysis/linear_response.py
|
41dde3a7cc231a0be9cf1407aeb00679456c9418
|
[
"LicenseRef-scancode-hdf5",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
hackingmaterials/atomate
|
a6458f9323b8f14d7b4ebb6558fb578d50a3f1ed
|
f4060e55ae3a22289fde9516ff0e8e4ac1d22190
|
refs/heads/main
| 2023-08-07T21:53:24.701157
| 2023-07-25T22:28:06
| 2023-07-25T22:28:06
| 43,023,379
| 217
| 173
|
NOASSERTION
| 2023-08-25T22:09:48
| 2015-09-23T19:53:55
|
Python
|
UTF-8
|
Python
| false
| false
| 17,723
|
py
|
linear_response.py
|
import numpy as np
from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer
from atomate.utils.utils import get_logger
logger = get_logger(__name__)
def procure_response_dict(
struct_final,
num_perturb_sites,
incar_dict,
outcar_dict,
inv_block_dict,
response_dict,
perturb_dict,
rkey,
# keys,
ldaul_vals,
analyzer_gs,
calcs_skipped,
):
"""
Function to gather response data, in preparation for linear regression.
This data is organized into `response_dict`.
"""
# perform magnetic ordering analysis
analyzer_output = CollinearMagneticStructureAnalyzer(struct_final, threshold=0.61)
magnet_order = analyzer_output.ordering.value
# if rkey == keys[0]: # store ground state ordering
# magnet_order_gs = magnet_order
# check if ordering matches ground state configuration
if analyzer_gs:
if not analyzer_gs.matches_ordering(struct_final):
# use_calc = False
calcs_skipped.append(
{
"ICHARG": incar_dict.get("ICHARG", 0),
"ISPIN": incar_dict.get("ISPIN", 1),
"LDAUU": incar_dict["LDAUU"].copy(),
"LDAUJ": incar_dict["LDAUJ"].copy(),
}
)
for i in range(num_perturb_sites):
specie = struct_final[i].specie
ldaul = ldaul_vals[i]
orbital = inv_block_dict[str(ldaul)]
perturb_dict.update({f"site{i}": {"specie": str(specie), "orbital": orbital}})
# Obtain occupancy values
n_tot = float(outcar_dict["charge"][i][orbital])
# FIXME: Adapt for noncollinear
m_z = float(outcar_dict["magnetization"][i][orbital])
n_up = 0.5 * (n_tot + m_z)
n_dn = 0.5 * (n_tot - m_z)
v_up = float(incar_dict["LDAUU"][i])
v_dn = float(incar_dict["LDAUJ"][i])
response_dict[rkey][f"site{i}"]["Nup"].append(n_up)
response_dict[rkey][f"site{i}"]["Ndn"].append(n_dn)
response_dict[rkey][f"site{i}"]["Ntot"].append(n_tot)
response_dict[rkey][f"site{i}"]["Mz"].append(m_z)
response_dict[rkey][f"site{i}"]["Vup"].append(v_up)
response_dict[rkey][f"site{i}"]["Vdn"].append(v_dn)
response_dict[rkey]["magnetic order"].append(magnet_order)
def response_fit(x, y):
"""
Function for fitting to response data. Returns: slope and associated error
"""
(p, pcov) = np.polyfit(x, y, 1, cov=True)
perr = np.sqrt(np.diag(pcov))
return p, perr
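# Illustrative usage (made-up numbers): np.polyfit returns coefficients with the
# highest degree first, so for a linear fit index -2 is the slope, as used below.
#   p, perr = response_fit([-0.1, 0.0, 0.1, 0.2], [5.02, 5.00, 4.97, 4.95])
#   chi, chi_err = p[-2], perr[-2]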
def response_fit_stepped(x, y, tol=1.0e-6):
"""
Function for fitting to response data
- includes the "slope ~ zero" case for stepped data due to low precision
Returns: slope and associated error
"""
is_stepped = False
step_id = -1
y_sort = [y for y, _ in sorted(zip(y, x))]
x_sort = [x for _, x in sorted(zip(y, x))]
    buff_size = 1  # minimum number of points required on each side of a candidate step
for i in range(buff_size, len(y_sort) - buff_size + 1):
if np.std(y_sort[0:i]) < tol and np.std(y_sort[i:]) < tol:
is_stepped = True
step_id = i
break
if is_stepped:
buff_max = 3 # must be >= three for first-order fit
if step_id < buff_max:
(p, perr) = response_fit(x[step_id:], y[step_id:])
elif step_id > (len(y) - buff_max):
(p, perr) = response_fit(x[0:step_id], y[0:step_id])
else:
(p1, p1err) = response_fit(x_sort[0:step_id], y_sort[0:step_id])
(p2, p2err) = response_fit(x_sort[step_id:], y_sort[step_id:])
p = 0.5 * (np.array(p1) + np.array(p2))
perr = np.sqrt(0.5 * (np.array(p1err) ** 2 + np.array(p2err) ** 2))
return p, perr
else:
(p, perr) = response_fit(x, y)
return p, perr
def obtain_response_matrices(
n_response,
spin_polarized,
response_dict,
keys,
):
"""
Function to compute self-consistent (SCF) and non-self-consistent (NSCF)
linear response "chi" matrices; In addition to using linear regression
to compute slopes about zero potential, the uncertainty associated
with these values are also stored for subsequent error quantification.
Returns: chi_matrix_nscf, chi_matrix_scf, chi_nscf_err, chi_scf_err
"""
# Matrices for self-consistent and non-self-consistent responses
# & associated element-wise errors
chi_matrix_nscf = np.zeros([n_response, n_response])
chi_matrix_scf = np.zeros([n_response, n_response])
chi_nscf_err = np.zeros([n_response, n_response])
chi_scf_err = np.zeros([n_response, n_response])
# Compute response matrices using fitting function
for ii in range(n_response):
for jj in range(n_response):
if spin_polarized:
i, j = ii // 2, jj // 2
si, sj = (
"up" if np.mod(ii, 2) == 0 else "dn",
"up" if np.mod(jj, 2) == 0 else "dn",
)
v_key = "V" + sj
n_key = "N" + si
else:
i, j = ii, jj
v_key = "Vup"
n_key = "Ntot"
if (
response_dict[keys[1]][f"site{i}"][n_key]
and response_dict[keys[2]][f"site{i}"][n_key]
and response_dict[keys[1]][f"site{j}"][v_key]
and response_dict[keys[2]][f"site{j}"][v_key]
):
# gather NSCF & SCF response data
v_nscf, n_nscf = [], []
v_scf, n_scf = [], []
for ll in [1, 2]:
for idx in range(len(response_dict[keys[ll]][f"site{j}"][v_key])):
v = response_dict[keys[ll]][f"site{j}"][v_key][idx]
n = response_dict[keys[ll]][f"site{i}"][n_key][idx]
# order = response_dict[keys[ll]]["magnetic order"][l]
# if order == magnet_order_gs:
isolated_response = v != 0.0
if isolated_response:
if ll == 1:
v_nscf.append(v)
n_nscf.append(n)
elif ll == 2:
v_scf.append(v)
n_scf.append(n)
# Add ground state
if (
response_dict[keys[0]][f"site{j}"][v_key]
and response_dict[keys[0]][f"site{i}"][n_key]
):
v = response_dict[keys[0]][f"site{j}"][v_key][0]
n = response_dict[keys[0]][f"site{i}"][n_key][0]
v_nscf.append(v)
n_nscf.append(n)
v_scf.append(v)
n_scf.append(n)
try:
fit_nscf = response_fit(v_nscf, n_nscf)
chi_nscf, err_chi_nscf = fit_nscf[0][-2], fit_nscf[1][-2]
fit_scf = response_fit(v_scf, n_scf)
chi_scf, err_chi_scf = fit_scf[0][-2], fit_scf[1][-2]
except Exception as exc:
chi_nscf, err_chi_nscf = float("nan"), float("nan")
chi_scf, err_chi_scf = float("nan"), float("nan")
logger.warning("Slope fitting fail", exc)
else:
chi_nscf, err_chi_nscf = float("nan"), float("nan")
chi_scf, err_chi_scf = float("nan"), float("nan")
chi_matrix_nscf[ii, jj] = chi_nscf
chi_matrix_scf[ii, jj] = chi_scf
chi_nscf_err[ii, jj] = err_chi_nscf
chi_scf_err[ii, jj] = err_chi_scf
return chi_matrix_nscf, chi_matrix_scf, chi_nscf_err, chi_scf_err
def inverse_matrix_uncertainty(matrix, matrix_covar):
"""
Function to compute the element-wise error propagation in matrix inversion
"""
m, n = matrix.shape
if m != n:
logger.warning("Matrix dimension error")
return float("nan") * matrix, float("nan") * matrix
matrixinv = np.linalg.inv(matrix)
matrixinv_var = np.zeros([m, n])
if m == 1 and n == 1:
jacobian = -1 / matrix[0, 0] ** 2
jacobians = [[jacobian]]
sigma_f = jacobian * matrix_covar[0, 0] * jacobian
matrixinv_var[0, 0] = sigma_f
return matrixinv, matrixinv_var, jacobians
# Function to determine the symbolic partial derivative of the
# determinant w.r.t. matrix element
def det_deriv(matrix, i, j):
mij = np.delete(np.delete(matrix, i, 0), j, 1)
partial = (-1) ** (i + j) * np.linalg.det(mij)
return partial
# Jacobians of each element of matrix inversion w.r.t.
# original matrix elements
jacobians = [[] for i in range(m)]
det = np.linalg.det(matrix)
for idx in range(m):
for jdx in range(n):
mji = np.delete(np.delete(matrix, jdx, 0), idx, 1)
minor = (-1) ** (idx + jdx) * np.linalg.det(mji)
j_matrix = np.zeros([m, n])
for kdx in range(m):
for ldx in range(n):
det_p = det_deriv(matrix, kdx, ldx)
if kdx == jdx or ldx == idx:
minor_p = 0.0
else:
kk, ll = (
kdx - 1 if kdx > jdx else kdx,
ldx - 1 if ldx > idx else ldx,
)
minor_p = (-1) ** (idx + jdx) * det_deriv(mji, kk, ll)
j_matrix[kdx, ldx] = (minor_p * det - minor * det_p) / det**2
jacobians[idx].append(j_matrix)
j_vec = np.reshape(j_matrix, [m * n, 1])
sigma_f = np.sum(np.dot(np.transpose(j_vec), np.dot(matrix_covar, j_vec)))
matrixinv_var[idx, jdx] = sigma_f
return matrixinv, matrixinv_var, jacobians
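# Illustrative usage (made-up 2x2 matrix; the covariance of the flattened,
# row-major elements is (m*n) x (m*n), here diagonal):
#   m = np.array([[-0.6, 0.2], [0.2, -0.5]])
#   covar = np.diag([1e-4] * 4)
#   minv, minv_var, jacs = inverse_matrix_uncertainty(m, covar)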
def chi_inverse(chi, chi_err, method="full"):
"""
Function to compute inverse of response matrix and associated
element-wise uncertainty for point-wise, atom-wise,
and full matrix inversion
"""
n_response = len(chi)
chi_block = chi.copy()
chi_err_block = chi_err.copy()
if method == "point":
# diagonal 1x1
for ii in range(n_response):
for jj in range(n_response):
if ii != jj:
chi_block[ii, jj], chi_err_block[ii, jj] = 0.0, 0.0
elif method == "atom":
# 2x2 block diagonal
for ii in range(n_response):
for jj in range(n_response):
i, j = ii // 2, jj // 2
if i != j:
chi_block[ii, jj], chi_err_block[ii, jj] = 0.0, 0.0
elif method != "full":
raise ValueError(
"Unsupported method, method must be point (diagonal 1x1 inversion), "
"atom (block 2x2 inverse), or full (full inverse)"
)
# Assume cross-covariances are zero
chi_covar = np.diag(np.reshape(chi_err_block**2, [n_response * n_response]))
(chi_inv, chi_inv_var, chi_inv_jacobs) = inverse_matrix_uncertainty(
chi_block, chi_covar
)
return chi_block, chi_inv, chi_inv_var, chi_inv_jacobs
def compute_u_pointwise(
site_index,
f_matrix,
f_matrix_err,
):
"""
Function to compute Hubbard U value using point-wise (diagonal) inversion,
in addition to the associated uncertainty value
- based on the study by Linscott et. al.
"""
i = site_index
umat = f_matrix[2 * i : 2 * (i + 1), 2 * i : 2 * (i + 1)]
umat_err = f_matrix_err[2 * i : 2 * (i + 1), 2 * i : 2 * (i + 1)]
uval = 0.5 * np.sum(np.diag(umat))
uval_err = 0.5 * np.sqrt(np.sum(np.diag(umat_err) ** 2))
return uval, uval_err
def compute_uj_simple_two_by_two(
site_index,
f_matrix,
f_matrix_err,
):
"""
Function to compute Hubbard U and Hund J values using simple 2x2 formula,
in addition to the associated uncertainty values
- based on the study by Linscott et. al.
"""
i = site_index
    umat = f_matrix[2 * i : 2 * (i + 1), 2 * i : 2 * (i + 1)]
umat_err = f_matrix_err[2 * i : 2 * (i + 1), 2 * i : 2 * (i + 1)]
uval = 0.25 * np.sum(umat)
uval_err = 0.25 * np.sqrt(np.sum(umat_err**2))
jmat = np.array([[-1, 1], [1, -1]]) * umat.copy()
jmat_err = umat_err.copy()
jval = 0.25 * np.sum(jmat)
jval_err = 0.25 * np.sqrt(np.sum(jmat_err**2))
return uval, uval_err, jval, jval_err
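# Worked form of the 2x2 formulas above, with F the spin-resolved interaction
# block on site i (u = spin up, d = spin down):
#   U = ( F_uu + F_ud + F_du + F_dd) / 4
#   J = (-F_uu + F_ud + F_du - F_dd) / 4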
def compute_uj_scaled_two_by_two(
site_index,
f_matrix,
f_matrix_err,
chi_matrix_scf,
chi_scf_err,
chi_matrix_nscf,
chi_nscf_err,
chi_scf_inv_jacobs,
chi_nscf_inv_jacobs,
):
"""
Function to compute Hubbard U and Hund J values using scaled 2x2 formula,
in addition to the associated uncertainty values
- based on the study by Linscott et. al.
"""
nx2 = 2 * site_index
np1x2 = nx2 + 2
# helpful functions for computing derivatives of "f" (Dyson) matrix
def fmat_deriv_scf(kk, ll, ik, il):
fd = chi_scf_inv_jacobs[nx2 + kk][nx2 + ll][nx2 + ik, nx2 + il]
return fd
def fmat_deriv_nscf(kk, ll, ik, il):
fd = -chi_nscf_inv_jacobs[nx2 + kk][nx2 + ll][nx2 + ik, nx2 + il]
return fd
fmat = f_matrix[nx2:np1x2, nx2:np1x2]
chi_sub_scf = chi_matrix_scf[nx2:np1x2, nx2:np1x2]
chi_sub_scf_err = chi_scf_err[nx2:np1x2, nx2:np1x2]
chi_sub_nscf_err = chi_nscf_err[nx2:np1x2, nx2:np1x2]
# compute U value and error
lam = (chi_sub_scf[0, 0] + chi_sub_scf[0, 1]) / (
chi_sub_scf[1, 0] + chi_sub_scf[1, 1]
)
lam_deriv = np.zeros([2, 2])
lam_deriv[0, 0] = 1.0 / (chi_sub_scf[1, 0] + chi_sub_scf[1, 1])
lam_deriv[0, 1] = lam_deriv[0, 0]
lam_deriv[1, 1] = -lam / (chi_sub_scf[1, 0] + chi_sub_scf[1, 1])
lam_deriv[1, 0] = lam_deriv[1, 1]
uval = (
0.5 * (lam * (fmat[0, 0] + fmat[1, 0]) + fmat[0, 1] + fmat[1, 1]) / (lam + 1.0)
)
uval_err = 0.0
# scf component
u_deriv_scf = np.zeros([2, 2])
for ik in [0, 1]:
for il in [0, 1]:
u_deriv_scf[ik, il] = (
0.5
/ (lam + 1.0)
* (
lam_deriv[ik, il] * (fmat[0, 0] + fmat[1, 0])
+ lam
* (fmat_deriv_scf(0, 0, ik, il) + fmat_deriv_scf(1, 0, ik, il))
+ fmat_deriv_scf(0, 1, ik, il)
+ fmat_deriv_scf(1, 1, ik, il)
)
- uval / (lam + 1.0) * lam_deriv[ik, il]
)
jacob_vec = np.reshape(u_deriv_scf, [4, 1])
uval_err = uval_err + np.sum(
np.dot(
np.transpose(jacob_vec),
np.dot(np.diag(np.reshape(chi_sub_scf_err**2, [4])), jacob_vec),
)
)
# nscf component
u_deriv_nscf = np.zeros([2, 2])
for ik in [0, 1]:
for il in [0, 1]:
u_deriv_nscf[ik, il] = (
0.5
/ (lam + 1.0)
* (
+lam
* (fmat_deriv_nscf(0, 0, ik, il) + fmat_deriv_nscf(1, 0, ik, il))
+ fmat_deriv_nscf(0, 1, ik, il)
+ fmat_deriv_nscf(1, 1, ik, il)
)
)
jacob_vec = np.reshape(u_deriv_nscf, [4, 1])
uval_err = uval_err + np.sum(
np.dot(
np.transpose(jacob_vec),
np.dot(np.diag(np.reshape(chi_sub_nscf_err**2, [4])), jacob_vec),
)
)
# compute std
uval_err = np.sqrt(uval_err)
# compute J value and error
lam = (chi_sub_scf[0, 0] - chi_sub_scf[0, 1]) / (
chi_sub_scf[1, 0] - chi_sub_scf[1, 1]
)
lam_deriv = np.zeros([2, 2])
lam_deriv[0, 0] = 1.0 / (chi_sub_scf[1, 0] - chi_sub_scf[1, 1])
lam_deriv[0, 1] = -lam_deriv[0, 0]
lam_deriv[1, 1] = lam / (chi_sub_scf[1, 0] - chi_sub_scf[1, 1])
lam_deriv[1, 0] = -lam_deriv[1, 1]
jval = (
-0.5 * (lam * (fmat[0, 0] - fmat[1, 0]) + fmat[0, 1] - fmat[1, 1]) / (lam - 1.0)
)
jval_err = 0.0
# scf component
j_deriv_scf = np.zeros([2, 2])
for ik in [0, 1]:
for il in [0, 1]:
            j_deriv_scf[ik, il] = (
                -0.5
                / (lam - 1.0)
                * (
                    lam_deriv[ik, il] * (fmat[0, 0] - fmat[1, 0])
                    + lam
                    * (fmat_deriv_scf(0, 0, ik, il) - fmat_deriv_scf(1, 0, ik, il))
                    + fmat_deriv_scf(0, 1, ik, il)
                    - fmat_deriv_scf(1, 1, ik, il)
                )
                - jval / (lam - 1.0) * lam_deriv[ik, il]
            )
jacob_vec = np.reshape(j_deriv_scf, [4, 1])
jval_err = jval_err + np.sum(
np.dot(
np.transpose(jacob_vec),
np.dot(np.diag(np.reshape(chi_sub_scf_err**2, [4])), jacob_vec),
)
)
# nscf component
j_deriv_nscf = np.zeros([2, 2])
for ik in [0, 1]:
for il in [0, 1]:
j_deriv_nscf[ik, il] = (
-0.5
/ (lam - 1.0)
* (
+lam
* (fmat_deriv_nscf(0, 0, ik, il) - fmat_deriv_nscf(1, 0, ik, il))
+ fmat_deriv_nscf(0, 1, ik, il)
- fmat_deriv_nscf(1, 1, ik, il)
)
)
jacob_vec = np.reshape(j_deriv_nscf, [4, 1])
jval_err = jval_err + np.sum(
np.dot(
np.transpose(jacob_vec),
np.dot(np.diag(np.reshape(chi_sub_nscf_err**2, [4])), jacob_vec),
)
)
# compute std
jval_err = np.sqrt(jval_err)
return uval, uval_err, jval, jval_err
|
41177822558b8e70ff7d705c161494a7dfaffa11
|
43c870b240a807db330a7abf53758c1fef924432
|
/cloudkitty/orchestrator.py
|
87391411a720b6c5ae5c7cb39c36b01e1a410d79
|
[
"Apache-2.0"
] |
permissive
|
openstack/cloudkitty
|
fb3cb83d8d89b27efe64768862f07781a2ef3b5a
|
94630b97cd1fb4bdd9a638070ffbbe3625de8aa2
|
refs/heads/master
| 2023-08-31T15:10:44.524252
| 2023-08-23T16:27:35
| 2023-08-23T20:01:01
| 20,042,606
| 103
| 57
|
Apache-2.0
| 2017-11-29T15:39:28
| 2014-05-22T00:52:38
|
Python
|
UTF-8
|
Python
| false
| false
| 27,369
|
py
|
orchestrator.py
|
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
from datetime import timedelta
import decimal
import functools
import hashlib
import multiprocessing
import random
import sys
import time
import cotyledon
import futurist
from futurist import waiters
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import uuidutils
from stevedore import driver
from tooz import coordination
from cloudkitty import collector
from cloudkitty import config # noqa
from cloudkitty import dataframe
from cloudkitty import extension_manager
from cloudkitty import messaging
from cloudkitty import storage
from cloudkitty import storage_state as state
from cloudkitty import utils as ck_utils
from cloudkitty.utils import tz as tzutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
orchestrator_opts = [
cfg.StrOpt(
'coordination_url',
secret=True,
help='Coordination backend URL',
default='file:///var/lib/cloudkitty/locks'),
cfg.IntOpt(
'max_workers',
default=multiprocessing.cpu_count(),
sample_default=4,
min=0,
help='Max number of workers to execute the rating process. Defaults '
'to the number of available CPU cores.'),
cfg.IntOpt(
'max_workers_reprocessing',
default=multiprocessing.cpu_count(),
min=0,
help='Max number of workers to execute the reprocessing. Defaults to '
'the number of available CPU cores.'),
cfg.IntOpt('max_threads',
# NOTE(peschk_l): This is the futurist default
default=multiprocessing.cpu_count() * 5,
sample_default=20,
min=1,
deprecated_name='max_greenthreads',
advanced=True,
help='Maximal number of threads to use per worker. Defaults to '
'5 times the number of available CPUs'),
]
CONF.register_opts(orchestrator_opts, group='orchestrator')
CONF.import_opt('backend', 'cloudkitty.fetcher', 'fetcher')
FETCHERS_NAMESPACE = 'cloudkitty.fetchers'
PROCESSORS_NAMESPACE = 'cloudkitty.rating.processors'
COLLECTORS_NAMESPACE = 'cloudkitty.collector.backends'
STORAGES_NAMESPACE = 'cloudkitty.storage.backends'
def get_lock(coord, tenant_id):
name = hashlib.sha256(
("cloudkitty-"
+ str(tenant_id + '-')
+ str(CONF.collect.collector + '-')
+ str(CONF.fetcher.backend + '-')
+ str(CONF.collect.scope_key)).encode('ascii')).hexdigest()
return name, coord.get_lock(name.encode('ascii'))
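# Illustrative usage (the processors below follow this exact pattern):
#   lock_name, lock = get_lock(coord, tenant_id)
#   if lock.acquire(blocking=False):
#       try:
#           ...  # process the scope
#       finally:
#           lock.release()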
class RatingEndpoint(object):
target = oslo_messaging.Target(namespace='rating',
version='1.0')
def __init__(self, orchestrator):
self._global_reload = False
self._pending_reload = []
self._module_state = {}
self._orchestrator = orchestrator
def get_reload_list(self):
lock = lockutils.lock('module-reload')
with lock:
reload_list = self._pending_reload
self._pending_reload = []
return reload_list
def get_module_state(self):
lock = lockutils.lock('module-state')
with lock:
module_list = self._module_state
self._module_state = {}
return module_list
def quote(self, ctxt, res_data):
LOG.debug('Received quote request [%s] from RPC.', res_data)
worker = APIWorker()
start = tzutils.localized_now()
end = tzutils.add_delta(start, timedelta(seconds=CONF.collect.period))
# Need to prepare data to support the V2 processing format
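        # Illustrative shape of the expected input (field names taken from the
        # parsing below; the values themselves are made up):
        #   res_data = {'usage': {'cpu': [
        #       {'vol': {'unit': 'instance', 'qty': 1.0},
        #        'desc': {'groupby': {...}, 'metadata': {...}}}]}}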
usage = {}
for k in res_data['usage']:
all_data_points_for_metric = []
all_quote_data_entries = res_data['usage'][k]
for p in all_quote_data_entries:
vol = p['vol']
desc = p.get('desc', {})
data_point = dataframe.DataPoint(
vol['unit'],
vol['qty'],
0,
desc.get('groupby', []),
desc.get('metadata', []),
)
all_data_points_for_metric.append(data_point)
usage[k] = all_data_points_for_metric
frame = dataframe.DataFrame(
start=start,
end=end,
usage=usage,
)
quote_result = worker.quote(frame)
LOG.debug("Quote result [%s] for input data [%s].",
quote_result, res_data)
return str(quote_result)
def reload_modules(self, ctxt):
LOG.info('Received reload modules command.')
lock = lockutils.lock('module-reload')
with lock:
self._global_reload = True
def reload_module(self, ctxt, name):
LOG.info('Received reload command for module %s.', name)
lock = lockutils.lock('module-reload')
with lock:
if name not in self._pending_reload:
self._pending_reload.append(name)
def enable_module(self, ctxt, name):
LOG.info('Received enable command for module %s.', name)
lock = lockutils.lock('module-state')
with lock:
self._module_state[name] = True
def disable_module(self, ctxt, name):
LOG.info('Received disable command for module %s.', name)
lock = lockutils.lock('module-state')
with lock:
self._module_state[name] = False
if name in self._pending_reload:
self._pending_reload.remove(name)
class ScopeEndpoint(object):
target = oslo_messaging.Target(version='1.0')
def __init__(self):
self._coord = coordination.get_coordinator(
CONF.orchestrator.coordination_url,
uuidutils.generate_uuid().encode('ascii'))
self._state = state.StateManager()
self._storage = storage.get_storage()
self._coord.start(start_heart=True)
def reset_state(self, ctxt, res_data):
LOG.info('Received state reset command. {}'.format(res_data))
random.shuffle(res_data['scopes'])
for scope in res_data['scopes']:
lock_name, lock = get_lock(self._coord, scope['scope_id'])
LOG.debug(
'[ScopeEndpoint] Trying to acquire lock "{}" ...'.format(
lock_name,
)
)
if lock.acquire(blocking=True):
LOG.debug(
'[ScopeEndpoint] Acquired lock "{}".'.format(
lock_name,
)
)
last_processed_timestamp = tzutils.dt_from_iso(
res_data['last_processed_timestamp'])
try:
self._storage.delete(
begin=last_processed_timestamp, end=None, filters={
scope['scope_key']: scope['scope_id']})
self._state.set_last_processed_timestamp(
scope['scope_id'],
last_processed_timestamp,
fetcher=scope['fetcher'],
collector=scope['collector'],
scope_key=scope['scope_key'],
)
finally:
lock.release()
LOG.debug(
'[ScopeEndpoint] Released lock "{}" .'.format(
lock_name,
)
)
class BaseWorker(object):
def __init__(self, tenant_id=None):
self._tenant_id = tenant_id
# Rating processors
self._processors = []
self._load_rating_processors()
def _load_rating_processors(self):
self._processors = []
processors = extension_manager.EnabledExtensionManager(
PROCESSORS_NAMESPACE,
invoke_kwds={'tenant_id': self._tenant_id})
for processor in processors:
self._processors.append(processor)
self._processors.sort(key=lambda x: x.obj.priority, reverse=True)
class APIWorker(BaseWorker):
def __init__(self, tenant_id=None):
super(APIWorker, self).__init__(tenant_id)
def quote(self, res_data):
quote_result = res_data
for processor in self._processors:
quote_result = processor.obj.quote(quote_result)
price = decimal.Decimal(0)
for _, point in quote_result.iterpoints():
price += point.price
return price
def _check_state(obj, period, tenant_id):
timestamp = obj._state.get_state(tenant_id)
return ck_utils.check_time_state(timestamp,
period,
CONF.collect.wait_periods)
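# Illustrative call (the workers below bind the first arguments via
# functools.partial): next_ts = _check_state(worker, CONF.collect.period, tenant_id)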
class Worker(BaseWorker):
def __init__(self, collector, storage, tenant_id, worker_id):
super(Worker, self).__init__(tenant_id)
self._collector = collector
self._storage = storage
self._period = CONF.collect.period
self._wait_time = CONF.collect.wait_periods * self._period
self._worker_id = worker_id
self._log_prefix = '[scope: {scope}, worker: {worker}] '.format(
scope=self._tenant_id, worker=self._worker_id)
self._conf = ck_utils.load_conf(CONF.collect.metrics_conf)
self._state = state.StateManager()
        self.next_timestamp_to_process = functools.partial(
            _check_state, self, self._period, self._tenant_id)
def _collect(self, metric, start_timestamp):
next_timestamp = tzutils.add_delta(
start_timestamp, timedelta(seconds=self._period))
name, data = self._collector.retrieve(
metric,
start_timestamp,
next_timestamp,
self._tenant_id
)
if not data:
raise collector.NoDataCollected(self._collector, metric)
return name, data
def _do_collection(self, metrics, timestamp):
def _get_result(metric):
try:
return self._collect(metric, timestamp)
except collector.NoDataCollected:
LOG.info(
self._log_prefix + 'No data collected '
'for metric {metric} at timestamp {ts}'.format(
metric=metric, ts=timestamp))
return metric, None
except Exception as e:
LOG.exception(
self._log_prefix + 'Error while collecting'
' metric {metric} at timestamp {ts}: {e}. Exiting.'.format(
metric=metric, ts=timestamp, e=e))
# FIXME(peschk_l): here we just exit, and the
# collection will be retried during the next collect
# cycle. In the future, we should implement a retrying
# system in workers
sys.exit(1)
return self._do_execute_collection(_get_result, metrics)
def _do_execute_collection(self, _get_result, metrics):
"""Execute the metric measurement collection
When executing this method a ZeroDivisionError might be raised.
This happens when no executions have happened in the
`futurist.ThreadPoolExecutor`; then, when calling the
`average_runtime`, the exception is thrown. In such a case, there is
no need for further actions, and we can ignore the error.
:param _get_result: the method to execute and get the metrics
:param metrics: the list of metrics to be collected
:return: the metrics measurements
"""
results = []
try:
with futurist.ThreadPoolExecutor(
max_workers=CONF.orchestrator.max_threads) as tpool:
futs = [tpool.submit(_get_result, metric)
for metric in metrics]
LOG.debug(self._log_prefix +
'Collecting [{}] metrics.'.format(metrics))
results = [r.result() for r in waiters.wait_for_all(futs).done]
log_message = self._log_prefix + \
"Collecting {} metrics took {}s total, with {}s average"
LOG.debug(log_message.format(tpool.statistics.executed,
tpool.statistics.runtime,
tpool.statistics.average_runtime))
except ZeroDivisionError as zeroDivisionError:
LOG.debug("Ignoring ZeroDivisionError for metrics [%s]: [%s].",
metrics, zeroDivisionError)
return dict(filter(lambda x: x[1] is not None, results))
def run(self):
should_continue_processing = self.execute_worker_processing()
while should_continue_processing:
should_continue_processing = self.execute_worker_processing()
def execute_worker_processing(self):
timestamp = self.next_timestamp_to_process()
LOG.debug("Processing timestamp [%s] for storage scope [%s].",
timestamp, self._tenant_id)
if not timestamp:
LOG.debug("Worker [%s] finished processing storage scope [%s].",
self._worker_id, self._tenant_id)
return False
if self._state.get_state(self._tenant_id):
if not self._state.is_storage_scope_active(self._tenant_id):
LOG.debug("Skipping processing for storage scope [%s] "
"because it is marked as inactive.",
self._tenant_id)
return False
else:
LOG.debug("No need to check if [%s] is de-activated. "
"We have never processed it before.")
self.do_execute_scope_processing(timestamp)
return True
def do_execute_scope_processing(self, timestamp):
metrics = list(self._collector.conf.keys())
# Collection
metrics = sorted(metrics)
usage_data = self._do_collection(metrics, timestamp)
LOG.debug("Usage data [%s] found for storage scope [%s] in "
"timestamp [%s].", usage_data, self._tenant_id,
timestamp)
start_time = timestamp
end_time = tzutils.add_delta(timestamp,
timedelta(seconds=self._period))
# No usage records found in
if not usage_data:
LOG.warning("No usage data for storage scope [%s] on "
"timestamp [%s]. You might want to consider "
"de-activating it.", self._tenant_id, timestamp)
else:
frame = self.execute_measurements_rating(end_time, start_time,
usage_data)
self.persist_rating_data(end_time, frame, start_time)
self.update_scope_processing_state_db(timestamp)
def persist_rating_data(self, end_time, frame, start_time):
LOG.debug("Persisting processed frames [%s] for scope [%s] and time "
"[start=%s,end=%s]", frame, self._tenant_id, start_time,
end_time)
self._storage.push([frame], self._tenant_id)
def execute_measurements_rating(self, end_time, start_time, usage_data):
frame = dataframe.DataFrame(
start=start_time,
end=end_time,
usage=usage_data,
)
for processor in self._processors:
original_data = copy.deepcopy(frame)
frame = processor.obj.process(frame)
LOG.debug("Results [%s] for processing [%s] of data points [%s].",
frame, processor.obj.process, original_data)
return frame
def update_scope_processing_state_db(self, timestamp):
self._state.set_state(self._tenant_id, timestamp)
class ReprocessingWorker(Worker):
def __init__(self, collector, storage, tenant_id, worker_id):
self.scope = tenant_id
self.scope_key = None
super(ReprocessingWorker, self).__init__(
collector, storage, self.scope.identifier, worker_id)
self.reprocessing_scheduler_db = state.ReprocessingSchedulerDb()
self.next_timestamp_to_process = self._next_timestamp_to_process
self.load_scope_key()
def load_scope_key(self):
scope_from_db = self._state.get_all(self._tenant_id)
if len(scope_from_db) < 1:
raise Exception("Scope [%s] scheduled for reprocessing does not "
"seem to exist anymore." % self.scope)
if len(scope_from_db) > 1:
raise Exception("Unexpected number of storage state entries found "
"for scope [%s]." % self.scope)
self.scope_key = scope_from_db[0].scope_key
def _next_timestamp_to_process(self):
db_item = self.reprocessing_scheduler_db.get_from_db(
identifier=self.scope.identifier,
start_reprocess_time=self.scope.start_reprocess_time,
end_reprocess_time=self.scope.end_reprocess_time)
if not db_item:
LOG.info("It seems that the processing for schedule [%s] was "
"finished by other worker.", self.scope)
return None
return ReprocessingWorker.generate_next_timestamp(
db_item, self._period)
@staticmethod
def generate_next_timestamp(db_item, processing_period_interval):
new_timestamp = db_item.start_reprocess_time
if db_item.current_reprocess_time:
period_delta = timedelta(seconds=processing_period_interval)
new_timestamp = db_item.current_reprocess_time + period_delta
LOG.debug("Current reprocessed time is [%s], therefore, the next "
"one to process is [%s] based on the processing "
"interval [%s].", db_item.start_reprocess_time,
new_timestamp, processing_period_interval)
else:
LOG.debug("There is no reprocessing for the schedule [%s]. "
"Therefore, we use the start time [%s] as the first "
"time to process.", db_item, new_timestamp)
if new_timestamp <= db_item.end_reprocess_time:
return tzutils.local_to_utc(new_timestamp)
else:
LOG.debug("No need to keep reprocessing schedule [%s] as we "
"processed all requested timestamps.", db_item)
return None
def do_execute_scope_processing(self, timestamp):
end_of_this_processing = timestamp + timedelta(seconds=self._period)
end_of_this_processing = tzutils.local_to_utc(end_of_this_processing)
LOG.debug("Cleaning backend [%s] data for reprocessing scope [%s] "
"for timeframe[start=%s, end=%s].",
self._storage, self.scope, timestamp, end_of_this_processing)
self._storage.delete(
begin=timestamp, end=end_of_this_processing,
filters={self.scope_key: self._tenant_id})
LOG.debug("Executing the reprocessing of scope [%s] for "
"timeframe[start=%s, end=%s].", self.scope, timestamp,
end_of_this_processing)
super(ReprocessingWorker, self).do_execute_scope_processing(timestamp)
def update_scope_processing_state_db(self, timestamp):
LOG.debug("After data is persisted in the storage backend [%s], we "
"will update the scope [%s] current processing time to "
"[%s].", self._storage, self.scope, timestamp)
self.reprocessing_scheduler_db.update_reprocessing_time(
identifier=self.scope.identifier,
start_reprocess_time=self.scope.start_reprocess_time,
end_reprocess_time=self.scope.end_reprocess_time,
new_current_time_stamp=timestamp)
class CloudKittyProcessor(cotyledon.Service):
def __init__(self, worker_id):
self._worker_id = worker_id
super(CloudKittyProcessor, self).__init__(self._worker_id)
self.tenants = []
self.fetcher = driver.DriverManager(
FETCHERS_NAMESPACE,
CONF.fetcher.backend,
invoke_on_load=True,
).driver
self.collector = collector.get_collector()
self.storage = storage.get_storage()
self._state = state.StateManager()
# RPC
self.server = None
self._rating_endpoint = RatingEndpoint(self)
self._scope_endpoint = ScopeEndpoint()
self._init_messaging()
# DLM
self.coord = coordination.get_coordinator(
CONF.orchestrator.coordination_url,
uuidutils.generate_uuid().encode('ascii'))
self.coord.start(start_heart=True)
self.next_timestamp_to_process = functools.partial(
_check_state, self, CONF.collect.period)
self.worker_class = Worker
self.log_worker_initiated()
def log_worker_initiated(self):
LOG.info("Processor worker ID [%s] is initiated as CloudKitty "
"rating processor.", self._worker_id)
def _init_messaging(self):
target = oslo_messaging.Target(topic='cloudkitty',
server=CONF.host,
version='1.0')
endpoints = [
self._rating_endpoint,
self._scope_endpoint,
]
self.server = messaging.get_server(target, endpoints)
self.server.start()
def process_messages(self):
# TODO(sheeprine): Code kept to handle threading and asynchronous
# reloading
# pending_reload = self._rating_endpoint.get_reload_list()
# pending_states = self._rating_endpoint.get_module_state()
pass
def run(self):
LOG.debug('Started worker {}.'.format(self._worker_id))
while True:
self.internal_run()
def terminate(self):
LOG.debug('Terminating worker {}.'.format(self._worker_id))
self.coord.stop()
LOG.debug('Terminated worker {}.'.format(self._worker_id))
def internal_run(self):
self.load_scopes_to_process()
for tenant_id in self.tenants:
lock_name, lock = get_lock(
self.coord, self.generate_lock_base_name(tenant_id))
LOG.debug('[Worker: {w}] Trying to acquire lock "{lock_name}".'
.format(w=self._worker_id, lock_name=lock_name))
lock_acquired = lock.acquire(blocking=False)
if lock_acquired:
LOG.debug('[Worker: {w}] Acquired lock "{lock_name}".'.format(
w=self._worker_id, lock_name=lock_name))
try:
self.process_scope(tenant_id)
finally:
lock.release()
LOG.debug("Finished processing scope [%s].", tenant_id)
else:
LOG.debug("Could not acquire lock [%s] for processing "
"scope [%s] with worker [%s].", lock_name,
tenant_id, self.worker_class)
LOG.debug("Finished processing all storage scopes with worker "
"[worker_id=%s, class=%s].",
self._worker_id, self.worker_class)
# FIXME(sheeprine): We may cause a drift here
time.sleep(CONF.collect.period)
def process_scope(self, scope_to_process):
timestamp = self.next_timestamp_to_process(scope_to_process)
LOG.debug("Next timestamp [%s] found for processing for "
"storage scope [%s].", state, scope_to_process)
if not timestamp:
LOG.debug("There is no next timestamp to process for scope [%s]",
scope_to_process)
return
worker = self.worker_class(
self.collector,
self.storage,
scope_to_process,
self._worker_id,
)
worker.run()
def generate_lock_base_name(self, tenant_id):
return tenant_id
def load_scopes_to_process(self):
self.tenants = self.fetcher.get_tenants()
random.shuffle(self.tenants)
LOG.info('[Worker: {w}] Tenants loaded for fetcher {f}'.format(
w=self._worker_id, f=self.fetcher.name))
class CloudKittyReprocessor(CloudKittyProcessor):
def __init__(self, worker_id):
super(CloudKittyReprocessor, self).__init__(worker_id)
self.next_timestamp_to_process = self._next_timestamp_to_process
self.worker_class = ReprocessingWorker
self.reprocessing_scheduler_db = state.ReprocessingSchedulerDb()
def log_worker_initiated(self):
LOG.info("Processor worker ID [%s] is initiated as CloudKitty "
"rating reprocessor.", self._worker_id)
def _next_timestamp_to_process(self, scope):
scope_db = self.reprocessing_scheduler_db.get_from_db(
identifier=scope.identifier,
start_reprocess_time=scope.start_reprocess_time,
end_reprocess_time=scope.end_reprocess_time)
if scope_db:
return ReprocessingWorker.generate_next_timestamp(
scope_db, CONF.collect.period)
else:
LOG.debug("It seems that the processing for schedule [%s] was "
"finished by other CloudKitty reprocessor.", scope)
return None
def load_scopes_to_process(self):
self.tenants = self.reprocessing_scheduler_db.get_all()
random.shuffle(self.tenants)
LOG.info('Reprocessing worker [%s] loaded [%s] schedules to process.',
self._worker_id, len(self.tenants))
def generate_lock_base_name(self, scope):
return "%s-id=%s-start=%s-end=%s-current=%s" % (
self.worker_class, scope.identifier, scope.start_reprocess_time,
scope.end_reprocess_time, scope.current_reprocess_time)
class CloudKittyServiceManager(cotyledon.ServiceManager):
def __init__(self):
super(CloudKittyServiceManager, self).__init__()
if CONF.orchestrator.max_workers:
self.cloudkitty_processor_service_id = self.add(
CloudKittyProcessor, workers=CONF.orchestrator.max_workers)
else:
LOG.info("No worker configured for CloudKitty processing.")
if CONF.orchestrator.max_workers_reprocessing:
self.cloudkitty_reprocessor_service_id = self.add(
CloudKittyReprocessor,
workers=CONF.orchestrator.max_workers_reprocessing)
else:
LOG.info("No worker configured for CloudKitty reprocessing.")
|
b87c8c7a96905f0a68077f62370c2c521bd41785
|
316e768ac2ba60fb393a8b914f5c761e077609d1
|
/archivebox/core/migrations/0002_auto_20200625_1521.py
|
4811282949cfded0214a3203456970bb8c0b83e1
|
[
"MIT",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
ArchiveBox/ArchiveBox
|
88fc98ac08800d9785d4333572627a7f354f3a43
|
73a5f74d3840284bceaabced9cf99575b8c15d54
|
refs/heads/dev
| 2023-09-03T15:31:13.265845
| 2023-08-31T22:17:45
| 2023-08-31T22:17:45
| 90,356,372
| 9,794
| 606
|
MIT
| 2023-09-04T05:04:41
| 2017-05-05T08:50:14
|
Python
|
UTF-8
|
Python
| false
| false
| 402
|
py
|
0002_auto_20200625_1521.py
|
# Generated by Django 3.0.7 on 2020-06-25 15:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='snapshot',
name='timestamp',
field=models.CharField(default=None, max_length=32, null=True),
),
]
|
ac7519d5b162791b861a9425baf1f5274bf77c43
|
d05ff6dda43729011b7d469b0a2bc02ed66b6342
|
/frappe/patches/v10_0/modify_smallest_currency_fraction.py
|
b86642e5f6fc2e33f203622784a7e28dbcd7c883
|
[
"MIT"
] |
permissive
|
frappe/frappe
|
520c14bed3810c3360629a81dcc33f0ebe21ac4d
|
dd8f314bf4a8a4739eebbfac741abc533ac58bc1
|
refs/heads/develop
| 2023-08-30T19:29:10.406706
| 2023-08-30T11:20:40
| 2023-08-30T11:20:40
| 1,864,194
| 5,955
| 3,735
|
MIT
| 2023-09-14T16:08:04
| 2011-06-08T08:14:16
|
Python
|
UTF-8
|
Python
| false
| false
| 213
|
py
|
modify_smallest_currency_fraction.py
|
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
def execute():
frappe.db.set_value("Currency", "USD", "smallest_currency_fraction_value", "0.01")
|
1aea58145e88f08bb2a1b11b618fa479c0ef87e0
|
7860d9fba242d9bdcb7c06c32ee4064e4a7fa2f1
|
/litex_boards/platforms/qmtech_10cl006.py
|
c75d9781e5283dd6acdafdac7008cf149c642e11
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
litex-hub/litex-boards
|
ef1f200fd6d34c96621f4efa094ede874f4c34ab
|
b92c96b3a445fde31037f593a40fe621f85cb58c
|
refs/heads/master
| 2023-09-03T15:09:11.198560
| 2023-08-30T15:22:11
| 2023-08-30T15:22:11
| 191,191,221
| 291
| 283
|
BSD-2-Clause
| 2023-09-03T20:32:58
| 2019-06-10T15:09:10
|
Python
|
UTF-8
|
Python
| false
| false
| 5,476
|
py
|
qmtech_10cl006.py
|
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Hans Baier <hansfbaier@gmail.com>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.altera import AlteraPlatform
from litex.build.altera.programmer import USBBlaster
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk
("clk50", 0, Pins("E1"), IOStandard("3.3-V LVTTL")),
# Button
("key", 0, Pins("F3"), IOStandard("3.3-V LVTTL")),
("key", 1, Pins("J6"), IOStandard("3.3-V LVTTL")),
# SPIFlash (W25Q64)
("spiflash", 0,
Subsignal("cs_n", Pins("D2")),
Subsignal("clk", Pins("H1")),
Subsignal("mosi", Pins("C1")),
Subsignal("miso", Pins("H2")),
IOStandard("3.3-V LVTTL"),
),
# SDR SDRAM
("sdram_clock", 0, Pins("P2"), IOStandard("3.3-V LVTTL")),
("sdram", 0,
Subsignal("a", Pins(
"R7 T7 R8 T8 R6 T5 R5 T4",
"R4 T3 T6 R3 T2")),
Subsignal("ba", Pins("N8 L8")),
Subsignal("cs_n", Pins("P8")),
Subsignal("cke", Pins("R1")),
Subsignal("ras_n", Pins("M8")),
Subsignal("cas_n", Pins("M7")),
Subsignal("we_n", Pins("P6")),
Subsignal("dq", Pins(
"K5 L3 L4 K6 N3 M6 P3 N5",
"N2 N1 L1 L2 K1 K2 J1 J2")),
Subsignal("dm", Pins("N6 P1")),
IOStandard("3.3-V LVTTL")
),
]
# The connectors are named after the daughterboard, not the core board
# because on the different core boards the names vary, but on the
# daughterboard they stay the same, which we need to connect the
# daughterboard peripherals to the core board.
# On this board J2 is U7 and J3 is U8
_connectors = [
("J2", {
# odd row even row
7: "G1", 8: "G2",
9: "D1", 10: "C2",
11: "B1", 12: "F5",
13: "D3", 14: "C3",
15: "B3", 16: "A3",
17: "B4", 18: "A4",
19: "E5", 20: "A2",
21: "D4", 22: "E6",
23: "C6", 24: "D6",
25: "B5", 26: "A5",
27: "B6", 28: "A6",
29: "B7", 30: "A7",
31: "D8", 32: "C8",
33: "D9", 34: "C9",
35: "B8", 36: "A8",
37: "B9", 38: "A9",
39: "E9", 40: "E8",
41: "E11", 42: "E10",
43: "A10", 44: "B10",
45: "D12", 46: "D11",
47: "B11", 48: "A11",
49: "B12", 50: "A12",
51: "B13", 52: "A13",
53: "B14", 54: "A14",
55: "D14", 56: "C14",
57: "B16", 58: "A15",
59: "C16", 60: "C15",
}),
("J3", {
# odd row even row
7: "R9", 8: "T9",
9: "R10", 10: "T10",
11: "R11", 12: "T11",
13: "R12", 14: "T12",
15: "N9", 16: "M9",
17: "M10", 18: "P9",
19: "P11", 20: "N11",
21: "R13", 22: "T13",
23: "T15", 24: "T14",
25: "N12", 26: "M11",
27: "R14", 28: "N13",
29: "N14", 30: "P14",
31: "P16", 32: "R16",
33: "N16", 34: "N15",
35: "M16", 36: "M15",
37: "L16", 38: "L15",
39: "P15", 40: "M12",
41: "L14", 42: "L13",
43: "K16", 44: "K15",
45: "K12", 46: "J12",
47: "J14", 48: "J13",
49: "K11", 50: "J11",
51: "G11", 52: "F11",
53: "F13", 54: "F14",
55: "F10", 56: "F9",
57: "E16", 58: "E15",
59: "D16", 60: "D15",
})
]
# Platform -----------------------------------------------------------------------------------------
class Platform(AlteraPlatform):
default_clk_name = "clk50"
default_clk_period = 1e9/50e6
core_resources = [
("user_led", 0, Pins("L9"), IOStandard("3.3-V LVTTL")),
("serial", 0,
# Compatible with cheap FT232 based cables (ex: Gaoominy 6Pin Ftdi Ft232Rl Ft232)
Subsignal("tx", Pins("J3:7"), IOStandard("3.3-V LVTTL")), # GPIO_07 (JP1 Pin 10)
Subsignal("rx", Pins("J3:8"), IOStandard("3.3-V LVTTL")) # GPIO_05 (JP1 Pin 8)
),
]
def __init__(self, toolchain="quartus", with_daughterboard=False):
device = "10CL006YU256C8G"
io = _io
connectors = _connectors
if with_daughterboard:
from litex_boards.platforms.qmtech_daughterboard import QMTechDaughterboard
daughterboard = QMTechDaughterboard(IOStandard("3.3-V LVTTL"))
io += daughterboard.io
connectors += daughterboard.connectors
else:
io += self.core_resources
AlteraPlatform.__init__(self, device, io, connectors, toolchain=toolchain)
if with_daughterboard:
# an ethernet pin takes K22, so make it available
self.add_platform_command("set_global_assignment -name CYCLONEII_RESERVE_NCEO_AFTER_CONFIGURATION \"USE AS REGULAR IO\"")
# Generate PLL clock in STA
self.toolchain.additional_sdc_commands.append("derive_pll_clocks")
# Calculates clock uncertainties
self.toolchain.additional_sdc_commands.append("derive_clock_uncertainty")
def create_programmer(self):
return USBBlaster()
def do_finalize(self, fragment):
AlteraPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk50", loose=True), 1e9/50e6)
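# Minimal smoke test (illustrative; assumes LiteX is installed, no Quartus
# needed): instantiating the platform and requesting resources exercises the
# _io/_connectors tables above, including the "J3:7"-style pins that resolve
# through the daughterboard connector mapping.
if __name__ == "__main__":
    platform = Platform()
    print(platform.request("clk50"))
    print(platform.request("serial"))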
|
30d796046327b46c656b499ea9dbeae0292ff6f0
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/networkx/algorithms/sparsifiers.py
|
8dbfc80cd21c50aa03b485eddf95e1736a55fe11
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 10,075
|
py
|
sparsifiers.py
|
# Copyright (C) 2018
# Robert Gmyr <robert@gmyr.net>
# All rights reserved.
# BSD license.
"""Functions for computing sparsifiers of graphs."""
from __future__ import division
import math
import networkx as nx
from networkx.utils import not_implemented_for, py_random_state
__all__ = ['spanner']
@py_random_state(3)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def spanner(G, stretch, weight=None, seed=None):
"""Returns a spanner of the given graph with the given stretch.
A spanner of a graph G = (V, E) with stretch t is a subgraph
H = (V, E_S) such that E_S is a subset of E and the distance between
any pair of nodes in H is at most t times the distance between the
nodes in G.
Parameters
----------
G : NetworkX graph
An undirected simple graph.
stretch : float
The stretch of the spanner.
weight : object
The edge attribute to use as distance.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
NetworkX graph
A spanner of the given graph with the given stretch.
Raises
------
ValueError
If a stretch less than 1 is given.
Notes
-----
This function implements the spanner algorithm by Baswana and Sen,
see [1].
    This algorithm is a randomized Las Vegas algorithm: the expected
running time is O(km) where k = (stretch + 1) // 2 and m is the
number of edges in G. The returned graph is always a spanner of the
given graph with the specified stretch. For weighted graphs the
number of edges in the spanner is O(k * n^(1 + 1 / k)) where k is
defined as above and n is the number of nodes in G. For unweighted
graphs the number of edges is O(n^(1 + 1 / k) + kn).
References
----------
[1] S. Baswana, S. Sen. A Simple and Linear Time Randomized
Algorithm for Computing Sparse Spanners in Weighted Graphs.
Random Struct. Algorithms 30(4): 532-563 (2007).
"""
if stretch < 1:
raise ValueError('stretch must be at least 1')
k = (stretch + 1) // 2
# initialize spanner H with empty edge set
H = nx.empty_graph()
H.add_nodes_from(G.nodes)
# phase 1: forming the clusters
# the residual graph has V' from the paper as its node set
# and E' from the paper as its edge set
residual_graph = _setup_residual_graph(G, weight)
# clustering is a dictionary that maps nodes in a cluster to the
# cluster center
clustering = {v: v for v in G.nodes}
sample_prob = math.pow(G.number_of_nodes(), - 1 / k)
size_limit = 2 * math.pow(G.number_of_nodes(), 1 + 1 / k)
i = 0
while i < k - 1:
# step 1: sample centers
sampled_centers = set()
for center in set(clustering.values()):
if seed.random() < sample_prob:
sampled_centers.add(center)
# combined loop for steps 2 and 3
edges_to_add = set()
edges_to_remove = set()
new_clustering = {}
for v in residual_graph.nodes:
if clustering[v] in sampled_centers:
continue
# step 2: find neighboring (sampled) clusters and
# lightest edges to them
lightest_edge_neighbor, lightest_edge_weight =\
_lightest_edge_dicts(residual_graph, clustering, v)
neighboring_sampled_centers =\
set(lightest_edge_weight.keys()) & sampled_centers
# step 3: add edges to spanner
if not neighboring_sampled_centers:
# connect to each neighboring center via lightest edge
for neighbor in lightest_edge_neighbor.values():
edges_to_add.add((v, neighbor))
# remove all incident edges
for neighbor in residual_graph.adj[v]:
edges_to_remove.add((v, neighbor))
else: # there is a neighboring sampled center
closest_center = min(neighboring_sampled_centers,
key=lightest_edge_weight.get)
closest_center_weight = lightest_edge_weight[closest_center]
closest_center_neighbor =\
lightest_edge_neighbor[closest_center]
edges_to_add.add((v, closest_center_neighbor))
new_clustering[v] = closest_center
# connect to centers with edge weight less than
# closest_center_weight
for center, edge_weight in lightest_edge_weight.items():
if edge_weight < closest_center_weight:
neighbor = lightest_edge_neighbor[center]
edges_to_add.add((v, neighbor))
# remove edges to centers with edge weight less than
# closest_center_weight
for neighbor in residual_graph.adj[v]:
neighbor_cluster = clustering[neighbor]
neighbor_weight = lightest_edge_weight[neighbor_cluster]
if neighbor_cluster == closest_center or neighbor_weight < closest_center_weight:
edges_to_remove.add((v, neighbor))
# check whether iteration added too many edges to spanner,
# if so repeat
if len(edges_to_add) > size_limit:
# an iteration is repeated O(1) times on expectation
continue
# iteration succeeded
i = i + 1
# actually add edges to spanner
for u, v in edges_to_add:
_add_edge_to_spanner(H, residual_graph, u, v, weight)
# actually delete edges from residual graph
residual_graph.remove_edges_from(edges_to_remove)
# copy old clustering data to new_clustering
for node, center in clustering.items():
if center in sampled_centers:
new_clustering[node] = center
clustering = new_clustering
# step 4: remove intra-cluster edges
for u in residual_graph.nodes:
for v in list(residual_graph.adj[u]):
if clustering[u] == clustering[v]:
residual_graph.remove_edge(u, v)
# update residual graph node set
for v in list(residual_graph.nodes):
if v not in clustering:
residual_graph.remove_node(v)
# phase 2: vertex-cluster joining
for v in residual_graph.nodes:
lightest_edge_neighbor, _ =\
_lightest_edge_dicts(residual_graph, clustering, v)
for neighbor in lightest_edge_neighbor.values():
_add_edge_to_spanner(H, residual_graph, v, neighbor, weight)
return H
def _setup_residual_graph(G, weight):
"""Setup residual graph as a copy of G with unique edges weights.
The node set of the residual graph corresponds to the set V' from
the Baswana-Sen paper and the edge set corresponds to the set E'
from the paper.
This function associates distinct weights to the edges of the
residual graph (even for unweighted input graphs), as required by
the algorithm.
Parameters
----------
G : NetworkX graph
An undirected simple graph.
weight : object
The edge attribute to use as distance.
Returns
-------
NetworkX graph
The residual graph used for the Baswana-Sen algorithm.
"""
residual_graph = G.copy()
# establish unique edge weights, even for unweighted graphs
for u, v in G.edges():
if not weight:
residual_graph[u][v]['weight'] = (id(u), id(v))
else:
residual_graph[u][v]['weight'] = (G[u][v][weight], id(u), id(v))
return residual_graph
def _lightest_edge_dicts(residual_graph, clustering, node):
"""Find the lightest edge to each cluster.
Searches for the minimum-weight edge to each cluster adjacent to
the given node.
Parameters
----------
residual_graph : NetworkX graph
The residual graph used by the Baswana-Sen algorithm.
clustering : dictionary
The current clustering of the nodes.
node : node
The node from which the search originates.
Returns
-------
lightest_edge_neighbor, lightest_edge_weight : dictionary, dictionary
lightest_edge_neighbor is a dictionary that maps a center C to
a node v in the corresponding cluster such that the edge from
the given node to v is the lightest edge from the given node to
        any node in the cluster. lightest_edge_weight maps a center C to the
weight of the aforementioned edge.
Notes
-----
If a cluster has no node that is adjacent to the given node in the
residual graph then the center of the cluster is not a key in the
returned dictionaries.
"""
lightest_edge_neighbor = {}
lightest_edge_weight = {}
for neighbor in residual_graph.adj[node]:
neighbor_center = clustering[neighbor]
weight = residual_graph[node][neighbor]['weight']
if neighbor_center not in lightest_edge_weight or\
weight < lightest_edge_weight[neighbor_center]:
lightest_edge_neighbor[neighbor_center] = neighbor
lightest_edge_weight[neighbor_center] = weight
return lightest_edge_neighbor, lightest_edge_weight
def _add_edge_to_spanner(H, residual_graph, u, v, weight):
"""Add the edge {u, v} to the spanner H and take weight from
the residual graph.
Parameters
----------
H : NetworkX graph
The spanner under construction.
residual_graph : NetworkX graph
The residual graph used by the Baswana-Sen algorithm. The weight
for the edge is taken from this graph.
u : node
One endpoint of the edge.
v : node
The other endpoint of the edge.
weight : object
The edge attribute to use as distance.
"""
H.add_edge(u, v)
if weight:
H[u][v][weight] = residual_graph[u][v]['weight'][0]
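# Illustrative usage sketch (not part of the NetworkX API; names below are
# examples only): build a 3-spanner of a random graph and compare edge counts.
if __name__ == '__main__':
    G = nx.gnm_random_graph(50, 400, seed=42)
    H = spanner(G, stretch=3, seed=42)
    print('original edges:', G.number_of_edges())
    print('spanner edges: ', H.number_of_edges())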
|
50c76724b4067613ae6281bed3ec2af1a22183bd
|
ee8a7d7e502c1152cc80906c698b0a64b8f2c17a
|
/tests/utils/test_fs.py
|
13d9b9098b4fb0e7357e7b9c9697cdbecb75af39
|
[
"BSD-3-Clause"
] |
permissive
|
datafolklabs/cement
|
0623378caf560e02121f5ca4b53621d1a98b0af0
|
b4e482aa4528c9dd9e999081a6c730180da1b108
|
refs/heads/main
| 2023-08-12T00:12:00.008601
| 2023-08-09T13:39:13
| 2023-08-09T13:39:13
| 398,515
| 944
| 125
|
BSD-3-Clause
| 2022-11-26T11:40:19
| 2009-12-05T04:04:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,007
|
py
|
test_fs.py
|
import os
from pytest import raises
from cement.utils import fs
def test_abspath(tmp):
path = fs.abspath('.')
assert path.startswith('/')
def test_join(tmp, rando):
full_path = os.path.abspath(os.path.join(tmp.dir, rando))
assert fs.join(tmp.dir, rando) == full_path
def test_join_exists(tmp, rando):
full_path = os.path.abspath(os.path.join(tmp.dir, rando))
res = fs.join_exists(tmp.dir, rando)
assert res[0] == full_path
assert res[1] is False
with open(full_path, 'w') as f:
f.write('data')
res = fs.join_exists(tmp.dir, rando)
assert res[1] is True
def test_ensure_dir_exists(tmp, rando):
fs.ensure_dir_exists(fs.join(tmp.dir, rando))
assert os.path.exists(fs.join(tmp.dir, rando))
with raises(AssertionError, match='(.*)exists but is not a directory(.*)'):
fs.ensure_dir_exists(tmp.file)
def test_ensure_parent_dir_exists(tmp, rando):
fs.ensure_parent_dir_exists(fs.join(tmp.dir, 'parent', rando))
assert os.path.exists(fs.join(tmp.dir, 'parent'))
def test_tmp(tmp, rando):
t1 = fs.Tmp()
assert os.path.exists(t1.dir)
assert os.path.exists(t1.file)
with fs.Tmp() as t2:
pass
assert not os.path.exists(t2.dir)
assert not os.path.exists(t2.file)
def test_backup(tmp):
bkfile = fs.backup(tmp.file)
assert "%s.bak" % os.path.basename(tmp.file) == os.path.basename(bkfile)
bkfile = fs.backup(tmp.file)
assert "%s.bak.0" % os.path.basename(tmp.file) == os.path.basename(bkfile)
bkfile = fs.backup(tmp.file)
assert "%s.bak.1" % os.path.basename(tmp.file) == os.path.basename(bkfile)
bkdir = fs.backup(tmp.dir)
assert "%s.bak" % os.path.basename(tmp.dir) == os.path.basename(bkdir)
assert fs.backup('someboguspath') is None
def test_backup_dir_trailing_slash(tmp):
# https://github.com/datafolklabs/cement/issues/610
bkdir = fs.backup("%s/" % tmp.dir)
assert "%s.bak" % os.path.basename(tmp.dir) == os.path.basename(bkdir)
|
7039eb946f9b894833334a2f76f5ba34b4158ada
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/core/timepattern.py
|
511fc6253bbf96126f0a6ed2b636701fe1ad601b
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,440
|
py
|
timepattern.py
|
# ---------------------------------------------------------------------
# Time Patterns DSL compiler
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
RC = re.compile
# Day-of-week declarations (NB: "wen" is this DSL's token for Wednesday)
DoW = ["mon", "tue", "wen", "thu", "fri", "sat", "sun"]
DoWRE = "(%s)" % ("|".join(DoW))
# Day part patterns
DAY_PATTERNS = [
(RC(r"^(\d{2})$"), lambda day: "(T.day == %d)" % int(day)),
(
RC(r"^(\d{2})-(\d{2})$"),
lambda from_day, to_day: "(%d <= T.day <= %d)" % (int(from_day), int(to_day)),
),
(
RC(r"^(\d{2})\.(\d{2})$"),
lambda day, month: "(T.day == %d and T.month == %d)" % (int(day), int(month)),
),
(
RC(r"^(\d{2})\.(\d{2})-(\d{2})\.(\d{2})$"),
lambda from_day, from_month, to_day, to_month: "('%s%s' <= ('%%02d%%02d' %% (T.month, T.day)) <= '%s%s')"
% (from_month, from_day, to_month, to_day),
),
(
RC(r"^(\d{2})\.(\d{2})\.(\d{4})$"),
lambda day, month, year: "(T.day == %d and T.month == %d and T.year == %d)"
% (int(day), int(month), int(year)),
),
(
RC(r"^(\d{2})\.(\d{2})\.(\d{4})-(\d{2})\.(\d{2})\.(\d{4})$"),
lambda from_day, from_month, from_year, to_day, to_month, to_year: "('%s%s%s' <= ('%%04d%%02d%%02d' %% (T.year, T.month, T.day)) <= '%s%s%s')"
% (from_year, from_month, from_day, to_year, to_month, to_day),
),
(
RC(r"^%s$" % DoWRE, re.IGNORECASE),
lambda dow: "(T.weekday() == %d)" % DoW.index(dow.lower()),
),
(
RC(r"^%s-%s$" % (DoWRE, DoWRE), re.IGNORECASE),
lambda from_dow, to_dow: "(%d <= T.weekday() <= %d)"
        % (DoW.index(from_dow.lower()), DoW.index(to_dow.lower())),
),
]
# Time part patterns
TIME_PATTERNS = [
(
RC(r"^(\d{2}):(\d{2})$"),
lambda hour, minute: "(T.hour == %d and T.minute == %d)" % (int(hour), int(minute)),
),
(
RC(r"^(\d{2}):(\d{2})-(\d{2}):(\d{2})$"),
lambda from_hour, from_minute, to_hour, to_minute: "(%d <= (T.hour * 60 + T.minute) <= %d)"
% (int(from_hour) * 60 + int(from_minute), int(to_hour) * 60 + int(to_minute)),
),
]
class TimePattern(object):
"""
>>> import datetime
>>> TimePattern("13").match(datetime.datetime(year=2005,month=3,day=13))
True
>>> TimePattern("02").match(datetime.datetime(year=2005,month=3,day=13))
False
>>> TimePattern("01-15").match(datetime.datetime(year=2005,month=3,day=13))
True
>>> TimePattern("01.03").match(datetime.datetime(year=2005,month=3,day=13))
False
>>> TimePattern("13.03").match(datetime.datetime(year=2005,month=3,day=13))
True
>>> TimePattern("01.03-02.04").match(datetime.datetime(year=2005,month=3,day=13))
True
>>> TimePattern("13.03.2005").match(datetime.datetime(year=2005,month=3,day=13))
True
>>> TimePattern("01.03.2005-15.03.2005").match(datetime.datetime(year=2005,month=3,day=13))
True
>>> TimePattern("sun").match(datetime.datetime(year=2005,month=3,day=13))
True
>>> TimePattern("fri").match(datetime.datetime(year=2005,month=3,day=13))
False
>>> TimePattern("fri-sun").match(datetime.datetime(year=2005,month=3,day=13))
True
>>> TimePattern("zho")
Traceback (most recent call last):
...
SyntaxError: Invalid expression 'zho'
>>> TimePattern(None).match(datetime.datetime(year=2005,month=3,day=13))
True
"""
def __init__(self, pattern):
self.code = compile(self.compile_to_python(pattern), "<string>", "eval")
def match(self, d):
"""
Check datetime object matches time pattern
:param d:
:return: Boolean result
"""
return eval(self.code, {"T": d})
@classmethod
def compile_to_python(cls, tp):
"""
        Convert a time pattern declaration (a string, or a list of
        declarations) to a Python expression
:param tp:
:return:
"""
def compile_pattern(P, p):
for l, r in P:
match = l.match(p)
if match:
return r(*match.groups())
raise SyntaxError("Invalid expression '%s'" % p)
if tp is None:
return "True"
if isinstance(tp, (list, tuple)):
if not tp:
return "True"
return "(%s)" % (" or ".join([cls.compile_to_python(p) for p in tp]))
tp = tp.strip()
if "|" in tp:
day_pattern, time_pattern = tp.split("|")
else:
day_pattern = tp
time_pattern = ""
dpl = " or ".join(
[compile_pattern(DAY_PATTERNS, x.strip()) for x in day_pattern.split(",") if x]
)
tpl = " or ".join(
[compile_pattern(TIME_PATTERNS, x.strip()) for x in time_pattern.split(",") if x]
)
x = " and ".join(["(%s)" % x for x in [dpl, tpl] if x])
if not x:
return "True"
else:
return x
class TimePatternList(object):
"""
Enclosure for a list of time patterns
"""
def __init__(self, patterns):
self.patterns = patterns
def match(self, d):
for tp in self.patterns:
if not tp.match(d):
return False
return True
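# Illustrative usage sketch: a TimePatternList matches only when every member
# pattern matches the given datetime; "mon-fri|08:00-18:00" combines a day
# part and a time part with "|".
if __name__ == "__main__":
    import datetime
    tpl = TimePatternList([TimePattern("mon-fri|08:00-18:00")])
    print(tpl.match(datetime.datetime(2019, 3, 13, 12, 30)))  # True (Wednesday noon)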
|
2cb88eaf45bed47c03daa7fccc4eaea29b6b9572
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/tools/optimize_for_inference_test.py
|
f15c75c1556a51a9cccd58a86ed8a2d36f78dd67
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 16,380
|
py
|
optimize_for_inference_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.tools import optimize_for_inference_lib
class OptimizeForInferenceTest(test.TestCase):
def create_node_def(self, op, name, inputs):
new_node = node_def_pb2.NodeDef()
new_node.op = op
new_node.name = name
for input_name in inputs:
new_node.input.extend([input_name])
return new_node
def create_constant_node_def(self, name, value, dtype, shape=None):
node = self.create_node_def("Const", name, [])
self.set_attr_dtype(node, "dtype", dtype)
self.set_attr_tensor(node, "value", value, dtype, shape)
return node
def set_attr_dtype(self, node, key, value):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(type=value.as_datatype_enum))
def set_attr_tensor(self, node, key, value, dtype, shape=None):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape)))
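  # Illustrative note: the helper trio above is how every graph in these
  # tests is assembled, e.g.
  #   node = self.create_constant_node_def(
  #       "a_constant", value=1, dtype=dtypes.float32, shape=[])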
def testOptimizeForInference(self):
self.maxDiff = 1000
unused_constant_name = "unused_constant"
unconnected_add_name = "unconnected_add"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
unused_output_add_name = "unused_output_add"
graph_def = graph_pb2.GraphDef()
unused_constant = self.create_constant_node_def(
unused_constant_name, value=0, dtype=dtypes.float32, shape=[])
graph_def.node.extend([unused_constant])
unconnected_add_node = self.create_node_def(
"Add", unconnected_add_name,
[unused_constant_name, unused_constant_name])
self.set_attr_dtype(unconnected_add_node, "T", dtypes.float32)
graph_def.node.extend([unconnected_add_node])
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = self.create_node_def("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = self.create_node_def(
"Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
graph_def.node.extend([a_identity_node])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = self.create_node_def("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = self.create_node_def(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = self.create_node_def("Add", add_name,
[a_identity_name, b_identity_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
unused_output_add_node = self.create_node_def("Add", unused_output_add_name,
[add_name, b_constant_name])
self.set_attr_dtype(unused_output_add_node, "T", dtypes.float32)
graph_def.node.extend([unused_output_add_node])
expected_output = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = self.create_node_def("Add", add_name,
[a_constant_name, b_constant_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
output = optimize_for_inference_lib.optimize_for_inference(
graph_def, [], [add_name], dtypes.float32.as_datatype_enum)
self.assertProtoEquals(expected_output, output)
@test_util.run_deprecated_v1
def testFoldBatchNorms(self):
with self.cached_session() as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
conv_op = nn_ops.conv2d(
input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op")
mean_op = constant_op.constant(
np.array([10, 20]), shape=[2], dtype=dtypes.float32)
variance_op = constant_op.constant(
np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
beta_op = constant_op.constant(
np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
gamma_op = constant_op.constant(
np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
test_util.set_producer_version(ops.get_default_graph(), 8)
gen_nn_ops._batch_norm_with_global_normalization(
conv_op,
mean_op,
variance_op,
beta_op,
gamma_op,
0.00001,
False,
name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
original_graph_def)
with self.cached_session() as sess:
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(original_result, optimized_result)
for node in optimized_graph_def.node:
self.assertNotEqual("BatchNormWithGlobalNormalization", node.op)
@test_util.run_deprecated_v1
def testFoldFusedBatchNorms(self):
for data_format, use_gpu, conv2d_func in [
("NHWC", False, nn_ops.conv2d), ("NCHW", True, nn_ops.conv2d),
("NHWC", False, nn_ops.depthwise_conv2d_native),
("NCHW", True, nn_ops.depthwise_conv2d_native)
]:
with self.cached_session(use_gpu=use_gpu) as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs),
shape=[1, 1, 6, 2] if data_format == "NHWC" else [1, 2, 1, 6],
dtype=dtypes.float32)
if conv2d_func == nn_ops.conv2d:
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
else:
weights = [1, 2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 1], dtype=dtypes.float32)
conv_op = conv2d_func(
input_op,
weights_op, [1, 1, 1, 1],
padding="SAME",
data_format=data_format,
name="conv_op")
mean_op = constant_op.constant(
np.array([10, 20]), shape=[2], dtype=dtypes.float32)
variance_op = constant_op.constant(
np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
beta_op = constant_op.constant(
np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
gamma_op = constant_op.constant(
np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
ops.get_default_graph().graph_def_versions.producer = 9
gen_nn_ops._fused_batch_norm(
conv_op,
gamma_op,
beta_op,
mean_op,
variance_op,
0.00001,
is_training=False,
data_format=data_format,
name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
original_graph_def)
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(
original_result, optimized_result, rtol=1e-04, atol=1e-06)
for node in optimized_graph_def.node:
self.assertNotEqual("FusedBatchNorm", node.op)
@test_util.run_deprecated_v1
def testFoldFusedBatchNormsV3(self):
for data_format, conv2d_func in [("NHWC", nn_ops.conv2d),
("NCHW", nn_ops.conv2d),
("NHWC", nn_ops.depthwise_conv2d_native),
("NCHW", nn_ops.depthwise_conv2d_native)]:
with self.cached_session() as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs),
shape=[1, 1, 6, 2] if data_format == "NHWC" else [1, 2, 1, 6],
dtype=dtypes.float32)
if conv2d_func == nn_ops.conv2d:
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
else:
weights = [1, 2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 1], dtype=dtypes.float32)
mean_op = constant_op.constant(
np.array([10, 20]), shape=[2], dtype=dtypes.float32)
variance_op = constant_op.constant(
np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
beta_op = constant_op.constant(
np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
gamma_op = constant_op.constant(
np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
ops.get_default_graph().graph_def_versions.producer = 9
conv_op = conv2d_func(
input_op,
weights_op, [1, 1, 1, 1],
padding="SAME",
data_format=data_format,
name="conv_op")
gen_nn_ops.fused_batch_norm_v3(
conv_op,
gamma_op,
beta_op,
mean_op,
variance_op,
0.00001,
is_training=False,
data_format=data_format,
name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
original_graph_def)
with self.cached_session() as sess:
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(
original_result, optimized_result, rtol=1e-04, atol=1e-06)
for node in optimized_graph_def.node:
self.assertNotEqual("FusedBatchNormV3", node.op)
@test_util.run_deprecated_v1
def testFuseResizePadAndConv(self):
with self.cached_session() as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
resize_op = image_ops.resize_bilinear(
input_op, [12, 4], align_corners=False)
pad_op = array_ops.pad(resize_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
mode="REFLECT")
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
nn_ops.conv2d(
pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
original_graph_def, ["output"])
with self.cached_session() as sess:
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(original_result, optimized_result)
for node in optimized_graph_def.node:
self.assertNotEqual("Conv2D", node.op)
self.assertNotEqual("MirrorPad", node.op)
self.assertNotEqual("ResizeBilinear", node.op)
@test_util.run_deprecated_v1
def testFuseResizeAndConv(self):
with self.cached_session() as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
resize_op = image_ops.resize_bilinear(
input_op, [12, 4], align_corners=False)
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
nn_ops.conv2d(
resize_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
original_graph_def, ["output"])
with self.cached_session() as sess:
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(original_result, optimized_result)
for node in optimized_graph_def.node:
self.assertNotEqual("Conv2D", node.op)
self.assertNotEqual("MirrorPad", node.op)
@test_util.run_deprecated_v1
def testFusePadAndConv(self):
with self.cached_session() as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
pad_op = array_ops.pad(input_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
mode="REFLECT")
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
nn_ops.conv2d(
pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
original_graph_def, ["output"])
with self.cached_session() as sess:
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(original_result, optimized_result)
for node in optimized_graph_def.node:
self.assertNotEqual("Conv2D", node.op)
self.assertNotEqual("ResizeBilinear", node.op)
if __name__ == "__main__":
test.main()
|
b1959cdd9c53aec546d6a875f343aeef321bc67f
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/airflow/providers/amazon/aws/triggers/rds.py
|
ebc80ba700a2c2164be8bb0f1d8e56d3ab26ffa5
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 10,084
|
py
|
rds.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.amazon.aws.hooks.rds import RdsHook
from airflow.providers.amazon.aws.triggers.base import AwsBaseWaiterTrigger
from airflow.providers.amazon.aws.utils.rds import RdsDbType
from airflow.providers.amazon.aws.utils.waiter_with_logging import async_wait
from airflow.triggers.base import BaseTrigger, TriggerEvent
if TYPE_CHECKING:
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
class RdsDbInstanceTrigger(BaseTrigger):
"""
Deprecated Trigger for RDS operations. Do not use.
:param waiter_name: Name of the waiter to use, for instance 'db_instance_available'
or 'db_instance_deleted'.
:param db_instance_identifier: The DB instance identifier for the DB instance to be polled.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region_name: AWS region where the DB is located, if different from the default one.
:param response: The response from the RdsHook, to be passed back to the operator.
"""
def __init__(
self,
waiter_name: str,
db_instance_identifier: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str,
region_name: str | None,
response: dict[str, Any],
):
warnings.warn(
"This trigger is deprecated, please use the other RDS triggers "
"such as RdsDbDeletedTrigger, RdsDbStoppedTrigger or RdsDbAvailableTrigger",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.db_instance_identifier = db_instance_identifier
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.waiter_name = waiter_name
self.response = response
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
# dynamically generate the fully qualified name of the class
self.__class__.__module__ + "." + self.__class__.__qualname__,
{
"db_instance_identifier": self.db_instance_identifier,
"waiter_delay": str(self.waiter_delay),
"waiter_max_attempts": str(self.waiter_max_attempts),
"aws_conn_id": self.aws_conn_id,
"region_name": self.region_name,
"waiter_name": self.waiter_name,
"response": self.response,
},
)
async def run(self):
self.hook = RdsHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
async with self.hook.async_conn as client:
waiter = client.get_waiter(self.waiter_name)
await async_wait(
waiter=waiter,
waiter_delay=int(self.waiter_delay),
waiter_max_attempts=int(self.waiter_max_attempts),
args={"DBInstanceIdentifier": self.db_instance_identifier},
failure_message="Error checking DB Instance status",
status_message="DB instance status is",
status_args=["DBInstances[0].DBInstanceStatus"],
)
yield TriggerEvent({"status": "success", "response": self.response})
_waiter_arg = {
RdsDbType.INSTANCE: "DBInstanceIdentifier",
RdsDbType.CLUSTER: "DBClusterIdentifier",
}
_status_paths = {
RdsDbType.INSTANCE: ["DBInstances[].DBInstanceStatus", "DBInstances[].StatusInfos"],
RdsDbType.CLUSTER: ["DBClusters[].Status"],
}
class RdsDbAvailableTrigger(AwsBaseWaiterTrigger):
"""
Trigger to wait asynchronously for a DB instance or cluster to be available.
:param db_identifier: The DB identifier for the DB instance or cluster to be polled.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region_name: AWS region where the DB is located, if different from the default one.
:param response: The response from the RdsHook, to be passed back to the operator.
:param db_type: The type of DB: instance or cluster.
"""
def __init__(
self,
db_identifier: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str,
response: dict[str, Any],
db_type: RdsDbType,
region_name: str | None = None,
) -> None:
super().__init__(
serialized_fields={
"db_identifier": db_identifier,
"response": response,
"db_type": db_type,
},
waiter_name=f"db_{db_type.value}_available",
waiter_args={_waiter_arg[db_type]: db_identifier},
failure_message="Error while waiting for DB to be available",
status_message="DB initialization in progress",
status_queries=_status_paths[db_type],
return_key="response",
return_value=response,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return RdsHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
class RdsDbDeletedTrigger(AwsBaseWaiterTrigger):
"""
Trigger to wait asynchronously for a DB instance or cluster to be deleted.
:param db_identifier: The DB identifier for the DB instance or cluster to be polled.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region_name: AWS region where the DB is located, if different from the default one.
:param response: The response from the RdsHook, to be passed back to the operator.
:param db_type: The type of DB: instance or cluster.
"""
def __init__(
self,
db_identifier: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str,
response: dict[str, Any],
db_type: RdsDbType,
region_name: str | None = None,
) -> None:
super().__init__(
serialized_fields={
"db_identifier": db_identifier,
"response": response,
"db_type": db_type,
},
waiter_name=f"db_{db_type.value}_deleted",
waiter_args={_waiter_arg[db_type]: db_identifier},
failure_message="Error while deleting DB",
status_message="DB deletion in progress",
status_queries=_status_paths[db_type],
return_key="response",
return_value=response,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return RdsHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
class RdsDbStoppedTrigger(AwsBaseWaiterTrigger):
"""
Trigger to wait asynchronously for a DB instance or cluster to be stopped.
:param db_identifier: The DB identifier for the DB instance or cluster to be polled.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region_name: AWS region where the DB is located, if different from the default one.
:param response: The response from the RdsHook, to be passed back to the operator.
:param db_type: The type of DB: instance or cluster.
"""
def __init__(
self,
db_identifier: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str,
response: dict[str, Any],
db_type: RdsDbType,
region_name: str | None = None,
) -> None:
super().__init__(
serialized_fields={
"db_identifier": db_identifier,
"response": response,
"db_type": db_type,
},
waiter_name=f"db_{db_type.value}_stopped",
waiter_args={_waiter_arg[db_type]: db_identifier},
failure_message="Error while stopping DB",
status_message="DB is being stopped",
status_queries=_status_paths[db_type],
return_key="response",
return_value=response,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return RdsHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
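# Illustrative sketch (values are placeholders): an operator's deferral path
# constructs a trigger like this; serialize() is inherited from
# AwsBaseWaiterTrigger and is what the triggerer uses to rehydrate it.
if __name__ == "__main__":
    trigger = RdsDbAvailableTrigger(
        db_identifier="example-db",
        waiter_delay=30,
        waiter_max_attempts=40,
        aws_conn_id="aws_default",
        response={},
        db_type=RdsDbType.INSTANCE,
    )
    print(trigger.serialize()[0])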
|
6045af10cee975050042317348776ea1950cec4c
|
9654e0c7628c2607f191438cdea1782309bb3c2a
|
/smartsim/entity/model.py
|
1fd6b18612aeb15364bd090e5792d66b2abfc54e
|
[
"BSD-2-Clause"
] |
permissive
|
CrayLabs/SmartSim
|
85482706627378be01c6c4adf1ba28193c8439bb
|
f9e17f00ed1109fd09610111d54ac9cb82bccaa7
|
refs/heads/develop
| 2023-08-18T02:11:52.167416
| 2023-08-16T17:07:58
| 2023-08-16T17:07:58
| 311,268,879
| 177
| 33
|
BSD-2-Clause
| 2023-09-14T19:52:08
| 2020-11-09T08:19:12
|
Python
|
UTF-8
|
Python
| false
| false
| 25,291
|
py
|
model.py
|
# BSD 2-Clause License
#
# Copyright (c) 2021-2023, Hewlett Packard Enterprise
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
import collections.abc
import sys
import typing as t
import warnings
from .._core.utils.helpers import cat_arg_and_value, init_default
from ..error import EntityExistsError, SSUnsupportedError
from .dbobject import DBModel, DBScript
from .entity import SmartSimEntity
from .files import EntityFiles
from ..settings.base import BatchSettings, RunSettings
from ..log import get_logger
logger = get_logger(__name__)
class Model(SmartSimEntity):
def __init__(
self,
name: str,
params: t.Dict[str, str],
path: str,
run_settings: RunSettings,
params_as_args: t.Optional[t.List[str]] = None,
batch_settings: t.Optional[BatchSettings] = None,
):
"""Initialize a ``Model``
:param name: name of the model
:type name: str
:param params: model parameters for writing into configuration files or
to be passed as command line arguments to executable.
:type params: dict
:param path: path to output, error, and configuration files
:type path: str
:param run_settings: launcher settings specified in the experiment
:type run_settings: RunSettings
:param params_as_args: list of parameters which have to be
interpreted as command line arguments to
be added to run_settings
:type params_as_args: list[str]
:param batch_settings: Launcher settings for running the individual
model as a batch job, defaults to None
:type batch_settings: BatchSettings | None
"""
super().__init__(name, path, run_settings)
self.params = params
self.params_as_args = params_as_args
self.incoming_entities: t.List[SmartSimEntity] = []
self._key_prefixing_enabled = False
self.batch_settings = batch_settings
self._db_models: t.List[DBModel] = []
self._db_scripts: t.List[DBScript] = []
self.files: t.Optional[EntityFiles] = None
@property
def colocated(self) -> bool:
"""Return True if this Model will run with a colocated Orchestrator"""
return bool(self.run_settings.colocated_db_settings)
def register_incoming_entity(self, incoming_entity: SmartSimEntity) -> None:
"""Register future communication between entities.
Registers the named data sources that this entity
has access to by storing the key_prefix associated
with that entity
:param incoming_entity: The entity that data will be received from
:type incoming_entity: SmartSimEntity
:raises SmartSimError: if incoming entity has already been registered
"""
if incoming_entity.name in [
in_entity.name for in_entity in self.incoming_entities
]:
raise EntityExistsError(
f"'{incoming_entity.name}' has already "
+ "been registered as an incoming entity"
)
self.incoming_entities.append(incoming_entity)
def enable_key_prefixing(self) -> None:
"""If called, the entity will prefix its keys with its own model name"""
self._key_prefixing_enabled = True
def disable_key_prefixing(self) -> None:
"""If called, the entity will not prefix its keys with its own model name"""
self._key_prefixing_enabled = False
def query_key_prefixing(self) -> bool:
"""Inquire as to whether this entity will prefix its keys with its name"""
return self._key_prefixing_enabled
def attach_generator_files(
self,
to_copy: t.Optional[t.List[str]] = None,
to_symlink: t.Optional[t.List[str]] = None,
to_configure: t.Optional[t.List[str]] = None,
) -> None:
"""Attach files to an entity for generation
Attach files needed for the entity that, upon generation,
will be located in the path of the entity. Invoking this method
after files have already been attached will overwrite
the previous list of entity files.
During generation, files "to_copy" are copied into
the path of the entity, and files "to_symlink" are
symlinked into the path of the entity.
Files "to_configure" are text based model input files where
parameters for the model are set. Note that only models
support the "to_configure" field. These files must have
fields tagged that correspond to the values the user
would like to change. The tag is settable but defaults
to a semicolon e.g. THERMO = ;10;
:param to_copy: files to copy, defaults to []
:type to_copy: list, optional
:param to_symlink: files to symlink, defaults to []
:type to_symlink: list, optional
:param to_configure: input files with tagged parameters, defaults to []
:type to_configure: list, optional
"""
to_copy = init_default([], to_copy, (list, str))
to_symlink = init_default([], to_symlink, (list, str))
to_configure = init_default([], to_configure, (list, str))
self.files = EntityFiles(to_configure, to_copy, to_symlink)
def colocate_db(self, *args: t.Any, **kwargs: t.Any) -> None:
"""An alias for ``Model.colocate_db_tcp``"""
warnings.warn(
(
"`colocate_db` has been deprecated and will be removed in a \n"
"future release. Please use `colocate_db_tcp` or `colocate_db_uds`."
), FutureWarning
)
self.colocate_db_tcp(*args, **kwargs)
def colocate_db_uds(
self,
unix_socket: str = "/tmp/redis.socket",
socket_permissions: int = 755,
db_cpus: int = 1,
custom_pinning: t.Optional[t.Iterable[t.Union[int, t.Iterable[int]]]] = None,
debug: bool = False,
**kwargs: t.Any,
) -> None:
"""Colocate an Orchestrator instance with this Model over UDS.
This method will initialize settings which add an unsharded
database to this Model instance. Only this Model will be able to communicate
with this colocated database by using Unix Domain sockets.
Extra parameters for the db can be passed through kwargs. This includes
many performance, caching and inference settings.
.. highlight:: python
.. code-block:: python
example_kwargs = {
"maxclients": 100000,
"threads_per_queue": 1,
"inter_op_threads": 1,
"intra_op_threads": 1,
"server_threads": 2 # keydb only
}
Generally these don't need to be changed.
:param unix_socket: path to where the socket file will be created
:type unix_socket: str, optional
:param socket_permissions: permissions for the socketfile
:type socket_permissions: int, optional
:param db_cpus: number of cpus to use for orchestrator, defaults to 1
:type db_cpus: int, optional
:param custom_pinning: CPUs to pin the orchestrator to. Passing an empty
iterable disables pinning
        :type custom_pinning: iterable of ints or iterables of ints, optional
:param debug: launch Model with extra debug information about the colocated db
:type debug: bool, optional
:param kwargs: additional keyword arguments to pass to the orchestrator database
:type kwargs: dict, optional
"""
uds_options = {
"unix_socket": unix_socket,
"socket_permissions": socket_permissions,
"port": 0, # This is hardcoded to 0 as recommended by redis for UDS
}
common_options = {
"cpus": db_cpus,
"custom_pinning": custom_pinning,
"debug": debug,
}
self._set_colocated_db_settings(uds_options, common_options, **kwargs)
def colocate_db_tcp(
self,
port: int = 6379,
ifname: t.Union[str, list[str]] = "lo",
db_cpus: int = 1,
custom_pinning: t.Optional[t.Iterable[t.Union[int, t.Iterable[int]]]] = None,
debug: bool = False,
**kwargs: t.Any,
) -> None:
"""Colocate an Orchestrator instance with this Model over TCP/IP.
This method will initialize settings which add an unsharded
database to this Model instance. Only this Model will be able to communicate
with this colocated database by using the loopback TCP interface.
Extra parameters for the db can be passed through kwargs. This includes
many performance, caching and inference settings.
.. highlight:: python
.. code-block:: python
            example_kwargs = {
                "maxclients": 100000,
                "threads_per_queue": 1,
                "inter_op_threads": 1,
                "intra_op_threads": 1,
                "server_threads": 2 # keydb only
            }
Generally these don't need to be changed.
:param port: port to use for orchestrator database, defaults to 6379
:type port: int, optional
:param ifname: interface to use for orchestrator, defaults to "lo"
:type ifname: str | list[str], optional
:param db_cpus: number of cpus to use for orchestrator, defaults to 1
:type db_cpus: int, optional
:param custom_pinning: CPUs to pin the orchestrator to. Passing an empty
iterable disables pinning
        :type custom_pinning: iterable of ints or iterables of ints, optional
:param debug: launch Model with extra debug information about the colocated db
:type debug: bool, optional
:param kwargs: additional keyword arguments to pass to the orchestrator database
:type kwargs: dict, optional
"""
tcp_options = {"port": port, "ifname": ifname}
common_options = {
"cpus": db_cpus,
"custom_pinning": custom_pinning,
"debug": debug,
}
self._set_colocated_db_settings(tcp_options, common_options, **kwargs)
def _set_colocated_db_settings(
self,
connection_options: t.Dict[str, t.Any],
common_options: t.Dict[str, t.Any],
**kwargs: t.Any,
) -> None:
"""
Ingest the connection-specific options (UDS/TCP) and set the final settings
for the colocated database
"""
if hasattr(self.run_settings, "mpmd") and len(self.run_settings.mpmd) > 0:
raise SSUnsupportedError(
"Models colocated with databases cannot be run as a mpmd workload"
)
if hasattr(self.run_settings, "_prep_colocated_db"):
# pylint: disable-next=protected-access
self.run_settings._prep_colocated_db(common_options["cpus"])
if "limit_app_cpus" in kwargs:
raise SSUnsupportedError(
"Pinning app CPUs via limit_app_cpus is not supported. Modify "
"RunSettings using the correct binding option for your launcher."
)
# TODO list which db settings can be extras
common_options["custom_pinning"] = self._create_pinning_string(
common_options["custom_pinning"],
common_options["cpus"]
)
colo_db_config = {}
colo_db_config.update(connection_options)
colo_db_config.update(common_options)
# redisai arguments for inference settings
colo_db_config["rai_args"] = {
"threads_per_queue": kwargs.get("threads_per_queue", None),
"inter_op_parallelism": kwargs.get("inter_op_parallelism", None),
"intra_op_parallelism": kwargs.get("intra_op_parallelism", None),
}
colo_db_config["extra_db_args"] = {
k: str(v) for k, v in kwargs.items() if k not in colo_db_config["rai_args"]
}
self._check_db_objects_colo()
colo_db_config["db_models"] = self._db_models
colo_db_config["db_scripts"] = self._db_scripts
self.run_settings.colocated_db_settings = colo_db_config
@staticmethod
def _create_pinning_string(
pin_ids: t.Optional[t.Iterable[t.Union[int, t.Iterable[int]]]],
cpus: int
) -> t.Optional[str]:
"""Create a comma-separated string CPU ids. By default, None returns
0,1,...,cpus-1; an empty iterable will disable pinning altogether,
and an iterable constructs a comma separate string (e.g. 0,2,5)
"""
def _stringify_id(_id: int) -> str:
"""Return the cPU id as a string if an int, otherwise raise a ValueError"""
if isinstance(_id, int):
if _id < 0:
raise ValueError("CPU id must be a nonnegative number")
return str(_id)
raise TypeError(f"Argument is of type '{type(_id)}' not 'int'")
_invalid_input_message = (
"Expected a cpu pinning specification of type iterable of ints or "
f"iterables of ints. Instead got type `{type(pin_ids)}`"
)
        # Deal with MacOSX limitations first. "None" (the default) and an
        # empty iterable both disable pinning; any other iterable triggers a
        # warning and pinning is ignored; anything else raises a TypeError
if sys.platform == "darwin":
if pin_ids is None or not pin_ids:
return None
if isinstance(pin_ids, collections.abc.Iterable):
warnings.warn(
"CPU pinning is not supported on MacOSX. Ignoring pinning "
"specification.",
RuntimeWarning
)
return None
raise TypeError(_invalid_input_message)
# Flatten the iterable into a list and check to make sure that the resulting
# elements are all ints
if pin_ids is None:
return ','.join(_stringify_id(i) for i in range(cpus))
if not pin_ids:
return None
if isinstance(pin_ids, collections.abc.Iterable):
pin_list = []
for pin_id in pin_ids:
if isinstance(pin_id, collections.abc.Iterable):
pin_list.extend([_stringify_id(j) for j in pin_id])
else:
pin_list.append(_stringify_id(pin_id))
return ','.join(sorted(set(pin_list)))
raise TypeError(_invalid_input_message)
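    # Example behavior of _create_pinning_string (illustrative):
    #   _create_pinning_string(None, 4)        -> "0,1,2,3"
    #   _create_pinning_string([], 4)          -> None  (pinning disabled)
    #   _create_pinning_string([0, [2, 5]], 4) -> "0,2,5"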
def params_to_args(self) -> None:
"""Convert parameters to command line arguments and update run settings."""
if self.params_as_args is not None:
for param in self.params_as_args:
                if param not in self.params:
raise ValueError(
f"Tried to convert {param} to command line argument for Model "
f"{self.name}, but its value was not found in model params"
)
if self.run_settings is None:
raise ValueError(
"Tried to configure command line parameter for Model "
f"{self.name}, but no RunSettings are set."
)
self.run_settings.add_exe_args(
cat_arg_and_value(param, self.params[param])
)
def add_ml_model(
self,
name: str,
backend: str,
model: t.Optional[str] = None,
model_path: t.Optional[str] = None,
device: t.Literal["CPU","GPU"] = "CPU",
devices_per_node: int = 1,
batch_size: int = 0,
min_batch_size: int = 0,
tag: str = "",
inputs: t.Optional[t.List[str]] = None,
outputs: t.Optional[t.List[str]] = None,
) -> None:
"""A TF, TF-lite, PT, or ONNX model to load into the DB at runtime
Each ML Model added will be loaded into an
orchestrator (converged or not) prior to the execution
of this Model instance
One of either model (in memory representation) or model_path (file)
must be provided
:param name: key to store model under
:type name: str
:param backend: name of the backend (TORCH, TF, TFLITE, ONNX)
:type backend: str
:param model: A model in memory (only supported for non-colocated orchestrators)
:type model: byte string, optional
        :param model_path: file path to a serialized model
        :type model_path: str, optional
:param device: name of device for execution, defaults to "CPU"
:type device: str, optional
:param devices_per_node: The number of GPU devices available on the host.
            This parameter only applies to GPU devices and will be ignored if
            device is specified as CPU.
:type devices_per_node: int
:param batch_size: batch size for execution, defaults to 0
:type batch_size: int, optional
:param min_batch_size: minimum batch size for model execution, defaults to 0
:type min_batch_size: int, optional
:param tag: additional tag for model information, defaults to ""
:type tag: str, optional
:param inputs: model inputs (TF only), defaults to None
:type inputs: list[str], optional
:param outputs: model outputs (TF only), defaults to None
:type outputs: list[str], optional
"""
db_model = DBModel(
name=name,
backend=backend,
model=model,
model_file=model_path,
device=device,
devices_per_node=devices_per_node,
batch_size=batch_size,
min_batch_size=min_batch_size,
tag=tag,
inputs=inputs,
outputs=outputs,
)
self._append_db_model(db_model)
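# A hedged usage sketch; the model instance, key name, and path below are
# hypothetical:
#
# model.add_ml_model(
# name="cnn",
# backend="TF",
# model_path="/path/to/model.pb", # file path works for colocated DBs too
# device="GPU",
# devices_per_node=2,
# inputs=["input:0"],
# outputs=["output:0"],
# )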
def add_script(
self,
name: str,
script: t.Optional[str] = None,
script_path: t.Optional[str] = None,
device: t.Literal["CPU","GPU"] = "CPU",
devices_per_node: int = 1,
) -> None:
"""TorchScript to launch with this Model instance
Each script added to the model will be loaded into an
orchestrator (converged or not) prior to the execution
of this Model instance
Device selection is either "GPU" or "CPU". If many devices are
present, a number can be passed for specification e.g. "GPU:1".
Setting ``devices_per_node=N``, with N greater than one, will result
in the script being stored on the first N devices of type ``device``.
One of either script (in memory string representation) or script_path (file)
must be provided
:param name: key to store script under
:type name: str
:param script: TorchScript code (only supported for non-colocated orchestrators)
:type script: str, optional
:param script_path: path to TorchScript code
:type script_path: str, optional
:param device: device for script execution, defaults to "CPU"
:type device: str, optional
:param devices_per_node: The number of GPU devices available on the host.
This parameter only applies to GPU devices and will be ignored if device
is specified as CPU.
:type devices_per_node: int
"""
db_script = DBScript(
name=name,
script=script,
script_path=script_path,
device=device,
devices_per_node=devices_per_node,
)
self._append_db_script(db_script)
def add_function(
self,
name: str,
function: t.Optional[str] = None,
device: t.Literal["CPU","GPU"] = "CPU",
devices_per_node: int = 1,
) -> None:
"""TorchScript function to launch with this Model instance
Each function added to the model will be loaded into a
non-converged orchestrator prior to the execution
of this Model instance.
For converged orchestrators, the :meth:`add_script` method should be used.
Device selection is either "GPU" or "CPU". If many devices are
present, a number can be passed for specification e.g. "GPU:1".
Setting ``devices_per_node=N``, with N greater than one, will result
in the function being stored on the first N devices of type ``device``.
:param name: key to store function under
:type name: str
:param function: TorchScript function code
:type function: str, optional
:param device: device for script execution, defaults to "CPU"
:type device: str, optional
:param devices_per_node: The number of GPU devices available on the host.
This parameter only applies to GPU devices and will be ignored if device
is specified as CPU.
:type devices_per_node: int
"""
db_script = DBScript(
name=name, script=function, device=device, devices_per_node=devices_per_node
)
self._append_db_script(db_script)
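# A hedged usage sketch; the key and function source below are illustrative
# only:
#
# model.add_function(
# "timestwo",
# function="def timestwo(x):\n    return 2 * x",
# device="CPU",
# )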
def __hash__(self) -> int:
return hash(self.name)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Model):
return False
return self.name == other.name
def __str__(self) -> str: # pragma: no cover
entity_str = "Name: " + self.name + "\n"
entity_str += "Type: " + self.type + "\n"
entity_str += str(self.run_settings) + "\n"
if self._db_models:
entity_str += "DB Models: \n" + str(len(self._db_models)) + "\n"
if self._db_scripts:
entity_str += "DB Scripts: \n" + str(len(self._db_scripts)) + "\n"
return entity_str
def _append_db_model(self, db_model: DBModel) -> None:
if not db_model.is_file and self.colocated:
err_msg = "ML model can not be set from memory for colocated databases.\n"
err_msg += (
f"Please store the ML model named {db_model.name} in binary format "
)
err_msg += "and add it to the SmartSim Model as file."
raise SSUnsupportedError(err_msg)
self._db_models.append(db_model)
def _append_db_script(self, db_script: DBScript) -> None:
if db_script.func and self.colocated:
if not isinstance(db_script.func, str):
err_msg = (
"Functions can not be set from memory for colocated databases.\n"
f"Please convert the function named {db_script.name} "
"to a string or store it as a text file and add it to the "
"SmartSim Model with add_script."
)
raise SSUnsupportedError(err_msg)
self._db_scripts.append(db_script)
def _check_db_objects_colo(self) -> None:
for db_model in self._db_models:
if not db_model.is_file:
err_msg = (
"ML model cannot be set from memory for colocated databases.\n"
f"Please store the ML model named {db_model.name} in binary "
"format and add it to the SmartSim Model as a file."
)
raise SSUnsupportedError(err_msg)
for db_script in self._db_scripts:
if db_script.func:
if not isinstance(db_script.func, str):
err_msg = (
"Functions cannot be set from memory for colocated "
"databases.\nPlease convert the function named "
f"{db_script.name} to a string or store it as a text "
"file and add it to the SmartSim Model with add_script."
)
)
raise SSUnsupportedError(err_msg)
|
1d70e64bebbb696b2cda054f4102b5d1aa35daab
|
56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a
|
/applications/TrilinosApplication/tests/test_trilinos_matrix.py
|
7393cafc185ef90959cb87d519dfc495b1260890
|
[
"BSD-3-Clause"
] |
permissive
|
KratosMultiphysics/Kratos
|
82b902a2266625b25f17239b42da958611a4b9c5
|
366949ec4e3651702edc6ac3061d2988f10dd271
|
refs/heads/master
| 2023-08-30T20:31:37.818693
| 2023-08-30T18:01:01
| 2023-08-30T18:01:01
| 81,815,495
| 994
| 285
|
NOASSERTION
| 2023-09-14T13:22:43
| 2017-02-13T10:58:24
|
C++
|
UTF-8
|
Python
| false
| false
| 702
|
py
|
test_trilinos_matrix.py
|
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics
import KratosMultiphysics.TrilinosApplication as KratosTrilinos
class TestTrilinosMatrix(KratosUnittest.TestCase):
def test_resize(self):
comm = KratosTrilinos.CreateEpetraCommunicator(KratosMultiphysics.DataCommunicator.GetDefault())
space = KratosTrilinos.TrilinosSparseSpace()
pb = space.CreateEmptyVectorPointer(comm)
space.ResizeVector(pb,2)
n = space.Size(pb.GetReference())
self.assertEqual(n,2)
if __name__ == '__main__':
KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(KratosMultiphysics.Logger.Severity.WARNING)
KratosUnittest.main()
|
e9198c6f88837caded5b851a734aef5d34a8d252
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/SPPNet/eval.py
|
7ea3ca84dc1f699a87b5e2a499ef19ffed561575
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,736
|
py
|
eval.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
######################## eval sppnet example ########################
eval sppnet according to the model checkpoint file:
python eval.py --data_path /YourDataPath --ckpt_path Your.ckpt --device_id YourAscendId --test_model model
"""
import ast
import argparse
import mindspore.nn as nn
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.train import Model
from src.config import sppnet_mult_cfg, sppnet_single_cfg, zfnet_cfg
from src.dataset import create_dataset_imagenet
from src.sppnet import SppNet
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='MindSpore SPPNet Example')
parser.add_argument('--device_target', type=str, default="Ascend",
help='device where the code will be implemented (default: Ascend)')
parser.add_argument('--test_model', type=str, default='sppnet_single', help='choose the model to test',
choices=['sppnet_single', 'sppnet_mult', 'zfnet'])
parser.add_argument('--data_path', type=str, default="", help='path where the dataset is saved')
parser.add_argument('--ckpt_path', type=str, default="./ckpt",
help='path to the trained ckpt file (required for testing)')
parser.add_argument('--dataset_sink_mode', type=ast.literal_eval,
default=True, help='dataset_sink_mode is False or True')
parser.add_argument('--device_id', type=int, default=0, help='device id of Ascend. (Default: 0)')
args = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
context.set_context(device_id=args.device_id)
print("============== Starting Testing ==============")
if args.test_model == "zfnet":
cfg = zfnet_cfg
ds_eval = create_dataset_imagenet(args.data_path, 'zfnet', cfg.batch_size, training=False)
network = SppNet(cfg.num_classes, phase='test', train_model=args.test_model)
elif args.test_model == "sppnet_single":
cfg = sppnet_single_cfg
ds_eval = create_dataset_imagenet(args.data_path, cfg.batch_size, training=False)
network = SppNet(cfg.num_classes, phase='test', train_model=args.test_model)
else:
cfg = sppnet_mult_cfg
ds_eval = create_dataset_imagenet(args.data_path, cfg.batch_size, training=False)
network = SppNet(cfg.num_classes, phase='test', train_model=args.test_model)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
param_dict = load_checkpoint(args.ckpt_path)
print("load checkpoint from [{}].".format(args.ckpt_path))
load_param_into_net(network, param_dict)
network.set_train(False)
model = Model(network, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})
if ds_eval.get_dataset_size() == 0:
raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")
result = model.eval(ds_eval, dataset_sink_mode=args.dataset_sink_mode)
print("result : {}".format(result))
|
1b12e2e4d392dbfc54fa528c60222fe919020d49
|
3eb3c4046b17e265930aaf89fa93f41896f243cb
|
/spynnaker/pyNN/spynnaker_external_device_plugin_manager.py
|
064c63b4e120d9a416c60cdba4091fd08ed215bb
|
[
"Apache-2.0"
] |
permissive
|
SpiNNakerManchester/sPyNNaker
|
b177613a114cfc7e7687ec36c1f72a2f07f66977
|
891cfb3046f66185fd8df52d270380fa94c32eab
|
refs/heads/master
| 2023-09-01T11:28:21.252266
| 2023-08-17T08:07:43
| 2023-08-17T08:07:43
| 20,801,613
| 101
| 53
|
Apache-2.0
| 2023-09-14T18:39:29
| 2014-06-13T11:07:19
|
Python
|
UTF-8
|
Python
| false
| false
| 12,861
|
py
|
spynnaker_external_device_plugin_manager.py
|
# Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinn_utilities.socket_address import SocketAddress
from pacman.model.graphs.application import ApplicationEdge
from spinn_utilities.config_holder import (get_config_int, get_config_str)
from spinnman.messages.eieio import EIEIOType
from spinn_front_end_common.utility_models import (
ReverseIpTagMultiCastSource)
from spinn_front_end_common.utilities.utility_objs import (
LivePacketGatherParameters)
from spynnaker.pyNN.data import SpynnakerDataView
from spynnaker.pyNN.utilities.constants import (
LIVE_POISSON_CONTROL_PARTITION_ID, SPIKE_PARTITION_ID)
from spynnaker.pyNN.models.populations import Population
class SpynnakerExternalDevicePluginManager(object):
"""
User-level interface for the external device plug-in manager.
"""
__slots__ = []
@staticmethod
def add_database_socket_address(
database_notify_host, database_notify_port_num,
database_ack_port_num):
"""
:param database_notify_host:
Host to notify that the database (and application) is ready.
:type database_notify_host: str or None
:param database_notify_port_num:
Port on which to notify that the database (and application) is ready.
:type database_notify_port_num: int or None
:param database_ack_port_num:
Port on which to listen for an acknowledgement that the
simulation should start.
:type database_ack_port_num: int or None
"""
# build the database socket address used by the notification interface
database_socket = SocketAddress(
listen_port=database_ack_port_num,
notify_host_name=database_notify_host,
notify_port_no=database_notify_port_num)
# update socket interface with new demands.
SpynnakerDataView.add_database_socket_address(database_socket)
@staticmethod
def activate_live_output_for(
population, database_notify_host=None,
database_notify_port_num=None,
database_ack_port_num=None, port=None, host=None, tag=None,
strip_sdp=True, use_prefix=False, key_prefix=None,
prefix_type=None, message_type=EIEIOType.KEY_32_BIT,
right_shift=0, payload_as_time_stamps=True, notify=True,
use_payload_prefix=True, payload_prefix=None,
payload_right_shift=0, number_of_packets_sent_per_time_step=0,
translate_keys=False):
"""
Output the spikes from a given population from SpiNNaker as they
occur in the simulation.
:param ~spynnaker.pyNN.models.populations.Population population:
The population to activate the live output for
:param str database_notify_host:
The hostname for the device which is listening to the database
notification.
:param int database_ack_port_num:
The port number on which an external device will acknowledge that
it has finished reading the database and is ready for execution
to start
:param int database_notify_port_num:
The port number on which an external device will receive the
"database is ready" command
:param key_prefix: the prefix to be applied to the key
:type key_prefix: int or None
:param ~spinnman.messages.eieio.EIEIOPrefix prefix_type:
if the prefix type is 32 bit or 16 bit
:param ~spinnman.messages.eieio.EIEIOType message_type:
If the message is a EIEIO command message, or an EIEIO data
message with 16 bit or 32 bit keys.
:param bool payload_as_time_stamps:
:param int right_shift:
:param bool use_payload_prefix:
:param bool notify:
:param payload_prefix:
:type payload_prefix: int or None
:param int payload_right_shift:
:param int number_of_packets_sent_per_time_step:
:param int port:
The UDP port to which the live spikes will be sent. If not
specified, the port will be taken from the "live_spike_port"
parameter in the "Recording" section of the sPyNNaker
configuration file.
:param str host:
The host name or IP address to which the live spikes will be
sent. If not specified, the host will be taken from the
"live_spike_host" parameter in the "Recording" section of the
sPyNNaker configuration file.
:param int tag:
The IP tag to be used for the spikes. If not specified, one will
be automatically assigned
:param bool strip_sdp:
Determines if the SDP headers will be stripped from the
transmitted packet.
:param bool use_prefix:
Determines if the spike packet will contain a common prefix for
the spikes
:param bool translate_keys:
Whether the incoming keys from the cores should be translated
to global keys rather than core-based keys
"""
# pylint: disable=too-many-arguments, too-many-locals, protected-access
# get default params if none set
if port is None:
port = get_config_int("Recording", "live_spike_port")
if host is None:
host = get_config_str("Recording", "live_spike_host")
# Use the right-shift to remove the colour from translated keys
n_colour_bits = population._vertex.n_colour_bits
translated_key_right_shift = n_colour_bits
# Use the mask to remove the colour from non-translated keys
received_key_mask = 0xFFFFFFFF & ~((2 ** n_colour_bits) - 1)
# pylint: disable=too-many-arguments, too-many-locals
params = LivePacketGatherParameters(
port=port, hostname=host, tag=tag, strip_sdp=strip_sdp,
use_prefix=use_prefix, key_prefix=key_prefix,
prefix_type=prefix_type, message_type=message_type,
right_shift=right_shift, payload_prefix=payload_prefix,
payload_as_time_stamps=payload_as_time_stamps,
use_payload_prefix=use_payload_prefix,
payload_right_shift=payload_right_shift,
number_of_packets_sent_per_time_step=(
number_of_packets_sent_per_time_step),
label="LiveSpikeReceiver", received_key_mask=received_key_mask,
translate_keys=translate_keys,
translated_key_right_shift=translated_key_right_shift)
SpynnakerExternalDevicePluginManager.update_live_packet_gather_tracker(
population._vertex, params, [SPIKE_PARTITION_ID])
if notify:
SpynnakerExternalDevicePluginManager.add_database_socket_address(
database_notify_host, database_notify_port_num,
database_ack_port_num)
@staticmethod
def activate_live_output_to(
population, device, partition_id=SPIKE_PARTITION_ID):
"""
Activate the output of spikes from a population to an external device.
.. note::
All spikes will be sent to the device.
:param ~spynnaker.pyNN.models.populations.Population population:
The pyNN population object from which spikes will be sent.
:param device:
The pyNN population or external device to which the spikes will be
sent.
:type device:
~spynnaker.pyNN.models.populations.Population or
~pacman.model.graphs.application.ApplicationVertex
:param str partition_id:
The partition ID to activate live output to.
"""
device_vertex = device
# pylint: disable=protected-access
if isinstance(device, Population):
device_vertex = device._vertex
SpynnakerExternalDevicePluginManager.add_edge(
population._vertex, device_vertex, partition_id)
@staticmethod
def update_live_packet_gather_tracker(
vertex_to_record_from, params, partition_ids):
"""
Add an edge from a vertex to the live packet gatherer, building
the gatherer as needed from the given parameters.
:param vertex_to_record_from:
:type vertex_to_record_from:
~pacman.model.graphs.application.ApplicationVertex or
~pacman.model.graphs.machine.MachineVertex
:param params:
:type params:
~spinn_front_end_common.utilities.utility_objs.LivePacketGatherParameters
:param list(str) partition_ids:
"""
# add to the tracker
SpynnakerDataView.add_live_packet_gatherer_parameters(
params, vertex_to_record_from, partition_ids)
@staticmethod
def add_poisson_live_rate_control(
poisson_population, control_label_extension="_control",
receive_port=None, database_notify_host=None,
database_notify_port_num=None,
database_ack_port_num=None, notify=True,
reserve_reverse_ip_tag=False):
"""
Add a live rate controller to a Poisson population.
:param poisson_population: The population to control
:type poisson_population:
~spynnaker.pyNN.models.populations.Population
:param str control_label_extension:
An extension to add to the label of the Poisson source. Must match
up with the equivalent in the SpynnakerPoissonControlConnection
:param int receive_port:
The port that the SpiNNaker board should listen on
:param str database_notify_host: the hostname for the device which is
listening to the database notification.
:param int database_ack_port_num: the port number on which an external
device will acknowledge that it has finished reading the
database and is ready for execution to start
:param int database_notify_port_num: the port number on which an
external device will receive the "database is ready" command
:param bool notify: adds to the notification protocol if set.
:param bool reserve_reverse_ip_tag: True if a reverse IP tag is to be
used, False if SDP is to be used (default)
"""
# pylint: disable=too-many-arguments, protected-access
vertex = poisson_population._vertex
control_label = f"{vertex.label}{control_label_extension}"
controller = ReverseIpTagMultiCastSource(
n_keys=vertex.n_atoms, label=control_label,
receive_port=receive_port,
reserve_reverse_ip_tag=reserve_reverse_ip_tag,
injection_partition_id=LIVE_POISSON_CONTROL_PARTITION_ID)
SpynnakerExternalDevicePluginManager.add_application_vertex(controller)
edge = SpynnakerExternalDevicePluginManager.add_edge(
controller, vertex, LIVE_POISSON_CONTROL_PARTITION_ID)
vertex.set_live_poisson_control_edge(edge)
if notify:
SpynnakerExternalDevicePluginManager.add_database_socket_address(
database_notify_host, database_notify_port_num,
database_ack_port_num)
@staticmethod
def add_edge(vertex, device_vertex, partition_id):
"""
Add an edge between two vertices (often a vertex and an external
device) on a given partition.
:param ~pacman.model.graphs.application.ApplicationVertex vertex:
the pre-population vertex to connect the edge from
:param device_vertex:
the post-population vertex to connect the edge to
:type device_vertex:
~pacman.model.graphs.application.ApplicationVertex
:param str partition_id: the partition identifier for making nets
"""
edge = ApplicationEdge(vertex, device_vertex)
SpynnakerDataView.add_edge(edge, partition_id)
return edge
@staticmethod
def add_application_vertex(vertex):
SpynnakerDataView.add_vertex(vertex)
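# A hedged usage sketch; the population, host, and port values below are
# illustrative:
#
# import pyNN.spiNNaker as sim # typical import; assumed here
# pop = sim.Population(100, sim.IF_curr_exp())
# SpynnakerExternalDevicePluginManager.activate_live_output_for(
# pop, host="192.168.240.2", port=19999)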
|
d28234b3261d54ba49cef50fa647c5201cabf719
|
60ba5cc2f817471dd0ff84a15996b46b1dbfa6ba
|
/park/utils/ordered_set.py
|
a7e4383bf7e7930e185a3eb1df2beedcdf2f8aa5
|
[
"MIT"
] |
permissive
|
park-project/park
|
dd15d27e5859fe421c878a90627716623892b6f9
|
08f8f7f0dea14e011af2d5ce2a72410084eb8713
|
refs/heads/master
| 2023-06-28T11:38:30.359938
| 2022-04-07T14:42:21
| 2022-04-07T14:42:21
| 184,142,889
| 216
| 55
|
MIT
| 2023-06-14T16:10:38
| 2019-04-29T20:55:25
|
Python
|
UTF-8
|
Python
| false
| false
| 741
|
py
|
ordered_set.py
|
from collections import OrderedDict
class OrderedSet(object):
def __init__(self, contents=()):
self.set = OrderedDict((c, None) for c in contents)
def __contains__(self, item):
return item in self.set
def __iter__(self):
return iter(self.set.keys())
def __len__(self):
return len(self.set)
def __reversed__(self):
return iter(reversed(self.set.keys()))
def add(self, item):
self.set[item] = None
def clear(self):
self.set.clear()
def pop(self):
item = next(iter(self.set))
del self.set[item]
return item
def remove(self, item):
del self.set[item]
def to_list(self):
return [k for k in self.set]
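# Example usage: insertion order is preserved and duplicates are ignored.
#
# s = OrderedSet([3, 1, 2])
# s.add(1) # already present; position unchanged
# s.add(4)
# s.to_list() # -> [3, 1, 2, 4]
# s.pop() # -> 3 (oldest item is popped first)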
|
ac3ad65cbd64e8d2833523d033126cc032ae6fb7
|
e061ab21018ac80573d03ef0c3cba8f448c4b7cc
|
/backend/tests/test_tree_manager_config.py
|
ae51775d96aa83fafd0ad434f6469794c4cb2d92
|
[
"Apache-2.0"
] |
permissive
|
LAION-AI/Open-Assistant
|
8b82c24fac954da421d66c3e90fbae6776ae6280
|
8c0e1a31bea1542dd39716b1dbbecd46785d9d23
|
refs/heads/main
| 2023-08-25T23:33:38.114219
| 2023-08-22T21:04:33
| 2023-08-22T21:04:33
| 577,603,990
| 34,014
| 3,206
|
Apache-2.0
| 2023-09-11T19:13:48
| 2022-12-13T05:24:17
|
Python
|
UTF-8
|
Python
| false
| false
| 181
|
py
|
test_tree_manager_config.py
|
from oasst_backend.config import TreeManagerConfiguration
def test_tree_manager_config():
"""
Just test that we can create a config
"""
TreeManagerConfiguration()
|
90065d70f6a2160d9b371b6f5646af47b8803798
|
64bac57486b832d8f322845bad0a27431fd6a686
|
/tests/types/channel.py
|
33cebcf098b2da8eb0dc114b5dde20e4c7ba48f2
|
[] |
no_license
|
b1naryth1ef/disco
|
291161348705a723937e870809f84944913e2339
|
7a2e86a2a3dc58e207d2c6e9a686a3296d652864
|
refs/heads/master
| 2023-08-19T10:37:02.579582
| 2020-10-27T20:52:46
| 2020-10-27T20:52:46
| 64,710,850
| 342
| 169
| null | 2020-11-16T14:00:19
| 2016-08-02T00:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 579
|
py
|
channel.py
|
from disco.types.channel import Channel, ChannelType
def test_deprecated_nsfw_channel():
channel = Channel(
name='nsfw-testing',
type=ChannelType.GUILD_TEXT)
assert channel.is_nsfw
channel = Channel(
name='nsfw-testing',
type=ChannelType.GUILD_VOICE)
assert not channel.is_nsfw
channel = Channel(
name='nsfw_testing',
type=ChannelType.GUILD_TEXT)
assert not channel.is_nsfw
def test_nsfw_channel():
channel = Channel(name='test', nsfw=True, type=ChannelType.GUILD_TEXT)
assert channel.is_nsfw
|
a3176bdfeeb9d054c08561e1a95a7f58b7e79191
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/KoubeiCateringBizMovehomeSyncModel.py
|
f304d1a0b7c6a377688608a95dee454749c28093
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
KoubeiCateringBizMovehomeSyncModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KcpLeadsInfo import KcpLeadsInfo
class KoubeiCateringBizMovehomeSyncModel(object):
def __init__(self):
self._leads_info_list = None
self._partner_id = None
@property
def leads_info_list(self):
return self._leads_info_list
@leads_info_list.setter
def leads_info_list(self, value):
if isinstance(value, list):
self._leads_info_list = list()
for i in value:
if isinstance(i, KcpLeadsInfo):
self._leads_info_list.append(i)
else:
self._leads_info_list.append(KcpLeadsInfo.from_alipay_dict(i))
@property
def partner_id(self):
return self._partner_id
@partner_id.setter
def partner_id(self, value):
self._partner_id = value
def to_alipay_dict(self):
params = dict()
if self.leads_info_list:
if isinstance(self.leads_info_list, list):
for i in range(0, len(self.leads_info_list)):
element = self.leads_info_list[i]
if hasattr(element, 'to_alipay_dict'):
self.leads_info_list[i] = element.to_alipay_dict()
if hasattr(self.leads_info_list, 'to_alipay_dict'):
params['leads_info_list'] = self.leads_info_list.to_alipay_dict()
else:
params['leads_info_list'] = self.leads_info_list
if self.partner_id:
if hasattr(self.partner_id, 'to_alipay_dict'):
params['partner_id'] = self.partner_id.to_alipay_dict()
else:
params['partner_id'] = self.partner_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiCateringBizMovehomeSyncModel()
if 'leads_info_list' in d:
o.leads_info_list = d['leads_info_list']
if 'partner_id' in d:
o.partner_id = d['partner_id']
return o
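# Example round trip; the field values below are illustrative:
#
# m = KoubeiCateringBizMovehomeSyncModel.from_alipay_dict(
# {"partner_id": "test_partner", "leads_info_list": []})
# m.partner_id # -> "test_partner"
# m.to_alipay_dict() # -> {"partner_id": "test_partner"} (empty list omitted)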
|
baabd781a51ace59a2560c6c7b6c1a571aaaa1af
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/third_party/incubator-tvm/tests/python/unittest/test_codegen_opencl.py
|
71fc4f9a7f350c64ef49f3139453a13593c65a60
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,744
|
py
|
test_codegen_opencl.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
target = 'opencl'
def test_opencl_ternary_expression():
def check_if_then_else(ctx, n, dtype):
A = tvm.placeholder((n,), name='A', dtype=dtype)
true_value = tvm.const(1, dtype=dtype)
false_value = tvm.const(3, dtype=dtype)
max_lhs = tvm.const(2, dtype=dtype)
max_rhs = tvm.if_then_else(A[0] > 0, true_value, false_value)
C = tvm.compute((n,), lambda i: tvm.max(max_lhs, max_rhs), name='C')
s = tvm.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], tvm.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, ctx)
c = tvm.nd.empty((n,), A.dtype, ctx)
# Only need to test compiling here
fun(a, c)
def check_select(ctx, n, dtype):
A = tvm.placeholder((n,), name='A', dtype=dtype)
true_value = tvm.const(1, dtype=dtype)
false_value = tvm.const(3, dtype=dtype)
max_lhs = tvm.const(2, dtype=dtype)
max_rhs = tvm.expr.Select(A[0] > 0, true_value, false_value)
C = tvm.compute((n,), lambda i: tvm.max(max_lhs, max_rhs), name='C')
s = tvm.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], tvm.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, ctx)
c = tvm.nd.empty((n,), A.dtype, ctx)
# Only need to test compiling here
fun(a, c)
if not tvm.module.enabled(target):
print("skip because opencl is not enabled..")
return
ctx = tvm.context(target, 0)
check_if_then_else(ctx, 1, 'int8')
check_if_then_else(ctx, 1, 'uint8')
check_if_then_else(ctx, 1, 'int16')
check_if_then_else(ctx, 1, 'uint16')
check_select(ctx, 1, 'int8')
check_select(ctx, 1, 'uint8')
check_select(ctx, 1, 'int16')
check_select(ctx, 1, 'uint16')
def test_opencl_inf_nan():
def check_inf_nan(ctx, n, value, dtype):
A = tvm.placeholder((n,), name='A', dtype=dtype)
inf_value = tvm.const(value, dtype=dtype)
C = tvm.compute((n,), lambda i: inf_value, name='C')
s = tvm.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], tvm.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, ctx)
c = tvm.nd.empty((n,), A.dtype, ctx)
# Only need to test compiling here
fun(a, c)
if not tvm.module.enabled(target):
print("skip because opencl is not enabled..")
return
ctx = tvm.context(target, 0)
check_inf_nan(ctx, 1, -float('inf'), 'float32')
check_inf_nan(ctx, 1, -float('inf'), 'float64')
check_inf_nan(ctx, 1, float('inf'), 'float32')
check_inf_nan(ctx, 1, float('inf'), 'float64')
check_inf_nan(ctx, 1, float('nan'), 'float32')
check_inf_nan(ctx, 1, float('nan'), 'float64')
if __name__ == "__main__":
test_opencl_ternary_expression()
test_opencl_inf_nan()
|
f71b66762f2479ac77d16775c2cbe8d6d68a0b6d
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/vm_impl/nn_ops_vm_impl.py
|
3c262605451f73e679663c313347729edb9c9238
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 10,681
|
py
|
nn_ops_vm_impl.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Generate vm_impl function for nn ops"""
import numpy as np
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops.vm_impl_registry import vm_impl_registry as vm_impl_getters
from .vm_interface import vm
# pylint: disable=unused-argument
@vm_impl_getters.register(P.ScalarSummary)
def vm_impl_scalar_summary(self):
"""Generate vm_impl function for ScalarSummary"""
def vm_impl(string_in, scalar):
"""Implement by vm mode."""
return scalar
return vm_impl
@vm_impl_getters.register(P.ReLU)
def vm_impl_relu(self):
"""Generate vm_impl function for ReLU"""
def vm_impl(x):
x = x.asnumpy()
output = Tensor(vm.relu(x))
return output
return vm_impl
@vm_impl_getters.register(P.Flatten)
def vm_impl_flatten(self):
"""Generate vm_impl function for Flatten"""
def vm_impl(x):
x = x.asnumpy()
return Tensor(vm.flatten_batch(x))
return vm_impl
@vm_impl_getters.register(P.Softmax)
def vm_impl_softmax(self):
"""Generate vm_impl function for Softmax"""
def vm_impl(x):
x = x.asnumpy()
return Tensor(vm.softmax(x))
return vm_impl
@vm_impl_getters.register(P.LogSoftmax)
def vm_impl_log_softmax(self):
"""Generate vm_impl function for LogSoftmax"""
def vm_impl(x):
x = x.asnumpy()
return Tensor(vm.logsoftmax(x))
return vm_impl
@vm_impl_getters.register(P.Tanh)
def vm_impl_tanh(self):
"""Generate vm_impl function for Tanh"""
def vm_impl(x):
x = x.asnumpy()
return Tensor(vm.tanh(x))
return vm_impl
@vm_impl_getters.register(P.BatchNorm)
def vm_impl_batch_norm(self):
"""Generate vm_impl function for BatchNorm"""
def vm_impl(x, scale, b, mean, variance):
# pylint: disable=unused-argument
x = x.asnumpy()
scale = scale.asnumpy()
b = b.asnumpy()
mean = mean.asnumpy()
variance = variance.asnumpy()
out, x_mean, x_var, running_mean, running_var = vm.batch_norm(x, scale, b, mean, \
variance, \
eps=self.epsilon)
return Tensor(out), Tensor(x_mean), Tensor(x_var), \
Tensor(running_mean), Tensor(running_var)
return vm_impl
@vm_impl_getters.register(P.Conv2D)
def vm_impl_conv2d(self):
"""Generate vm_impl function for Conv2D"""
def vm_impl(x, w):
x = x.asnumpy()
weight = w.asnumpy()
bias = None
out = vm.conv2d(x, weight, bias, self.stride, self.pad, self.dilation)
return Tensor(out)
return vm_impl
@vm_impl_getters.register(G.MaxPoolGradWithArgmax)
def vm_impl_max_pool_grad_with_argmax(self):
"""Generate vm_impl function for MaxPoolGradWithArgmax"""
def vm_impl(x, dout, argmax):
x = x.asnumpy()
dout = dout.asnumpy()
arg_max = argmax.asnumpy()
dx = vm.max_pool_grad_with_argmax(x, dout, arg_max,
self.kernel_size[1], self.kernel_size[2], self.strides[1])
return Tensor(dx)
return vm_impl
@vm_impl_getters.register(P.MaxPoolWithArgmax)
def vm_impl_max_pool_with_argmax(self):
"""Generate vm_impl function for MaxPoolWithArgmax"""
def vm_impl(x):
x = x.asnumpy()
out, out_argmax = vm.max_pool_with_argmax(x, self.kernel_size[1], self.kernel_size[2], self.strides[1])
return Tensor(out), Tensor(out_argmax)
return vm_impl
@vm_impl_getters.register(P.MaxPool)
def vm_impl_max_pool(self):
"""Generate vm_impl function for MaxPool"""
def vm_impl(x):
x = x.asnumpy()
out = vm.max_pooling(x, self.kernel_size[-2], self.kernel_size[-1], self.strides[-2])
return Tensor(out)
return vm_impl
@vm_impl_getters.register(G.MaxPoolGrad)
def vm_impl_max_pool_grad(self):
"""Generate vm_impl function for MaxPoolGrad"""
def vm_impl(x, out, dout):
x = x.asnumpy()
dout = dout.asnumpy()
out = vm.max_pool_grad(x, dout, self.kernel_size[-2], self.kernel_size[-1], self.strides[-2])
return Tensor(out)
return vm_impl
@vm_impl_getters.register(P.AvgPool)
def vm_impl_avg_pool(self):
"""Generate vm_impl function for AvgPool"""
def vm_impl(x):
x = x.asnumpy()
out = vm.avg_pooling(x, self.kernel_size[-2], self.kernel_size[-1], self.strides[-2])
return Tensor(out)
return vm_impl
@vm_impl_getters.register(G.AvgPoolGrad)
def vm_impl_avg_pool_grad(self):
"""Generate vm_impl function for AvgPoolGrad"""
def vm_impl(dout, origin_shape):
dout = dout.asnumpy()
out = vm.avg_pool_grad(dout, origin_shape, self.kernel_size[-2], self.kernel_size[-1], self.strides[-2])
return Tensor(out)
return vm_impl
# pylint: disable=function-redefined
@vm_impl_getters.register(G.BatchNormGrad)
def vm_impl_fused_batch_norm_grad(self):
"""Generate vm_impl function for BatchNormGrad"""
def vm_impl(dy, x, scale, save_mean, save_inv_variance):
dy = dy.asnumpy()
x = x.asnumpy()
scale = scale.asnumpy()
save_mean = save_mean.asnumpy()
save_inv_variance = save_inv_variance.asnumpy()
dx, dscale, dshift = vm.batch_norm_grad(dy, x, scale, save_mean, save_inv_variance)
return (Tensor(dx), Tensor(dscale), Tensor(dshift))
return vm_impl
@vm_impl_getters.register(G.ReluGrad)
def vm_impl_relu_grad(self):
"""Generate vm_impl function for ReluGrad"""
def vm_impl(y_backprop, x):
x = x.asnumpy()
y_backprop = y_backprop.asnumpy()
y_backprop = vm.relu_grad(x.copy()) * y_backprop
return Tensor(y_backprop)
return vm_impl
@vm_impl_getters.register(P.Conv2DBackpropInput)
def vm_impl_conv2d_backprop_input(self):
"""Generate vm_impl function for Conv2DBackpropInput"""
def vm_impl(dout, w, x_size):
dout = dout.asnumpy()
w = w.asnumpy()
dx = vm.conv2d_backprop_input(dout, x_size, w, self.stride, self.pad)
return Tensor(dx)
return vm_impl
@vm_impl_getters.register(G.Conv2DBackpropFilter)
def vm_impl_conv2d_backprop_filter(self):
"""Generate vm_impl function for Conv2DBackpropFilter"""
def vm_impl(dout, x, w_size):
x = x.asnumpy()
dout = dout.asnumpy()
dw = vm.conv2d_backprop_filter(dout, x, w_size, self.stride, self.pad)
return Tensor(dw)
return vm_impl
@vm_impl_getters.register(G.FlattenGrad)
def vm_impl_flatten_grad(self):
"""Generate vm_impl function for FlattenGrad"""
def vm_impl(dout, x):
dout = dout.asnumpy()
dout = vm.flatten_grad(dout, x)
return Tensor(dout)
return vm_impl
@vm_impl_getters.register(P.BiasAdd)
def vm_impl_bias_add(self):
"""Generate vm_impl function for BiasAdd"""
def vm_impl(wx, bias):
wx = wx.asnumpy()
bias = bias.asnumpy()
out = wx + bias
return Tensor(out)
return vm_impl
@vm_impl_getters.register(G.BiasAddGrad)
def vm_impl_bias_add_grad(self):
"""Generate vm_impl function for BiasAddGrad"""
def vm_impl(dout):
dout = dout.asnumpy()
shape = np.shape(dout)
return Tensor(np.add.reduce(dout, axis=tuple(range(len(shape) - 1))))
return vm_impl
@vm_impl_getters.register(P.SoftmaxCrossEntropyWithLogits)
def vm_impl_softmax_cross_entropy_with_logits(self):
"""Generate vm_impl function for SoftmaxCrossEntropyWithLogits"""
def vm_impl(logits, labels):
logits = logits.asnumpy()
labels = labels.asnumpy()
loss, dx = vm.softmax_cross_entropy_with_logits(logits, labels)
return (Tensor(np.array(loss)), Tensor(dx))
return vm_impl
@vm_impl_getters.register(P.SparseSoftmaxCrossEntropyWithLogits)
def vm_impl_sparse_softmax_cross_entropy_with_logits(self):
"""Generate vm_impl function for SparseSoftmaxCrossEntropyWithLogits"""
def vm_impl(logits, labels):
logits = logits.asnumpy()
labels = labels.asnumpy()
n_class = labels.max() + 1
n_sample = labels.shape[0]
one_hot_label = np.zeros((n_sample, n_class)) # n_sample samples, n_class classes
one_hot_label[np.arange(n_sample), labels] = 1 # set each sample's label column to 1
loss, dx = vm.softmax_cross_entropy_with_logits(logits, one_hot_label)
if self.is_grad:
return (Tensor(dx),)
return (Tensor(np.array(loss)),)
return vm_impl
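# Quick sanity check of the one-hot construction above, in plain NumPy:
#
# labels = np.array([0, 2, 1])
# one_hot = np.zeros((3, 3))
# one_hot[np.arange(3), labels] = 1
# # one_hot -> [[1, 0, 0], [0, 0, 1], [0, 1, 0]]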
@vm_impl_getters.register(P.ApplyMomentum)
def vm_impl_momentum(self):
"""Generate vm_impl function for Momentum"""
def vm_impl(variable,
accumulation,
learning_rate,
gradient,
momentum,
use_nesterov=False):
gradient = gradient.asnumpy()
accumulation = accumulation.asnumpy()
variable = variable.asnumpy()
shape = accumulation.shape
learning_rate = np.full(shape, learning_rate.asnumpy())
momentum = np.full(shape, momentum.asnumpy())
accumulation = accumulation * momentum + gradient
if use_nesterov:
variable -= gradient * learning_rate + accumulation * momentum * learning_rate
else:
variable -= accumulation * learning_rate
return Tensor(variable)
return vm_impl
@vm_impl_getters.register(P.ResizeBilinear)
def vm_impl_resize_bilinear(self):
"""Generate vm_impl function for ResizeBilinear"""
def vm_impl(x):
out = vm.ResizeBilinear(x)
return Tensor(out)
return vm_impl
@vm_impl_getters.register(G.ResizeBilinearGrad)
def vm_impl_resize_bilinear_grad(self):
"""Generate vm_impl function for ResizeBilinearGrad"""
def vm_impl(dout, original_image):
out = vm.ResizeBilinearGrad(dout, original_image)
return Tensor(out)
return vm_impl
|
b293dbb0d1ed788464c874255b8ef7bb4acba220
|
f324cd2cbebd303fd34cd2e26fe1a51c44202d55
|
/vint/linting/policy/prohibit_invalid_map_call.py
|
841e2d69d1d9a1bc5b88b9d1ddef9aa7d8da004b
|
[
"MIT"
] |
permissive
|
Vimjas/vint
|
d71579154d177daf458ec68423a66055f90fa308
|
e12091830f0ae7311066b9d1417951182fb32eb5
|
refs/heads/master
| 2023-09-02T07:31:31.299270
| 2022-10-24T13:06:33
| 2022-10-24T13:06:33
| 20,857,415
| 191
| 11
|
MIT
| 2022-10-24T13:10:00
| 2014-06-15T14:38:32
|
Python
|
UTF-8
|
Python
| false
| false
| 877
|
py
|
prohibit_invalid_map_call.py
|
from vint.ast.node_type import NodeType
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy_registry import register_policy
@register_policy
class ProhibitInvalidMapCall(AbstractPolicy):
def __init__(self):
super(ProhibitInvalidMapCall, self).__init__()
self.description = 'Number of arguments for map() must be 2 (if not, it will throw E118 or E119)'
self.reference = ':help map()'
self.level = Level.ERROR
def listen_node_types(self):
return [NodeType.CALL]
def is_valid(self, node, lint_context):
left_node = node['left']
if NodeType(left_node['type']) != NodeType.IDENTIFIER:
return True
if left_node['value'] != 'map':
return True
args = node['rlist']
return len(args) == 2
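# Examples of Vim script this policy accepts and rejects:
#
# call map(mylist, 'v:val + 1') " valid: exactly 2 arguments
# call map(mylist) " invalid: E119 (not enough arguments)
# call map(mylist, 'v:val', 1) " invalid: E118 (too many arguments)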
|
82f1b286fbba2300d751191f9c42908692cf48fd
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/py/py/_io/terminalwriter.py
|
442ca2395e0a15e8cddf2da9565d02b2c807faaf
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 14,714
|
py
|
terminalwriter.py
|
"""
Helper functions for writing to terminals and files.
"""
import sys, os, unicodedata
import py
py3k = sys.version_info[0] >= 3
py33 = sys.version_info >= (3, 3)
from py.builtin import text, bytes
win32_and_ctypes = False
colorama = None
if sys.platform == "win32":
try:
import colorama
except ImportError:
try:
import ctypes
win32_and_ctypes = True
except ImportError:
pass
def _getdimensions():
if py33:
import shutil
size = shutil.get_terminal_size()
return size.lines, size.columns
else:
import termios, fcntl, struct
call = fcntl.ioctl(1, termios.TIOCGWINSZ, "\000" * 8)
height, width = struct.unpack("hhhh", call)[:2]
return height, width
def get_terminal_width():
width = 0
try:
_, width = _getdimensions()
except py.builtin._sysex:
raise
except:
# pass to fallback below
pass
if width == 0:
# FALLBACK:
# * some exception happened
# * or this is emacs terminal which reports (0,0)
width = int(os.environ.get('COLUMNS', 80))
# XXX the windows getdimensions may be bogus, let's sanity-check a bit
if width < 40:
width = 80
return width
terminal_width = get_terminal_width()
char_width = {
'A': 1, # "Ambiguous"
'F': 2, # Fullwidth
'H': 1, # Halfwidth
'N': 1, # Neutral
'Na': 1, # Narrow
'W': 2, # Wide
}
def get_line_width(text):
text = unicodedata.normalize('NFC', text)
return sum(char_width.get(unicodedata.east_asian_width(c), 1) for c in text)
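# Example: East Asian fullwidth characters count as two columns.
#
# get_line_width("abc") # -> 3
# get_line_width(u"\u3042bc") # -> 4 (HIRAGANA LETTER A is fullwidth)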
# XXX unify with _escaped func below
def ansi_print(text, esc, file=None, newline=True, flush=False):
if file is None:
file = sys.stderr
text = text.rstrip()
if esc and not isinstance(esc, tuple):
esc = (esc,)
if esc and sys.platform != "win32" and file.isatty():
text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
text +
'\x1b[0m') # ANSI color code "reset"
if newline:
text += '\n'
if esc and win32_and_ctypes and file.isatty():
if 1 in esc:
bold = True
esc = tuple([x for x in esc if x != 1])
else:
bold = False
esctable = {() : FOREGROUND_WHITE, # normal
(31,): FOREGROUND_RED, # red
(32,): FOREGROUND_GREEN, # green
(33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow
(34,): FOREGROUND_BLUE, # blue
(35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple
(36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan
(37,): FOREGROUND_WHITE, # white
(39,): FOREGROUND_WHITE, # reset
}
attr = esctable.get(esc, FOREGROUND_WHITE)
if bold:
attr |= FOREGROUND_INTENSITY
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
if file is sys.stderr:
handle = GetStdHandle(STD_ERROR_HANDLE)
else:
handle = GetStdHandle(STD_OUTPUT_HANDLE)
oldcolors = GetConsoleInfo(handle).wAttributes
attr |= (oldcolors & 0x0f0)
SetConsoleTextAttribute(handle, attr)
while len(text) > 32768:
file.write(text[:32768])
text = text[32768:]
if text:
file.write(text)
SetConsoleTextAttribute(handle, oldcolors)
else:
file.write(text)
if flush:
file.flush()
def should_do_markup(file):
if os.environ.get('PY_COLORS') == '1':
return True
if os.environ.get('PY_COLORS') == '0':
return False
if 'NO_COLOR' in os.environ:
return False
return hasattr(file, 'isatty') and file.isatty() \
and os.environ.get('TERM') != 'dumb' \
and not (sys.platform.startswith('java') and os._name == 'nt')
class TerminalWriter(object):
_esctable = dict(black=30, red=31, green=32, yellow=33,
blue=34, purple=35, cyan=36, white=37,
Black=40, Red=41, Green=42, Yellow=43,
Blue=44, Purple=45, Cyan=46, White=47,
bold=1, light=2, blink=5, invert=7)
# XXX deprecate stringio argument
def __init__(self, file=None, stringio=False, encoding=None):
if file is None:
if stringio:
self.stringio = file = py.io.TextIO()
else:
from sys import stdout as file
elif py.builtin.callable(file) and not (
hasattr(file, "write") and hasattr(file, "flush")):
file = WriteFile(file, encoding=encoding)
if hasattr(file, "isatty") and file.isatty() and colorama:
file = colorama.AnsiToWin32(file).stream
self.encoding = encoding or getattr(file, 'encoding', "utf-8")
self._file = file
self.hasmarkup = should_do_markup(file)
self._lastlen = 0
self._chars_on_current_line = 0
self._width_of_current_line = 0
@property
def fullwidth(self):
if hasattr(self, '_terminal_width'):
return self._terminal_width
return get_terminal_width()
@fullwidth.setter
def fullwidth(self, value):
self._terminal_width = value
@property
def chars_on_current_line(self):
"""Return the number of characters written so far in the current line.
Please note that this count does not produce correct results after a reline() call,
see #164.
.. versionadded:: 1.5.0
:rtype: int
"""
return self._chars_on_current_line
@property
def width_of_current_line(self):
"""Return an estimate of the width so far in the current line.
.. versionadded:: 1.6.0
:rtype: int
"""
return self._width_of_current_line
def _escaped(self, text, esc):
if esc and self.hasmarkup:
text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
text +'\x1b[0m')
return text
def markup(self, text, **kw):
esc = []
for name in kw:
if name not in self._esctable:
raise ValueError("unknown markup: %r" %(name,))
if kw[name]:
esc.append(self._esctable[name])
return self._escaped(text, tuple(esc))
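# Example (assuming tw = TerminalWriter() with markup enabled; escape codes
# follow keyword order):
#
# tw.markup("FAILED", red=True, bold=True)
# # -> '\x1b[31m\x1b[1mFAILED\x1b[0m'; unknown names raise ValueError,
# # and the text is returned unchanged when markup is disabled.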
def sep(self, sepchar, title=None, fullwidth=None, **kw):
if fullwidth is None:
fullwidth = self.fullwidth
# the goal is to have the line be as long as possible
# under the condition that len(line) <= fullwidth
if sys.platform == "win32":
# if we print in the last column on windows we are on a
# new line but there is no way to verify/neutralize this
# (we may not know the exact line width)
# so let's be defensive to avoid empty lines in the output
fullwidth -= 1
if title is not None:
# we want 2 + 2*len(fill) + len(title) <= fullwidth
# i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth
# 2*len(sepchar)*N <= fullwidth - len(title) - 2
# N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
N = max((fullwidth - len(title) - 2) // (2*len(sepchar)), 1)
fill = sepchar * N
line = "%s %s %s" % (fill, title, fill)
else:
# we want len(sepchar)*N <= fullwidth
# i.e. N <= fullwidth // len(sepchar)
line = sepchar * (fullwidth // len(sepchar))
# in some situations there is room for an extra sepchar at the right,
# in particular if we consider that with a sepchar like "_ " the
# trailing space is not important at the end of the line
if len(line) + len(sepchar.rstrip()) <= fullwidth:
line += sepchar.rstrip()
self.line(line, **kw)
def write(self, msg, **kw):
if msg:
if not isinstance(msg, (bytes, text)):
msg = text(msg)
self._update_chars_on_current_line(msg)
if self.hasmarkup and kw:
markupmsg = self.markup(msg, **kw)
else:
markupmsg = msg
write_out(self._file, markupmsg)
def _update_chars_on_current_line(self, text_or_bytes):
newline = b'\n' if isinstance(text_or_bytes, bytes) else '\n'
current_line = text_or_bytes.rsplit(newline, 1)[-1]
if isinstance(current_line, bytes):
current_line = current_line.decode('utf-8', errors='replace')
if newline in text_or_bytes:
self._chars_on_current_line = len(current_line)
self._width_of_current_line = get_line_width(current_line)
else:
self._chars_on_current_line += len(current_line)
self._width_of_current_line += get_line_width(current_line)
def line(self, s='', **kw):
self.write(s, **kw)
self._checkfill(s)
self.write('\n')
def reline(self, line, **kw):
if not self.hasmarkup:
raise ValueError("cannot use rewrite-line without terminal")
self.write(line, **kw)
self._checkfill(line)
self.write('\r')
self._lastlen = len(line)
def _checkfill(self, line):
diff2last = self._lastlen - len(line)
if diff2last > 0:
self.write(" " * diff2last)
class Win32ConsoleWriter(TerminalWriter):
def write(self, msg, **kw):
if msg:
if not isinstance(msg, (bytes, text)):
msg = text(msg)
self._update_chars_on_current_line(msg)
oldcolors = None
if self.hasmarkup and kw:
handle = GetStdHandle(STD_OUTPUT_HANDLE)
oldcolors = GetConsoleInfo(handle).wAttributes
default_bg = oldcolors & 0x00F0
attr = default_bg
if kw.pop('bold', False):
attr |= FOREGROUND_INTENSITY
if kw.pop('red', False):
attr |= FOREGROUND_RED
elif kw.pop('blue', False):
attr |= FOREGROUND_BLUE
elif kw.pop('green', False):
attr |= FOREGROUND_GREEN
elif kw.pop('yellow', False):
attr |= FOREGROUND_GREEN|FOREGROUND_RED
else:
attr |= oldcolors & 0x0007
SetConsoleTextAttribute(handle, attr)
write_out(self._file, msg)
if oldcolors:
SetConsoleTextAttribute(handle, oldcolors)
class WriteFile(object):
def __init__(self, writemethod, encoding=None):
self.encoding = encoding
self._writemethod = writemethod
def write(self, data):
if self.encoding:
data = data.encode(self.encoding, "replace")
self._writemethod(data)
def flush(self):
return
if win32_and_ctypes:
TerminalWriter = Win32ConsoleWriter
import ctypes
from ctypes import wintypes
# ctypes access to the Windows console
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLACK = 0x0000 # black text
FOREGROUND_BLUE = 0x0001 # text color contains blue.
FOREGROUND_GREEN = 0x0002 # text color contains green.
FOREGROUND_RED = 0x0004 # text color contains red.
FOREGROUND_WHITE = 0x0007
FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
BACKGROUND_BLACK = 0x0000 # background color black
BACKGROUND_BLUE = 0x0010 # background color contains blue.
BACKGROUND_GREEN = 0x0020 # background color contains green.
BACKGROUND_RED = 0x0040 # background color contains red.
BACKGROUND_WHITE = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
SHORT = ctypes.c_short
class COORD(ctypes.Structure):
_fields_ = [('X', SHORT),
('Y', SHORT)]
class SMALL_RECT(ctypes.Structure):
_fields_ = [('Left', SHORT),
('Top', SHORT),
('Right', SHORT),
('Bottom', SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [('dwSize', COORD),
('dwCursorPosition', COORD),
('wAttributes', wintypes.WORD),
('srWindow', SMALL_RECT),
('dwMaximumWindowSize', COORD)]
_GetStdHandle = ctypes.windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [wintypes.DWORD]
_GetStdHandle.restype = wintypes.HANDLE
def GetStdHandle(kind):
return _GetStdHandle(kind)
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
SetConsoleTextAttribute.restype = wintypes.BOOL
_GetConsoleScreenBufferInfo = \
ctypes.windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE,
ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
def GetConsoleInfo(handle):
info = CONSOLE_SCREEN_BUFFER_INFO()
_GetConsoleScreenBufferInfo(handle, ctypes.byref(info))
return info
def _getdimensions():
handle = GetStdHandle(STD_OUTPUT_HANDLE)
info = GetConsoleInfo(handle)
# Subtract one from the width, otherwise the cursor wraps
# and the ending \n causes an empty line to display.
return info.dwSize.Y, info.dwSize.X - 1
def write_out(fil, msg):
# XXX sometimes "msg" is of type bytes, sometimes text which
# complicates the situation. Should we try to enforce unicode?
try:
# on py27 and above writing out to sys.stdout with an encoding
# should usually work for unicode messages (if the encoding is
# capable of it)
fil.write(msg)
except UnicodeEncodeError:
# on py26 it might not work because stdout expects bytes
if fil.encoding:
try:
fil.write(msg.encode(fil.encoding))
except UnicodeEncodeError:
# it might still fail if the encoding is not capable
pass
else:
fil.flush()
return
# fallback: escape all unicode characters
msg = msg.encode("unicode-escape").decode("ascii")
fil.write(msg)
fil.flush()
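# A short usage sketch of the public API:
#
# tw = py.io.TerminalWriter()
# tw.sep("=", "session starts") # a full-width "=== session starts ===" rule
# tw.line("collected 3 items", bold=True)
# tw.write("PASSED ", green=True)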
|
21df02922e76b9bb18f2acb224ae93f097829660
|
854b94d7be92582bd191a7cb63143a95e5b5c337
|
/hyfetch/distros/glaucus.py
|
26447f33053782ece2c4c2dcd2407acd8f78b572
|
[
"MIT"
] |
permissive
|
hykilpikonna/hyfetch
|
673c0c999d0f3f542349824495ad6004f450ebac
|
98863df16d70b030696f4b94080d114396320f35
|
refs/heads/master
| 2023-08-17T10:41:10.289997
| 2023-08-17T03:37:23
| 2023-08-17T03:37:23
| 479,913,941
| 447
| 78
|
MIT
| 2023-09-14T14:39:18
| 2022-04-10T04:38:15
|
Shell
|
UTF-8
|
Python
| false
| false
| 580
|
py
|
glaucus.py
|
# This file is automatically generated. Please do not modify.
from . import AsciiArt
glaucus = AsciiArt(match=r'''"glaucus"*''', color='5', ascii=r"""
${c1} ,, ,d88P
,d8P ,ad8888*
,888P d88888* ,,ad8888P*
d d888P a88888P* ,ad8888888*
.d8 d8888: d888888* ,d888888P*
.888; 88888b d8888888b8888888P
d8888J888888a88888888888888P* ,d
88888888888888888888888888P ,,d8*
888888888888888888888888888888888*
*8888888888888888888888888888888*
Y888888888P* `*``*888888888888*
*^888^* *Y888P**
""")
|
09311e18e7d4d6f6f8b03ed8a3e58350a34f15c7
|
827b0c8c48407a1c4a4f89bacd4afcbe1be1dc83
|
/tests/unit/utils/test_populate.py
|
8865076cf69580b39184d9392c09d985669aedd2
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
flaskbb/flaskbb
|
154ffa6476d094594c77f2fca28447b264b34779
|
bc999f1b9baf129dc06126940880a01ac94ba405
|
refs/heads/master
| 2023-08-22T19:47:34.877376
| 2023-03-13T06:51:20
| 2023-03-13T06:51:20
| 12,751,570
| 1,443
| 448
|
NOASSERTION
| 2023-07-26T22:36:21
| 2013-09-11T08:39:17
|
Python
|
UTF-8
|
Python
| false
| false
| 6,052
|
py
|
test_populate.py
|
import pytest
from sqlalchemy.exc import OperationalError
from sqlalchemy_utils.functions import create_database, drop_database
from flaskbb.extensions import alembic, db
from flaskbb.utils.populate import delete_settings_from_fixture, \
create_settings_from_fixture, update_settings_from_fixture, \
create_default_groups, create_test_data, insert_bulk_data, \
create_welcome_forum, create_user
from flaskbb.fixtures.groups import fixture as group_fixture
from flaskbb.fixtures.settings import fixture as settings_fixture
from flaskbb.user.models import Group, User
from flaskbb.forum.models import Category, Topic, Post
from flaskbb.management.models import Setting, SettingsGroup
def _individual_settings(update_result):
"""Helper that returns the number of settings that were updated."""
return sum(
len(settings_in_a_group)
for settings_in_a_group in update_result.values()
)
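# For illustration, with a hypothetical update result of two groups:
#   _individual_settings({"general": ["a", "b"], "auth": ["c"]}) == 3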
def test_delete_settings_from_fixture(default_settings):
groups_count = SettingsGroup.query.count()
assert len(settings_fixture) == groups_count
deleted = delete_settings_from_fixture(settings_fixture)
assert len(settings_fixture) == len(deleted)
assert not SettingsGroup.query.count()
assert not Setting.query.count()
def test_create_settings_from_fixture(database):
assert not SettingsGroup.query.count()
assert not Setting.query.count()
created = create_settings_from_fixture(settings_fixture)
assert len(settings_fixture) == len(created)
assert SettingsGroup.query.count() == len(created)
def test_update_settings_from_fixture(database):
settings_fixture_group_count = len(settings_fixture)
settings_fixture_setting_count = sum(
len(settings_fixture[k][1]['settings'])
for k in range(len(settings_fixture))
)
assert not SettingsGroup.query.count()
assert not Setting.query.count()
# No force-overwrite - the fixtures will be created because they
# do not exist.
updated = update_settings_from_fixture(settings_fixture)
assert settings_fixture_group_count == len(updated)
assert settings_fixture_group_count == SettingsGroup.query.count()
assert settings_fixture_setting_count == _individual_settings(updated)
assert settings_fixture_setting_count == Setting.query.count()
def test_update_settings_from_fixture_overwrite(database, default_settings,
updated_fixture):
# should add groups: testgroup
# should add testgroup/monty_python, general/test_fixture
pre_update_group_count = SettingsGroup.query.count()
pre_update_setting_count = Setting.query.count()
updated = update_settings_from_fixture(updated_fixture)
assert len(updated) == 2
assert _individual_settings(updated) == 2
assert pre_update_group_count + 1 == SettingsGroup.query.count()
assert pre_update_setting_count + 2 == Setting.query.count()
def test_update_settings_from_fixture_force(database, default_settings,
updated_fixture):
# force-overwrite - nothing changed so nothing should happen here
pre_update_group_count = SettingsGroup.query.count()
pre_update_setting_count = Setting.query.count()
force_updated = update_settings_from_fixture(settings_fixture,
overwrite_group=True,
overwrite_setting=True)
assert len(force_updated) == 0
assert _individual_settings(force_updated) == 0
assert pre_update_group_count == SettingsGroup.query.count()
assert pre_update_setting_count == Setting.query.count()
# should update groups: general
# should update settings: 2 in general, 1 in testgroup
force_updated_1 = update_settings_from_fixture(updated_fixture,
overwrite_group=True,
overwrite_setting=True)
assert len(force_updated_1) == 2
assert _individual_settings(force_updated_1) == 3
assert pre_update_group_count + 1 == SettingsGroup.query.count()
assert pre_update_setting_count + 2 == Setting.query.count()
def test_create_user(default_groups):
user = User.query.filter_by(username="admin").first()
assert not user
user = create_user(username="admin", password="test",
email="test@example.org", groupname="admin")
assert user.username == "admin"
assert user.permissions["admin"]
def test_create_welcome_forum(default_groups):
assert not create_welcome_forum()
create_user(username="admin", password="test",
email="test@example.org", groupname="admin")
assert create_welcome_forum()
def test_create_test_data(database):
assert Category.query.count() == 0
data_created = create_test_data()
assert Category.query.count() == data_created['categories']
def test_insert_bulk_data(database):
assert not insert_bulk_data(topic_count=1, post_count=1)
create_test_data(categories=1, forums=1, topics=0)
assert Topic.query.count() == 0
topics, posts = insert_bulk_data(topic_count=1, post_count=1)
assert Topic.query.count() == topics
# -1 bc the topic post also counts as post
assert Post.query.count() - 1 == posts
def test_create_default_groups(database):
"""Test that the default groups are created correctly."""
assert Group.query.count() == 0
create_default_groups()
assert Group.query.count() == len(group_fixture)
for key, attributes in group_fixture.items():
group = Group.query.filter_by(name=key).first()
for attribute, value in attributes.items():
assert getattr(group, attribute) == value
def test_migrations_upgrade():
with pytest.raises(OperationalError):
User.query.all()
# ensure that the database is created
create_database(db.engine.url)
alembic.upgrade()
assert len(User.query.all()) == 0
drop_database(db.engine.url)
|
687b2ba8667fcf20d8ab43c60c3627d5ae3f8209
|
bdfd36be0da37a335cabefd41db9df44c31f3b42
|
/scripts/test_vistas_single_gpu.py
|
466ff642687b497aa2dfb4bb22480094f8acf041
|
[
"BSD-3-Clause",
"HPND",
"CC-BY-NC-SA-4.0"
] |
permissive
|
mapillary/inplace_abn
|
54435a7b688da075e37d36d4c2fc09df43a9983d
|
d7dd3e1f22164083734ad6ac42365a4cc99a5051
|
refs/heads/main
| 2023-08-19T19:46:05.503129
| 2023-01-03T10:31:21
| 2023-01-03T10:31:21
| 111,797,719
| 1,377
| 216
|
BSD-3-Clause
| 2023-07-17T08:13:53
| 2017-11-23T10:47:45
|
Python
|
UTF-8
|
Python
| false
| false
| 10,677
|
py
|
test_vistas_single_gpu.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
from functools import partial
from os import path
import models
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as functional
from dataset.dataset import SegmentationDataset, segmentation_collate
from dataset.transform import SegmentationTransform
from inplace_abn import InPlaceABN
from modules import DeeplabV3
from PIL import Image, ImagePalette
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
parser = argparse.ArgumentParser(
description="Testing script for the Vistas segmentation model"
)
parser.add_argument(
"--scales", metavar="LIST", type=str, default="[0.7, 1, 1.2]", help="List of scales"
)
parser.add_argument("--flip", action="store_true", help="Use horizontal flipping")
parser.add_argument(
"--fusion-mode",
metavar="NAME",
type=str,
choices=["mean", "voting", "max"],
default="mean",
help="How to fuse the outputs. Options: 'mean', 'voting', 'max'",
)
parser.add_argument(
"--output-mode",
metavar="NAME",
type=str,
choices=["palette", "raw", "prob"],
default="final",
help="How the output files are formatted."
" -- palette: color coded predictions"
" -- raw: gray-scale predictions"
" -- prob: gray-scale predictions plus probabilities",
)
parser.add_argument(
"snapshot", metavar="SNAPSHOT_FILE", type=str, help="Snapshot file to load"
)
parser.add_argument("data", metavar="IN_DIR", type=str, help="Path to dataset")
parser.add_argument("output", metavar="OUT_DIR", type=str, help="Path to output folder")
parser.add_argument(
"--world-size", metavar="WS", type=int, default=1, help="Number of GPUs"
)
parser.add_argument("--rank", metavar="RANK", type=int, default=0, help="GPU id")
def flip(x, dim):
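    # Reverse `x` along dimension `dim` via advanced indexing (equivalent to
    # torch.flip); e.g. flip(img, -1) on an NCHW batch flips horizontally.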
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(
x.size(dim) - 1, -1, -1, dtype=torch.long, device=x.device
)
return x[tuple(indices)]
class SegmentationModule(nn.Module):
_IGNORE_INDEX = 255
class _MeanFusion:
def __init__(self, x, classes):
self.buffer = x.new_zeros(x.size(0), classes, x.size(2), x.size(3))
self.counter = 0
def update(self, sem_logits):
probs = functional.softmax(sem_logits, dim=1)
self.counter += 1
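            # running mean: m_n = m_{n-1} + (p_n - m_{n-1}) / n, so buffer
            # always holds the average of the probability maps seen so far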
self.buffer.add_((probs - self.buffer) / self.counter)
def output(self):
probs, cls = self.buffer.max(1)
return probs, cls
class _VotingFusion:
def __init__(self, x, classes):
self.votes = x.new_zeros(x.size(0), classes, x.size(2), x.size(3))
self.probs = x.new_zeros(x.size(0), classes, x.size(2), x.size(3))
def update(self, sem_logits):
probs = functional.softmax(sem_logits, dim=1)
probs, cls = probs.max(1, keepdim=True)
self.votes.scatter_add_(1, cls, self.votes.new_ones(cls.size()))
self.probs.scatter_add_(1, cls, probs)
def output(self):
cls, idx = self.votes.max(1, keepdim=True)
probs = self.probs / self.votes.clamp(min=1)
probs = probs.gather(1, idx)
return probs.squeeze(1), cls.squeeze(1)
class _MaxFusion:
def __init__(self, x, _):
self.buffer_cls = x.new_zeros(
x.size(0), x.size(2), x.size(3), dtype=torch.long
)
self.buffer_prob = x.new_zeros(x.size(0), x.size(2), x.size(3))
def update(self, sem_logits):
probs = functional.softmax(sem_logits, dim=1)
max_prob, max_cls = probs.max(1)
replace_idx = max_prob > self.buffer_prob
self.buffer_cls[replace_idx] = max_cls[replace_idx]
self.buffer_prob[replace_idx] = max_prob[replace_idx]
def output(self):
return self.buffer_prob, self.buffer_cls
def __init__(self, body, head, head_channels, classes, fusion_mode="mean"):
super(SegmentationModule, self).__init__()
self.body = body
self.head = head
self.cls = nn.Conv2d(head_channels, classes, 1)
self.classes = classes
if fusion_mode == "mean":
self.fusion_cls = SegmentationModule._MeanFusion
elif fusion_mode == "voting":
self.fusion_cls = SegmentationModule._VotingFusion
elif fusion_mode == "max":
self.fusion_cls = SegmentationModule._MaxFusion
def _network(self, x, scale):
if scale != 1:
scaled_size = [round(s * scale) for s in x.shape[-2:]]
x_up = functional.upsample(x, size=scaled_size, mode="bilinear")
else:
x_up = x
x_up = self.body(x_up)
x_up = self.head(x_up)
sem_logits = self.cls(x_up)
del x_up
return sem_logits
def forward(self, x, scales, do_flip=True):
out_size = x.shape[-2:]
fusion = self.fusion_cls(x, self.classes)
for scale in scales:
# Main orientation
sem_logits = self._network(x, scale)
sem_logits = functional.upsample(sem_logits, size=out_size, mode="bilinear")
fusion.update(sem_logits)
# Flipped orientation
if do_flip:
# Main orientation
sem_logits = self._network(flip(x, -1), scale)
sem_logits = functional.upsample(
sem_logits, size=out_size, mode="bilinear"
)
fusion.update(flip(sem_logits, -1))
return fusion.output()
def main():
# Load configuration
args = parser.parse_args()
# Torch stuff
torch.cuda.set_device(args.rank)
cudnn.benchmark = True
# Create model by loading a snapshot
body, head, cls_state = load_snapshot(args.snapshot)
model = SegmentationModule(body, head, 256, 65, args.fusion_mode)
model.cls.load_state_dict(cls_state)
model = model.cuda().eval()
print(model)
# Create data loader
transformation = SegmentationTransform(
2048,
(0.41738699, 0.45732192, 0.46886091),
(0.25685097, 0.26509955, 0.29067996),
)
dataset = SegmentationDataset(args.data, transformation)
data_loader = DataLoader(
dataset,
batch_size=1,
pin_memory=True,
sampler=DistributedSampler(dataset, args.world_size, args.rank),
num_workers=2,
collate_fn=segmentation_collate,
shuffle=False,
)
# Run testing
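    # NOTE: eval() turns the "--scales" CLI string (e.g. "[0.7, 1, 1.2]")
    # into a Python list; acceptable for a trusted CLI, unsafe otherwise.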
scales = eval(args.scales)
with torch.no_grad():
for batch_i, rec in enumerate(data_loader):
print("Testing batch [{:3d}/{:3d}]".format(batch_i + 1, len(data_loader)))
img = rec["img"].cuda(non_blocking=True)
probs, preds = model(img, scales, args.flip)
for i, (prob, pred) in enumerate(
zip(torch.unbind(probs, dim=0), torch.unbind(preds, dim=0))
):
out_size = rec["meta"][i]["size"]
img_name = rec["meta"][i]["idx"]
# Save prediction
prob = prob.cpu()
pred = pred.cpu()
pred_img = get_pred_image(pred, out_size, args.output_mode == "palette")
pred_img.save(path.join(args.output, img_name + ".png"))
# Optionally save probabilities
if args.output_mode == "prob":
prob_img = get_prob_image(prob, out_size)
prob_img.save(path.join(args.output, img_name + "_prob.png"))
def load_snapshot(snapshot_file):
"""Load a training snapshot"""
print("--- Loading model from snapshot")
# Create network
norm_act = partial(InPlaceABN, activation="leaky_relu", activation_param=0.01)
body = models.__dict__["net_wider_resnet38_a2"](
norm_act=norm_act, dilation=(1, 2, 4, 4)
)
head = DeeplabV3(4096, 256, 256, norm_act=norm_act, pooling_size=(84, 84))
# Load snapshot and recover network state
data = torch.load(snapshot_file)
body.load_state_dict(data["state_dict"]["body"])
head.load_state_dict(data["state_dict"]["head"])
return body, head, data["state_dict"]["cls"]
_PALETTE = np.array(
[
[165, 42, 42],
[0, 192, 0],
[196, 196, 196],
[190, 153, 153],
[180, 165, 180],
[90, 120, 150],
[102, 102, 156],
[128, 64, 255],
[140, 140, 200],
[170, 170, 170],
[250, 170, 160],
[96, 96, 96],
[230, 150, 140],
[128, 64, 128],
[110, 110, 110],
[244, 35, 232],
[150, 100, 100],
[70, 70, 70],
[150, 120, 90],
[220, 20, 60],
[255, 0, 0],
[255, 0, 100],
[255, 0, 200],
[200, 128, 128],
[255, 255, 255],
[64, 170, 64],
[230, 160, 50],
[70, 130, 180],
[190, 255, 255],
[152, 251, 152],
[107, 142, 35],
[0, 170, 30],
[255, 255, 128],
[250, 0, 30],
[100, 140, 180],
[220, 220, 220],
[220, 128, 128],
[222, 40, 40],
[100, 170, 30],
[40, 40, 40],
[33, 33, 33],
[100, 128, 160],
[142, 0, 0],
[70, 100, 150],
[210, 170, 100],
[153, 153, 153],
[128, 128, 128],
[0, 0, 80],
[250, 170, 30],
[192, 192, 192],
[220, 220, 0],
[140, 140, 20],
[119, 11, 32],
[150, 0, 255],
[0, 60, 100],
[0, 0, 142],
[0, 0, 90],
[0, 0, 230],
[0, 80, 100],
[128, 64, 64],
[0, 0, 110],
[0, 0, 70],
[0, 0, 192],
[32, 32, 32],
[120, 10, 10],
],
dtype=np.uint8,
)
_PALETTE = np.concatenate(
[_PALETTE, np.zeros((256 - _PALETTE.shape[0], 3), dtype=np.uint8)], axis=0
)
_PALETTE = ImagePalette.ImagePalette(
palette=list(_PALETTE[:, 0]) + list(_PALETTE[:, 1]) + list(_PALETTE[:, 2]),
mode="RGB",
)
def get_pred_image(tensor, out_size, with_palette):
tensor = tensor.numpy()
if with_palette:
img = Image.fromarray(tensor.astype(np.uint8), mode="P")
img.putpalette(_PALETTE)
else:
img = Image.fromarray(tensor.astype(np.uint8), mode="L")
return img.resize(out_size, Image.NEAREST)
def get_prob_image(tensor, out_size):
tensor = (tensor * 255).to(torch.uint8)
img = Image.fromarray(tensor.numpy(), mode="L")
return img.resize(out_size, Image.NEAREST)
if __name__ == "__main__":
main()
|
9f97c36955087957dd2342471e0de2f39e5a1886
|
5e601244fbf32ee5190fb5210a0cd334473a0abe
|
/projects/backupOps/FileSystem/clean-old-backups-with-given-directory.py
|
41f543a2f6744cf135a1eff9e915504961ad6f1f
|
[] |
no_license
|
DingGuodong/LinuxBashShellScriptForOps
|
69ebe45cf3f92b741a078b9b78c2600328ce9b9e
|
b2ca1e4c870626dd078d447e2d1479b08602bdf6
|
refs/heads/master
| 2023-08-21T20:53:40.617397
| 2023-07-17T01:41:05
| 2023-07-17T01:41:05
| 57,015,255
| 453
| 343
| null | 2023-02-16T01:29:23
| 2016-04-25T05:55:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,962
|
py
|
clean-old-backups-with-given-directory.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by PyCharm.
File Name: LinuxBashShellScriptForOps:clean-old-backups-with-given-directory.py
Version: 0.0.1
Author: Guodong
Author Email: dgdenterprise@gmail.com
URL: https://github.com/DingGuodong/LinuxBashShellScriptForOps
Download URL: https://github.com/DingGuodong/LinuxBashShellScriptForOps/tarball/master
Create Date: 2019/3/12
Create Time: 15:12
Description: clean old backups with given directory
Long Description:
References:
Prerequisites: pip install -U python-dateutil
Development Status: 3 - Alpha, 5 - Production/Stable
Environment: Console
Intended Audience: System Administrators, Developers, End Users/Desktop
License: Freeware, Freely Distributable
Natural Language: English, Chinese (Simplified)
Operating System: POSIX :: Linux, Microsoft :: Windows
Programming Language: Python :: 2.6
Programming Language: Python :: 2.7
Topic: Utilities
"""
import datetime
import os
import time
from dateutil.relativedelta import relativedelta
def to_unicode_or_bust(obj, encoding='utf-8'):
"""
convert non-unicode object to unicode object
:param obj: str object or unicode
:param encoding:
:return:
"""
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
return obj
def to_str_or_bust(obj, encoding='utf-8'):
"""
convert unicode object to str object
:param obj: unicode object or str
:param encoding:
:return:
"""
if isinstance(obj, basestring):
if isinstance(obj, unicode):
obj = obj.encode(encoding)
return obj
def clean_old_backups(path, ext=u"bak", days=30):
"""
    clean old backups within the given directory, return the count of files deleted
:param path: backup directory
:param ext: extension of backup file
:param days: days of backup saves
:return:
"""
path = to_unicode_or_bust(path)
if not os.path.exists(path):
raise RuntimeError("Error: cannot access \'%s\': No such file or directory" % path)
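    # cutoff: files last modified before (today - days) are considered stale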
timestamp_before_save_days = time.mktime((datetime.datetime.today() + relativedelta(days=-days)).timetuple())
count_removed = 0
for top, dirs, nondirs in os.walk(path):
for filename in nondirs:
if filename.endswith(ext):
filepath = os.path.join(top, filename)
if os.path.getmtime(filepath) < timestamp_before_save_days:
count_removed += 1
os.remove(filepath)
return count_removed
if __name__ == '__main__':
backup_source = r'D:\Microsoft SQL Server Backup'
backup_extension = u"bak"
save_days = 30
print(clean_old_backups(backup_source, backup_extension, save_days))
|
0e680eac96e231f8fe998fca2007965515ba463e
|
bca44c32b850c297d45136c54b93882a144e87d3
|
/tests/integration/abi_router_test.py
|
8f53db916c211498bf69fee999649840787dcf43
|
[
"MIT"
] |
permissive
|
algorand/pyteal
|
b7f541ee6484f6bac94bc528ffa429196d8cd79a
|
670e637644630534883b4c2e6837ab34c56546b6
|
refs/heads/master
| 2023-06-11T04:37:33.645398
| 2023-06-06T20:18:01
| 2023-06-06T20:18:01
| 212,449,337
| 282
| 134
|
MIT
| 2023-06-06T18:15:23
| 2019-10-02T22:00:39
|
Python
|
UTF-8
|
Python
| false
| false
| 18,022
|
py
|
abi_router_test.py
|
import json
import re
from collections import defaultdict
from dataclasses import asdict
from pathlib import Path
import pytest
from graviton.abi_strategy import (
ABIArgsMod,
RandomABIStrategy,
RandomABIStrategyHalfSized,
)
from graviton.blackbox import DryRunEncoder
from graviton.invariant import DryRunProperty as DRProp
from graviton.invariant import PredicateKind
import pyteal as pt
from pyteal.compiler.compiler_test import router_app_tester
from tests.blackbox import (
CLEAR_STATE_CALL,
ABICallConfigs,
Predicates,
RouterCallType,
RouterSimulation,
negate_cc,
)
NUM_ROUTER_DRYRUNS = 7
FIXTURES = Path.cwd() / "tests" / "teal" / "router"
ALL_ROUTER_CASES, ROUTER_SOURCES = router_app_tester()
ROUTER_CASES, NONTRIV_CLEAR_ROUTER_CASES = ALL_ROUTER_CASES[:-2], ALL_ROUTER_CASES[-2:]
TYPICAL_IAC_OC = pt.MethodConfig(no_op=pt.CallConfig.CALL)
# TEST DRIVERS LEGEND - combines method_configs + predicates
# * @0 - method: RouterCallType
# method == None indicates bare app call
# method == CLEAR_STATE_CALL indicates clear state app call
#
# * @1 - method_config: MethodConfig
# defines how to call the method
#
# * @2 - predicates: Predicates ~ dict[DRProp, Any]
# these are being asserted after being processed into Invariant's
#
# NOTE: the "yacc" routers will simply ignore the case with method `None`
# as they do not have any bare-app-calls
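# For example, the first entry below reads: call method "add" as a plain
# no-op application call, and assert both that it passed and that the last
# log equals args[1] + args[2].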
QUESTIONABLE_DRIVER: list[tuple[RouterCallType, pt.MethodConfig, Predicates]] = [
(
"add",
TYPICAL_IAC_OC,
{DRProp.passed: True, DRProp.lastLog: lambda args: args[1] + args[2]},
),
(
"sub",
TYPICAL_IAC_OC,
{
DRProp.passed: lambda args: args[1] >= args[2],
DRProp.lastLog: (
lambda args, actual: True
if args[1] < args[2]
else actual == args[1] - args[2]
),
},
),
(
"mul",
TYPICAL_IAC_OC,
{DRProp.passed: True, DRProp.lastLog: lambda args: args[1] * args[2]},
),
(
"div",
TYPICAL_IAC_OC,
{DRProp.passed: True, DRProp.lastLog: lambda args: args[1] // args[2]},
),
(
"mod",
TYPICAL_IAC_OC,
{DRProp.passed: True, DRProp.lastLog: lambda args: args[1] % args[2]},
),
(
"all_laid_to_args",
TYPICAL_IAC_OC,
{DRProp.passed: True, DRProp.lastLog: lambda args: sum(args[1:])},
),
(
"empty_return_subroutine",
pt.MethodConfig(
no_op=pt.CallConfig.CALL,
opt_in=pt.CallConfig.ALL,
),
{
DRProp.passed: True,
DRProp.lastLog: DryRunEncoder.hex(
"appear in both approval and clear state"
),
},
),
(
"log_1",
pt.MethodConfig(
no_op=pt.CallConfig.CALL,
opt_in=pt.CallConfig.CALL,
# clear_state=pt.CallConfig.CALL,
),
{DRProp.passed: True, DRProp.lastLog: 1},
),
(
"log_creation",
pt.MethodConfig(no_op=pt.CallConfig.CREATE),
{DRProp.passed: True, DRProp.lastLog: "logging creation"},
),
(
None,
pt.MethodConfig(
opt_in=pt.CallConfig.CALL,
),
{
DRProp.passed: True,
DRProp.lastLog: lambda _, actual: actual
in (None, DryRunEncoder.hex("optin call")),
},
),
(
CLEAR_STATE_CALL,
pt.MethodConfig(), # ignored in this case
{
DRProp.passed: True,
DRProp.cost: 2,
},
),
]
YACC_DRIVER = [case for case in QUESTIONABLE_DRIVER if case[0]]
DRIVERS = {
"questionable": QUESTIONABLE_DRIVER,
"yacc": YACC_DRIVER,
"nontriv_clear": QUESTIONABLE_DRIVER,
}
def split_driver2predicates_methconfigs(driver) -> tuple[Predicates, ABICallConfigs]:
predicates = {}
methconfigs = {}
for meth, meth_config, predicate in driver:
predicates[meth] = predicate
if meth != CLEAR_STATE_CALL:
methconfigs[meth] = meth_config
return predicates, methconfigs
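# e.g. applied to QUESTIONABLE_DRIVER this yields one predicate per entry
# (including the CLEAR_STATE_CALL sentinel), but method configs only for
# entries routed through the approval program.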
def assert_full_method_coverage(router, methconfigs):
assert len(methconfigs) == len(rmc := router.method_configs)
for meth_sig, meth_config in rmc.items():
k = meth_sig
if k:
k = meth_sig.split("(")[0]
assert k in methconfigs, f"{k=} (derived from {meth_sig=} not in methconfigs"
assert meth_config == methconfigs[k]
@pytest.mark.parametrize("case, version, router", ROUTER_CASES)
def test_abi_router_positive(case, version, router):
"""
    Test the _positive_ version of a case. In other words, for each router
    encountered and its driver, iterate through the driver as follows:
* consider each method or bare call
* consider each (OnComplete, CallConfig) combination
* assert that all predicates hold for this call
"""
driver = DRIVERS[case]
predicates, methconfigs = split_driver2predicates_methconfigs(driver)
assert_full_method_coverage(router, methconfigs)
rsim = RouterSimulation(router, predicates)
def msg():
return f"""test_abi_router_positive()
{case=}
{version=}
{router.name=}"""
results = rsim.simulate_and_assert(
approval_args_strat_type=RandomABIStrategyHalfSized,
clear_args_strat_type_or_inputs=RandomABIStrategy,
approval_abi_args_mod=None,
version=version,
method_configs=methconfigs,
num_dryruns=NUM_ROUTER_DRYRUNS,
msg=msg(),
)
# won't even get here if there was an error, but some extra sanity checks:
assert (sim_results := results.results) and all(
sim.succeeded for meth in sim_results.values() for sim in meth.values()
)
print("\nstats:", json.dumps(stats := results.stats, indent=2))
assert stats and all(stats.values())
# TODO: add these assertions after the flakiness of issue #199 is fixed for good
# These fail because of differing scratch slot assignments:
# pregen_approval, pregen_clear = ROUTER_SOURCES[(case, version)]
# assert pregen_clear == results.clear_simulator.simulate_dre.program
# assert pregen_approval == results.approval_simulator.simulate_dre.program
# cf. https://death.andgravity.com/f-re for an explanation of verbose regex'es
EXPECTED_ERR_PATTERN = r"""
err\ opcode # pyteal generated err's ok
| assert\ failed\ pc= # pyteal generated assert's ok
| invalid\ ApplicationArgs\ index # failing because an app arg wasn't provided
| extraction\ end\ 16\ is\ beyond\ length # failing because couldn't extract when omitted final arg or jammed in tuple
"""
APPROVAL_NEGATIVE_PREDS = {
DRProp.rejected: True,
DRProp.error: True,
DRProp.errorMessage: lambda _, actual: (
bool(re.search(EXPECTED_ERR_PATTERN, actual, re.VERBOSE))
),
}
CLEAR_NEGATIVE_INVARIANTS_MUST_APPROVE = [
inv for m, _, inv in QUESTIONABLE_DRIVER if m == CLEAR_STATE_CALL
][0]
def scenario_assert_stats(scenario, results, totals):
part_a = f"""
SCENARIO: {scenario}
"""
if results:
part_b = json.dumps(stats := results.stats, indent=2)
assert stats and all(stats.values())
for k, v in stats.items():
if isinstance(v, int):
totals[k] += v
else:
part_b = "SKIPPED"
print(f"{part_a}stats:", part_b)
@pytest.mark.parametrize("case, version, router", ROUTER_CASES)
def test_abi_router_negative(case, version, router):
totals = defaultdict(int)
contract = router.contract_construct()
driver = DRIVERS[case]
pos_predicates, pos_mconfigs = split_driver2predicates_methconfigs(driver)
# assert FULL coverage (before modifying the dict):
assert_full_method_coverage(router, pos_mconfigs)
if None not in pos_mconfigs:
pos_mconfigs[None] = pt.MethodConfig()
pos_predicates[None] = APPROVAL_NEGATIVE_PREDS
pure_meth_mconfigs = {
meth: methconfig
for meth, methconfig in pos_mconfigs.items()
if meth is not None
}
neg_predicates = {
meth: (
APPROVAL_NEGATIVE_PREDS
if meth != CLEAR_STATE_CALL
else CLEAR_NEGATIVE_INVARIANTS_MUST_APPROVE
)
for meth in pos_predicates
}
rsim = RouterSimulation(router, neg_predicates)
def msg():
return f"""test_abi_router_negative()
{scenario=}
{case=}
{version=}
{router.name=}"""
scenario = "I. explore all UNEXPECTED (is_app_create, on_complete) combos"
# NOTE: We're NOT including clear_state calls for the approval program
# as they would never be applied.
# Also, we're ONLY including clear_state for the clear program.
neg_mconfigs = {
meth: pt.MethodConfig(
**{k: negate_cc(v) for k, v in asdict(mc).items() if k != "clear_state"}
)
for meth, mc in pos_mconfigs.items()
}
results = rsim.simulate_and_assert(
approval_args_strat_type=RandomABIStrategyHalfSized,
clear_args_strat_type_or_inputs=RandomABIStrategy,
approval_abi_args_mod=None,
version=version,
method_configs=neg_mconfigs,
num_dryruns=NUM_ROUTER_DRYRUNS,
executor_validation=False,
msg=msg(),
)
# won't even get here if there was an error, but some extra sanity checks:
assert (sim_results := results.results) and all(
sim.succeeded for meth in sim_results.values() for sim in meth.values()
)
scenario_assert_stats(scenario, results, totals)
# II. the case of bare-app-calls
scenario = "II. adding an argument to a bare app call"
if None in pos_mconfigs and not pos_mconfigs[None].is_never():
bare_only_methconfigs = {None: pos_mconfigs[None]}
results = rsim.simulate_and_assert(
approval_args_strat_type=RandomABIStrategyHalfSized,
clear_args_strat_type_or_inputs=None,
approval_abi_args_mod=ABIArgsMod.parameter_append,
version=version,
method_configs=bare_only_methconfigs,
omit_clear_call=True,
num_dryruns=NUM_ROUTER_DRYRUNS,
executor_validation=False,
msg=msg(),
)
assert (sim_results := results.results) and all(
sim.succeeded for meth in sim_results.values() for sim in meth.values()
)
scenario_assert_stats(scenario, results, totals)
else:
scenario_assert_stats(scenario, None, totals)
# For the rest, we may assume method calls (i.e. non bare-app calls)
# III. explore changing method selector arg[0] by edit distance 1
# NOTE: We don't test the case of adding an argument to method calls
# because the SDK's will guard against this case.
# However, we should re-think this assumption.
# Cf. https://github.com/algorand/go-algorand-internal/issues/2772
# Cf. https://github.com/algorand/algorand-sdk-testing/issues/190
scenario = "III(a). inserting an extra random byte into method selector"
results = rsim.simulate_and_assert(
approval_args_strat_type=RandomABIStrategyHalfSized,
clear_args_strat_type_or_inputs=None,
approval_abi_args_mod=ABIArgsMod.selector_byte_insert,
version=version,
method_configs=pure_meth_mconfigs,
omit_clear_call=True,
num_dryruns=NUM_ROUTER_DRYRUNS,
executor_validation=False,
msg=msg(),
)
assert (sim_results := results.results) and all(
sim.succeeded for meth in sim_results.values() for sim in meth.values()
)
scenario_assert_stats(scenario, results, totals)
scenario = "III(b). removing a random byte from method selector"
results = rsim.simulate_and_assert(
approval_args_strat_type=RandomABIStrategyHalfSized,
clear_args_strat_type_or_inputs=None,
approval_abi_args_mod=ABIArgsMod.selector_byte_delete,
version=version,
method_configs=pure_meth_mconfigs,
omit_clear_call=True,
num_dryruns=NUM_ROUTER_DRYRUNS,
executor_validation=False,
msg=msg(),
)
assert (sim_results := results.results) and all(
sim.succeeded for meth in sim_results.values() for sim in meth.values()
)
scenario_assert_stats(scenario, results, totals)
scenario = "III(c). replacing a random byte in method selector"
results = rsim.simulate_and_assert(
approval_args_strat_type=RandomABIStrategyHalfSized,
clear_args_strat_type_or_inputs=None,
approval_abi_args_mod=ABIArgsMod.selector_byte_replace,
version=version,
method_configs=pure_meth_mconfigs,
omit_clear_call=True,
num_dryruns=NUM_ROUTER_DRYRUNS,
executor_validation=False,
msg=msg(),
)
assert (sim_results := results.results) and all(
sim.succeeded for meth in sim_results.values() for sim in meth.values()
)
scenario_assert_stats(scenario, results, totals)
# IV. explore changing the number of args over the 'good' call_types
# NOTE: We don't test the case of adding an argument to method calls
# We also remove methods with 0 arguments, as these degenerate to the
# already tested bare-app call case.
scenario = "IV. removing the final argument"
atleast_one_param_mconfigs = {
meth: mconfig
for meth, mconfig in pure_meth_mconfigs.items()
if len(contract.get_method_by_name(meth).args) > 0
}
results = rsim.simulate_and_assert(
approval_args_strat_type=RandomABIStrategyHalfSized,
clear_args_strat_type_or_inputs=None,
approval_abi_args_mod=ABIArgsMod.parameter_delete,
version=version,
method_configs=atleast_one_param_mconfigs,
omit_clear_call=True,
num_dryruns=NUM_ROUTER_DRYRUNS,
executor_validation=False,
msg=msg(),
)
assert (sim_results := results.results) and all(
sim.succeeded for meth in sim_results.values() for sim in meth.values()
)
scenario_assert_stats(scenario, results, totals)
print("SUMMARY STATS: ", json.dumps(totals, indent=2))
IDENTITY_PREDICATES = {
DRProp.lastLog: PredicateKind.IdenticalPair,
DRProp.status: PredicateKind.IdenticalPair,
DRProp.error: PredicateKind.IdenticalPair,
}
def test_nontriv_clear():
totals = defaultdict(int)
questionable = [
r for name, ver, r in ALL_ROUTER_CASES if name == "questionable" and ver == 6
][0]
nontriv_clear = [
r for name, ver, r in ALL_ROUTER_CASES if name == "nontriv_clear" and ver == 6
][0]
_, mconfigs = split_driver2predicates_methconfigs(DRIVERS["nontriv_clear"])
predicates = {meth: IDENTITY_PREDICATES for meth in mconfigs}
rsim_nt_vs_q = RouterSimulation(
nontriv_clear, predicates, model_router=questionable
)
# Sanity check the approval programs (_POSITIVE_ cases only):
msg = "APPROVAL nontriv@v6 vs. questionable@v8"
version = 6
results = rsim_nt_vs_q.simulate_and_assert(
approval_args_strat_type=RandomABIStrategyHalfSized,
clear_args_strat_type_or_inputs=None,
approval_abi_args_mod=None,
version=version,
method_configs=mconfigs,
num_dryruns=NUM_ROUTER_DRYRUNS,
omit_clear_call=True,
model_version=8,
msg=msg,
)
assert (sim_results := results.results) and all(
sim.succeeded for meth in sim_results.values() for sim in meth.values()
)
scenario_assert_stats(msg, results, totals)
msg = "APPROVAL nontriv@v8 vs. questionable@v8"
version = 8
results = rsim_nt_vs_q.simulate_and_assert(
approval_args_strat_type=RandomABIStrategyHalfSized,
clear_args_strat_type_or_inputs=None,
approval_abi_args_mod=None,
version=version,
method_configs=mconfigs,
num_dryruns=NUM_ROUTER_DRYRUNS,
omit_clear_call=True,
model_version=8,
msg=msg,
)
assert (sim_results := results.results) and all(
sim.succeeded for meth in sim_results.values() for sim in meth.values()
)
scenario_assert_stats(msg, results, totals)
print("PARTIAL SUMMARY STATS: ", json.dumps(totals, indent=2))
# Finally, a bespoke test for the non-trivial clear program:
bespoke = {
DRProp.passed: {
(): True,
(b"random bytes",): True,
(b"CLEANUP",): True,
(b"CLEANUP", b"random bytes"): True,
(b"CLEANUP", b"ABORTING"): False,
(b"CLEANUP", b"ABORTING", b"random bytes"): False,
}
}
inputs = list(bespoke[DRProp.passed].keys())
clear_preds = {CLEAR_STATE_CALL: bespoke}
msg = "CLEAR nontriv@v6"
version = 6
clear_rsim = RouterSimulation(nontriv_clear, clear_preds)
results = clear_rsim.simulate_and_assert(
approval_args_strat_type=None,
clear_args_strat_type_or_inputs=inputs,
approval_abi_args_mod=None,
version=version,
method_configs=None,
msg=msg,
omit_approval_call=True,
skip_validation=True,
)
assert (sim_results := results.results) and all(
sim.succeeded for meth in sim_results.values() for sim in meth.values()
)
scenario_assert_stats(msg, results, totals)
msg = "CLEAR nontriv@v8"
version = 8
clear_rsim = RouterSimulation(nontriv_clear, clear_preds)
results = clear_rsim.simulate_and_assert(
approval_args_strat_type=None,
clear_args_strat_type_or_inputs=inputs,
approval_abi_args_mod=None,
version=version,
method_configs=None,
msg=msg,
omit_approval_call=True,
skip_validation=True,
)
assert (sim_results := results.results) and all(
sim.succeeded for meth in sim_results.values() for sim in meth.values()
)
scenario_assert_stats(msg, results, totals)
|
a5c4d3b024ac01d48d1aef2ee75f160b82f44d91
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/glm/all/conanfile.py
|
b86499dcc4c3d22b03fd3343a895e81fce5ccd63
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,157
|
py
|
conanfile.py
|
from conan import ConanFile
from conan.tools.files import copy, get, load, save
from conan.tools.layout import basic_layout
from conan.tools.scm import Version
import os
required_conan_version = ">=1.50.0"
class GlmConan(ConanFile):
name = "glm"
description = "OpenGL Mathematics (GLM)"
topics = ("glm", "opengl", "mathematics")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/g-truc/glm"
license = "MIT"
settings = "os", "arch", "compiler", "build_type"
no_copy_source = True
def layout(self):
basic_layout(self, src_folder="src")
def package_id(self):
self.info.clear()
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def build(self):
pass
def package(self):
glm_version = self.version if self.version.startswith("cci") else Version(self._get_semver())
if glm_version == "0.9.8" or (glm_version == "0.9.9" and self._get_tweak_number() < 6):
save(self, os.path.join(self.package_folder, "licenses", "copying.txt"), self._get_license())
else:
copy(self, "copying.txt", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
for headers in ("*.hpp", "*.inl", "*.h"):
copy(self, headers, src=os.path.join(self.source_folder, "glm"),
dst=os.path.join(self.package_folder, "include", "glm"))
def _get_semver(self):
return self.version.rsplit(".", 1)[0]
def _get_tweak_number(self):
return int(self.version.rsplit(".", 1)[-1])
def _get_license(self):
manual = load(self, os.path.join(self.source_folder, "manual.md"))
begin = manual.find("### The Happy Bunny License (Modified MIT License)")
end = manual.find("\n", begin)
return manual[begin:end]
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "glm")
self.cpp_info.set_property("cmake_target_name", "glm::glm")
self.cpp_info.bindirs = []
self.cpp_info.libdirs = []
|
32222f79ef1bb75de54fb174b131ebb60ae81626
|
2dc24a356ebe7a362623780603379a5b35a65c2f
|
/terraform/stacks/bot/lambdas/python/slack_automation_bot/slack_bolt/authorization/authorize_result.py
|
0a80a3d7a2301cfaaa205836ee86d5ffd05163d1
|
[
"MIT"
] |
permissive
|
cloud-sniper/cloud-sniper
|
cef08402f9109211c33909bdb3de07b16952e308
|
4b026da33695b25033c7667679f3cf552c4bf3b5
|
refs/heads/master
| 2023-06-24T20:46:02.377409
| 2023-04-14T14:48:45
| 2023-04-14T14:48:45
| 210,739,453
| 184
| 36
|
MIT
| 2023-04-14T14:48:46
| 2019-09-25T02:34:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,457
|
py
|
authorize_result.py
|
from typing import Optional
from slack_sdk.web import SlackResponse
class AuthorizeResult(dict):
enterprise_id: Optional[str]
team_id: Optional[str]
bot_id: Optional[str]
bot_user_id: Optional[str]
bot_token: Optional[str]
user_id: Optional[str]
user_token: Optional[str]
def __init__(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
# bot
bot_user_id: Optional[str] = None,
bot_id: Optional[str] = None,
bot_token: Optional[str] = None,
# user
user_id: Optional[str] = None,
user_token: Optional[str] = None,
):
"""The `auth.test` API result for an incoming request.
:param enterprise_id: Organization ID (Enterprise Grid)
:param team_id: Workspace ID
:param bot_user_id: Bot user's User ID
:param bot_id: Bot ID
:param bot_token: Bot user access token starting with xoxb-
:param user_id: The request user ID
:param user_token: User access token starting with xoxp-
"""
self["enterprise_id"] = self.enterprise_id = enterprise_id
self["team_id"] = self.team_id = team_id
# bot
self["bot_user_id"] = self.bot_user_id = bot_user_id
self["bot_id"] = self.bot_id = bot_id
self["bot_token"] = self.bot_token = bot_token
# user
self["user_id"] = self.user_id = user_id
self["user_token"] = self.user_token = user_token
@classmethod
def from_auth_test_response(
cls,
*,
bot_token: Optional[str] = None,
user_token: Optional[str] = None,
auth_test_response: SlackResponse,
) -> "AuthorizeResult":
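        # auth.test echoes the token owner's user_id; when the response also
        # carries a bot_id the token was a bot token, so that id is routed
        # to bot_user_id, otherwise to user_id.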
bot_user_id: Optional[str] = ( # type:ignore
auth_test_response.get("user_id")
if auth_test_response.get("bot_id") is not None
else None
)
user_id: Optional[str] = ( # type:ignore
auth_test_response.get("user_id")
if auth_test_response.get("bot_id") is None
else None
)
return AuthorizeResult(
enterprise_id=auth_test_response.get("enterprise_id"),
team_id=auth_test_response.get("team_id"),
bot_id=auth_test_response.get("bot_id"),
bot_user_id=bot_user_id,
user_id=user_id,
bot_token=bot_token,
user_token=user_token,
)
|
f23a94616b251db5b40294705a1e9ba0202d5ab0
|
a8ca3225e24c8b093056ce6baa1db6ba3aea8f97
|
/tutorials/12-seismic/plot_fwd_1_tomography_2D.py
|
c5a1fdff7acfa6371bbd197103a5b6e73763cc47
|
[
"MIT"
] |
permissive
|
simpeg/simpeg
|
3e8779392d7b26fe576a7a665205068989d8f4d8
|
ebde5856c318f7b4deb92d755b4fefe19012c48e
|
refs/heads/main
| 2023-09-03T18:49:03.545965
| 2023-08-27T15:45:50
| 2023-08-27T15:45:50
| 14,727,320
| 437
| 268
|
MIT
| 2023-09-10T18:16:22
| 2013-11-26T19:46:36
|
Python
|
UTF-8
|
Python
| false
| false
| 5,673
|
py
|
plot_fwd_1_tomography_2D.py
|
"""
Forward Simulation for Straight Ray Tomography in 2D
====================================================
Here we use the module *SimPEG.seismic.straight_ray_tomography* to predict arrival
time data for a synthetic velocity/slowness model. In this tutorial, we focus on the following:
- How to define the survey
- How to define the forward simulation
- How to predict arrival time data
"""
#########################################################################
# Import Modules
# --------------
#
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from discretize import TensorMesh
from SimPEG import maps
from SimPEG.seismic import straight_ray_tomography as tomo
from SimPEG.utils import model_builder
save_file = False
#############################################
# Defining the Survey
# -------------------
#
# Here, we define the survey that will be used for the forward simulation. The survey
# consists of a horizontal line of point receivers at Y = 100 m and a horizontal
# line of point sources at Y = -100 m. The shot by each source is measured by
# all receivers.
#
# Define the locations for the sources and receivers.
x = np.linspace(-100, 100, 11)
y_receivers = 100 * np.ones(len(x))
y_sources = -100 * np.ones(len(x))
receiver_locations = np.c_[x, y_receivers]
source_locations = np.c_[x, y_sources]
# Define the list of receivers used by each source
receiver_list = [tomo.Rx(receiver_locations)]
# Define an empty list to store sources objects. Define each source and
# provide its corresponding receivers list
source_list = []
for ii in range(0, len(y_sources)):
source_list.append(
tomo.Src(location=source_locations[ii, :], receiver_list=receiver_list)
)
# Define the tomography survey
survey = tomo.Survey(source_list)
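# With 11 source and 11 receiver locations, every shot is recorded by every
# receiver, so the survey predicts 11 x 11 = 121 arrival times.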
#############################################
# Defining a Tensor Mesh
# ----------------------
#
# Here, we create the tensor mesh that will be used to predict arrival time
# data.
#
dh = 10.0 # cell width
N = 21 # number of cells in X and Y direction
hx = [(dh, N)]
hy = [(dh, N)]
mesh = TensorMesh([hx, hy], "CC")
########################################################
# Model and Mapping on Tensor Mesh
# --------------------------------
#
# Here, we create the velocity model that will be used to predict the data. Since
# the physical parameter for straight ray tomography is slowness, we must define
# a mapping which converts velocity values to slowness values. The model
# consists of a lower velocity block within a higher velocity background.
#
# Define velocity of each unit in m/s
background_velocity = 3000.0
block_velocity = 1500.0
# Define the model. Models in SimPEG are vector arrays.
model = background_velocity * np.ones(mesh.nC)
ind_block = model_builder.getIndicesBlock(np.r_[-50, 20], np.r_[50, -20], mesh.gridCC)
model[ind_block] = block_velocity
# Define a mapping from the model (velocity) to the slowness. If your model
# consists of slowness values, you can use *maps.IdentityMap*.
model_mapping = maps.ReciprocalMap()
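# e.g. the 3000 m/s background maps to a slowness of 1/3000 s/m (~3.33e-4).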
# Plot Velocity Model
fig = plt.figure(figsize=(6, 5.5))
ax1 = fig.add_axes([0.15, 0.15, 0.65, 0.75])
mesh.plot_image(model, ax=ax1, grid=True, pcolor_opts={"cmap": "viridis"})
ax1.set_xlabel("x (m)")
ax1.set_ylabel("y (m)")
ax1.plot(x, y_sources, "ro") # source locations
ax1.plot(x, y_receivers, "ko") # receiver locations
ax2 = fig.add_axes([0.82, 0.15, 0.05, 0.75])
norm = mpl.colors.Normalize(vmin=np.min(model), vmax=np.max(model))
cbar = mpl.colorbar.ColorbarBase(
ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis
)
cbar.set_label("$Velocity (m/s)$", rotation=270, labelpad=15, size=12)
#######################################################
# Simulation: Arrival Time
# ------------------------
#
# Here we demonstrate how to predict arrival time data for the 2D straight
# ray tomography problem using the 2D Integral formulation.
#
# Define the forward simulation. To do this we need the mesh, the survey and
# the mapping from the model to the slowness values on the mesh.
simulation = tomo.Simulation(mesh, survey=survey, slownessMap=model_mapping)
# Compute predicted data for some model
dpred = simulation.dpred(model)
#######################################################
# Plotting
# -----------------------------------------------
#
n_source = len(source_list)
n_receiver = len(x)
dpred_plotting = dpred.reshape(n_receiver, n_source)
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
obs_string = []
for ii in range(0, n_source):
ax.plot(x, dpred_plotting[:, ii])
obs_string.append("source {}".format(ii + 1))
ax.set_xlim(np.min(x), np.max(x))
ax.set_xlabel("x (m)")
ax.set_ylabel("arrival time (s)")
ax.set_title("Positions vs. Arrival Time")
ax.legend(obs_string, loc="upper right")
#######################################################
# Optional: Exporting Results
# ---------------------------
#
# Write the data and true model
#
if save_file:
dir_path = os.path.dirname(tomo.__file__).split(os.path.sep)[:-3]
dir_path.extend(["tutorials", "seismic", "assets"])
dir_path = os.path.sep.join(dir_path) + os.path.sep
noise = 0.05 * dpred * np.random.rand(len(dpred))
data_array = np.c_[
np.kron(x, np.ones(n_receiver)),
np.kron(y_sources, np.ones(n_receiver)),
np.kron(np.ones(n_source), x),
np.kron(np.ones(n_source), y_receivers),
dpred + noise,
]
fname = dir_path + "tomography2D_data.obs"
np.savetxt(fname, data_array, fmt="%.4e")
output_model = model
fname = dir_path + "true_model_2D.txt"
np.savetxt(fname, output_model, fmt="%.4e")
|
9b5f47143deb98d37f88d255ba01197179517ea9
|
9b1eda0abdc5dea7c6e9695ff4e1098abe0a708b
|
/src/textual/drivers/headless_driver.py
|
695a0c196b83e310f9df8b7bf9a2b02979edb2c4
|
[
"MIT"
] |
permissive
|
Textualize/textual
|
b8cf4b5d18069fccc7623b3116436f479e1ef446
|
b74ac1e47fdd16133ca567390c99ea19de278c5a
|
refs/heads/main
| 2023-08-30T21:40:21.563823
| 2023-08-30T10:18:27
| 2023-08-30T10:18:27
| 355,959,597
| 14,818
| 588
|
MIT
| 2023-09-14T20:22:02
| 2021-04-08T15:24:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,871
|
py
|
headless_driver.py
|
from __future__ import annotations
import asyncio
from .. import events
from ..driver import Driver
from ..geometry import Size
class HeadlessDriver(Driver):
"""A do-nothing driver for testing."""
@property
def is_headless(self) -> bool:
"""Is the driver running in 'headless' mode?"""
return True
def _get_terminal_size(self) -> tuple[int, int]:
if self._size is not None:
return self._size
width: int | None = 80
height: int | None = 25
import shutil
        try:
            width, height = shutil.get_terminal_size()
        except (AttributeError, ValueError, OSError):
            # fall back to the 80x25 defaults declared above
            pass
width = width or 80
height = height or 25
return width, height
def write(self, data: str) -> None:
"""Write data to the output device.
Args:
data: Raw data.
"""
# Nothing to write as this is a headless driver.
def start_application_mode(self) -> None:
"""Start application mode."""
loop = asyncio.get_running_loop()
def send_size_event() -> None:
"""Send first resize event."""
terminal_size = self._get_terminal_size()
width, height = terminal_size
textual_size = Size(width, height)
event = events.Resize(textual_size, textual_size)
asyncio.run_coroutine_threadsafe(
self._app._post_message(event),
loop=loop,
)
send_size_event()
def disable_input(self) -> None:
"""Disable further input."""
def stop_application_mode(self) -> None:
"""Stop application mode, restore state."""
# Nothing to do
|
8819e39189bf7e8c9406d3e345989ebd952ac9c5
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/tests/models/bark/test_modeling_bark.py
|
6fc4cb58a639361b55a74f459d0949dae73af843
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127
| 2023-09-05T19:21:33
| 2023-09-05T19:21:33
| 155,220,641
| 102,193
| 22,284
|
Apache-2.0
| 2023-09-14T20:44:49
| 2018-10-29T13:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 43,123
|
py
|
test_modeling_bark.py
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Bark model. """
import copy
import inspect
import tempfile
import unittest
from transformers import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
is_torch_available,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ..encodec.test_modeling_encodec import EncodecModelTester
if is_torch_available():
import torch
from transformers import (
BarkCausalModel,
BarkCoarseModel,
BarkFineModel,
BarkModel,
BarkProcessor,
BarkSemanticModel,
)
class BarkSemanticModelTester:
def __init__(
self,
parent,
batch_size=2,
seq_length=4,
is_training=False, # for now training is not supported
use_input_mask=True,
use_labels=True,
vocab_size=33,
output_vocab_size=33,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=15,
dropout=0.1,
window_size=256,
initializer_range=0.02,
n_codes_total=8, # for BarkFineModel
n_codes_given=1, # for BarkFineModel
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.output_vocab_size = output_vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.window_size = window_size
self.initializer_range = initializer_range
self.bos_token_id = output_vocab_size - 1
self.eos_token_id = output_vocab_size - 1
self.pad_token_id = output_vocab_size - 1
self.n_codes_total = n_codes_total
self.n_codes_given = n_codes_given
self.is_encoder_decoder = False
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
inputs_dict = {
"input_ids": input_ids,
"head_mask": head_mask,
"attention_mask": input_mask,
}
return config, inputs_dict
def get_config(self):
return BarkSemanticConfig(
vocab_size=self.vocab_size,
output_vocab_size=self.output_vocab_size,
hidden_size=self.hidden_size,
num_layers=self.num_hidden_layers,
num_heads=self.num_attention_heads,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
window_size=self.window_size,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
config.output_vocab_size = 300
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = BarkSemanticModel(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and next_attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"logits"
]
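        # the cached run sees only the 3 new tokens, so its logits must match
        # the last 3 positions of the full-sequence run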
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
# test no attention_mask works
outputs = model(input_ids, use_cache=True)
_, past_key_values = outputs.to_tuple()
output_from_no_past = model(next_input_ids)["logits"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"]
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
class BarkCoarseModelTester:
def __init__(
self,
parent,
batch_size=2,
seq_length=4,
is_training=False, # for now training is not supported
use_input_mask=True,
use_labels=True,
vocab_size=33,
output_vocab_size=33,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=15,
dropout=0.1,
window_size=256,
initializer_range=0.02,
n_codes_total=8, # for BarkFineModel
n_codes_given=1, # for BarkFineModel
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.output_vocab_size = output_vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.window_size = window_size
self.initializer_range = initializer_range
self.bos_token_id = output_vocab_size - 1
self.eos_token_id = output_vocab_size - 1
self.pad_token_id = output_vocab_size - 1
self.n_codes_total = n_codes_total
self.n_codes_given = n_codes_given
self.is_encoder_decoder = False
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
inputs_dict = {
"input_ids": input_ids,
"head_mask": head_mask,
"attention_mask": input_mask,
}
return config, inputs_dict
def get_config(self):
return BarkCoarseConfig(
vocab_size=self.vocab_size,
output_vocab_size=self.output_vocab_size,
hidden_size=self.hidden_size,
num_layers=self.num_hidden_layers,
num_heads=self.num_attention_heads,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
window_size=self.window_size,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
config.output_vocab_size = 300
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = BarkCoarseModel(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and next_attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"logits"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
# test no attention_mask works
outputs = model(input_ids, use_cache=True)
_, past_key_values = outputs.to_tuple()
output_from_no_past = model(next_input_ids)["logits"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"]
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
class BarkFineModelTester:
def __init__(
self,
parent,
batch_size=2,
seq_length=4,
is_training=False, # for now training is not supported
use_input_mask=True,
use_labels=True,
vocab_size=33,
output_vocab_size=33,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=15,
dropout=0.1,
window_size=256,
initializer_range=0.02,
n_codes_total=8, # for BarkFineModel
n_codes_given=1, # for BarkFineModel
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.output_vocab_size = output_vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.window_size = window_size
self.initializer_range = initializer_range
self.bos_token_id = output_vocab_size - 1
self.eos_token_id = output_vocab_size - 1
self.pad_token_id = output_vocab_size - 1
self.n_codes_total = n_codes_total
self.n_codes_given = n_codes_given
self.is_encoder_decoder = False
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length, self.n_codes_total], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
# random codebook index in [self.n_codes_given, self.n_codes_total - 1]
codebook_idx = ids_tensor((1,), self.n_codes_total - self.n_codes_given).item() + self.n_codes_given
inputs_dict = {
"codebook_idx": codebook_idx,
"input_ids": input_ids,
"head_mask": head_mask,
"attention_mask": input_mask,
}
return config, inputs_dict
def get_config(self):
return BarkFineConfig(
vocab_size=self.vocab_size,
output_vocab_size=self.output_vocab_size,
hidden_size=self.hidden_size,
num_layers=self.num_hidden_layers,
num_heads=self.num_attention_heads,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
window_size=self.window_size,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
config.output_vocab_size = 300
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = BarkFineModel(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append next_tokens to input_ids and next_attn_mask to attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"logits"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
# test no attention_mask works
outputs = model(input_ids, use_cache=True)
_, past_key_values = outputs.to_tuple()
output_from_no_past = model(next_input_ids)["logits"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"]
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
class BarkModelTester:
def __init__(
self,
parent,
semantic_kwargs=None,
coarse_acoustics_kwargs=None,
fine_acoustics_kwargs=None,
codec_kwargs=None,
is_training=False, # for now training is not supported
):
if semantic_kwargs is None:
semantic_kwargs = {}
if coarse_acoustics_kwargs is None:
coarse_acoustics_kwargs = {}
if fine_acoustics_kwargs is None:
fine_acoustics_kwargs = {}
if codec_kwargs is None:
codec_kwargs = {}
self.parent = parent
self.semantic_model_tester = BarkSemanticModelTester(parent, **semantic_kwargs)
self.coarse_acoustics_model_tester = BarkCoarseModelTester(parent, **coarse_acoustics_kwargs)
self.fine_acoustics_model_tester = BarkFineModelTester(parent, **fine_acoustics_kwargs)
self.codec_model_tester = EncodecModelTester(parent, **codec_kwargs)
self.is_training = is_training
def prepare_config_and_inputs(self):
# TODO: @Yoach: Prepare `inputs_dict`
inputs_dict = {}
config = self.get_config()
return config, inputs_dict
def get_config(self):
return BarkConfig.from_sub_model_configs(
self.semantic_model_tester.get_config(),
self.coarse_acoustics_model_tester.get_config(),
self.fine_acoustics_model_tester.get_config(),
self.codec_model_tester.get_config(),
)
def get_pipeline_config(self):
config = self.get_config()
# follow the `get_pipeline_config` of the sub component models
config.semantic_config.vocab_size = 300
config.coarse_acoustics_config.vocab_size = 300
config.fine_acoustics_config.vocab_size = 300
config.semantic_config.output_vocab_size = 300
config.coarse_acoustics_config.output_vocab_size = 300
config.fine_acoustics_config.output_vocab_size = 300
return config
def prepare_config_and_inputs_for_common(self):
# TODO: @Yoach
pass
# return config, inputs_dict
# Need this class in order to create a tiny model for `bark`
# TODO (@Yoach) Implement actual test methods
@unittest.skip("So far all tests will fail.")
class BarkModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (BarkModel,) if is_torch_available() else ()
def setUp(self):
self.model_tester = BarkModelTester(self)
self.config_tester = ConfigTester(self, config_class=BarkConfig, n_embd=37)
@require_torch
class BarkSemanticModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (BarkSemanticModel,) if is_torch_available() else ()
all_generative_model_classes = (BarkCausalModel,) if is_torch_available() else ()
is_encoder_decoder = False
fx_compatible = False
test_missing_keys = False
test_pruning = False
test_model_parallel = False
# no model_parallel for now
test_resize_embeddings = True
def setUp(self):
self.model_tester = BarkSemanticModelTester(self)
self.config_tester = ConfigTester(self, config_class=BarkSemanticConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs["input_ids"]
del inputs["input_ids"]
wte = model.get_input_embeddings()
inputs["input_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)[0]
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = self.all_generative_model_classes[0](config).eval().to(torch_device)
if torch_device == "cuda":
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
@require_torch
class BarkCoarseModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
# Same tester as BarkSemanticModelTest, except for model_class and config_class
all_model_classes = (BarkCoarseModel,) if is_torch_available() else ()
all_generative_model_classes = (BarkCausalModel,) if is_torch_available() else ()
is_encoder_decoder = False
fx_compatible = False
test_missing_keys = False
test_pruning = False
test_model_parallel = False
# no model_parallel for now
test_resize_embeddings = True
def setUp(self):
self.model_tester = BarkCoarseModelTester(self)
self.config_tester = ConfigTester(self, config_class=BarkCoarseConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs["input_ids"]
del inputs["input_ids"]
wte = model.get_input_embeddings()
inputs["input_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)[0]
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = self.all_generative_model_classes[0](config).eval().to(torch_device)
if torch_device == "cuda":
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
@require_torch
class BarkFineModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (BarkFineModel,) if is_torch_available() else ()
is_encoder_decoder = False
fx_compatible = False
test_missing_keys = False
test_pruning = False
# no model_parallel for now
test_model_parallel = False
# torchscript disabled for now because forward takes an int (codebook_idx) argument
test_torchscript = False
test_resize_embeddings = True
def setUp(self):
self.model_tester = BarkFineModelTester(self)
self.config_tester = ConfigTester(self, config_class=BarkFineConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs["input_ids"]
del inputs["input_ids"]
wte = model.get_input_embeddings()[inputs_dict["codebook_idx"]]
inputs["input_embeds"] = wte(input_ids[:, :, inputs_dict["codebook_idx"]])
with torch.no_grad():
model(**inputs)[0]
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
# take first codebook channel
model = self.all_model_classes[0](config).eval().to(torch_device)
if torch_device == "cuda":
model.half()
# toy generation_configs
semantic_generation_config = BarkSemanticGenerationConfig(semantic_vocab_size=0)
coarse_generation_config = BarkCoarseGenerationConfig(n_coarse_codebooks=config.n_codes_given)
fine_generation_config = BarkFineGenerationConfig(
max_fine_history_length=config.block_size // 2,
max_fine_input_length=config.block_size,
n_fine_codebooks=config.n_codes_total,
)
codebook_size = config.vocab_size - 1
model.generate(
input_ids,
history_prompt=None,
temperature=None,
semantic_generation_config=semantic_generation_config,
coarse_generation_config=coarse_generation_config,
fine_generation_config=fine_generation_config,
codebook_size=codebook_size,
)
model.generate(
input_ids,
history_prompt=None,
temperature=0.7,
semantic_generation_config=semantic_generation_config,
coarse_generation_config=coarse_generation_config,
fine_generation_config=fine_generation_config,
codebook_size=codebook_size,
)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["codebook_idx", "input_ids"]
self.assertListEqual(arg_names[:2], expected_arg_names)
def test_model_common_attributes(self):
# one embedding layer per codebook
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings()[0], (torch.nn.Embedding))
model.set_input_embeddings(
torch.nn.ModuleList([torch.nn.Embedding(10, 10) for _ in range(config.n_codes_total)])
)
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x[0], torch.nn.Linear))
def test_resize_tokens_embeddings(self):
# resizing tokens_embeddings of a ModuleList
original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.vocab_size
# Retrieve the embeddings and clone them
model_embed_list = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings_list = [model_embed.weight.clone() for model_embed in model_embed_list]
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed_list = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix for each codebook
for model_embed, cloned_embeddings in zip(model_embed_list, cloned_embeddings_list):
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed_list = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
for model_embed, cloned_embeddings in zip(model_embed_list, cloned_embeddings_list):
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
# only check for the first embedding matrix
models_equal = True
for p1, p2 in zip(cloned_embeddings_list[0], model_embed_list[0].weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_resize_embeddings_untied(self):
# resizing tokens_embeddings of a ModuleList
original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
original_config.tie_word_embeddings = False
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
# if no output embeddings -> leave test
if model.get_output_embeddings() is None:
continue
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
output_embeds_list = model.get_output_embeddings()
for output_embeds in output_embeds_list:
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds_list = model.get_output_embeddings()
for output_embeds in output_embeds_list:
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
@require_torch
class BarkModelIntegrationTests(unittest.TestCase):
@cached_property
def model(self):
return BarkModel.from_pretrained("suno/bark").to(torch_device)
@cached_property
def processor(self):
return BarkProcessor.from_pretrained("suno/bark")
@cached_property
def inputs(self):
input_ids = self.processor("In the light of the moon, a little egg lay on a leaf", voice_preset="en_speaker_6")
input_ids = input_ids.to(torch_device)
return input_ids
@cached_property
def semantic_generation_config(self):
semantic_generation_config = BarkSemanticGenerationConfig(**self.model.generation_config.semantic_config)
return semantic_generation_config
@cached_property
def coarse_generation_config(self):
coarse_generation_config = BarkCoarseGenerationConfig(**self.model.generation_config.coarse_acoustics_config)
return coarse_generation_config
@cached_property
def fine_generation_config(self):
fine_generation_config = BarkFineGenerationConfig(**self.model.generation_config.fine_acoustics_config)
return fine_generation_config
@slow
def test_generate_semantic(self):
input_ids = self.inputs
# fmt: off
# check first ids
expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,]
# fmt: on
# greedy decoding
with torch.no_grad():
output_ids = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
)
self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids)
@slow
def test_generate_coarse(self):
input_ids = self.inputs
history_prompt = input_ids["history_prompt"]
# fmt: off
# check first ids
expected_output_ids = [11018, 11391, 10651, 11418, 10857, 11620, 10642, 11366, 10312, 11528, 10531, 11516, 10474, 11051, 10524, 11051, ]
# fmt: on
with torch.no_grad():
output_ids = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
)
output_ids = self.model.coarse_acoustics.generate(
output_ids,
history_prompt=history_prompt,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
coarse_generation_config=self.coarse_generation_config,
codebook_size=self.model.generation_config.codebook_size,
)
self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids)
@slow
def test_generate_fine(self):
input_ids = self.inputs
history_prompt = input_ids["history_prompt"]
# fmt: off
expected_output_ids = [
[1018, 651, 857, 642, 312, 531, 474, 524, 524, 776,],
[367, 394, 596, 342, 504, 492, 27, 27, 822, 822,],
[961, 955, 221, 955, 955, 686, 939, 939, 479, 176,],
[638, 365, 218, 944, 853, 363, 639, 22, 884, 456,],
[302, 912, 524, 38, 174, 209, 879, 23, 910, 227,],
[440, 673, 861, 666, 372, 558, 49, 172, 232, 342,],
[244, 358, 123, 356, 586, 520, 499, 877, 542, 637,],
[806, 685, 905, 848, 803, 810, 921, 208, 625, 203,],
]
# fmt: on
with torch.no_grad():
output_ids = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
)
output_ids = self.model.coarse_acoustics.generate(
output_ids,
history_prompt=history_prompt,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
coarse_generation_config=self.coarse_generation_config,
codebook_size=self.model.generation_config.codebook_size,
)
# greedy decoding
output_ids = self.model.fine_acoustics.generate(
output_ids,
history_prompt=history_prompt,
temperature=None,
semantic_generation_config=self.semantic_generation_config,
coarse_generation_config=self.coarse_generation_config,
fine_generation_config=self.fine_generation_config,
codebook_size=self.model.generation_config.codebook_size,
)
self.assertListEqual(output_ids[0, :, : len(expected_output_ids[0])].tolist(), expected_output_ids)
@slow
def test_generate_end_to_end(self):
input_ids = self.inputs
with torch.no_grad():
self.model.generate(**input_ids)
self.model.generate(**{key: val for (key, val) in input_ids.items() if key != "history_prompt"})
@slow
def test_generate_end_to_end_with_args(self):
input_ids = self.inputs
with torch.no_grad():
self.model.generate(**input_ids, do_sample=True, temperature=0.6, penalty_alpha=0.6)
self.model.generate(**input_ids, do_sample=True, temperature=0.6, num_beams=4)
@slow
def test_generate_end_to_end_with_sub_models_args(self):
input_ids = self.inputs
with torch.no_grad():
self.model.generate(
**input_ids, do_sample=False, temperature=1.0, coarse_do_sample=True, coarse_temperature=0.7
)
self.model.generate(
**input_ids,
do_sample=False,
temperature=1.0,
coarse_do_sample=True,
coarse_temperature=0.7,
fine_temperature=0.3,
)
self.model.generate(
**input_ids,
do_sample=True,
temperature=0.6,
penalty_alpha=0.6,
semantic_temperature=0.9,
coarse_temperature=0.2,
fine_temperature=0.1,
)
@require_torch_gpu
@slow
def test_generate_end_to_end_with_offload(self):
input_ids = self.inputs
with torch.no_grad():
# standard generation
output_with_no_offload = self.model.generate(**input_ids, do_sample=False, temperature=1.0)
torch.cuda.empty_cache()
memory_before_offload = torch.cuda.memory_allocated()
model_memory_footprint = self.model.get_memory_footprint()
# activate cpu offload
self.model.enable_cpu_offload()
memory_after_offload = torch.cuda.memory_allocated()
# checks that the model has been offloaded
# CUDA memory usage after offload should be near 0, leaving room for small differences
room_for_difference = 1.1
self.assertGreater(
(memory_before_offload - model_memory_footprint) * room_for_difference, memory_after_offload
)
# checks if device is the correct one
self.assertEqual(self.model.device.type, torch_device)
# checks if hooks exist
self.assertTrue(hasattr(self.model.semantic, "_hf_hook"))
# output with cpu offload
output_with_offload = self.model.generate(**input_ids, do_sample=False, temperature=1.0)
# checks that the outputs are identical
self.assertListEqual(output_with_no_offload.tolist(), output_with_offload.tolist())
# ===== robotframework/SeleniumLibrary :: docs/extending/examples/get_instance/GetSeleniumLibraryInstance.py (Apache-2.0) =====
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
def open_browser(host):
url = f"http://{host}.com/"
sl = BuiltIn().get_library_instance("SeleniumLibrary")
sl.open_browser(url, "chrome")
def get_browser_desired_capabilities():
logger.info("Getting currently open browser desired capabilities")
sl = BuiltIn().get_library_instance("SeleniumLibrary")
return sl.driver.desired_capabilities
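# Usage sketch (assumed suite wiring, not part of the original file): when this
# module is imported as a Robot Framework library next to SeleniumLibrary, its
# functions become keywords; `Open Browser` clashes with SeleniumLibrary's
# keyword of the same name, so the module prefix disambiguates:
#     GetSeleniumLibraryInstance.Open Browser    example
#     ${caps} =    Get Browser Desired Capabilities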
# ===== JetBrains/intellij-community :: python/testData/codeInsight/smartEnter/methodParameterNoDecorators.py (Apache-2.0) =====
class MyClass:
def method<caret>
# ===== lebrice/SimpleParsing :: simple_parsing/helpers/__init__.py (MIT) =====
""" Collection of helper classes and functions to reduce boilerplate code. """
from .fields import *
from .flatten import FlattenedAccess
from .hparams import HyperParameters
from .partial import Partial, config_for
from .serialization import FrozenSerializable, Serializable, SimpleJsonEncoder, encode
try:
from .serialization import YamlSerializable
except ImportError:
pass
# For backward compatibility purposes
JsonSerializable = Serializable
SimpleEncoder = SimpleJsonEncoder
# ===== tobymao/sqlglot :: sqlglot/dataframe/sql/group.py (MIT) =====
from __future__ import annotations
import typing as t
from sqlglot.dataframe.sql import functions as F
from sqlglot.dataframe.sql.column import Column
from sqlglot.dataframe.sql.operations import Operation, operation
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql.dataframe import DataFrame
class GroupedData:
def __init__(self, df: DataFrame, group_by_cols: t.List[Column], last_op: Operation):
self._df = df.copy()
self.spark = df.spark
self.last_op = last_op
self.group_by_cols = group_by_cols
def _get_function_applied_columns(
self, func_name: str, cols: t.Tuple[str, ...]
) -> t.List[Column]:
func_name = func_name.lower()
return [getattr(F, func_name)(name).alias(f"{func_name}({name})") for name in cols]
@operation(Operation.SELECT)
def agg(self, *exprs: t.Union[Column, t.Dict[str, str]]) -> DataFrame:
columns = (
[Column(f"{agg_func}({column_name})") for column_name, agg_func in exprs[0].items()]
if isinstance(exprs[0], dict)
else exprs
)
cols = self._df._ensure_and_normalize_cols(columns)
expression = self._df.expression.group_by(
*[x.expression for x in self.group_by_cols]
).select(*[x.expression for x in self.group_by_cols + cols], append=False)
return self._df.copy(expression=expression)
def count(self) -> DataFrame:
return self.agg(F.count("*").alias("count"))
def mean(self, *cols: str) -> DataFrame:
return self.avg(*cols)
def avg(self, *cols: str) -> DataFrame:
return self.agg(*self._get_function_applied_columns("avg", cols))
def max(self, *cols: str) -> DataFrame:
return self.agg(*self._get_function_applied_columns("max", cols))
def min(self, *cols: str) -> DataFrame:
return self.agg(*self._get_function_applied_columns("min", cols))
def sum(self, *cols: str) -> DataFrame:
return self.agg(*self._get_function_applied_columns("sum", cols))
def pivot(self, *cols: str) -> DataFrame:
raise NotImplementedError("Sum distinct is not currently implemented")
# ===== taomujian/linbing :: python/app/thirdparty/oneforall/common/tldextract.py (MIT) =====
# -*- coding: utf-8 -*-
"""`tldextract` accurately separates the gTLD or ccTLD (generic or country code
top-level domain) from the registered domain and subdomains of a URL.
>>> import tldextract
>>> tldextract.extract('http://forums.news.cnn.com/')
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com')
>>> tldextract.extract('http://forums.bbc.co.uk/') # United Kingdom
ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk')
>>> tldextract.extract('http://www.worldbank.org.kg/') # Kyrgyzstan
ExtractResult(subdomain='www', domain='worldbank', suffix='org.kg')
`ExtractResult` is a namedtuple, so it's simple to access the parts you want.
>>> ext = tldextract.extract('http://forums.bbc.co.uk')
>>> (ext.subdomain, ext.domain, ext.suffix)
('forums', 'bbc', 'co.uk')
>>> # rejoin subdomain and domain
>>> '.'.join(ext[:2])
'forums.bbc'
>>> # a common alias
>>> ext.registered_domain
'bbc.co.uk'
Note subdomain and suffix are _optional_. Not all URL-like inputs have a
subdomain or a valid suffix.
>>> tldextract.extract('google.com')
ExtractResult(subdomain='', domain='google', suffix='com')
>>> tldextract.extract('google.notavalidsuffix')
ExtractResult(subdomain='google', domain='notavalidsuffix', suffix='')
>>> tldextract.extract('http://127.0.0.1:8080/deployed/')
ExtractResult(subdomain='', domain='127.0.0.1', suffix='')
If you want to rejoin the whole namedtuple, regardless of whether a subdomain
or suffix were found:
>>> ext = tldextract.extract('http://127.0.0.1:8080/deployed/')
>>> # this has unwanted dots
>>> '.'.join(ext)
'.127.0.0.1.'
"""
import os
import re
import json
import collections
from urllib.parse import scheme_chars
from functools import wraps
import idna
from app.thirdparty.oneforall.common import utils
IP_RE = re.compile(r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$') # pylint: disable=line-too-long
SCHEME_RE = re.compile(r'^([' + scheme_chars + ']+:)?//')
class ExtractResult(collections.namedtuple('ExtractResult', 'subdomain domain suffix')):
"""namedtuple of a URL's subdomain, domain, and suffix."""
# Necessary for __dict__ member to get populated in Python 3+
__slots__ = ()
@property
def registered_domain(self):
"""
Joins the domain and suffix fields with a dot, if they're both set.
>>> extract('http://forums.bbc.co.uk').registered_domain
'bbc.co.uk'
>>> extract('http://localhost:8080').registered_domain
''
"""
if self.domain and self.suffix:
return self.domain + '.' + self.suffix
return ''
@property
def fqdn(self):
"""
Returns a Fully Qualified Domain Name, if there is a proper domain/suffix.
>>> extract('http://forums.bbc.co.uk/path/to/file').fqdn
'forums.bbc.co.uk'
>>> extract('http://localhost:8080').fqdn
''
"""
if self.domain and self.suffix:
# self is the namedtuple (subdomain domain suffix)
return '.'.join(i for i in self if i)
return ''
@property
def ipv4(self):
"""
Returns the IPv4 address if that is what the presented domain/URL is
>>> extract('http://127.0.0.1/path/to/file').ipv4
'127.0.0.1'
>>> extract('http://127.0.0.1.1/path/to/file').ipv4
''
>>> extract('http://256.1.1.1').ipv4
''
"""
if not (self.suffix or self.subdomain) and IP_RE.match(self.domain):
return self.domain
return ''
class TLDExtract(object):
"""A callable for extracting, subdomain, domain, and suffix components from a URL."""
def __init__(self, cache_file=None):
"""
Constructs a callable for extracting subdomain, domain, and suffix
components from a URL.
"""
self.cache_file = os.path.expanduser(cache_file or '')
self._extractor = None
def __call__(self, url):
"""
Takes a string URL and splits it into its subdomain, domain, and
suffix (effective TLD, gTLD, ccTLD, etc.) component.
>>> ext = TLDExtract()
>>> ext('http://forums.news.cnn.com/')
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com')
>>> ext('http://forums.bbc.co.uk/')
ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk')
"""
netloc = SCHEME_RE.sub("", url) \
.partition("/")[0] \
.partition("?")[0] \
.partition("#")[0] \
.split("@")[-1] \
.partition(":")[0] \
.strip() \
.rstrip(".")
labels = netloc.split(".")
translations = [_decode_punycode(label) for label in labels]
suffix_index = self._get_tld_extractor().suffix_index(translations)
suffix = ".".join(labels[suffix_index:])
if not suffix and netloc and utils.looks_like_ip(netloc):
return ExtractResult('', netloc, '')
subdomain = ".".join(labels[:suffix_index - 1]) if suffix_index else ""
domain = labels[suffix_index - 1] if suffix_index else ""
return ExtractResult(subdomain, domain, suffix)
@property
def tlds(self):
return self._get_tld_extractor().tlds
def _get_tld_extractor(self):
"""Get or compute this object's TLDExtractor. Looks up the TLDExtractor
in roughly the following order, based on the settings passed to
__init__:
1. Memoized on `self`
2. Local system cache file"""
# pylint: disable=no-else-return
if self._extractor:
return self._extractor
tlds = self._get_cached_tlds()
if tlds:
self._extractor = _PublicSuffixListTLDExtractor(tlds)
return self._extractor
else:
raise Exception("tlds is empty, cannot proceed without tlds.")
def _get_cached_tlds(self):
"""Read the local TLD cache file. Returns None on IOError or other
error, or if this object is not set to use the cache
file."""
if not self.cache_file:
return None
with open(self.cache_file) as cache_file:
return json.loads(cache_file.read())
TLD_EXTRACTOR = TLDExtract()
@wraps(TLD_EXTRACTOR.__call__)
def extract(url):
return TLD_EXTRACTOR(url)
class _PublicSuffixListTLDExtractor(object):
"""Wrapper around this project's main algo for PSL
lookups.
"""
def __init__(self, tlds):
self.tlds = frozenset(tlds)
def suffix_index(self, lower_spl):
"""Returns the index of the first suffix label.
Returns len(spl) if no suffix is found
"""
length = len(lower_spl)
for i in range(length):
maybe_tld = '.'.join(lower_spl[i:])
exception_tld = '!' + maybe_tld
if exception_tld in self.tlds:
return i + 1
if maybe_tld in self.tlds:
return i
wildcard_tld = '*.' + '.'.join(lower_spl[i + 1:])
if wildcard_tld in self.tlds:
return i
return length
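# Worked example: for lower_spl == ['www', 'city', 'kawasaki', 'jp'] with PSL
# entries {'*.kawasaki.jp', '!city.kawasaki.jp'}, the exception rule matches
# at i == 1 and returns 2, so 'kawasaki.jp' becomes the suffix and 'city' the
# registered domain instead of being swallowed by the wildcard rule.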
def _decode_punycode(label):
lowered = label.lower()
looks_like_puny = lowered.startswith('xn--')
if looks_like_puny:
try:
return idna.decode(label.encode('ascii')).lower()
except (UnicodeError, IndexError):
pass
return lowered
# ===== zhao94254/fun :: interpreter/minipy/reader.py (no license) =====
#!/usr/bin/env python
# @Author : pengyun
# Parse the input
import string
from buffer import Buffer
from expr import *
SYMBOL_STARTS = set(string.ascii_lowercase + string.ascii_uppercase + '_')
SYMBOL_INNERS = SYMBOL_STARTS | set(string.digits)
NUMERAL = set(string.digits + '-.')
WHITESPACE = set(' \t\n\r')
DELIMITERS = set('(),:')
def is_literal(s):
return isinstance(s, (int, float))
def is_name(s):
return isinstance(s, str) and s not in DELIMITERS and s != 'lambda'
# tokenize
def tokenize(s):
"""
>>> tokenize('lambda f: f(0, 4.2)')
['lambda', 'f', ':', 'f', '(', 0, ',', 4.2, ')']
:param s:
:return:
"""
src = Buffer(s)
tokens = []
while True:
token = next_token(src)
if token is None:
return tokens
tokens.append(token)
def take(src, allow_char):
"""根据具体的规则获取具体的字符"""
result = ''
while src.current() in allow_char:
result += src.pop()
return result
def next_token(src):
"""获取一个token"""
take(src, WHITESPACE)
c = src.current()
if c is None:
return c
elif c in NUMERAL:
literal = take(src, NUMERAL)
try:
return int(literal)
except ValueError:
try:
return float(literal)
except ValueError:
raise SyntaxError("{} is not a numeral".format(literal))
elif c in SYMBOL_STARTS:
return take(src, SYMBOL_INNERS)
elif c in DELIMITERS:
src.pop()
return c
else:
raise SyntaxError("{} is not a token".format(c))
def read(s):
src = Buffer(tokenize(s))
if src.current() is not None:
return read_expr(src)
def read_expr(src):
"""将经过tokenize 分割的字符转化为具体的对象"""
token = src.pop()
if token is None:
raise SyntaxError('Incomplete expression')
elif is_literal(token):
return read_call_expr(src, Literal(token))
elif is_name(token):
return read_call_expr(src, Name(token))
elif token == 'lambda':
params = read_comma_separated(src, read_param)
src.expect(':')
body = read_expr(src)
return LambdaExpr(params, body)
elif token == '(':
inner_expr = read_expr(src)
src.expect(')')
return read_call_expr(src, inner_expr)
else:
raise SyntaxError("{} is not the start of a expression".format(token))
def read_comma_separated(src, reader):
if src.current() in (':', ')'):
return []
else:
s = [reader(src)]
while src.current() == ',':
src.pop()
s.append(reader(src))
return s
def read_call_expr(src, operator):
while src.current() == '(':
src.pop()
operands = read_comma_separated(src, read_expr)
src.expect(')')
operator = CallExpr(operator, operands)
return operator
def read_param(src):
token = src.pop()
if is_name(token):
return token
else:
raise SyntaxError("Expected parameter name but got '{}'".format(token))
# ===== koxudaxi/datamodel-code-generator :: tests/data/expected/main/main_pattern_properties_field_constraints/output.py (MIT) =====
# generated by datamodel-codegen:
# filename: pattern_properties.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from typing import Dict, Optional
from pydantic import BaseModel
class Bar(BaseModel):
name: Optional[str] = None
class Foo(BaseModel):
bar: Dict[str, Bar]
# ===== vertexproject/synapse :: synapse/tests/test_lib_hive.py (Apache-2.0) =====
import asyncio
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.tests.utils as s_test
import synapse.lib.hive as s_hive
tree0 = {
'kids': {
'hehe': {'value': 'haha'},
'hoho': {'value': 'huhu', 'kids': {
'foo': {'value': 99},
}},
}
}
tree1 = {
'kids': {
'hoho': {'value': 'huhu', 'kids': {
'foo': {'value': 99},
}}
}
}
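# Serialized hive trees are nested dicts: each node may carry a 'value' and a
# 'kids' mapping of child name -> child node (see test_hive_saveload below for
# the round trip through loadHiveTree/saveHiveTree).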
class HiveTest(s_test.SynTest):
async def test_hive_slab(self):
with self.getTestDir() as dirn:
async with self.getTestHiveFromDirn(dirn) as hive:
path = ('foo', 'bar')
async with await hive.dict(path) as hivedict:
self.none(await hivedict.set('hehe', 200))
self.none(await hivedict.set('haha', 'hoho'))
valus = list(hivedict.values())
self.len(2, valus)
self.eq(set(valus), {200, 'hoho'})
self.eq(200, hivedict.get('hehe'))
self.eq(200, await hivedict.set('hehe', 300))
self.eq(300, hivedict.get('hehe'))
self.eq(300, await hive.get(('foo', 'bar', 'hehe')))
self.eq(300, await hive.set(('foo', 'bar', 'hehe'), 400))
hivedict.setdefault('lulz', 31337)
self.eq(31337, hivedict.get('lulz'))
await hivedict.set('lulz', 'boo')
items = list(hivedict.items())
self.eq([('hehe', 400), ('haha', 'hoho'), ('lulz', 'boo')], items)
self.eq('boo', await hivedict.pop('lulz'))
self.eq(31337, await hivedict.pop('lulz'))
self.eq(None, hivedict.get('nope'))
self.eq(s_common.novalu, hivedict.get('nope', default=s_common.novalu))
self.eq(s_common.novalu, await hivedict.pop('nope', default=s_common.novalu))
async with self.getTestHiveFromDirn(dirn) as hive:
self.eq(400, await hive.get(('foo', 'bar', 'hehe')))
self.eq('hoho', await hive.get(('foo', 'bar', 'haha')))
self.none(await hive.get(('foo', 'bar', 'lulz')))
async def test_hive_telepath(self):
# confirm that the primitives used by higher level APIs
# work using telepath remotes and property synchronize.
async with self.getTestHiveDmon() as dmon:
turl = self.getTestUrl(dmon, 'hive')
async with await s_hive.openurl(turl) as hive0:
path = ('foo', 'bar')
evnt = asyncio.Event()
def onedit(mesg):
evnt.set()
node0 = await hive0.open(path)
node0.on('hive:set', onedit)
async with await s_hive.openurl(turl) as hive1:
node1 = await hive1.open(path)
await node1.set(200)
await evnt.wait()
self.eq(200, node0.valu)
self.eq(201, await node0.add(1))
self.eq(202, await node1.add(1))
self.eq(203, await node0.add(1))
async def test_hive_dir(self):
async with self.getTestHive() as hive:
await hive.open(('foo', 'bar'))
await hive.open(('foo', 'baz'))
await hive.open(('foo', 'faz'))
self.none(hive.dir(('nosuchdir',)))
self.eq([('foo', None, 3)], list(hive.dir(())))
await hive.open(('foo',))
kids = list(hive.dir(('foo',)))
self.len(3, kids)
names = list(sorted([name for (name, node, size) in kids]))
self.eq(names, ('bar', 'baz', 'faz'))
async def test_hive_pop(self):
async with self.getTestHive() as hive:
node = await hive.open(('foo', 'bar'))
await node.set(20)
self.none(await hive.pop(('newp',)))
self.eq(20, await hive.pop(('foo', 'bar')))
self.none(await hive.get(('foo', 'bar')))
# Test recursive delete
node = await hive.open(('foo', 'bar'))
await node.set(20)
self.eq(None, await hive.pop(('foo',)))
self.none(await hive.get(('foo', 'bar')))
async def test_hive_saveload(self):
async with self.getTestHive() as hive:
await hive.loadHiveTree(tree0)
self.eq('haha', await hive.get(('hehe',)))
self.eq('huhu', await hive.get(('hoho',)))
self.eq(99, await hive.get(('hoho', 'foo')))
await hive.loadHiveTree(tree1, trim=True)
self.none(await hive.get(('hehe',)))
self.eq('huhu', await hive.get(('hoho',)))
self.eq(99, await hive.get(('hoho', 'foo')))
async with self.getTestHive() as hive:
node = await hive.open(('hehe', 'haha'))
await node.set(99)
tree = await hive.saveHiveTree()
self.nn(tree['kids']['hehe'])
self.nn(tree['kids']['hehe']['kids']['haha'])
self.eq(99, tree['kids']['hehe']['kids']['haha']['value'])
async def test_hive_exists(self):
async with self.getTestHive() as hive:
await hive.loadHiveTree(tree0)
self.true(await hive.exists(('hoho', 'foo')))
self.false(await hive.exists(('hoho', 'food')))
self.false(await hive.exists(('newp',)))
async def test_hive_rename(self):
async with self.getTestHive() as hive:
await hive.loadHiveTree(tree0)
await self.asyncraises(s_exc.BadHivePath, hive.rename(('hehe',), ('hoho',)))
await self.asyncraises(s_exc.BadHivePath, hive.rename(('newp',), ('newp2',)))
await self.asyncraises(s_exc.BadHivePath, hive.rename(('hehe',), ('hehe', 'foo')))
await hive.rename(('hehe',), ('lolo',))
self.eq('haha', await hive.get(('lolo',)))
self.false(await hive.exists(('hehe',)))
await hive.rename(('hoho',), ('jojo',))
self.false(await hive.exists(('hoho',)))
jojo = await hive.open(('jojo',))
self.len(1, jojo.kids)
self.eq('huhu', jojo.valu)
self.eq(99, await hive.get(('jojo', 'foo')))
# ===== microsoft/computervision-recipes :: contrib/crowd_counting/crowdcounting/demo/app-start.py (MIT) =====
import os
import sys
from crowdcounting import CrowdCountModelPose, CrowdCountModelMCNN, Router
from flask import Flask, Response, json, request
import numpy as np
import logging
import time
import argparse
# logging
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(10)
parser = argparse.ArgumentParser(description="A demo app.")
parser.add_argument("-p", "--path", help="Path to MCNN model file", required=True)
args = parser.parse_args()
# flask
app = Flask(__name__)
gpu_id = 0
mcnn_model_path = args.path # "./data/models/mcnn_shtechA_660.h5"
model = Router(gpu_id, mcnn_model_path=mcnn_model_path, cutoff_pose=20, cutoff_mcnn=50)
@app.route("/score", methods=["POST"])
def score():
result = model.score(request.data, return_image=False, img_dim=1750)
js = json.dumps({"count": int(np.round(result["pred"]))})
resp = Response(js, status=200, mimetype="application/json")
return resp
if __name__ == "__main__":
app.run(debug=False, host="0.0.0.0", port=5000, threaded=True)
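# Example request (assumed local run and image file; the count shown is
# illustrative): POST the raw image bytes to /score and read the predicted
# crowd count from the JSON response:
#     curl -X POST --data-binary @crowd.jpg http://localhost:5000/score
#     {"count": 42}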
# ===== tensorflow/tensorflow :: tensorflow/python/kernel_tests/sparse_ops/sparse_to_dense_op_py_test.py (Apache-2.0) =====
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.sparse_op."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
@test_util.with_eager_op_as_function
class SparseToDenseTest(test.TestCase, parameterized.TestCase):
def testInt(self):
tf_ans = sparse_ops.sparse_to_dense([1, 3], [5], 1, 0)
np_ans = np.array([0, 1, 0, 1, 0]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
@parameterized.parameters(
dtypes.bfloat16, dtypes.float16, dtypes.float32, dtypes.float64
)
def testFloatTypes(self, dtype):
tf_ans = sparse_ops.sparse_to_dense(
[1, 3], [5], array_ops.constant(1.0, dtype=dtype), 0.0
)
np_ans = np.array([0, 1, 0, 1, 0]).astype(dtype.as_numpy_dtype)
self.assertAllClose(np_ans, tf_ans)
def testComplex(self):
for dtype in [dtypes.complex64, dtypes.complex128]:
tf_val = math_ops.cast(
constant_op.constant([1.0 + 1.0j, 2.0 - 2.0j]), dtypes.complex128)
tf_ans = sparse_ops.sparse_tensor_to_dense(sparse_ops.from_dense(tf_val))
self.assertAllClose(tf_val, tf_ans)
def testEmptyNonZeros(self):
indices = array_ops.constant([], dtype=dtypes.int32)
values = array_ops.constant([], dtype=dtypes.float32)
tf_ans = sparse_ops.sparse_to_dense(indices, [5], values, 0.0)
np_ans = np.array([0, 0, 0, 0, 0]).astype(np.float32)
self.assertAllClose(np_ans, tf_ans)
def testString(self):
tf_ans = sparse_ops.sparse_to_dense([1, 3], [5], "a", "b")
np_ans = np.array(["b", "a", "b", "a", "b"]).astype(np.string_)
self.assertAllEqual(np_ans, tf_ans)
def testSetValue(self):
tf_ans = sparse_ops.sparse_to_dense([1, 3], [5], [1, 2], -1)
np_ans = np.array([-1, 1, -1, 2, -1]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def testSetSingleValue(self):
tf_ans = sparse_ops.sparse_to_dense([1, 3], [5], 1, -1)
np_ans = np.array([-1, 1, -1, 1, -1]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def test2d(self):
tf_ans = sparse_ops.sparse_to_dense([[1, 3], [2, 0]], [3, 4], 1, -1)
np_ans = np.array([[-1, -1, -1, -1],
[-1, -1, -1, 1],
[1, -1, -1, -1]]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def testZeroDefault(self):
x = sparse_ops.sparse_to_dense(2, [4], 7)
self.assertAllEqual(x, [0, 0, 7, 0])
def test3d(self):
tf_ans = sparse_ops.sparse_to_dense([[1, 3, 0], [2, 0, 1]], [3, 4, 2], 1,
-1)
np_ans = np.ones((3, 4, 2), dtype=np.int32) * -1
np_ans[1, 3, 0] = 1
np_ans[2, 0, 1] = 1
self.assertAllClose(np_ans, tf_ans)
def testBadShape(self):
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 1"):
sparse_ops.sparse_to_dense([1, 3], [[5], [3]], 1, -1)
def testBadValue(self):
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
r"sparse_values has incorrect shape \[2,1\], "
r"should be \[\] or \[2\]"):
self.evaluate(sparse_ops.sparse_to_dense([1, 3], [5], [[5], [3]], -1))
def testBadNumValues(self):
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
r"sparse_values has incorrect shape \[3\], should be \[\] or \[2\]"):
self.evaluate(sparse_ops.sparse_to_dense([1, 3], [5], [1, 2, 3], -1))
def testBadDefault(self):
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"default_value should be a scalar"):
self.evaluate(sparse_ops.sparse_to_dense([1, 3], [5], [1, 2], [0]))
@test_util.disable_xla("XLA does not check validity for SparseToDense")
def testOutOfBoundsIndicesWithWithoutValidation(self):
# The GPU implementation doesn't print the contents of the invalid inputs,
# since the overhead of memory copy between device to host is large.
# Therefore, the following three tests on invalid inputs will distinguish
# the reference error messages between GPUs and CPUs.
error_msg = (r"out of bounds" if test_util.is_gpu_available() else
r"indices\[1\] = \[10\] is out of bounds: need 0 <= "
"index < \[5\]")
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
error_msg):
self.evaluate(
sparse_ops.sparse_to_dense([[1], [10]], [5], [1.0, 1.0], 0.0))
# When validate_indices=False, the GPU kernel won't check out-of-bound
# access. Therefore, we skip the following test.
if not test_util.is_gpu_available():
# Disable checks, the allocation should still fail.
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"out of bounds"):
self.evaluate(
sparse_ops.sparse_to_dense([[1], [10]], [5], [-1.0, 1.0],
0.0,
validate_indices=False))
@test_util.disable_xla("XLA does not check validity for SparseToDense")
def testRepeatingIndicesWithWithoutValidation(self):
error_msg = (r"indices\[1\] is repeated" if test_util.is_gpu_available()
else r"indices\[1\] = \[1\] is repeated")
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
error_msg):
self.evaluate(
sparse_ops.sparse_to_dense([[1], [1]], [5], [-1.0, 1.0], 0.0))
# Disable checks
self.evaluate(
sparse_ops.sparse_to_dense([[1], [1]], [5], [-1.0, 1.0],
0.0,
validate_indices=False))
@test_util.disable_xla("XLA does not check validity for SparseToDense")
def testUnsortedIndicesWithWithoutValidation(self):
error_msg = (r"indices\[1\] is out of order"
if test_util.is_gpu_available() else
r"indices\[1\] = \[1\] is out of order")
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
error_msg):
self.evaluate(
sparse_ops.sparse_to_dense([[2], [1]], [5], [-1.0, 1.0], 0.0))
# Disable checks
self.evaluate(
sparse_ops.sparse_to_dense([[2], [1]], [5], [-1.0, 1.0],
0.0,
validate_indices=False))
def testShapeInferenceKnownShape(self):
with ops.Graph().as_default():
indices = array_ops.placeholder(dtypes.int64)
shape = [4, 5, 6]
output = sparse_ops.sparse_to_dense(indices, shape, 1, 0)
self.assertEqual(output.get_shape(), [4, 5, 6])
shape = array_ops.placeholder(dtypes.int64, shape=(3,))
output = sparse_ops.sparse_to_dense(indices, shape, 1, 0)
self.assertEqual(output.get_shape().as_list(), [None, None, None])
def testShapeInferenceUnknownShape(self):
with ops.Graph().as_default():
indices = array_ops.placeholder(dtypes.int64)
shape = array_ops.placeholder(dtypes.int64)
output = sparse_ops.sparse_to_dense(indices, shape, 1, 0)
self.assertIsNone(output.get_shape().ndims)
if __name__ == "__main__":
test.main()
# ===== ktbyers/netmiko :: netmiko/adtran/__init__.py (MIT) =====
from netmiko.adtran.adtran import AdtranOSSSH, AdtranOSTelnet
__all__ = ["AdtranOSSSH", "AdtranOSTelnet"]
# ===== hyperledger/aries-cloudagent-python :: aries_cloudagent/protocols/out_of_band/v1_0/handlers/tests/test_problem_report_handler.py (Apache-2.0) =====
"""Test Problem Report Handler."""
import pytest
from asynctest import mock as async_mock
from ......connections.models.conn_record import ConnRecord
from ......core.profile import ProfileSession
from ......messaging.request_context import RequestContext
from ......messaging.responder import MockResponder
from ......transport.inbound.receipt import MessageReceipt
from ...handlers import problem_report_handler as test_module
from ...manager import OutOfBandManagerError
from ...messages.problem_report import OOBProblemReport, ProblemReportReason
@pytest.fixture()
async def request_context() -> RequestContext:
ctx = RequestContext.test_context()
ctx.message_receipt = MessageReceipt()
yield ctx
@pytest.fixture()
async def connection_record(request_context, session) -> ConnRecord:
record = ConnRecord()
request_context.connection_record = record
await record.save(session)
yield record
@pytest.fixture()
async def session(request_context) -> ProfileSession:
yield await request_context.session()
class TestOOBProblemReportHandler:
@pytest.mark.asyncio
@async_mock.patch.object(test_module, "OutOfBandManager")
async def test_called(self, mock_oob_mgr, request_context, connection_record):
mock_oob_mgr.return_value.receive_problem_report = async_mock.CoroutineMock()
request_context.message = OOBProblemReport(
description={
"en": "No such connection",
"code": ProblemReportReason.NO_EXISTING_CONNECTION.value,
}
)
handler = test_module.OOBProblemReportMessageHandler()
responder = MockResponder()
await handler.handle(context=request_context, responder=responder)
mock_oob_mgr.return_value.receive_problem_report.assert_called_once_with(
problem_report=request_context.message,
receipt=request_context.message_receipt,
conn_record=connection_record,
)
@pytest.mark.asyncio
@async_mock.patch.object(test_module, "OutOfBandManager")
async def test_exception(self, mock_oob_mgr, request_context, connection_record):
mock_oob_mgr.return_value.receive_problem_report = async_mock.CoroutineMock()
mock_oob_mgr.return_value.receive_problem_report.side_effect = (
OutOfBandManagerError("error")
)
request_context.message = OOBProblemReport(
description={
"en": "Connection not active",
"code": ProblemReportReason.EXISTING_CONNECTION_NOT_ACTIVE.value,
}
)
handler = test_module.OOBProblemReportMessageHandler()
with async_mock.patch.object(
handler._logger, "exception", async_mock.MagicMock()
) as mock_exc_logger:
responder = MockResponder()
await handler.handle(context=request_context, responder=responder)
            # Use assert_called_once(): calling mock.called_once() just
            # returns a truthy child mock, so the assert could never fail.
            mock_exc_logger.assert_called_once()
|
7b48e21783ef2da37e57c8f2fc23956e38e1c721
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/st/ops/gpu/test_nn_matmul.py
|
c8ea15b4b5595c7a3b20d2ccaecbde7838500ebe
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 3,188
|
py
|
test_nn_matmul.py
|
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
class Net(nn.Cell):
def __init__(self, transpose_x1, transpose_x2):
super(Net, self).__init__()
self.matmul = nn.MatMul(transpose_x1, transpose_x2)
def construct(self, x1, x2):
return self.matmul(x1, x2)
def test_x1_2D_x2_3D():
x1 = np.random.randn(16, 64).astype(np.float32)
x2 = np.random.randn(32, 64, 20).astype(np.float32)
transpose_x1 = False
transpose_x2 = False
net = Net(transpose_x1, transpose_x2)
output = net(Tensor(x1), Tensor(x2))
assert output.shape == (32, 16, 20)
def test_x1_4D_x2_3D_transpose_x2_True():
x1 = np.random.randn(3, 2, 3, 4).astype(np.float32)
x2 = np.random.randn(1, 5, 4).astype(np.float32)
transpose_x1 = False
transpose_x2 = True
net = Net(transpose_x1, transpose_x2)
output = net(Tensor(x1), Tensor(x2))
assert output.shape == (3, 2, 3, 5)
def test_x1_3D_transpose_x1_True_x2_2D():
x1 = np.random.randn(2, 3, 4).astype(np.float32)
x2 = np.random.randn(3, 4).astype(np.float32)
transpose_x1 = True
transpose_x2 = False
net = Net(transpose_x1, transpose_x2)
output = net(Tensor(x1), Tensor(x2))
assert output.shape == (2, 4, 4)
def test_x1_3D_transpose_x1_True_x2_3D_transpose_x2_True():
x1 = np.random.randn(2, 5, 6).astype(np.float32)
x2 = np.random.randn(2, 4, 5).astype(np.float32)
transpose_x1 = True
transpose_x2 = True
net = Net(transpose_x1, transpose_x2)
output = net(Tensor(x1), Tensor(x2))
assert output.shape == (2, 6, 4)
def test_x1_1D_x2_1D():
x1 = np.random.randn(4).astype(np.float32)
x2 = np.random.randn(4).astype(np.float32)
transpose_x1 = False
transpose_x2 = False
net = Net(transpose_x1, transpose_x2)
output = net(Tensor(x1), Tensor(x2))
assert output.shape == ()
def test_x1_1D_x2_3D():
x1 = np.random.randn(4).astype(np.float32)
x2 = np.random.randn(2, 4, 5).astype(np.float32)
transpose_x1 = False
transpose_x2 = False
net = Net(transpose_x1, transpose_x2)
output = net(Tensor(x1), Tensor(x2))
assert output.shape == (2, 5)
def test_x1_3D_x2_1D():
x1 = np.random.randn(2, 4, 5).astype(np.float32)
x2 = np.random.randn(5).astype(np.float32)
transpose_x1 = False
transpose_x2 = False
net = Net(transpose_x1, transpose_x2)
output = net(Tensor(x1), Tensor(x2))
assert output.shape == (2, 4)
def test_x1_1D_transpose_x1_True_x2_3D():
x1 = np.random.randn(4).astype(np.float32)
x2 = np.random.randn(2, 4, 5).astype(np.float32)
transpose_x1 = True
transpose_x2 = False
net = Net(transpose_x1, transpose_x2)
output = net(Tensor(x1), Tensor(x2))
assert output.shape == (2, 5)
def test_x1_3D_x2_1D_transpose_x2_True():
x1 = np.random.randn(2, 4, 5).astype(np.float32)
x2 = np.random.randn(5).astype(np.float32)
transpose_x1 = False
transpose_x2 = True
net = Net(transpose_x1, transpose_x2)
output = net(Tensor(x1), Tensor(x2))
assert output.shape == (2, 4)
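# Note on the expected shapes above: nn.MatMul appears to follow numpy-style
# matmul semantics, i.e. 1-D operands are promoted to matrices (with the
# inserted axis dropped from the result) and leading batch dimensions are
# broadcast before the trailing 2-D multiply; e.g. (16, 64) @ (32, 64, 20)
# broadcasts to a batch of 32 and yields (32, 16, 20).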
|
ec5621b748b7887af52f975a6263892197bfb9d5
|
2bd7beb55c0120e4a94741710d3c8ff0a4bb8599
|
/test_project/streamblocks/models.py
|
99ea58607a3a7513e2341cb80ea144f59a80230d
|
[
"BSD-2-Clause"
] |
permissive
|
raagin/django-streamfield
|
9047598da3cf8b6fa0d61aa90e270dfacd6bfc41
|
c6d45f0caae0a0603ea41862c61b237cb31604ab
|
refs/heads/master
| 2023-07-28T07:50:08.751567
| 2023-07-04T09:41:09
| 2023-07-04T09:41:09
| 184,009,451
| 104
| 18
|
NOASSERTION
| 2023-03-29T11:55:54
| 2019-04-29T06:04:20
|
Python
|
UTF-8
|
Python
| false
| false
| 852
|
py
|
models.py
|
from django.db import models
class RichText(models.Model):
text = models.TextField(blank=True, null=True)
options = {
"gray_bgr": {
"label": "Block on gray background",
"type": "checkbox",
"default": False
}
}
class Meta:
        # Used as the display name of this block in the admin
        verbose_name = "Text"
# list of objects
class Column(models.Model):
text = models.TextField(null=True, blank=True)
# StreamField option for list of objects
as_list = True
class Meta:
verbose_name="Column"
verbose_name_plural="Columns"
class Separator(models.Model):
class Meta:
abstract = True
verbose_name="Separator"
# Register blocks for StreamField as list of models
STREAMBLOCKS_MODELS = [
RichText,
Column,
Separator
]
|
5979df77dcb64cf61b385c8e5422068ac257de68
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayOpenServicemarketCommodityExtendinfosAddModel.py
|
b8a4fe99da1216cd20888ff837f6f41cb897923c
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,784
|
py
|
AlipayOpenServicemarketCommodityExtendinfosAddModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CommodityPublicExtInfos import CommodityPublicExtInfos
class AlipayOpenServicemarketCommodityExtendinfosAddModel(object):
def __init__(self):
self._commodity_ext_infos = None
self._commodity_id = None
self._user_id = None
@property
def commodity_ext_infos(self):
return self._commodity_ext_infos
@commodity_ext_infos.setter
def commodity_ext_infos(self, value):
if isinstance(value, list):
self._commodity_ext_infos = list()
for i in value:
if isinstance(i, CommodityPublicExtInfos):
self._commodity_ext_infos.append(i)
else:
self._commodity_ext_infos.append(CommodityPublicExtInfos.from_alipay_dict(i))
@property
def commodity_id(self):
return self._commodity_id
@commodity_id.setter
def commodity_id(self, value):
self._commodity_id = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.commodity_ext_infos:
if isinstance(self.commodity_ext_infos, list):
for i in range(0, len(self.commodity_ext_infos)):
element = self.commodity_ext_infos[i]
if hasattr(element, 'to_alipay_dict'):
self.commodity_ext_infos[i] = element.to_alipay_dict()
if hasattr(self.commodity_ext_infos, 'to_alipay_dict'):
params['commodity_ext_infos'] = self.commodity_ext_infos.to_alipay_dict()
else:
params['commodity_ext_infos'] = self.commodity_ext_infos
if self.commodity_id:
if hasattr(self.commodity_id, 'to_alipay_dict'):
params['commodity_id'] = self.commodity_id.to_alipay_dict()
else:
params['commodity_id'] = self.commodity_id
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenServicemarketCommodityExtendinfosAddModel()
if 'commodity_ext_infos' in d:
o.commodity_ext_infos = d['commodity_ext_infos']
if 'commodity_id' in d:
o.commodity_id = d['commodity_id']
if 'user_id' in d:
o.user_id = d['user_id']
return o
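# Hedged round-trip sketch (payload values are illustrative only):
#
#   model = AlipayOpenServicemarketCommodityExtendinfosAddModel.from_alipay_dict({
#       "commodity_id": "C123",
#       "user_id": "2088000000000000",
#   })
#   model.to_alipay_dict()  # -> {'commodity_id': 'C123', 'user_id': '2088000000000000'}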
|
ed1036cea27c7f357885d68bfa18fd6c4cd89ee4
|
d17a8870ff8ac77b82d0d37e20c85b23aa29ca74
|
/lite/tests/unittest_py/op/test_beam_search_decode_op.py
|
61f33e0aa0142c6433a457c2fa6054aacb7b76ac
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle-Lite
|
4ab49144073451d38da6f085a8c56822caecd5b2
|
e241420f813bd91f5164f0d9ee0bc44166c0a172
|
refs/heads/develop
| 2023-09-02T05:28:14.017104
| 2023-09-01T10:32:39
| 2023-09-01T10:32:39
| 104,208,128
| 2,545
| 1,041
|
Apache-2.0
| 2023-09-12T06:46:10
| 2017-09-20T11:41:42
|
C++
|
UTF-8
|
Python
| false
| false
| 3,539
|
py
|
test_beam_search_decode_op.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
from functools import partial
import random
import numpy as np
class TestBeamSearchDecodeOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
        # Not enabled: the harness doesn't support std::vector<Tensor> inputs.
# self.enable_testing_on_place(
# TargetType.Host,
# PrecisionType.FP32,
# DataLayoutType.NCHW,
# thread=[1, 4])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
        # TensorList inputs are not supported, so every program is rejected here.
return False
def sample_program_configs(self, draw):
in_shape = draw(st.sampled_from([[5, 1]]))
lod_data = draw(st.sampled_from([[[0, 1, 2], [0, 2, 4]]]))
def generate_pre_ids(*args, **kwargs):
return np.random.random(in_shape).astype(np.int64)
def generate_pre_score(*args, **kwargs):
return np.random.random(in_shape).astype(np.float32)
beam_search_ops = OpConfig(
type="beam_search_decode",
inputs={
"Ids": ["ids_data", "ids_data2"],
"Scores": ["scores_data", "scores_data2"]
},
outputs={
"SentenceIds": ["sentence_ids_data"],
"SentenceScores": ["sentence_scores_data"]
},
attrs={"beam_size": in_shape[0],
"end_id": 0})
program_config = ProgramConfig(
ops=[beam_search_ops],
weights={},
inputs={
"ids_data": TensorConfig(
data_gen=partial(generate_pre_ids), lod=lod_data),
"ids_data2": TensorConfig(
data_gen=partial(generate_pre_ids), lod=lod_data),
"scores_data": TensorConfig(
data_gen=partial(generate_pre_score), lod=lod_data),
"scores_data2": TensorConfig(
data_gen=partial(generate_pre_score), lod=lod_data),
},
outputs=["sentence_ids_data", "sentence_scores_data"])
return program_config
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["beam_search_decode"], (1e-5,
1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main(argv=[''])
|
86b1f6f39361ee48a49e62c943ce23f1fae79f66
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/nasm/all/conanfile.py
|
f570e673ace4db779762f6d24ab40bbf58655205
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,965
|
py
|
conanfile.py
|
from conan import ConanFile
from conan.tools.env import VirtualBuildEnv
from conan.tools.files import apply_conandata_patches, chdir, copy, export_conandata_patches, get, replace_in_file, rmdir
from conan.tools.gnu import Autotools, AutotoolsToolchain
from conan.tools.layout import basic_layout
from conan.tools.microsoft import NMakeToolchain, is_msvc
import os
import shutil
required_conan_version = ">=1.55.0"
class NASMConan(ConanFile):
name = "nasm"
package_type = "application"
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://www.nasm.us"
description = "The Netwide Assembler, NASM, is an 80x86 and x86-64 assembler"
license = "BSD-2-Clause"
topics = ("asm", "installer", "assembler",)
settings = "os", "arch", "compiler", "build_type"
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
@property
def _nasm(self):
suffix = "w.exe" if is_msvc(self) else ""
return os.path.join(self.package_folder, "bin", f"nasm{suffix}")
@property
def _ndisasm(self):
suffix = "w.exe" if is_msvc(self) else ""
return os.path.join(self.package_folder, "bin", f"ndisasm{suffix}")
def _chmod_plus_x(self, filename):
if os.name == "posix":
os.chmod(filename, os.stat(filename).st_mode | 0o111)
def export_sources(self):
export_conandata_patches(self)
def configure(self):
self.settings.rm_safe("compiler.libcxx")
self.settings.rm_safe("compiler.cppstd")
def layout(self):
basic_layout(self, src_folder="src")
def package_id(self):
del self.info.settings.compiler
def build_requirements(self):
if self._settings_build.os == "Windows":
self.tool_requires("strawberryperl/5.32.1.1")
if not is_msvc(self):
self.win_bash = True
if not self.conf.get("tools.microsoft.bash:path", check_type=str):
self.tool_requires("msys2/cci.latest")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
env = VirtualBuildEnv(self)
env.generate()
if is_msvc(self):
tc = NMakeToolchain(self)
tc.generate()
else:
tc = AutotoolsToolchain(self)
if self.settings.arch == "x86":
tc.extra_cflags.append("-m32")
elif self.settings.arch == "x86_64":
tc.extra_cflags.append("-m64")
tc.generate()
def build(self):
apply_conandata_patches(self)
if is_msvc(self):
with chdir(self, self.source_folder):
self.run(f'nmake /f {os.path.join("Mkfiles", "msvc.mak")}')
else:
autotools = Autotools(self)
autotools.configure()
            # GCC 9 rejects the "pure" attribute on a function returning "void";
            # drop -Werror=attributes so the build continues.
replace_in_file(self, "Makefile", "-Werror=attributes", "")
# Need "-arch" flag for the linker when cross-compiling.
# FIXME: Revisit after https://github.com/conan-io/conan/issues/9069, using new Autotools integration
# TODO it is time to revisit, not sure what to do here though...
if str(self.version).startswith("2.13"):
replace_in_file(self, "Makefile", "$(CC) $(LDFLAGS) -o", "$(CC) $(ALL_CFLAGS) $(LDFLAGS) -o")
replace_in_file(self, "Makefile", "$(INSTALLROOT)", "$(DESTDIR)")
autotools.make()
def package(self):
copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
if is_msvc(self):
copy(self, pattern="*.exe", src=self.source_folder, dst=os.path.join(self.package_folder, "bin"), keep_path=False)
with chdir(self, os.path.join(self.package_folder, "bin")):
shutil.copy2("nasm.exe", "nasmw.exe")
shutil.copy2("ndisasm.exe", "ndisasmw.exe")
else:
autotools = Autotools(self)
autotools.install()
rmdir(self, os.path.join(self.package_folder, "share"))
self._chmod_plus_x(self._nasm)
self._chmod_plus_x(self._ndisasm)
def package_info(self):
self.cpp_info.libdirs = []
self.cpp_info.includedirs = []
compiler_executables = {"asm": self._nasm}
self.conf_info.update("tools.build:compiler_executables", compiler_executables)
self.buildenv_info.define_path("NASM", self._nasm)
self.buildenv_info.define_path("NDISASM", self._ndisasm)
self.buildenv_info.define_path("AS", self._nasm)
# TODO: Legacy, to be removed on Conan 2.0
self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
self.env_info.NASM = self._nasm
self.env_info.NDISASM = self._ndisasm
self.env_info.AS = self._nasm
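# Hedged local-build sketch (Conan 2 CLI; the version shown is illustrative):
#
#   conan create . --version=2.15.05
#
# Since the recipe only ships tool binaries, consumers would typically pull it
# in via `self.tool_requires("nasm/<version>")` rather than `requires`.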
|
3c6487e018dc23198738d2720651266e9eb33ab8
|
06113cbe5e7ee089617755ea16ff29863b4572ad
|
/airflow_code_editor/app_builder_view.py
|
183c02f3b0a9a5b48ebdc55952cc0b5160562ba5
|
[
"Apache-2.0"
] |
permissive
|
andreax79/airflow-code-editor
|
53e17cfafe4bcecb5125d7125a6848ed430f3f03
|
19067c2db89bc14d2b8f2d9b9eff30daabff860e
|
refs/heads/main
| 2023-08-31T14:45:06.663022
| 2023-08-28T07:44:16
| 2023-08-28T07:46:35
| 200,844,662
| 322
| 52
|
Apache-2.0
| 2023-07-25T06:26:47
| 2019-08-06T12:13:46
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 6,014
|
py
|
app_builder_view.py
|
#!/usr/bin/env python
#
# Copyright 2019 Andrea Bonomi <andrea.bonomi@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request
from flask_appbuilder import BaseView, expose
from airflow_code_editor.code_editor_view import AbstractCodeEditorView
from airflow_code_editor.commons import (
ROUTE,
MENU_CATEGORY,
MENU_LABEL,
JS_FILES,
VERSION,
)
__all__ = ["appbuilder_view"]
try:
from airflow.www import auth
from airflow.security import permissions
PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
]
# ############################################################################
# AppBuilder (Airflow >= 2.0)
class AppBuilderCodeEditorView(BaseView, AbstractCodeEditorView):
route_base = ROUTE
base_permissions = ["can_list", "can_create", "menu_acccess"]
@expose("/")
@auth.has_access(PERMISSIONS)
def list(self):
return self._index()
@expose("/repo", methods=["POST"])
@auth.has_access(PERMISSIONS)
def repo_base(self, path=None):
return self._git_repo(path)
@expose("/repo/<path:path>", methods=["GET", "HEAD", "POST"])
@auth.has_access(PERMISSIONS)
def repo(self, path=None):
return self._git_repo(path)
@expose("/files/<path:path>", methods=["POST"])
@auth.has_access(PERMISSIONS)
def save(self, path=None):
return self._save(path)
@expose("/files/<path:path>", methods=["GET"])
@auth.has_access(PERMISSIONS)
def load(self, path=None):
return self._load(path)
@expose("/format", methods=["POST"])
@auth.has_access(PERMISSIONS)
def format(self):
return self._format()
@expose("/tree", methods=["GET", "HEAD"])
@auth.has_access(PERMISSIONS)
def tree_base(self, path=None):
return self._tree(path, args=request.args, method=request.method)
@expose("/tree/<path:path>", methods=["GET", "HEAD"])
@auth.has_access(PERMISSIONS)
def tree(self, path=None):
return self._tree(path, args=request.args, method=request.method)
@expose("/search", methods=["GET"])
@auth.has_access(PERMISSIONS)
def search(self):
return self._search(args=request.args)
@expose("/ping", methods=["GET"])
@auth.has_access(PERMISSIONS)
def ping(self):
return self._ping()
def _render(self, template, *args, **kargs):
return self.render_template(
template + "_appbuilder.html",
airflow_major_version=self.airflow_major_version,
js_files=JS_FILES,
version=VERSION,
*args,
**kargs
)
except (ImportError, ModuleNotFoundError):
from airflow_code_editor.auth import has_access
from airflow.www_rbac.decorators import has_dag_access
# ############################################################################
# AppBuilder (Airflow >= 1.10 < 2.0 and rbac = True)
class AppBuilderCodeEditorView(BaseView, AbstractCodeEditorView):
route_base = ROUTE
base_permissions = ["can_list"]
@expose("/")
@has_dag_access(can_dag_edit=True)
@has_access
def list(self):
return self._index()
@expose("/repo", methods=["POST"])
@has_dag_access(can_dag_edit=True)
def repo_base(self, path=None):
return self._git_repo(path)
@expose("/repo/<path:path>", methods=["GET", "HEAD", "POST"])
@has_dag_access(can_dag_edit=True)
def repo(self, path=None):
return self._git_repo(path)
@expose("/files/<path:path>", methods=["POST"])
@has_dag_access(can_dag_edit=True)
def save(self, path=None):
return self._save(path)
@expose("/files/<path:path>", methods=["GET"])
@has_dag_access(can_dag_edit=True)
def load(self, path=None):
return self._load(path)
@expose("/format", methods=["POST"])
@has_dag_access(can_dag_edit=True)
def format(self):
return self._format()
@expose("/tree", methods=["GET"])
@has_dag_access(can_dag_edit=True)
def tree_base(self, path=None):
return self._tree(path, args=request.args, method=request.method)
@expose("/tree/<path:path>", methods=["GET"])
@has_dag_access(can_dag_edit=True)
def tree(self, path=None):
return self._tree(path, args=request.args, method=request.method)
@expose("/search", methods=["GET"])
@has_dag_access(can_dag_edit=True)
def search(self):
return self._search(args=request.args)
@expose("/ping", methods=["GET"])
def ping(self):
return self._ping()
def _render(self, template, *args, **kargs):
return self.render_template(
template + "_appbuilder.html",
airflow_major_version=self.airflow_major_version,
js_files=JS_FILES,
version=VERSION,
*args,
**kargs
)
appbuilder_code_editor_view = AppBuilderCodeEditorView()
appbuilder_view = {
"category": MENU_CATEGORY,
"name": MENU_LABEL,
"view": appbuilder_code_editor_view,
}
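# Hedged registration sketch: a dict shaped like appbuilder_view is what
# Airflow's plugin manager consumes, e.g. in a (hypothetical) plugin module:
#
#   from airflow.plugins_manager import AirflowPlugin
#
#   class CodeEditorPlugin(AirflowPlugin):
#       name = "code_editor_plugin"
#       appbuilder_views = [appbuilder_view]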
|
002d234ded066c254bb2464decf1c2c2cd0abe52
|
e1c27be4a9242b31d4e950b31b5b50cd4279b520
|
/crhelper/utils.pyi
|
e82c77b73f3281a40e204d0e4da92f125144f540
|
[
"Apache-2.0"
] |
permissive
|
aws-cloudformation/custom-resource-helper
|
75c82fb100bcec2bafcc09aaa9053ef0cac0dbb8
|
f2095c5d11e5d3060f6e210b76313cea5f97bc8e
|
refs/heads/main
| 2022-12-24T08:35:00.237474
| 2022-06-07T02:23:12
| 2022-06-07T02:23:12
| 169,458,597
| 350
| 61
|
Apache-2.0
| 2022-12-17T16:22:07
| 2019-02-06T18:53:12
|
Python
|
UTF-8
|
Python
| false
| false
| 36
|
pyi
|
utils.pyi
|
from typing import Any
logger: Any
|
7bb3f43454c38b59cb99ff45fe04941c282ac275
|
88b6d330080adb08d9599f32e6ef18433312c3b9
|
/testsuite/style/util.py
|
96a280708c7183a497798e4a1a1299c5e2b52d40
|
[] |
no_license
|
polserver/polserver
|
bf6dc55c9ee666674957b25db6bf8bd3a69519b8
|
e6c93ad709abbe1f0d671ec4b090a93ee6064fc7
|
refs/heads/master
| 2023-08-30T23:56:11.655874
| 2023-08-30T10:42:59
| 2023-08-30T10:42:59
| 22,833,801
| 129
| 83
| null | 2023-09-07T14:59:14
| 2014-08-11T09:28:41
|
C++
|
UTF-8
|
Python
| false
| false
| 2,298
|
py
|
util.py
|
'''
Common utilities for the testsuite
@author Bodom
'''
import os
import abc
import subprocess
class Skip():
    ''' Sentinel class: SKIP instances mark files that should be skipped '''
pass
class SourceChecker(metaclass=abc.ABCMeta):
''' A generic script running checks over the whole source tree '''
# Folders to ignore, relative to root
IGNORE = ('.git', 'bin-build', 'bin', 'lib', 'doc', 'testsuite')
# Description of what is being checked
WHAT = ''
SKIP = Skip()
def __init__(self, quiet=False):
self.quiet = quiet
mydir = os.path.dirname(os.path.realpath(__file__))
self.polroot = os.path.realpath(os.path.join(mydir, '..', '..', 'pol-core'))
def isIgnored(self, full):
rel = os.path.relpath(full, self.polroot)
for ign in self.IGNORE:
if rel.startswith(ign):
return True
return False
def walk(self, path):
        ''' Yield file paths recursively, skipping self.IGNORE and git-ignored directories '''
for entry in os.listdir(path):
full = os.path.join(path, entry)
if self.isIgnored(full):
continue
if os.path.isdir(full):
if self.isGitIgnored(full):
continue
yield from self.walk(full)
elif os.path.isfile(full):
yield full
else:
raise NotImplementedError()
def run(self):
''' Runs checks over all real source files
@return True on success, False on error
'''
print("Checking {}, POL root is {}".format(self.WHAT, self.polroot))
analyzed = 0
ignored = 0
errors = 0
for fpath in self.walk(self.polroot):
if self.isGitIgnored(fpath):
ignored += 1
continue
base, ext = os.path.splitext(fpath)
res = self.checkFile(fpath, ext)
if res is self.SKIP:
continue
analyzed += 1
if not res:
errors += 1
print()
print("Done. {} files analyzed, {} errors, {} ignored.".format(analyzed, errors, ignored))
if errors:
return False
return True
@abc.abstractmethod
def checkFile(self, path, ext):
''' Checks file for validity
@param path string: The full file path
@param ext string: The file extension
        @return True on success, False on error, self.SKIP to skip this file
'''
return False
def isGitIgnored(self, path):
''' Checks if file is ignored by git '''
cmd = ('git', 'check-ignore', '-q', path)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
po, pe = proc.communicate()
return proc.returncode == 0
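# Hedged sketch of a concrete checker (hypothetical, for illustration):
# a subclass only implements checkFile(), returning True/False for files it
# understands and self.SKIP for everything else.
#
#   class TrailingWhitespaceChecker(SourceChecker):
#       WHAT = 'trailing whitespace'
#
#       def checkFile(self, path, ext):
#           if ext not in ('.cpp', '.h'):
#               return self.SKIP
#           with open(path, 'rb') as f:
#               return all(line.rstrip() == line.rstrip(b'\r\n') for line in f)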
|
f27c32c92a63eb7cceb001b96d172f100f9e2355
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Tensorflow_Pandas_Numpy/source3.6/pandas/tseries/frequencies.py
|
0cffd818202ed639076b24c9cf54c1fa42e921c2
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 8,384
|
py
|
frequencies.py
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from pandas.compat import zip
from pandas import compat
import re
import numpy as np
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import (
is_period_arraylike,
is_timedelta64_dtype,
is_datetime64_dtype)
from pandas.tseries.offsets import DateOffset
from pandas._libs.tslib import Timedelta
import pandas._libs.tslibs.frequencies as libfreqs
from pandas._libs.tslibs.frequencies import ( # noqa, semi-public API
get_freq, get_base_alias, get_to_timestamp_base, get_freq_code,
FreqGroup,
is_subperiod, is_superperiod)
from pandas._libs.tslibs.resolution import (Resolution,
_FrequencyInferer,
_TimedeltaFrequencyInferer)
from pytz import AmbiguousTimeError
RESO_NS = 0
RESO_US = 1
RESO_MS = 2
RESO_SEC = 3
RESO_MIN = 4
RESO_HR = 5
RESO_DAY = 6
# ---------------------------------------------------------------------
# Offset names ("time rules") and related functions
from pandas._libs.tslibs.offsets import _offset_to_period_map # noqa:E402
from pandas.tseries.offsets import (Nano, Micro, Milli, Second, # noqa
Minute, Hour,
Day, BDay, CDay, Week, MonthBegin,
MonthEnd, BMonthBegin, BMonthEnd,
QuarterBegin, QuarterEnd, BQuarterBegin,
BQuarterEnd, YearBegin, YearEnd,
BYearBegin, BYearEnd, prefix_mapping)
try:
cday = CDay()
except NotImplementedError:
cday = None
#: cache of previously seen offsets
_offset_map = {}
def get_period_alias(offset_str):
""" alias to closest period strings BQ->Q etc"""
return _offset_to_period_map.get(offset_str, None)
_name_to_offset_map = {'days': Day(1),
'hours': Hour(1),
'minutes': Minute(1),
'seconds': Second(1),
'milliseconds': Milli(1),
'microseconds': Micro(1),
'nanoseconds': Nano(1)}
def to_offset(freq):
"""
Return DateOffset object from string or tuple representation
or datetime.timedelta object
Parameters
----------
freq : str, tuple, datetime.timedelta, DateOffset or None
Returns
-------
delta : DateOffset
None if freq is None
Raises
------
ValueError
If freq is an invalid frequency
See Also
--------
pandas.DateOffset
Examples
--------
>>> to_offset('5min')
<5 * Minutes>
>>> to_offset('1D1H')
<25 * Hours>
>>> to_offset(('W', 2))
<2 * Weeks: weekday=6>
>>> to_offset((2, 'B'))
<2 * BusinessDays>
>>> to_offset(datetime.timedelta(days=1))
<Day>
>>> to_offset(Hour())
<Hour>
"""
if freq is None:
return None
if isinstance(freq, DateOffset):
return freq
if isinstance(freq, tuple):
name = freq[0]
stride = freq[1]
if isinstance(stride, compat.string_types):
name, stride = stride, name
name, _ = libfreqs._base_and_stride(name)
delta = get_offset(name) * stride
elif isinstance(freq, timedelta):
delta = None
freq = Timedelta(freq)
try:
for name in freq.components._fields:
offset = _name_to_offset_map[name]
stride = getattr(freq.components, name)
if stride != 0:
offset = stride * offset
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError(libfreqs._INVALID_FREQ_ERROR.format(freq))
else:
delta = None
stride_sign = None
try:
splitted = re.split(libfreqs.opattern, freq)
if splitted[-1] != '' and not splitted[-1].isspace():
# the last element must be blank
raise ValueError('last element must be blank')
for sep, stride, name in zip(splitted[0::4], splitted[1::4],
splitted[2::4]):
if sep != '' and not sep.isspace():
raise ValueError('separator must be spaces')
prefix = libfreqs._lite_rule_alias.get(name) or name
if stride_sign is None:
stride_sign = -1 if stride.startswith('-') else 1
if not stride:
stride = 1
if prefix in Resolution._reso_str_bump_map.keys():
stride, name = Resolution.get_stride_from_decimal(
float(stride), prefix
)
stride = int(stride)
offset = get_offset(name)
offset = offset * int(np.fabs(stride) * stride_sign)
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError(libfreqs._INVALID_FREQ_ERROR.format(freq))
if delta is None:
raise ValueError(libfreqs._INVALID_FREQ_ERROR.format(freq))
return delta
def get_offset(name):
"""
Return DateOffset object associated with rule name
Examples
--------
get_offset('EOM') --> BMonthEnd(1)
"""
if name not in libfreqs._dont_uppercase:
name = name.upper()
name = libfreqs._lite_rule_alias.get(name, name)
name = libfreqs._lite_rule_alias.get(name.lower(), name)
else:
name = libfreqs._lite_rule_alias.get(name, name)
if name not in _offset_map:
try:
split = name.split('-')
klass = prefix_mapping[split[0]]
# handles case where there's no suffix (and will TypeError if too
# many '-')
offset = klass._from_name(*split[1:])
except (ValueError, TypeError, KeyError):
# bad prefix or suffix
raise ValueError(libfreqs._INVALID_FREQ_ERROR.format(name))
# cache
_offset_map[name] = offset
# do not return cache because it's mutable
return _offset_map[name].copy()
getOffset = get_offset
# ---------------------------------------------------------------------
# Period codes
def infer_freq(index, warn=True):
"""
Infer the most likely frequency given the input index. If the frequency is
uncertain, a warning will be printed.
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
if passed a Series will use the values of the series (NOT THE INDEX)
warn : boolean, default True
    Returns
    -------
    freq : string or None
        None if no discernible frequency.
    Raises
    ------
    TypeError
        If the index is not datetime-like.
    ValueError
        If there are fewer than three values.
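    Examples
    --------
    >>> pd.infer_freq(pd.date_range("2020-01-01", periods=5, freq="D"))
    'D'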
"""
import pandas as pd
if isinstance(index, ABCSeries):
values = index._values
if not (is_datetime64_dtype(values) or
is_timedelta64_dtype(values) or
values.dtype == object):
raise TypeError("cannot infer freq from a non-convertible dtype "
"on a Series of {dtype}".format(dtype=index.dtype))
index = values
if is_period_arraylike(index):
raise TypeError("PeriodIndex given. Check the `freq` attribute "
"instead of using infer_freq.")
elif isinstance(index, pd.TimedeltaIndex):
inferer = _TimedeltaFrequencyInferer(index, warn=warn)
return inferer.get_freq()
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError("cannot infer freq from a non-convertible index "
"type {type}".format(type=type(index)))
index = index.values
if not isinstance(index, pd.DatetimeIndex):
try:
index = pd.DatetimeIndex(index)
except AmbiguousTimeError:
index = pd.DatetimeIndex(index.asi8)
inferer = _FrequencyInferer(index, warn=warn)
return inferer.get_freq()
|
5cb555a53154e734aff3570f4a8ad046f1a3017a
|
9317a3ce2d972e65d4c08242606e10ad99c93c23
|
/src/autogpt_plugins/planner/planner.py
|
b708085a8a9385c249938f8e363e100c86debceb
|
[
"MIT"
] |
permissive
|
Significant-Gravitas/Auto-GPT-Plugins
|
c751b43fedf34a6e157e09c8b5cd5f9f4e3d17ac
|
397589394138f9ed43660b1aa99d501a8bfff061
|
refs/heads/master
| 2023-08-24T12:28:02.320683
| 2023-06-29T07:09:16
| 2023-06-29T07:09:16
| 625,411,260
| 3,377
| 499
|
MIT
| 2023-09-01T18:31:29
| 2023-04-09T02:54:14
|
Python
|
UTF-8
|
Python
| false
| false
| 4,416
|
py
|
planner.py
|
import json
import os
def check_plan():
"""this function checks if the file plan.md exists, if it doesn't exist it gets created"""
current_working_directory = os.getcwd()
workdir = os.path.join(
current_working_directory, "autogpt", "auto_gpt_workspace", "plan.md"
)
file_name = workdir
if not os.path.exists(file_name):
with open(file_name, "w") as file:
file.write(
"""
# Task List and status:
- [ ] Create a detailed checklist for the current plan and goals
- [ ] Finally, review that every new task is completed
## Notes:
- Use the run_planning_cycle command frequently to keep this plan up to date.
"""
)
print(f"{file_name} created.")
with open(file_name, "r") as file:
return file.read()
def update_plan():
"""this function checks if the file plan.md exists, if it doesn't exist it gets created"""
current_working_directory = os.getcwd()
workdir = os.path.join(current_working_directory, 'autogpt', 'auto_gpt_workspace', 'plan.md')
file_name = workdir
with open(file_name, 'r') as file:
data = file.read()
response = generate_improved_plan(data)
with open(file_name, "w") as file:
file.write(response)
print(f"{file_name} updated.")
return response
def generate_improved_plan(prompt: str) -> str:
"""Generate an improved plan using OpenAI's ChatCompletion functionality"""
import openai
tasks = load_tasks()
model = os.getenv('PLANNER_MODEL', os.getenv('FAST_LLM_MODEL', 'gpt-3.5-turbo'))
max_tokens = os.getenv('PLANNER_TOKEN_LIMIT', os.getenv('FAST_TOKEN_LIMIT', 1500))
temperature = os.getenv('PLANNER_TEMPERATURE', os.getenv('TEMPERATURE', 0.5))
# Call the OpenAI API for chat completion
response = openai.ChatCompletion.create(
model=model,
messages=[
{
"role": "system",
"content": "You are an assistant that improves and adds crucial points to plans in .md format.",
},
{
"role": "user",
"content": f"Update the following plan given the task status below, keep the .md format:\n{prompt}\n"
f"Include the current tasks in the improved plan, keep mind of their status and track them "
f"with a checklist:\n{tasks}\n Revised version should comply with the contents of the "
f"tasks at hand:",
},
],
max_tokens=int(max_tokens),
n=1,
temperature=float(temperature),
)
# Extract the improved plan from the response
improved_plan = response.choices[0].message.content.strip()
return improved_plan
def create_task(task_id=None, task_description: str = None, status=False):
task = {"description": task_description, "completed": status}
tasks = load_tasks()
tasks[str(task_id)] = task
current_working_directory = os.getcwd()
workdir = os.path.join(
current_working_directory, "autogpt", "auto_gpt_workspace", "tasks.json"
)
file_name = workdir
with open(file_name, "w") as f:
json.dump(tasks, f)
return tasks
def load_tasks() -> dict:
current_working_directory = os.getcwd()
workdir = os.path.join(
current_working_directory, "autogpt", "auto_gpt_workspace", "tasks.json"
)
file_name = workdir
if not os.path.exists(file_name):
with open(file_name, "w") as f:
f.write("{}")
with open(file_name) as f:
try:
tasks = json.load(f)
if isinstance(tasks, list):
tasks = {}
except json.JSONDecodeError:
tasks = {}
return tasks
def update_task_status(task_id):
tasks = load_tasks()
if str(task_id) not in tasks:
print(f"Task with ID {task_id} not found.")
return
tasks[str(task_id)]["completed"] = True
current_working_directory = os.getcwd()
workdir = os.path.join(
current_working_directory, "autogpt", "auto_gpt_workspace", "tasks.json"
)
file_name = workdir
with open(file_name, "w") as f:
json.dump(tasks, f)
return f"Task with ID {task_id} has been marked as completed."
|
24c5f2de48b5ab2515068fb76dc4feaa57456eab
|
6a63f3baf8a5a54bba1974acd3a4dc14f486eb88
|
/pyexcel/plugins/parsers/django.py
|
4a0459180d7d33c851864c8a33f6e069e98efec3
|
[
"BSD-3-Clause"
] |
permissive
|
pyexcel/pyexcel
|
7ecda49447a7576119441a9a72fb5ecbd46f9ec0
|
f1bf5fe0f9119b64c9a87ad57d9fc771cbc1a52d
|
refs/heads/dev
| 2023-08-04T14:33:11.890759
| 2023-07-16T11:19:18
| 2023-07-16T11:19:18
| 23,225,199
| 1,190
| 182
|
NOASSERTION
| 2023-07-18T00:16:44
| 2014-08-22T12:52:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
django.py
|
"""
pyexcel.plugin.parsers.django
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Export data from django models
:copyright: (c) 2015-2022 by Onni Software Ltd.
:license: New BSD License
"""
from pyexcel_io import get_data, iget_data
from pyexcel_io.database import common as django
from pyexcel.parser import DbParser
class DjangoExporter(DbParser):
"""Export data from django model"""
def parse_db(
self, argument, export_columns_list=None, on_demand=True, **keywords
):
models = argument
exporter = django.DjangoModelExporter()
if export_columns_list is None:
export_columns_list = [None] * len(models)
for model, export_columns in zip(models, export_columns_list):
adapter = django.DjangoModelExportAdapter(model, export_columns)
exporter.append(adapter)
if on_demand:
sheets, _ = iget_data(
exporter, file_type=self._file_type, **keywords
)
else:
sheets = get_data(exporter, file_type=self._file_type, **keywords)
return sheets
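# Hedged usage sketch: this parser is registered behind pyexcel's public API,
# so inside a Django project callers normally go through something like the
# following (model names are hypothetical):
#
#   import pyexcel as pe
#   book = pe.get_book(models=[Question, Choice])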
|
2ac2f91749ce3c7aae9b9aeb0869a18b6fcd176b
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/airflow/api_connexion/endpoints/dag_run_endpoint.py
|
d56fd469638cabc253b23ced0176c67af70c060a
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 17,301
|
py
|
dag_run_endpoint.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from http import HTTPStatus
from typing import TYPE_CHECKING
import pendulum
from connexion import NoContent
from flask import g
from marshmallow import ValidationError
from sqlalchemy import delete, or_, select
from airflow.api.common.mark_tasks import (
set_dag_run_state_to_failed,
set_dag_run_state_to_queued,
set_dag_run_state_to_success,
)
from airflow.api_connexion import security
from airflow.api_connexion.endpoints.request_dict import get_json_request_dict
from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound
from airflow.api_connexion.parameters import (
apply_sorting,
check_limit,
format_datetime,
format_parameters,
)
from airflow.api_connexion.schemas.dag_run_schema import (
DAGRunCollection,
clear_dagrun_form_schema,
dagrun_collection_schema,
dagrun_schema,
dagruns_batch_form_schema,
set_dagrun_note_form_schema,
set_dagrun_state_form_schema,
)
from airflow.api_connexion.schemas.dataset_schema import (
DatasetEventCollection,
dataset_event_collection_schema,
)
from airflow.api_connexion.schemas.task_instance_schema import (
TaskInstanceReferenceCollection,
task_instance_reference_collection_schema,
)
from airflow.models import DagModel, DagRun
from airflow.security import permissions
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.utils.db import get_query_count
from airflow.utils.log.action_logger import action_event_from_permission
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import DagRunState
from airflow.utils.types import DagRunType
from airflow.www.decorators import action_logging
from airflow.www.extensions.init_auth_manager import get_auth_manager
if TYPE_CHECKING:
from sqlalchemy.orm import Session
from sqlalchemy.sql import Select
from airflow.api_connexion.types import APIResponse
RESOURCE_EVENT_PREFIX = "dag_run"
@security.requires_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
def delete_dag_run(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Delete a DAG Run."""
deleted_count = session.execute(
delete(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id)
).rowcount
if deleted_count == 0:
raise NotFound(detail=f"DAGRun with DAG ID: '{dag_id}' and DagRun ID: '{dag_run_id}' not found")
return NoContent, HTTPStatus.NO_CONTENT
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
def get_dag_run(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Get a DAG Run."""
dag_run = session.scalar(select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id))
if dag_run is None:
raise NotFound(
"DAGRun not found",
detail=f"DAGRun with DAG ID: '{dag_id}' and DagRun ID: '{dag_run_id}' not found",
)
return dagrun_schema.dump(dag_run)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET),
],
)
@provide_session
def get_upstream_dataset_events(
*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION
) -> APIResponse:
"""If dag run is dataset-triggered, return the dataset events that triggered it."""
dag_run: DagRun | None = session.scalar(
select(DagRun).where(
DagRun.dag_id == dag_id,
DagRun.run_id == dag_run_id,
)
)
if dag_run is None:
raise NotFound(
"DAGRun not found",
detail=f"DAGRun with DAG ID: '{dag_id}' and DagRun ID: '{dag_run_id}' not found",
)
events = dag_run.consumed_dataset_events
return dataset_event_collection_schema.dump(
DatasetEventCollection(dataset_events=events, total_entries=len(events))
)
def _fetch_dag_runs(
query: Select,
*,
end_date_gte: str | None,
end_date_lte: str | None,
execution_date_gte: str | None,
execution_date_lte: str | None,
start_date_gte: str | None,
start_date_lte: str | None,
updated_at_gte: str | None = None,
updated_at_lte: str | None = None,
limit: int | None,
offset: int | None,
order_by: str,
session: Session,
) -> tuple[list[DagRun], int]:
if start_date_gte:
query = query.where(DagRun.start_date >= start_date_gte)
if start_date_lte:
query = query.where(DagRun.start_date <= start_date_lte)
# filter execution date
if execution_date_gte:
query = query.where(DagRun.execution_date >= execution_date_gte)
if execution_date_lte:
query = query.where(DagRun.execution_date <= execution_date_lte)
# filter end date
if end_date_gte:
query = query.where(DagRun.end_date >= end_date_gte)
if end_date_lte:
query = query.where(DagRun.end_date <= end_date_lte)
# filter updated at
if updated_at_gte:
query = query.where(DagRun.updated_at >= updated_at_gte)
if updated_at_lte:
query = query.where(DagRun.updated_at <= updated_at_lte)
total_entries = get_query_count(query, session=session)
to_replace = {"dag_run_id": "run_id"}
allowed_filter_attrs = [
"id",
"state",
"dag_id",
"execution_date",
"dag_run_id",
"start_date",
"end_date",
"updated_at",
"external_trigger",
"conf",
]
query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)
return session.scalars(query.offset(offset).limit(limit)).all(), total_entries
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
],
)
@format_parameters(
{
"start_date_gte": format_datetime,
"start_date_lte": format_datetime,
"execution_date_gte": format_datetime,
"execution_date_lte": format_datetime,
"end_date_gte": format_datetime,
"end_date_lte": format_datetime,
"updated_at_gte": format_datetime,
"updated_at_lte": format_datetime,
"limit": check_limit,
}
)
@provide_session
def get_dag_runs(
*,
dag_id: str,
start_date_gte: str | None = None,
start_date_lte: str | None = None,
execution_date_gte: str | None = None,
execution_date_lte: str | None = None,
end_date_gte: str | None = None,
end_date_lte: str | None = None,
updated_at_gte: str | None = None,
updated_at_lte: str | None = None,
state: list[str] | None = None,
offset: int | None = None,
limit: int | None = None,
order_by: str = "id",
session: Session = NEW_SESSION,
):
"""Get all DAG Runs."""
query = select(DagRun)
# This endpoint allows specifying ~ as the dag_id to retrieve DAG Runs for all DAGs.
if dag_id == "~":
appbuilder = get_airflow_app().appbuilder
query = query.where(DagRun.dag_id.in_(appbuilder.sm.get_readable_dag_ids(g.user)))
else:
query = query.where(DagRun.dag_id == dag_id)
if state:
query = query.where(DagRun.state.in_(state))
dag_run, total_entries = _fetch_dag_runs(
query,
end_date_gte=end_date_gte,
end_date_lte=end_date_lte,
execution_date_gte=execution_date_gte,
execution_date_lte=execution_date_lte,
start_date_gte=start_date_gte,
start_date_lte=start_date_lte,
updated_at_gte=updated_at_gte,
updated_at_lte=updated_at_lte,
limit=limit,
offset=offset,
order_by=order_by,
session=session,
)
return dagrun_collection_schema.dump(DAGRunCollection(dag_runs=dag_run, total_entries=total_entries))
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
def get_dag_runs_batch(*, session: Session = NEW_SESSION) -> APIResponse:
"""Get list of DAG Runs."""
body = get_json_request_dict()
try:
data = dagruns_batch_form_schema.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
appbuilder = get_airflow_app().appbuilder
readable_dag_ids = appbuilder.sm.get_readable_dag_ids(g.user)
query = select(DagRun)
if data.get("dag_ids"):
dag_ids = set(data["dag_ids"]) & set(readable_dag_ids)
query = query.where(DagRun.dag_id.in_(dag_ids))
else:
query = query.where(DagRun.dag_id.in_(readable_dag_ids))
states = data.get("states")
if states:
query = query.where(DagRun.state.in_(states))
dag_runs, total_entries = _fetch_dag_runs(
query,
end_date_gte=data["end_date_gte"],
end_date_lte=data["end_date_lte"],
execution_date_gte=data["execution_date_gte"],
execution_date_lte=data["execution_date_lte"],
start_date_gte=data["start_date_gte"],
start_date_lte=data["start_date_lte"],
limit=data["page_limit"],
offset=data["page_offset"],
order_by=data.get("order_by", "id"),
session=session,
)
return dagrun_collection_schema.dump(DAGRunCollection(dag_runs=dag_runs, total_entries=total_entries))
@security.requires_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
@action_logging(
event=action_event_from_permission(
prefix=RESOURCE_EVENT_PREFIX,
permission=permissions.ACTION_CAN_CREATE,
),
)
def post_dag_run(*, dag_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Trigger a DAG."""
dm = session.scalar(select(DagModel).where(DagModel.is_active, DagModel.dag_id == dag_id).limit(1))
if not dm:
raise NotFound(title="DAG not found", detail=f"DAG with dag_id: '{dag_id}' not found")
if dm.has_import_errors:
raise BadRequest(
title="DAG cannot be triggered",
detail=f"DAG with dag_id: '{dag_id}' has import errors",
)
try:
post_body = dagrun_schema.load(get_json_request_dict(), session=session)
except ValidationError as err:
raise BadRequest(detail=str(err))
logical_date = pendulum.instance(post_body["execution_date"])
run_id = post_body["run_id"]
dagrun_instance = session.scalar(
select(DagRun)
.where(
DagRun.dag_id == dag_id,
or_(DagRun.run_id == run_id, DagRun.execution_date == logical_date),
)
.limit(1)
)
if not dagrun_instance:
try:
dag = get_airflow_app().dag_bag.get_dag(dag_id)
dag_run = dag.create_dagrun(
run_type=DagRunType.MANUAL,
run_id=run_id,
execution_date=logical_date,
data_interval=dag.timetable.infer_manual_data_interval(run_after=logical_date),
state=DagRunState.QUEUED,
conf=post_body.get("conf"),
external_trigger=True,
dag_hash=get_airflow_app().dag_bag.dags_hash.get(dag_id),
session=session,
)
dag_run_note = post_body.get("note")
if dag_run_note:
current_user_id = get_auth_manager().get_user_id()
dag_run.note = (dag_run_note, current_user_id)
return dagrun_schema.dump(dag_run)
except ValueError as ve:
raise BadRequest(detail=str(ve))
if dagrun_instance.execution_date == logical_date:
raise AlreadyExists(
detail=(
f"DAGRun with DAG ID: '{dag_id}' and "
f"DAGRun logical date: '{logical_date.isoformat(sep=' ')}' already exists"
),
)
raise AlreadyExists(detail=f"DAGRun with DAG ID: '{dag_id}' and DAGRun ID: '{run_id}' already exists")
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
def update_dag_run_state(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Set a state of a dag run."""
dag_run: DagRun | None = session.scalar(
select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id)
)
if dag_run is None:
error_message = f"Dag Run id {dag_run_id} not found in dag {dag_id}"
raise NotFound(error_message)
try:
post_body = set_dagrun_state_form_schema.load(get_json_request_dict())
except ValidationError as err:
raise BadRequest(detail=str(err))
state = post_body["state"]
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if state == DagRunState.SUCCESS:
set_dag_run_state_to_success(dag=dag, run_id=dag_run.run_id, commit=True)
elif state == DagRunState.QUEUED:
set_dag_run_state_to_queued(dag=dag, run_id=dag_run.run_id, commit=True)
else:
set_dag_run_state_to_failed(dag=dag, run_id=dag_run.run_id, commit=True)
dag_run = session.get(DagRun, dag_run.id)
return dagrun_schema.dump(dag_run)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
def clear_dag_run(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Clear a dag run."""
dag_run: DagRun | None = session.scalar(
select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id)
)
if dag_run is None:
error_message = f"Dag Run id {dag_run_id} not found in dag {dag_id}"
raise NotFound(error_message)
try:
post_body = clear_dagrun_form_schema.load(get_json_request_dict())
except ValidationError as err:
raise BadRequest(detail=str(err))
dry_run = post_body.get("dry_run", False)
dag = get_airflow_app().dag_bag.get_dag(dag_id)
start_date = dag_run.logical_date
end_date = dag_run.logical_date
if dry_run:
task_instances = dag.clear(
start_date=start_date,
end_date=end_date,
task_ids=None,
include_subdags=True,
include_parentdag=True,
only_failed=False,
dry_run=True,
)
return task_instance_reference_collection_schema.dump(
TaskInstanceReferenceCollection(task_instances=task_instances)
)
else:
dag.clear(
start_date=start_date,
end_date=end_date,
task_ids=None,
include_subdags=True,
include_parentdag=True,
only_failed=False,
)
dag_run = session.execute(select(DagRun).where(DagRun.id == dag_run.id)).scalar_one()
return dagrun_schema.dump(dag_run)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
def set_dag_run_note(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Set the note for a dag run."""
dag_run: DagRun | None = session.scalar(
select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id)
)
if dag_run is None:
error_message = f"Dag Run id {dag_run_id} not found in dag {dag_id}"
raise NotFound(error_message)
try:
post_body = set_dagrun_note_form_schema.load(get_json_request_dict())
new_note = post_body["note"]
except ValidationError as err:
raise BadRequest(detail=str(err))
current_user_id = get_auth_manager().get_user_id()
if dag_run.dag_run_note is None:
dag_run.note = (new_note, current_user_id)
else:
dag_run.dag_run_note.content = new_note
dag_run.dag_run_note.user_id = current_user_id
session.commit()
return dagrun_schema.dump(dag_run)
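# Hedged REST sketch: post_dag_run above backs the stable API's
# POST /api/v1/dags/{dag_id}/dagRuns route. Assuming the basic-auth API
# backend and an illustrative dag id:
#
#   curl -X POST "http://localhost:8080/api/v1/dags/example_dag/dagRuns" \
#        -H "Content-Type: application/json" \
#        --user "admin:admin" \
#        -d '{"conf": {"param": 1}}'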
|
f727d771dc44cadf3af1a2630ce3b92f910d8269
|
5b6ba0f288b1e2ac236af846a9bf546a63228476
|
/iotbx/mtz/extract_from_symmetry_lib.py
|
1b61f0b91bc77df845b7b0139575b276bb70ff06
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
cctbx/cctbx_project
|
5b547b416cadbdf95cca21dace9f54272a08d98a
|
7f4dfb6c873fd560920f697cbfd8a5ff6eed82fa
|
refs/heads/master
| 2023-08-17T17:44:05.077010
| 2023-08-16T22:40:22
| 2023-08-16T22:40:22
| 39,508,026
| 206
| 131
|
NOASSERTION
| 2023-09-14T17:12:55
| 2015-07-22T13:36:27
|
Python
|
UTF-8
|
Python
| false
| false
| 4,989
|
py
|
extract_from_symmetry_lib.py
|
from __future__ import absolute_import, division, print_function
from cctbx import sgtbx
import libtbx.load_env
import os.path as op
from six.moves import range
if (libtbx.env.has_module("ccp4io")):
for _ in ["libccp4/data", "data"]:
ccp4io_lib_data = libtbx.env.under_dist(
module_name="ccp4io", path=_)
if (op.isdir(ccp4io_lib_data)):
break
else:
ccp4io_lib_data = None
else:
ccp4io_lib_data = None
_ccp4_symbol_cache = {"symop.lib": {}, "syminfo.lib": {}}
_syminfo_lib_cache = []
syminfo_lib_bad_old = set("""
P 21/m 21/m 2/n a
""".splitlines())
def ccp4_symbol(space_group_info, lib_name, require_at_least_one_lib=True):
assert lib_name in _ccp4_symbol_cache
sg_type = space_group_info.type()
lookup_symbol = sg_type.lookup_symbol()
cache = _ccp4_symbol_cache[lib_name]
result = cache.get(lookup_symbol, "..unknown..")
if (result != "..unknown.."):
return result
if (lib_name != "syminfo.lib" or len(_syminfo_lib_cache) == 0):
lib_paths = []
if (ccp4io_lib_data is not None):
lib_paths.append(op.join(ccp4io_lib_data, lib_name))
import os
if 'CCP4_LIB' in os.environ:
lib_paths.append(op.expandvars("$CCP4_LIB/data/"+lib_name))
if 'CLIBD' in os.environ:
lib_paths.append(op.expandvars("$CLIBD/"+lib_name))
found_at_least_one_lib = False
for lib_path in lib_paths:
if (op.isfile(lib_path)):
found_at_least_one_lib = True
if (lib_name == "symop.lib"):
with open(lib_path) as fh:
ccp4_symbol = search_symop_lib_for_ccp4_symbol(
space_group_info=space_group_info,
file_iter=fh)
if (ccp4_symbol is not None):
cache[lookup_symbol] = ccp4_symbol
return ccp4_symbol
else:
build_syminfo_lib_cache(lib_path)
break
else:
if (require_at_least_one_lib):
assert found_at_least_one_lib
if (lib_name == "syminfo.lib"):
for hall,ccp4_symbol in _syminfo_lib_cache[sg_type.number()]:
sgi = sgtbx.space_group_info(symbol="Hall: "+hall)
lus = sgi.type().lookup_symbol()
cache[lus] = ccp4_symbol
if (lus == lookup_symbol):
return ccp4_symbol
return None
def search_symop_lib_for_ccp4_symbol(space_group_info, file_iter):
given_space_group_number = space_group_info.type().number()
for line in file_iter:
flds = line.split(None, 4)
space_group_number = int(flds[0][-3:])
order_z = int(flds[1])
if (space_group_number != given_space_group_number):
for i in range(order_z):
next(file_iter)
else:
result = flds[3]
group = collect_symops(file_iter=file_iter, order_z=order_z)
if (space_group_info.group() == group):
return result
return None
def collect_symops(file_iter, order_z):
result = sgtbx.space_group()
for i in range(order_z):
line = next(file_iter).strip()
result.expand_smx(sgtbx.rt_mx(line))
return result
def build_syminfo_lib_cache(lib_path):
_syminfo_lib_cache.append(None)
for number in range(230):
_syminfo_lib_cache.append([])
with open(lib_path) as file_iter:
for line in file_iter:
l = line.strip()
if (l == "begin_spacegroup"):
number = None
symbols = {}
for line in file_iter:
l = line.strip()
if (l == "end_spacegroup"):
assert number is not None
assert len(symbols) == 3
def get_shortest(s_list):
result = None
for s in s_list:
if (len(s) == 0): continue
if (result is None or len(result) > len(s)):
result = s
return result
ccp4_symbol = get_shortest(symbols["old"])
if ( ccp4_symbol is None
or ccp4_symbol in syminfo_lib_bad_old):
if (len(symbols["xhm"]) != 0):
ccp4_symbol = symbols["xhm"]
else:
raise RuntimeError("Missing both xHM and old symbols")
_syminfo_lib_cache[number].append((symbols["hall"], ccp4_symbol))
break
if (l.startswith("number ")):
flds = l.split()
assert len(flds) == 2
number = int(flds[1])
assert number >= 1
assert number <= 230
elif (l.startswith("symbol ")):
flds = l.split(None, 2)
assert len(flds) == 3
stype = flds[1].lower()
if (stype in ["hall", "xhm", "old"]):
assert stype not in symbols
symbol = flds[2].strip()
assert len(symbol) >= 2
assert symbol.startswith("'")
assert symbol.endswith("'")
if (stype == "old"):
symbols[stype] = " ".join(symbol[1:-1].split()).split("' '")
else:
symbols[stype] = symbol[1:-1]
else:
raise RuntimeError("Missing end_spacegroup")
return None
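# --- Hedged usage sketch (illustrative only; not part of the original module) ---
def _example_ccp4_symbol():
    # Assuming a syminfo.lib can be located, map an sgtbx space group to its
    # CCP4 symbol; require_at_least_one_lib=False avoids an assertion failure
    # when no library file is found.
    sgi = sgtbx.space_group_info(symbol="P 21 21 21")
    return ccp4_symbol(sgi, "syminfo.lib", require_at_least_one_lib=False)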
|
c9c6664d202673d174f3f827c37c4d59b4ea3567
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/snmp/tests/test_e2e_core_profiles/test_profile_nec_univerge.py
|
bfa92de13ace55deec2540d726ad42fc59960b72
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 6,533
|
py
|
test_profile_nec_univerge.py
|
# (C) Datadog, Inc. 2023-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.dev.utils import get_metadata_metrics
from .. import common
from ..test_e2e_core_metadata import assert_device_metadata
from .utils import (
assert_common_metrics,
assert_extend_generic_bgp4,
assert_extend_generic_if,
create_e2e_core_test_config,
get_device_ip_from_config,
)
pytestmark = [pytest.mark.e2e, common.py3_plus_only, common.snmp_integration_only]
def test_e2e_profile_nec_univerge(dd_agent_check):
config = create_e2e_core_test_config('nec-univerge')
aggregator = common.dd_agent_check_wrapper(dd_agent_check, config, rate=True)
ip_address = get_device_ip_from_config(config)
    common_tags = [
        'snmp_profile:nec-univerge',
        'snmp_host:nec-univerge.device.name',
        'device_namespace:default',
        'snmp_device:' + ip_address,
    ]
# --- TEST EXTENDED METRICS ---
assert_extend_generic_bgp4(aggregator, common_tags)
assert_extend_generic_if(aggregator, common_tags)
# --- TEST METRICS ---
assert_common_metrics(aggregator, common_tags)
aggregator.assert_metric('snmp.cpu.usage', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.memory.usage', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.picoCelsius', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.picoFahrenheit', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.picoVoltage', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalActiveTunnels', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalAuthFails', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalDecryptFails', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalHashValidFails', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalInNotifys', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalInP1SaDelRequests', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalInP2ExchgInvalids', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalInP2ExchgRejects', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalInP2Exchgs', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalInP2SaDelRequests', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalInitTunnelFails', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalInitTunnels', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalOutNotifys', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalOutP1SaDelRequests', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalOutP2ExchgInvalids', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalOutP2ExchgRejects', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalOutP2Exchgs', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalOutP2SaDelRequests', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalRespTunnelFails', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pikeGlobalRespTunnels', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalActiveTunnels', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalInAuthFails', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalInAuths', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalInDecryptFails', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalInDecrypts', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalInDrops', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalInOctets', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalInPkts', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalInReplayDrops', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalNoSaFails', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalOutAuthFails', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalOutAuths', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalOutDrops', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalOutEncryptFails', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalOutEncrypts', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalOutOctets', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.nec.pipSecGlobalOutPkts', metric_type=aggregator.GAUGE, tags=common_tags)
# --- TEST METADATA ---
device = {
'description': 'nec-univerge Device Description',
'id': 'default:' + ip_address,
'id_tags': ['device_namespace:default', 'snmp_device:' + ip_address],
        'ip_address': ip_address,
'name': 'nec-univerge.device.name',
'profile': 'nec-univerge',
'status': 1,
'sys_object_id': '1.3.6.1.4.1.119.1.84.18',
'vendor': 'nec',
}
device['tags'] = common_tags
assert_device_metadata(aggregator, device)
# --- CHECK COVERAGE ---
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
|
9ee84e9ec89683ba984799cc30c6ec44f090a57a
|
e90bf4b372da78ceec15282d060b48d18ba8d4e9
|
/tests/dbus/udisks2/test_manager.py
|
46f6dbbe397655f8c8c8338c9441c85a6a90b29c
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/supervisor
|
67f2e1755ff5fbf7cf2084351e1c32c6995274e0
|
4838b280adafed0997f32e021274b531178386cd
|
refs/heads/main
| 2023-08-31T22:51:25.949277
| 2023-08-31T08:01:42
| 2023-08-31T08:01:42
| 84,926,758
| 928
| 477
|
Apache-2.0
| 2023-09-14T17:11:27
| 2017-03-14T08:54:15
|
Python
|
UTF-8
|
Python
| false
| false
| 9,376
|
py
|
test_manager.py
|
"""Test UDisks2 Manager interface."""
from pathlib import Path
from awesomeversion import AwesomeVersion
from dbus_fast import Variant
from dbus_fast.aio.message_bus import MessageBus
import pytest
from supervisor.dbus.udisks2 import UDisks2
from supervisor.dbus.udisks2.const import PartitionTableType
from supervisor.dbus.udisks2.data import DeviceSpecification
from supervisor.exceptions import DBusNotConnectedError, DBusObjectError
from tests.common import mock_dbus_services
from tests.dbus_service_mocks.base import DBusServiceMock
from tests.dbus_service_mocks.udisks2_manager import (
UDisks2Manager as UDisks2ManagerService,
)
@pytest.fixture(name="udisks2_manager_service")
async def fixture_udisks2_manager_service(
udisks2_services: dict[str, DBusServiceMock | dict[str, DBusServiceMock]]
) -> UDisks2ManagerService:
"""Mock UDisks2 Manager service."""
yield udisks2_services["udisks2_manager"]
async def test_udisks2_manager_info(
udisks2_manager_service: UDisks2ManagerService, dbus_session_bus: MessageBus
):
"""Test udisks2 manager dbus connection."""
udisks2_manager_service.GetBlockDevices.calls.clear()
udisks2 = UDisks2()
assert udisks2.supported_filesystems is None
await udisks2.connect(dbus_session_bus)
assert udisks2.supported_filesystems == [
"ext4",
"vfat",
"ntfs",
"exfat",
"swap",
]
assert udisks2.version == AwesomeVersion("2.9.2")
assert {block.object_path for block in udisks2.block_devices} == {
"/org/freedesktop/UDisks2/block_devices/loop0",
"/org/freedesktop/UDisks2/block_devices/mmcblk1",
"/org/freedesktop/UDisks2/block_devices/mmcblk1p1",
"/org/freedesktop/UDisks2/block_devices/mmcblk1p2",
"/org/freedesktop/UDisks2/block_devices/mmcblk1p3",
"/org/freedesktop/UDisks2/block_devices/sda",
"/org/freedesktop/UDisks2/block_devices/sda1",
"/org/freedesktop/UDisks2/block_devices/sdb",
"/org/freedesktop/UDisks2/block_devices/sdb1",
"/org/freedesktop/UDisks2/block_devices/zram1",
}
assert {drive.object_path for drive in udisks2.drives} == {
"/org/freedesktop/UDisks2/drives/BJTD4R_0x97cde291",
"/org/freedesktop/UDisks2/drives/SSK_SSK_Storage_DF56419883D56",
"/org/freedesktop/UDisks2/drives/Generic_Flash_Disk_61BCDDB6",
}
assert udisks2_manager_service.GetBlockDevices.calls == [
({"auth.no_user_interaction": Variant("b", True)},)
]
udisks2_manager_service.GetBlockDevices.calls.clear()
udisks2_manager_service.emit_properties_changed({"SupportedFilesystems": ["ext4"]})
await udisks2_manager_service.ping()
assert udisks2.supported_filesystems == ["ext4"]
assert udisks2_manager_service.GetBlockDevices.calls == []
udisks2_manager_service.emit_properties_changed({}, ["SupportedFilesystems"])
await udisks2_manager_service.ping()
await udisks2_manager_service.ping()
await udisks2_manager_service.ping() # Three pings: signal, get all properties and get block devices
assert udisks2.supported_filesystems == [
"ext4",
"vfat",
"ntfs",
"exfat",
"swap",
]
assert udisks2_manager_service.GetBlockDevices.calls == [
({"auth.no_user_interaction": Variant("b", True)},)
]
async def test_update_checks_devices_and_drives(dbus_session_bus: MessageBus):
"""Test update rechecks block devices and drives correctly."""
mocked = await mock_dbus_services(
{
"udisks2_manager": None,
"udisks2_block": [
"/org/freedesktop/UDisks2/block_devices/sda",
"/org/freedesktop/UDisks2/block_devices/sda1",
"/org/freedesktop/UDisks2/block_devices/sdb",
],
"udisks2_drive": [
"/org/freedesktop/UDisks2/drives/SSK_SSK_Storage_DF56419883D56",
"/org/freedesktop/UDisks2/drives/Generic_Flash_Disk_61BCDDB6",
],
},
dbus_session_bus,
)
udisks2_manager_service: UDisks2ManagerService = mocked["udisks2_manager"]
udisks2_manager_service.block_devices = [
"/org/freedesktop/UDisks2/block_devices/sda",
"/org/freedesktop/UDisks2/block_devices/sda1",
"/org/freedesktop/UDisks2/block_devices/sdb",
]
udisks2 = UDisks2()
await udisks2.connect(dbus_session_bus)
assert len(udisks2.block_devices) == 3
assert (
udisks2.get_block_device(
"/org/freedesktop/UDisks2/block_devices/sda"
).partition_table
is None
)
assert (
udisks2.get_block_device(
"/org/freedesktop/UDisks2/block_devices/sda1"
).filesystem
is None
)
sdb = udisks2.get_block_device("/org/freedesktop/UDisks2/block_devices/sdb")
assert sdb.is_connected is True
with pytest.raises(DBusObjectError):
udisks2.get_block_device("/org/freedesktop/UDisks2/block_devices/mmcblk1")
assert len(udisks2.drives) == 2
assert (
udisks2.get_drive(
"/org/freedesktop/UDisks2/drives/SSK_SSK_Storage_DF56419883D56"
).is_connected
is True
)
flash_disk = udisks2.get_drive(
"/org/freedesktop/UDisks2/drives/Generic_Flash_Disk_61BCDDB6"
)
assert flash_disk.is_connected is True
with pytest.raises(DBusObjectError):
udisks2.get_drive("/org/freedesktop/UDisks2/drives/BJTD4R_0x97cde291")
await mock_dbus_services(
{
"udisks2_block": "/org/freedesktop/UDisks2/block_devices/mmcblk1",
"udisks2_drive": "/org/freedesktop/UDisks2/drives/BJTD4R_0x97cde291",
"udisks2_filesystem": "/org/freedesktop/UDisks2/block_devices/sda1",
"udisks2_partition_table": "/org/freedesktop/UDisks2/block_devices/sda",
},
dbus_session_bus,
)
udisks2_manager_service.block_devices = [
"/org/freedesktop/UDisks2/block_devices/sda",
"/org/freedesktop/UDisks2/block_devices/sda1",
"/org/freedesktop/UDisks2/block_devices/mmcblk1",
]
await udisks2.update()
assert len(udisks2.block_devices) == 3
assert (
udisks2.get_block_device(
"/org/freedesktop/UDisks2/block_devices/sda"
).partition_table.type
== PartitionTableType.GPT
)
assert (
udisks2.get_block_device(
"/org/freedesktop/UDisks2/block_devices/sda1"
).filesystem.mount_points
== []
)
assert (
udisks2.get_block_device(
"/org/freedesktop/UDisks2/block_devices/mmcblk1"
).is_connected
is True
)
with pytest.raises(DBusObjectError):
udisks2.get_block_device("/org/freedesktop/UDisks2/block_devices/sdb")
assert sdb.is_connected is False
assert sdb.is_shutdown is True
assert len(udisks2.drives) == 2
assert (
udisks2.get_drive(
"/org/freedesktop/UDisks2/drives/SSK_SSK_Storage_DF56419883D56"
).is_connected
is True
)
assert (
udisks2.get_drive(
"/org/freedesktop/UDisks2/drives/BJTD4R_0x97cde291"
).is_connected
is True
)
with pytest.raises(DBusObjectError):
udisks2.get_drive("/org/freedesktop/UDisks2/drives/Generic_Flash_Disk_61BCDDB6")
assert flash_disk.is_connected is False
assert flash_disk.is_shutdown is True
async def test_get_block_device(
udisks2_manager_service: UDisks2ManagerService, dbus_session_bus: MessageBus
):
"""Test get block device by object path."""
udisks2 = UDisks2()
with pytest.raises(DBusNotConnectedError):
udisks2.get_block_device("/org/freedesktop/UDisks2/block_devices/sda1")
await udisks2.connect(dbus_session_bus)
block_device = udisks2.get_block_device(
"/org/freedesktop/UDisks2/block_devices/sda1"
)
assert block_device.id_label == "hassos-data-old"
with pytest.raises(DBusObjectError):
udisks2.get_block_device("non_existent")
async def test_get_drive(
udisks2_manager_service: UDisks2ManagerService, dbus_session_bus: MessageBus
):
"""Test get drive by object path."""
udisks2 = UDisks2()
with pytest.raises(DBusNotConnectedError):
udisks2.get_drive("/org/freedesktop/UDisks2/drives/BJTD4R_0x97cde291")
await udisks2.connect(dbus_session_bus)
drive = udisks2.get_drive("/org/freedesktop/UDisks2/drives/BJTD4R_0x97cde291")
assert drive.id == "BJTD4R-0x97cde291"
with pytest.raises(DBusObjectError):
udisks2.get_drive("non_existent")
async def test_resolve_device(
udisks2_manager_service: UDisks2ManagerService, dbus_session_bus: MessageBus
):
"""Test resolve device."""
udisks2_manager_service.ResolveDevice.calls.clear()
udisks2 = UDisks2()
with pytest.raises(DBusNotConnectedError):
await udisks2.resolve_device(DeviceSpecification(path=Path("/dev/sda1")))
await udisks2.connect(dbus_session_bus)
devices = await udisks2.resolve_device(DeviceSpecification(path=Path("/dev/sda1")))
assert len(devices) == 1
assert devices[0].id_label == "hassos-data-old"
assert udisks2_manager_service.ResolveDevice.calls == [
(
{"path": Variant("s", "/dev/sda1")},
{"auth.no_user_interaction": Variant("b", True)},
)
]
|
d5691f0dd9c3a31c3eb891f0cc35bce61ef15ba1
|
a3f06e5c4947523e3a2a96e721239858c9fc3330
|
/tests/upgrade-before.py
|
6ab3bb85f2f7344443b5dbb56426d864e77c7e8e
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
SpriteLink/NIPAP
|
61491d09e88f0a8101a089115cad794cf402f528
|
6b34b39c7c4ef59069f51adbb736050d214a2d99
|
refs/heads/master
| 2023-08-14T16:51:55.080168
| 2023-07-06T14:19:12
| 2023-07-06T14:19:12
| 2,087,868
| 469
| 166
|
MIT
| 2023-07-06T14:19:35
| 2011-07-22T09:14:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,504
|
py
|
upgrade-before.py
|
#!/usr/bin/env python
#
# This is run by Travis-CI before an upgrade to load some data into the
# database. After the upgrade is complete, the data is verified by
# upgrade-after.py to make sure that the upgrade of the database went smoothly.
#
import logging
import unittest
import sys
import os
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, MODULE_DIR + '/..')
sys.path.insert(0, MODULE_DIR + '/../pynipap')
sys.path.insert(0, MODULE_DIR + '/../nipap')
sys.path.insert(0, MODULE_DIR + '/../nipap-cli')
from nipap.backend import Nipap
from nipap.authlib import SqliteAuth
from nipap.nipapconfig import NipapConfig
from pynipap import AuthOptions, VRF, Pool, Prefix, NipapNonExistentError, NipapDuplicateError, NipapValueError
import pynipap
pynipap.xmlrpc_uri = 'http://unittest:gottatest@127.0.0.1:1337'
o = AuthOptions({
'authoritative_source': 'nipap'
})
class TestHelper:
@classmethod
def clear_database(cls):
cfg = NipapConfig('/etc/nipap/nipap.conf')
n = Nipap()
# have to delete hosts before we can delete the rest
n._execute("DELETE FROM ip_net_plan WHERE masklen(prefix) = 32")
# the rest
n._execute("DELETE FROM ip_net_plan")
# delete all except for the default VRF with id 0
n._execute("DELETE FROM ip_net_vrf WHERE id > 0")
# set default info for VRF 0
n._execute("UPDATE ip_net_vrf SET name = 'default', description = 'The default VRF, typically the Internet.' WHERE id = 0")
n._execute("DELETE FROM ip_net_pool")
n._execute("DELETE FROM ip_net_asn")
def add_prefix(self, prefix, type, description, tags=None):
if tags is None:
tags = []
p = Prefix()
p.prefix = prefix
p.type = type
p.description = description
p.tags = tags
p.save()
return p
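# --- Hedged usage sketch (illustrative only; not part of the test module) ---
def _example_add_prefix():
    # Saves a reservation prefix through pynipap against the XML-RPC URI
    # configured above; assumes a running NIPAP backend.
    th = TestHelper()
    return th.add_prefix('10.0.0.0/24', 'reservation', 'example prefix')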
class TestLoad(unittest.TestCase):
""" Load some data into the database
"""
def test_load_data(self):
"""
"""
th = TestHelper()
p1 = th.add_prefix('192.168.0.0/16', 'reservation', 'test')
p2 = th.add_prefix('192.168.0.0/20', 'reservation', 'test')
p3 = th.add_prefix('192.168.0.0/24', 'reservation', 'test')
p4 = th.add_prefix('192.168.1.0/24', 'reservation', 'test')
p5 = th.add_prefix('192.168.2.0/24', 'reservation', 'test')
p6 = th.add_prefix('192.168.32.0/20', 'reservation', 'test')
p7 = th.add_prefix('192.168.32.0/24', 'reservation', 'test')
p8 = th.add_prefix('192.168.32.1/32', 'reservation', 'test')
ps1 = th.add_prefix('2001:db8:1::/48', 'reservation', 'test')
ps2 = th.add_prefix('2001:db8:1::/64', 'reservation', 'test')
ps3 = th.add_prefix('2001:db8:2::/48', 'reservation', 'test')
pool1 = Pool()
pool1.name = 'upgrade-test'
pool1.ipv4_default_prefix_length = 31
pool1.ipv6_default_prefix_length = 112
pool1.save()
p2.pool = pool1
p2.save()
ps1.pool = pool1
ps1.save()
pool2 = Pool()
pool2.name = 'upgrade-test2'
pool2.save()
vrf1 = VRF()
vrf1.name = 'foo'
vrf1.rt = '123:123'
vrf1.save()
if __name__ == '__main__':
# set up logging
log = logging.getLogger()
logging.basicConfig()
log.setLevel(logging.INFO)
if sys.version_info >= (2,7):
unittest.main(verbosity=2)
else:
unittest.main()
|
e9a355b8476694b955ab1cf86e43376cde9ea10e
|
edc1134436a79ca883a0d25f3c8dfffc4235c514
|
/pyro/contrib/epidemiology/util.py
|
5c2a29bf4b1f7bb785ddf566f63a13bd1c5056d5
|
[
"Apache-2.0"
] |
permissive
|
pyro-ppl/pyro
|
2283d8ca528fc090c724a3a6e0f344e505ebbf77
|
0e82cad30f75b892a07e6c9a5f9e24f2cb5d0d81
|
refs/heads/dev
| 2023-08-18T00:35:28.014919
| 2023-08-06T21:01:36
| 2023-08-06T21:01:36
| 94,506,832
| 3,647
| 606
|
Apache-2.0
| 2023-09-14T13:52:14
| 2017-06-16T05:03:47
|
Python
|
UTF-8
|
Python
| false
| false
| 10,990
|
py
|
util.py
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import numpy
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.distributions.util import broadcast_shape
from pyro.ops.special import safe_log
def clamp(tensor, *, min=None, max=None):
"""
Like :func:`torch.clamp` but dispatches to :func:`torch.min` and/or
:func:`torch.max` if ``min`` and/or ``max`` is a :class:`~torch.Tensor`.
"""
if isinstance(min, torch.Tensor):
tensor = torch.max(tensor, min)
min = None
if isinstance(max, torch.Tensor):
tensor = torch.min(tensor, max)
max = None
if min is None and max is None:
return tensor
return tensor.clamp(min=min, max=max)
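# --- Hedged usage sketch (illustrative only) ---
def _example_clamp():
    # Tensor bounds dispatch to torch.max/torch.min elementwise; scalar
    # bounds fall through to Tensor.clamp.
    x = torch.tensor([1.0, 5.0, 9.0])
    lo = torch.tensor([2.0, 2.0, 2.0])
    return clamp(x, min=lo, max=8.0)  # -> tensor([2., 5., 8.])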
def cat2(lhs, rhs, *, dim=-1):
"""
Like ``torch.cat([lhs, rhs], dim=dim)`` but dispatches to
:func:`torch.nn.functional.pad` in case one of ``lhs`` or ``rhs`` is a
scalar.
"""
assert dim < 0
if not isinstance(lhs, torch.Tensor):
pad = (0, 0) * (-1 - dim) + (1, 0)
return torch.nn.functional.pad(rhs, pad, value=lhs)
if not isinstance(rhs, torch.Tensor):
pad = (0, 0) * (-1 - dim) + (0, 1)
return torch.nn.functional.pad(lhs, pad, value=rhs)
diff = lhs.dim() - rhs.dim()
if diff > 0:
rhs = rhs.expand((1,) * diff + rhs.shape)
elif diff < 0:
diff = -diff
lhs = lhs.expand((1,) * diff + lhs.shape)
shape = list(broadcast_shape(lhs.shape, rhs.shape))
shape[dim] = -1
return torch.cat([lhs.expand(shape), rhs.expand(shape)], dim=dim)
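# --- Hedged usage sketch (illustrative only) ---
def _example_cat2():
    # Concatenating a scalar pads it onto the tensor instead of calling cat.
    rhs = torch.zeros(2, 3)
    return cat2(1.0, rhs, dim=-1)  # shape (2, 4); first column is all ones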
@torch.no_grad()
def align_samples(samples, model, particle_dim):
"""
Unsqueeze stacked samples such that their particle dim all aligns.
This traces ``model`` to determine the ``event_dim`` of each site.
"""
assert particle_dim < 0
sample = {name: value[0] for name, value in samples.items()}
with poutine.block(), poutine.trace() as tr, poutine.condition(data=sample):
model()
samples = samples.copy()
for name, value in samples.items():
event_dim = tr.trace.nodes[name]["fn"].event_dim
pad = event_dim - particle_dim - value.dim()
if pad < 0:
raise ValueError("Cannot align samples, try moving particle_dim left")
if pad > 0:
shape = value.shape[:1] + (1,) * pad + value.shape[1:]
samples[name] = value.reshape(shape)
return samples
# this 8 x 10 array encodes the 10 coefficients of each of 8 degree-9
# polynomials that are used to construct the num_quant_bins=16 quantization
# strategy
W16 = [
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.1562511562511555e-07],
[
1.1562511562511557e-07,
1.04062604062604e-06,
4.16250416250416e-06,
9.712509712509707e-06,
1.456876456876456e-05,
1.4568764568764562e-05,
9.712509712509707e-06,
4.16250416250416e-06,
1.04062604062604e-06,
-6.937506937506934e-07,
],
[
5.839068339068337e-05,
0.0002591158841158841,
0.0005036630036630038,
0.0005536130536130536,
0.00036421911421911425,
0.00013111888111888106,
9.712509712509736e-06,
-1.2487512487512482e-05,
-5.2031302031302014e-06,
1.6187516187516182e-06,
],
[
0.0018637612387612374,
0.004983558108558107,
0.005457042957042955,
0.0029234654234654212,
0.000568181818181818,
-0.0001602564102564102,
-8.741258741258739e-05,
4.162504162504162e-06,
9.365634365634364e-06,
-1.7536475869809201e-06,
],
[
0.015560115039281694,
0.025703289765789755,
0.015009296259296255,
0.0023682336182336166,
-0.000963966588966589,
-0.00029380341880341857,
5.6656306656306665e-05,
1.5956265956265953e-05,
-6.417193917193917e-06,
7.515632515632516e-07,
],
[
0.057450111616778265,
0.05790875790875791,
0.014424464424464418,
-0.0030303030303030303,
-0.0013791763791763793,
0.00011655011655011669,
5.180005180005181e-05,
-8.325008325008328e-06,
3.4687534687534703e-07,
0.0,
],
[
0.12553422657589322,
0.072988122988123,
-0.0011641136641136712,
-0.006617456617456618,
-0.00028651903651903725,
0.00027195027195027195,
3.2375032375032334e-06,
-5.550005550005552e-06,
3.4687534687534703e-07,
0.0,
],
[
0.21761806865973532,
1.7482707128494565e-17,
-0.028320290820290833,
0.0,
0.0014617327117327117,
0.0,
-3.561253561253564e-05,
0.0,
3.4687534687534714e-07,
0.0,
],
]
W16 = numpy.array(W16)
def compute_bin_probs(s, num_quant_bins):
"""
Compute categorical probabilities for a quantization scheme with num_quant_bins many
bins. `s` is a real-valued tensor with values in [0, 1]. Returns probabilities
of shape `s.shape` + `(num_quant_bins,)`
"""
t = 1 - s
if num_quant_bins == 2:
probs = torch.stack([t, s], dim=-1)
return probs
ss = s * s
tt = t * t
if num_quant_bins == 4:
# This cubic spline interpolates over the nearest four integers, ensuring
# piecewise quadratic gradients.
probs = torch.stack(
[
t * tt,
4 + ss * (3 * s - 6),
4 + tt * (3 * t - 6),
s * ss,
],
dim=-1,
) * (1 / 6)
return probs
if num_quant_bins == 8:
# This quintic spline interpolates over the nearest eight integers, ensuring
# piecewise quartic gradients.
s3 = ss * s
s4 = ss * ss
s5 = s3 * ss
t3 = tt * t
t4 = tt * tt
t5 = t3 * tt
probs = torch.stack(
[
2 * t5,
2 + 10 * t + 20 * tt + 20 * t3 + 10 * t4 - 7 * t5,
55 + 115 * t + 70 * tt - 9 * t3 - 25 * t4 + 7 * t5,
302 - 100 * ss + 10 * s4,
302 - 100 * tt + 10 * t4,
55 + 115 * s + 70 * ss - 9 * s3 - 25 * s4 + 7 * s5,
2 + 10 * s + 20 * ss + 20 * s3 + 10 * s4 - 7 * s5,
2 * s5,
],
dim=-1,
) * (1 / 840)
return probs
if num_quant_bins == 12:
# This septic spline interpolates over the nearest 12 integers
s3 = ss * s
s4 = ss * ss
s5 = s3 * ss
s6 = s3 * s3
s7 = s4 * s3
t3 = tt * t
t4 = tt * tt
t5 = t3 * tt
t6 = t3 * t3
t7 = t4 * t3
probs = torch.stack(
[
693 * t7,
693
+ 4851 * t
+ 14553 * tt
+ 24255 * t3
+ 24255 * t4
+ 14553 * t5
+ 4851 * t6
- 3267 * t7,
84744
+ 282744 * t
+ 382536 * tt
+ 249480 * t3
+ 55440 * t4
- 24948 * t5
- 18018 * t6
+ 5445 * t7,
1017423
+ 1823283 * t
+ 1058211 * tt
+ 51975 * t3
- 148995 * t4
- 18711 * t5
+ 20097 * t6
- 3267 * t7,
3800016
+ 3503808 * t
+ 365904 * tt
- 443520 * t3
- 55440 * t4
+ 33264 * t5
- 2772 * t6,
8723088 - 1629936 * ss + 110880.0 * s4 - 2772 * s6,
8723088 - 1629936 * tt + 110880.0 * t4 - 2772 * t6,
3800016
+ 3503808 * s
+ 365904 * ss
- 443520 * s3
- 55440 * s4
+ 33264 * s5
- 2772 * s6,
1017423
+ 1823283 * s
+ 1058211 * ss
+ 51975 * s3
- 148995 * s4
- 18711 * s5
+ 20097 * s6
- 3267 * s7,
84744
+ 282744 * s
+ 382536 * ss
+ 249480 * s3
+ 55440 * s4
- 24948 * s5
- 18018 * s6
+ 5445 * s7,
693
+ 4851 * s
+ 14553 * ss
+ 24255 * s3
+ 24255 * s4
+ 14553 * s5
+ 4851 * s6
- 3267 * s7,
693 * s7,
],
dim=-1,
) * (1 / 32931360)
return probs
if num_quant_bins == 16:
# This nonic spline interpolates over the nearest 16 integers
w16 = torch.from_numpy(W16).to(s.device).type_as(s)
s_powers = s.unsqueeze(-1).unsqueeze(-1).pow(torch.arange(10.0))
t_powers = t.unsqueeze(-1).unsqueeze(-1).pow(torch.arange(10.0))
splines_t = (w16 * t_powers).sum(-1)
splines_s = (w16 * s_powers).sum(-1)
index = [0, 1, 2, 3, 4, 5, 6, 15, 7, 14, 13, 12, 11, 10, 9, 8]
probs = torch.cat([splines_t, splines_s], dim=-1)
probs = probs.index_select(-1, torch.tensor(index))
return probs
raise ValueError("Unsupported num_quant_bins: {}".format(num_quant_bins))
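# --- Hedged usage sketch (illustrative only) ---
def _example_bin_probs():
    # For a fractional position s = 0.3 the cubic spline yields 4 bin
    # probabilities that sum to 1.
    probs = compute_bin_probs(torch.tensor(0.3), num_quant_bins=4)
    assert probs.shape == (4,)
    return probs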
def _all(x):
return x.all() if isinstance(x, torch.Tensor) else x
def _unsqueeze(x):
return x.unsqueeze(-1) if isinstance(x, torch.Tensor) else x
def quantize(name, x_real, min, max, num_quant_bins=4):
"""Randomly quantize in a way that preserves probability mass."""
assert _all(min < max)
if num_quant_bins == 1:
x = x_real.detach().round()
return pyro.deterministic(name, x, event_dim=0)
lb = x_real.detach().floor()
probs = compute_bin_probs(x_real - lb, num_quant_bins=num_quant_bins)
q = pyro.sample(
"Q_" + name, dist.Categorical(probs), infer={"enumerate": "parallel"}
)
q = q.type_as(x_real) - (num_quant_bins // 2 - 1)
x = lb + q
x = torch.max(x, 2 * min - 1 - x)
x = torch.min(x, 2 * max + 1 - x)
return pyro.deterministic(name, x, event_dim=0)
def quantize_enumerate(x_real, min, max, num_quant_bins=4):
"""Quantize, then manually enumerate."""
assert _all(min < max)
lb = x_real.detach().floor()
probs = compute_bin_probs(x_real - lb, num_quant_bins=num_quant_bins)
logits = safe_log(probs)
arange_min = 1 - num_quant_bins // 2
arange_max = 1 + num_quant_bins // 2
q = torch.arange(arange_min, arange_max)
x = lb.unsqueeze(-1) + q
x = torch.max(x, 2 * _unsqueeze(min) - 1 - x)
x = torch.min(x, 2 * _unsqueeze(max) + 1 - x)
return x, logits
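# --- Hedged usage sketch (illustrative only) ---
def _example_quantize_enumerate():
    # Enumerate the 4 quantized support points around x_real = 3.7; the
    # reflections at min/max are no-ops here because 2..5 lies inside [0, 10].
    x_real = torch.tensor(3.7)
    x, logits = quantize_enumerate(
        x_real, min=torch.tensor(0.0), max=torch.tensor(10.0)
    )
    return x, logits  # x is tensor([2., 3., 4., 5.]); logits are log bin probs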
|
27d9b458a05b562cd4f0c52e5b4e055200e55127
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-webrisk/google/cloud/webrisk_v1beta1/types/webrisk.py
|
889fe0961259c68f63eeead08c9923120516fcae
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 16,885
|
py
|
webrisk.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import MutableMapping, MutableSequence
from google.protobuf import timestamp_pb2 # type: ignore
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.webrisk.v1beta1",
manifest={
"ThreatType",
"CompressionType",
"ComputeThreatListDiffRequest",
"ComputeThreatListDiffResponse",
"SearchUrisRequest",
"SearchUrisResponse",
"SearchHashesRequest",
"SearchHashesResponse",
"ThreatEntryAdditions",
"ThreatEntryRemovals",
"RawIndices",
"RawHashes",
"RiceDeltaEncoding",
},
)
class ThreatType(proto.Enum):
r"""The type of threat. This maps dirrectly to the threat list a
threat may belong to.
Values:
THREAT_TYPE_UNSPECIFIED (0):
Unknown.
MALWARE (1):
Malware targeting any platform.
SOCIAL_ENGINEERING (2):
Social engineering targeting any platform.
UNWANTED_SOFTWARE (3):
Unwanted software targeting any platform.
"""
THREAT_TYPE_UNSPECIFIED = 0
MALWARE = 1
SOCIAL_ENGINEERING = 2
UNWANTED_SOFTWARE = 3
class CompressionType(proto.Enum):
r"""The ways in which threat entry sets can be compressed.
Values:
COMPRESSION_TYPE_UNSPECIFIED (0):
Unknown.
RAW (1):
Raw, uncompressed data.
RICE (2):
Rice-Golomb encoded data.
"""
COMPRESSION_TYPE_UNSPECIFIED = 0
RAW = 1
RICE = 2
class ComputeThreatListDiffRequest(proto.Message):
r"""Describes an API diff request.
Attributes:
threat_type (google.cloud.webrisk_v1beta1.types.ThreatType):
The ThreatList to update.
version_token (bytes):
The current version token of the client for
the requested list (the client version that was
received from the last successful diff).
constraints (google.cloud.webrisk_v1beta1.types.ComputeThreatListDiffRequest.Constraints):
Required. The constraints associated with
this request.
"""
class Constraints(proto.Message):
r"""The constraints for this diff.
Attributes:
max_diff_entries (int):
The maximum size in number of entries. The diff will not
contain more entries than this value. This should be a power
of 2 between 2\ **10 and 2**\ 20. If zero, no diff size
limit is set.
max_database_entries (int):
Sets the maximum number of entries that the client is
willing to have in the local database. This should be a
power of 2 between 2\ **10 and 2**\ 20. If zero, no database
size limit is set.
supported_compressions (MutableSequence[google.cloud.webrisk_v1beta1.types.CompressionType]):
The compression types supported by the
client.
"""
max_diff_entries: int = proto.Field(
proto.INT32,
number=1,
)
max_database_entries: int = proto.Field(
proto.INT32,
number=2,
)
supported_compressions: MutableSequence[
"CompressionType"
] = proto.RepeatedField(
proto.ENUM,
number=3,
enum="CompressionType",
)
threat_type: "ThreatType" = proto.Field(
proto.ENUM,
number=1,
enum="ThreatType",
)
version_token: bytes = proto.Field(
proto.BYTES,
number=2,
)
constraints: Constraints = proto.Field(
proto.MESSAGE,
number=3,
message=Constraints,
)
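# --- Hedged construction sketch (illustrative; not part of the generated file) ---
def _example_diff_request() -> "ComputeThreatListDiffRequest":
    # On a first sync the client has no version token yet, so it is left
    # empty; the constraint values are arbitrary illustrative choices.
    return ComputeThreatListDiffRequest(
        threat_type=ThreatType.MALWARE,
        version_token=b"",
        constraints=ComputeThreatListDiffRequest.Constraints(
            max_diff_entries=1024,
            max_database_entries=4096,
            supported_compressions=[CompressionType.RAW],
        ),
    )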
class ComputeThreatListDiffResponse(proto.Message):
r"""
Attributes:
response_type (google.cloud.webrisk_v1beta1.types.ComputeThreatListDiffResponse.ResponseType):
The type of response. This may indicate that
an action must be taken by the client when the
response is received.
additions (google.cloud.webrisk_v1beta1.types.ThreatEntryAdditions):
A set of entries to add to a local threat
type's list.
removals (google.cloud.webrisk_v1beta1.types.ThreatEntryRemovals):
A set of entries to remove from a local
threat type's list. This field may be empty.
new_version_token (bytes):
The new opaque client version token.
checksum (google.cloud.webrisk_v1beta1.types.ComputeThreatListDiffResponse.Checksum):
The expected SHA256 hash of the client state;
that is, of the sorted list of all hashes
present in the database after applying the
provided diff. If the client state doesn't match
the expected state, the client must disregard
this diff and retry later.
recommended_next_diff (google.protobuf.timestamp_pb2.Timestamp):
The soonest the client should wait before
issuing any diff request. Querying sooner is
unlikely to produce a meaningful diff. Waiting
longer is acceptable considering the use case.
If this field is not set clients may update as
soon as they want.
"""
class ResponseType(proto.Enum):
r"""The type of response sent to the client.
Values:
RESPONSE_TYPE_UNSPECIFIED (0):
Unknown.
DIFF (1):
Partial updates are applied to the client's
existing local database.
RESET (2):
                Full updates reset the client's entire local
                database. This means that the client either had
                no state, was seriously out of date, or is
                believed to be corrupt.
"""
RESPONSE_TYPE_UNSPECIFIED = 0
DIFF = 1
RESET = 2
class Checksum(proto.Message):
r"""The expected state of a client's local database.
Attributes:
sha256 (bytes):
The SHA256 hash of the client state; that is,
of the sorted list of all hashes present in the
database.
"""
sha256: bytes = proto.Field(
proto.BYTES,
number=1,
)
response_type: ResponseType = proto.Field(
proto.ENUM,
number=4,
enum=ResponseType,
)
additions: "ThreatEntryAdditions" = proto.Field(
proto.MESSAGE,
number=5,
message="ThreatEntryAdditions",
)
removals: "ThreatEntryRemovals" = proto.Field(
proto.MESSAGE,
number=6,
message="ThreatEntryRemovals",
)
new_version_token: bytes = proto.Field(
proto.BYTES,
number=7,
)
checksum: Checksum = proto.Field(
proto.MESSAGE,
number=8,
message=Checksum,
)
recommended_next_diff: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=2,
message=timestamp_pb2.Timestamp,
)
class SearchUrisRequest(proto.Message):
r"""Request to check URI entries against threatLists.
Attributes:
uri (str):
Required. The URI to be checked for matches.
threat_types (MutableSequence[google.cloud.webrisk_v1beta1.types.ThreatType]):
Required. The ThreatLists to search in.
"""
uri: str = proto.Field(
proto.STRING,
number=1,
)
threat_types: MutableSequence["ThreatType"] = proto.RepeatedField(
proto.ENUM,
number=2,
enum="ThreatType",
)
class SearchUrisResponse(proto.Message):
r"""
Attributes:
threat (google.cloud.webrisk_v1beta1.types.SearchUrisResponse.ThreatUri):
            The threat list matches. This may be empty if
            the URI is not on any list.
"""
class ThreatUri(proto.Message):
r"""Contains threat information on a matching uri.
Attributes:
threat_types (MutableSequence[google.cloud.webrisk_v1beta1.types.ThreatType]):
The ThreatList this threat belongs to.
expire_time (google.protobuf.timestamp_pb2.Timestamp):
The cache lifetime for the returned match.
Clients must not cache this response past this
timestamp to avoid false positives.
"""
threat_types: MutableSequence["ThreatType"] = proto.RepeatedField(
proto.ENUM,
number=1,
enum="ThreatType",
)
expire_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=2,
message=timestamp_pb2.Timestamp,
)
threat: ThreatUri = proto.Field(
proto.MESSAGE,
number=1,
message=ThreatUri,
)
class SearchHashesRequest(proto.Message):
r"""Request to return full hashes matched by the provided hash
prefixes.
Attributes:
hash_prefix (bytes):
A hash prefix, consisting of the most
significant 4-32 bytes of a SHA256 hash. For
JSON requests, this field is base64-encoded.
threat_types (MutableSequence[google.cloud.webrisk_v1beta1.types.ThreatType]):
Required. The ThreatLists to search in.
"""
hash_prefix: bytes = proto.Field(
proto.BYTES,
number=1,
)
threat_types: MutableSequence["ThreatType"] = proto.RepeatedField(
proto.ENUM,
number=2,
enum="ThreatType",
)
class SearchHashesResponse(proto.Message):
r"""
Attributes:
threats (MutableSequence[google.cloud.webrisk_v1beta1.types.SearchHashesResponse.ThreatHash]):
The full hashes that matched the requested
prefixes. The hash will be populated in the key.
negative_expire_time (google.protobuf.timestamp_pb2.Timestamp):
For requested entities that did not match the
threat list, how long to cache the response
until.
"""
class ThreatHash(proto.Message):
r"""Contains threat information on a matching hash.
Attributes:
threat_types (MutableSequence[google.cloud.webrisk_v1beta1.types.ThreatType]):
The ThreatList this threat belongs to.
This must contain at least one entry.
hash_ (bytes):
A 32 byte SHA256 hash. This field is in
binary format. For JSON requests, hashes are
base64-encoded.
expire_time (google.protobuf.timestamp_pb2.Timestamp):
The cache lifetime for the returned match.
Clients must not cache this response past this
timestamp to avoid false positives.
"""
threat_types: MutableSequence["ThreatType"] = proto.RepeatedField(
proto.ENUM,
number=1,
enum="ThreatType",
)
hash_: bytes = proto.Field(
proto.BYTES,
number=2,
)
expire_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
threats: MutableSequence[ThreatHash] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ThreatHash,
)
negative_expire_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=2,
message=timestamp_pb2.Timestamp,
)
class ThreatEntryAdditions(proto.Message):
r"""Contains the set of entries to add to a local database.
May contain a combination of compressed and raw data in a single
response.
Attributes:
raw_hashes (MutableSequence[google.cloud.webrisk_v1beta1.types.RawHashes]):
The raw SHA256-formatted entries.
Repeated to allow returning sets of hashes with
different prefix sizes.
rice_hashes (google.cloud.webrisk_v1beta1.types.RiceDeltaEncoding):
The encoded 4-byte prefixes of SHA256-formatted entries,
using a Golomb-Rice encoding. The hashes are converted to
uint32, sorted in ascending order, then delta encoded and
stored as encoded_data.
"""
raw_hashes: MutableSequence["RawHashes"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="RawHashes",
)
rice_hashes: "RiceDeltaEncoding" = proto.Field(
proto.MESSAGE,
number=2,
message="RiceDeltaEncoding",
)
class ThreatEntryRemovals(proto.Message):
r"""Contains the set of entries to remove from a local database.
Attributes:
raw_indices (google.cloud.webrisk_v1beta1.types.RawIndices):
The raw removal indices for a local list.
rice_indices (google.cloud.webrisk_v1beta1.types.RiceDeltaEncoding):
The encoded local, lexicographically-sorted list indices,
using a Golomb-Rice encoding. Used for sending compressed
removal indices. The removal indices (uint32) are sorted in
ascending order, then delta encoded and stored as
encoded_data.
"""
raw_indices: "RawIndices" = proto.Field(
proto.MESSAGE,
number=1,
message="RawIndices",
)
rice_indices: "RiceDeltaEncoding" = proto.Field(
proto.MESSAGE,
number=2,
message="RiceDeltaEncoding",
)
class RawIndices(proto.Message):
r"""A set of raw indices to remove from a local list.
Attributes:
indices (MutableSequence[int]):
The indices to remove from a
lexicographically-sorted local list.
"""
indices: MutableSequence[int] = proto.RepeatedField(
proto.INT32,
number=1,
)
class RawHashes(proto.Message):
r"""The uncompressed threat entries in hash format.
Hashes can be anywhere from 4 to 32 bytes in size. A large
majority are 4 bytes, but some hashes are lengthened if they
collide with the hash of a popular URI.
    Used for sending ThreatEntryAdditions to clients that do not
support compression, or when sending non-4-byte hashes to
clients that do support compression.
Attributes:
prefix_size (int):
The number of bytes for each prefix encoded
below. This field can be anywhere from 4
(shortest prefix) to 32 (full SHA256 hash).
raw_hashes (bytes):
The hashes, in binary format, concatenated
into one long string. Hashes are sorted in
lexicographic order. For JSON API users, hashes
are base64-encoded.
"""
prefix_size: int = proto.Field(
proto.INT32,
number=1,
)
raw_hashes: bytes = proto.Field(
proto.BYTES,
number=2,
)
class RiceDeltaEncoding(proto.Message):
r"""The Rice-Golomb encoded data. Used for sending compressed
4-byte hashes or compressed removal indices.
Attributes:
first_value (int):
The offset of the first entry in the encoded
data, or, if only a single integer was encoded,
that single integer's value. If the field is
empty or missing, assume zero.
rice_parameter (int):
The Golomb-Rice parameter, which is a number between 2 and
28. This field is missing (that is, zero) if ``num_entries``
is zero.
entry_count (int):
The number of entries that are delta encoded in the encoded
data. If only a single integer was encoded, this will be
zero and the single value will be stored in ``first_value``.
encoded_data (bytes):
The encoded deltas that are encoded using the
Golomb-Rice coder.
"""
first_value: int = proto.Field(
proto.INT64,
number=1,
)
rice_parameter: int = proto.Field(
proto.INT32,
number=2,
)
entry_count: int = proto.Field(
proto.INT32,
number=3,
)
encoded_data: bytes = proto.Field(
proto.BYTES,
number=4,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
595ad59fa813607ceeb3207109d189862ec5cda6
|
7f4305b6a874ac16dcd6e5525ccae04e691e8cd7
|
/collipa/libs/tornadomail/functional.py
|
52d732de7c0ff638090d6b576ea1488b26252aff
|
[
"WTFPL"
] |
permissive
|
yetone/collipa
|
f518402ed290fd552e8d6eb6cb565532c677e777
|
20a23a412585a2f66e2cdc7a2959bd7d3d933e6c
|
refs/heads/master
| 2021-01-01T15:18:47.070618
| 2018-01-08T03:28:19
| 2018-01-08T03:28:19
| 11,044,594
| 101
| 46
| null | 2018-01-08T03:28:20
| 2013-06-29T03:33:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 14,205
|
py
|
functional.py
|
# License for code in this file that was taken from Python 2.5.
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF
# hereby grants Licensee a nonexclusive, royalty-free, world-wide
# license to reproduce, analyze, test, perform and/or display publicly,
# prepare derivative works, distribute, and otherwise use Python
# alone or in any derivative version, provided, however, that PSF's
# License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
# 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation;
# All Rights Reserved" are retained in Python alone or in any derivative
# version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*(args+moreargs), **dict(kwargs, **morekwargs))
return _curried
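# --- Hedged usage sketch (illustrative only) ---
def _example_curry():
    def add(a, b, c):
        return a + b + c
    add_five = curry(add, 2, 3)  # pre-binds a=2 and b=3
    return add_five(4)           # -> 9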
### Begin from Python 2.5 functools.py ########################################
# Summary of changes made to the Python 2.5 code below:
# * swapped ``partial`` for ``curry`` to maintain backwards-compatibility
# in Django.
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation.
# All Rights Reserved.
###############################################################################
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
    updated is a tuple naming the attributes of the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
for attr in assigned:
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr))
# Return the wrapper so this can be used as a decorator via curry()
return wrapper
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying curry() to
update_wrapper().
"""
return curry(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
### End from Python 2.5 functools.py ##########################################
def memoize(func, cache, num_args):
"""
Wrap a function so that results for any argument tuple are stored in
'cache'. Note that the args to the function must be usable as dictionary
keys.
Only the first num_args are considered when creating the key.
"""
def wrapper(*args):
mem_args = args[:num_args]
if mem_args in cache:
return cache[mem_args]
result = func(*args)
cache[mem_args] = result
return result
return wraps(func)(wrapper)
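# --- Hedged usage sketch (illustrative only) ---
def _example_memoize():
    cache = {}
    def square(x):
        return x * x
    fast_square = memoize(square, cache, 1)
    fast_square(4)         # computed and stored under the key (4,)
    return fast_square(4)  # served straight from the cache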
class Promise(object):
"""
This is just a base class for the proxy class created in
the closure of the lazy function. It can be used to recognize
promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
    Turns any callable into a lazily evaluated callable. You need to give result
classes or types -- at least one is needed so that the automatic forcing of
the lazy evaluation code is triggered. Results are not memoized; the
function is evaluated on every access.
"""
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__dispatch = None
def __init__(self, args, kw):
self.__func = func
self.__args = args
self.__kw = kw
if self.__dispatch is None:
self.__prepare_class__()
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(self.__func, self.__args, self.__kw) + resultclasses
)
def __prepare_class__(cls):
cls.__dispatch = {}
for resultclass in resultclasses:
cls.__dispatch[resultclass] = {}
for (k, v) in resultclass.__dict__.items():
                    # All __promise__ calls return the same wrapper method, but they
# also do setup, inserting the method into the dispatch
# dict.
meth = cls.__promise__(resultclass, k, v)
if hasattr(cls, k):
continue
setattr(cls, k, meth)
cls._delegate_str = str in resultclasses
cls._delegate_unicode = unicode in resultclasses
assert not (cls._delegate_str and cls._delegate_unicode), "Cannot call lazy() with both str and unicode return types."
if cls._delegate_unicode:
cls.__unicode__ = cls.__unicode_cast
elif cls._delegate_str:
cls.__str__ = cls.__str_cast
__prepare_class__ = classmethod(__prepare_class__)
def __promise__(cls, klass, funcname, func):
# Builds a wrapper around some magic method and registers that magic
# method for the given type and method name.
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = self.__func(*self.__args, **self.__kw)
for t in type(res).mro():
if t in self.__dispatch:
return self.__dispatch[t][funcname](res, *args, **kw)
raise TypeError("Lazy object returned unexpected type.")
if klass not in cls.__dispatch:
cls.__dispatch[klass] = {}
cls.__dispatch[klass][funcname] = func
return __wrapper__
__promise__ = classmethod(__promise__)
def __unicode_cast(self):
return self.__func(*self.__args, **self.__kw)
def __str_cast(self):
return str(self.__func(*self.__args, **self.__kw))
def __cmp__(self, rhs):
if self._delegate_str:
s = str(self.__func(*self.__args, **self.__kw))
elif self._delegate_unicode:
s = unicode(self.__func(*self.__args, **self.__kw))
else:
s = self.__func(*self.__args, **self.__kw)
if isinstance(rhs, Promise):
return -cmp(rhs, s)
else:
return cmp(s, rhs)
def __mod__(self, rhs):
if self._delegate_str:
return str(self) % rhs
elif self._delegate_unicode:
return unicode(self) % rhs
else:
raise AssertionError('__mod__ not supported for non-string types')
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return wraps(func)(__wrapper__)
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
return lazy(func, *resultclasses)(*args, **kwargs)
def allow_lazy(func, *resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
def wrapper(*args, **kwargs):
for arg in list(args) + kwargs.values():
if isinstance(arg, Promise):
break
else:
return func(*args, **kwargs)
return lazy(func, *resultclasses)(*args, **kwargs)
return wraps(func)(wrapper)
class LazyObject(object):
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
def __init__(self):
self._wrapped = None
def __getattr__(self, name):
if self._wrapped is None:
self._setup()
return getattr(self._wrapped, name)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is None:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is None:
self._setup()
delattr(self._wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialise the wrapped object.
"""
raise NotImplementedError
# introspection support:
__members__ = property(lambda self: self.__dir__())
def __dir__(self):
if self._wrapped is None:
self._setup()
return dir(self._wrapped)
class SimpleLazyObject(LazyObject):
"""
A lazy object initialised from any function.
Designed for compound objects of unknown type. For builtins or objects of
known type, use django.utils.functional.lazy.
"""
def __init__(self, func):
"""
Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.
"""
self.__dict__['_setupfunc'] = func
# For some reason, we have to inline LazyObject.__init__ here to avoid
# recursion
self._wrapped = None
def __str__(self):
if self._wrapped is None: self._setup()
return str(self._wrapped)
def __unicode__(self):
if self._wrapped is None: self._setup()
return unicode(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is None:
# We have to use SimpleLazyObject, not self.__class__, because the
# latter is proxied.
result = SimpleLazyObject(self._setupfunc)
memo[id(self)] = result
return result
else:
# Changed to use deepcopy from copycompat, instead of copy
# For Python 2.4.
from copycompat import deepcopy
return deepcopy(self._wrapped, memo)
# Need to pretend to be the wrapped class, for the sake of objects that care
# about this (especially in equality tests)
def __get_class(self):
if self._wrapped is None: self._setup()
return self._wrapped.__class__
__class__ = property(__get_class)
def __eq__(self, other):
if self._wrapped is None: self._setup()
return self._wrapped == other
def __hash__(self):
if self._wrapped is None: self._setup()
return hash(self._wrapped)
def _setup(self):
self._wrapped = self._setupfunc()
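# --- Hedged usage sketch (illustrative only) ---
def _example_simple_lazy():
    calls = [0]
    def build():
        calls[0] += 1
        return "built"
    lazy_value = SimpleLazyObject(build)
    # build() has not run yet; the first access triggers _setup().
    return str(lazy_value), calls[0]  # -> ('built', 1)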
|
45a8a7d760ebba14fc2f17cca463c7f128103b14
|
62179a165ec620ba967dbc20016e890978fbff50
|
/examples/tensorflow/classification/datasets/preprocessing/cifar.py
|
94a1d7fd817a49b2a2a2261d98ba9c182572be95
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/nncf
|
91fcf153a96f85da166aacb7a70ca4941e4ba4a4
|
c027c8b43c4865d46b8de01d8350dd338ec5a874
|
refs/heads/develop
| 2023-08-24T11:25:05.704499
| 2023-08-23T14:44:05
| 2023-08-23T14:44:05
| 263,687,600
| 558
| 157
|
Apache-2.0
| 2023-09-14T17:06:41
| 2020-05-13T16:41:05
|
Python
|
UTF-8
|
Python
| false
| false
| 6,151
|
py
|
cifar.py
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import tensorflow as tf
from examples.tensorflow.classification.datasets.preprocessing import utils
# Calculated from the CIFAR10 training set
CIFAR10_MEAN_RGB = (0.4914 * 255, 0.4822 * 255, 0.4465 * 255)
CIFAR10_STDDEV_RGB = (0.247 * 255, 0.2435 * 255, 0.2616 * 255)
# Calculated from the CIFAR100 training set
CIFAR100_MEAN_RGB = (0.5071 * 255, 0.4867 * 255, 0.4408 * 255)
CIFAR100_STDDEV_RGB = (0.2675 * 255, 0.2565 * 255, 0.2761 * 255)
IMAGE_SIZE = 32
PADDING = 4
def preprocess_for_eval(
image: tf.Tensor,
image_size: int = IMAGE_SIZE,
dtype: tf.dtypes.DType = tf.float32,
means: Tuple[float, ...] = None,
stddev: Tuple[float, ...] = None,
) -> tf.Tensor:
"""
Preprocesses the given image for evaluation.
:param image: `Tensor` representing an image of arbitrary size.
:param image_size: image height/width dimension.
:param dtype: the dtype to convert the images to.
:param means: values to subtract from each channel.
    :param stddev: values to divide each channel by.
:return: a preprocessed and normalized image `Tensor`.
"""
images = tf.image.resize_with_crop_or_pad(image, image_size, image_size)
images = tf.cast(images, tf.float32)
images = utils.normalize(images, means, stddev)
images = tf.image.convert_image_dtype(images, dtype=dtype)
return images
def preprocess_for_train(
image: tf.Tensor,
image_size: int = IMAGE_SIZE,
num_channels: int = 3,
padding: int = PADDING,
dtype: tf.dtypes.DType = tf.float32,
means: Tuple[float, ...] = None,
stddev: Tuple[float, ...] = None,
) -> tf.Tensor:
"""
Preprocesses the given image for training.
:param image: `Tensor` representing an image of arbitrary size.
:param image_size: image height/width dimension.
:param num_channels: number of image input channels.
    :param padding: the amount of padding before and after each dimension of the image.
:param dtype: the dtype to convert the images to. Set to `None` to skip conversion.
:param means: values to subtract from each channel.
    :param stddev: values to divide each channel by.
:return: a preprocessed and normalized image `Tensor`.
"""
images = image
if padding > 0:
images = tf.pad(images, [[padding, padding], [padding, padding], [0, 0]], constant_values=0)
images = tf.image.random_crop(images, [image_size, image_size, num_channels])
images = tf.image.random_flip_left_right(images)
images = tf.cast(images, tf.float32)
images = utils.normalize(images, means, stddev)
images = tf.image.convert_image_dtype(images, dtype=dtype)
return images
def preprocess_image(
image: tf.Tensor,
image_size: int = IMAGE_SIZE,
is_training: bool = False,
dtype: tf.dtypes.DType = tf.float32,
means: Tuple[float, ...] = None,
stddev: Tuple[float, ...] = None,
) -> tf.Tensor:
"""
Preprocesses the given image.
:param image: `Tensor` representing an image of arbitrary size.
:param image_size: image height/width dimension
:param is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
:param dtype: the dtype to convert the images to.
:param means: values to subtract from each channel.
    :param stddev: values to divide each channel by.
:return: a preprocessed image.
"""
if is_training:
return preprocess_for_train(image=image, image_size=image_size, dtype=dtype, means=means, stddev=stddev)
return preprocess_for_eval(image=image, image_size=image_size, dtype=dtype, means=means, stddev=stddev)
def cifar10_preprocess_image(
image: tf.Tensor,
image_size: int = IMAGE_SIZE,
is_training: bool = False,
dtype: tf.dtypes.DType = tf.float32,
means: Tuple[float, ...] = CIFAR10_MEAN_RGB,
stddev: Tuple[float, ...] = CIFAR10_STDDEV_RGB,
) -> tf.Tensor:
"""
    Preprocesses the given image using the mean and standard deviation calculated from the CIFAR10 training set.
:param image: `Tensor` representing an image of arbitrary size.
:param image_size: image height/width dimension
:param is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
:param dtype: the dtype to convert the images to.
:param means: values to subtract from each channel.
    :param stddev: values to divide each channel by.
:return: a preprocessed image.
"""
return preprocess_image(
image=image, image_size=image_size, is_training=is_training, dtype=dtype, means=means, stddev=stddev
)
def cifar100_preprocess_image(
image: tf.Tensor,
image_size: int = IMAGE_SIZE,
is_training: bool = False,
dtype: tf.dtypes.DType = tf.float32,
means: Tuple[float, ...] = CIFAR100_MEAN_RGB,
stddev: Tuple[float, ...] = CIFAR100_STDDEV_RGB,
) -> tf.Tensor:
"""
    Preprocesses the given image using the mean and standard deviation calculated from the CIFAR100 training set.
:param image: `Tensor` representing an image of arbitrary size.
:param image_size: image height/width dimension
:param is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
:param dtype: the dtype to convert the images to.
:param means: values to subtract from each channel.
    :param stddev: values to divide each channel by.
:return: a preprocessed image.
"""
return preprocess_image(
image=image, image_size=image_size, is_training=is_training, dtype=dtype, means=means, stddev=stddev
)
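# --- Illustrative usage (editor's note, not part of the original file). ---
# A minimal sketch exercising the functions above on a random uint8 image;
# it assumes only this module's own imports.
if __name__ == "__main__":
    raw = tf.cast(
        tf.random.uniform([IMAGE_SIZE, IMAGE_SIZE, 3], maxval=256, dtype=tf.int32),
        tf.uint8,
    )
    train_img = cifar10_preprocess_image(raw, is_training=True)
    eval_img = cifar10_preprocess_image(raw, is_training=False)
    print(train_img.shape, eval_img.shape)  # both (32, 32, 3), float32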
|
3081f677d634679113392c191e1ac86aa1975255
|
da1500e0d3040497614d5327d2461a22e934b4d8
|
/third_party/web_platform_tests/tools/pywebsocket/src/mod_pywebsocket/_stream_hixie75.py
|
94cf5b31ba0fc3c3a3477de82297a0c26da1c48c
|
[
"LicenseRef-scancode-w3c-03-bsd-license",
"BSD-3-Clause",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"Apache-2.0",
"MIT"
] |
permissive
|
youtube/cobalt
|
34085fc93972ebe05b988b15410e99845efd1968
|
acefdaaadd3ef46f10f63d1acae2259e4024d383
|
refs/heads/main
| 2023-09-01T13:09:47.225174
| 2023-09-01T08:54:54
| 2023-09-01T08:54:54
| 50,049,789
| 169
| 80
|
BSD-3-Clause
| 2023-09-14T21:50:50
| 2016-01-20T18:11:34
| null |
UTF-8
|
Python
| false
| false
| 8,842
|
py
|
_stream_hixie75.py
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides a class for parsing/building frames of the WebSocket
protocol version HyBi 00 and Hixie 75.
Specification:
- HyBi 00 http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
- Hixie 75 http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75
"""
from mod_pywebsocket import common
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import StreamBase
from mod_pywebsocket._stream_base import UnsupportedFrameException
from mod_pywebsocket import util
class StreamHixie75(StreamBase):
"""A class for parsing/building frames of the WebSocket protocol version
HyBi 00 and Hixie 75.
"""
def __init__(self, request, enable_closing_handshake=False):
"""Construct an instance.
Args:
request: mod_python request.
enable_closing_handshake: to let StreamHixie75 perform closing
handshake as specified in HyBi 00, set
this option to True.
"""
StreamBase.__init__(self, request)
self._logger = util.get_class_logger(self)
self._enable_closing_handshake = enable_closing_handshake
self._request.client_terminated = False
self._request.server_terminated = False
def send_message(self, message, end=True, binary=False):
"""Send message.
Args:
message: unicode string to send.
binary: not used in hixie75.
Raises:
BadOperationException: when called on a server-terminated
connection.
"""
if not end:
raise BadOperationException(
'StreamHixie75 doesn\'t support send_message with end=False')
if binary:
raise BadOperationException(
'StreamHixie75 doesn\'t support send_message with binary=True')
if self._request.server_terminated:
raise BadOperationException(
'Requested send_message after sending out a closing handshake')
self._write(''.join(['\x00', message.encode('utf-8'), '\xff']))
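    # Framing sketch (editor's note, not in the original source): a Hixie 75
    # text frame is the UTF-8 payload wrapped in a 0x00 start byte and a 0xff
    # terminator, so send_message(u'hi') writes '\x00hi\xff' to the wire.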
def _read_payload_length_hixie75(self):
"""Reads a length header in a Hixie75 version frame with length.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
length = 0
while True:
b_str = self._read(1)
b = ord(b_str)
length = length * 128 + (b & 0x7f)
if (b & 0x80) == 0:
break
return length
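    # Worked example (editor's note, not in the original source): the length
    # header is big-endian base-128 with the high bit marking continuation.
    # A 300-byte payload is encoded as 0x82 0x2c:
    #   0x82 -> length = 0 * 128 + (0x82 & 0x7f) = 2, high bit set, keep going
    #   0x2c -> length = 2 * 128 + 0x2c = 300, high bit clear, stop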
def receive_message(self):
"""Receive a WebSocket frame and return its payload an unicode string.
Returns:
payload unicode string in a WebSocket frame.
Raises:
ConnectionTerminatedException: when read returns empty
string.
BadOperationException: when called on a client-terminated
connection.
"""
if self._request.client_terminated:
raise BadOperationException(
'Requested receive_message after receiving a closing '
'handshake')
while True:
# Read 1 byte.
# mp_conn.read will block if no bytes are available.
# Timeout is controlled by TimeOut directive of Apache.
frame_type_str = self.receive_bytes(1)
frame_type = ord(frame_type_str)
if (frame_type & 0x80) == 0x80:
# The payload length is specified in the frame.
# Read and discard.
length = self._read_payload_length_hixie75()
if length > 0:
_ = self.receive_bytes(length)
# 5.3 3. 12. if /type/ is 0xFF and /length/ is 0, then set the
# /client terminated/ flag and abort these steps.
if not self._enable_closing_handshake:
continue
if frame_type == 0xFF and length == 0:
self._request.client_terminated = True
if self._request.server_terminated:
self._logger.debug(
'Received ack for server-initiated closing '
'handshake')
return None
self._logger.debug(
'Received client-initiated closing handshake')
self._send_closing_handshake()
self._logger.debug(
'Sent ack for client-initiated closing handshake')
return None
else:
# The payload is delimited with \xff.
bytes = self._read_until('\xff')
# The WebSocket protocol section 4.4 specifies that invalid
# characters must be replaced with U+fffd REPLACEMENT
# CHARACTER.
message = bytes.decode('utf-8', 'replace')
if frame_type == 0x00:
return message
# Discard data of other types.
def _send_closing_handshake(self):
if not self._enable_closing_handshake:
raise BadOperationException(
'Closing handshake is not supported in Hixie 75 protocol')
self._request.server_terminated = True
# 5.3 the server may decide to terminate the WebSocket connection by
# running through the following steps:
# 1. send a 0xFF byte and a 0x00 byte to the client to indicate the
# start of the closing handshake.
self._write('\xff\x00')
def close_connection(self, unused_code='', unused_reason=''):
"""Closes a WebSocket connection.
Raises:
ConnectionTerminatedException: when closing handshake was
                not successful.
"""
if self._request.server_terminated:
self._logger.debug(
'Requested close_connection but server is already terminated')
return
if not self._enable_closing_handshake:
self._request.server_terminated = True
self._logger.debug('Connection closed')
return
self._send_closing_handshake()
self._logger.debug('Sent server-initiated closing handshake')
# TODO(ukai): 2. wait until the /client terminated/ flag has been set,
# or until a server-defined timeout expires.
#
# For now, we expect receiving closing handshake right after sending
# out closing handshake, and if we couldn't receive non-handshake
# frame, we take it as ConnectionTerminatedException.
message = self.receive_message()
if message is not None:
raise ConnectionTerminatedException(
'Didn\'t receive valid ack for closing handshake')
# TODO: 3. close the WebSocket connection.
# note: mod_python Connection (mp_conn) doesn't have close method.
def send_ping(self, body):
raise BadOperationException(
'StreamHixie75 doesn\'t support send_ping')
# vi:sts=4 sw=4 et
|
f0b0c35520c7d87d21589642b5c38acb5e9c7a95
|
c675ff5fcd3b13fa39352bb8cac11d75262659a8
|
/tests/test_observable/test_publish.py
|
469bf26b9b75e54a863a9198df8bef4e1d997c87
|
[
"MIT"
] |
permissive
|
ReactiveX/RxPY
|
469eb714996c205989e99899a6f1ab1ae2f42dd0
|
af1663d35810fdcd4c25a3ed2e8f0d71b55c341d
|
refs/heads/master
| 2023-08-14T19:27:40.086304
| 2023-01-08T10:02:08
| 2023-03-04T15:33:19
| 8,946,089
| 4,764
| 467
|
MIT
| 2023-09-05T02:53:16
| 2013-03-22T06:16:54
|
Python
|
UTF-8
|
Python
| false
| false
| 16,870
|
py
|
test_publish.py
|
import unittest
import reactivex
from reactivex import ConnectableObservable, Observable
from reactivex import operators as ops
from reactivex.abc import ObserverBase
from reactivex.testing import ReactiveTest, TestScheduler
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
pass
# Helper function for raising exceptions within lambdas
def _raise(ex):
raise RxException(ex)
class MySubject(Observable, ObserverBase):
def __init__(self):
super(MySubject, self).__init__()
self.dispose_on_map = {}
self.subscribe_count = 0
self.disposed = False
self.observer = None
def _subscribe_core(self, observer, scheduler=None):
self.subscribe_count += 1
self.observer = observer
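        # Editor's note: Duck below is a stand-in disposable; disposing it
        # marks this subject as disposed so the ref_count tests can assert
        # that the connectable observable was disconnected.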
class Duck:
def __init__(self, this):
self.this = this
def dispose(self):
self.this.disposed = True
return Duck(self)
def dispose_on(self, value, disposable):
self.dispose_on_map[value] = disposable
def on_next(self, value):
self.observer.on_next(value)
if value in self.dispose_on_map:
self.dispose_on_map[value].dispose()
def on_error(self, error):
self.observer.on_error(error)
def on_completed(self):
self.observer.on_completed()
class TestPublish(unittest.TestCase):
def test_publish_cold_zip(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(40, 0),
on_next(90, 1),
on_next(150, 2),
on_next(210, 3),
on_next(240, 4),
on_next(270, 5),
on_next(330, 6),
on_next(340, 7),
on_completed(390),
)
def create():
def mapper(ys):
return ys.pipe(
ops.zip(ys),
ops.map(sum),
)
return xs.pipe(ops.publish(mapper=mapper))
results = scheduler.start(create)
assert results.messages == [
on_next(210, 6),
on_next(240, 8),
on_next(270, 10),
on_next(330, 12),
on_next(340, 14),
on_completed(390),
]
assert xs.subscriptions == [subscribe(200, 390)]
def test_ref_count_connects_on_first(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, 1),
on_next(220, 2),
on_next(230, 3),
on_next(240, 4),
on_completed(250),
)
subject = MySubject()
conn = ConnectableObservable(xs, subject)
def create():
return conn.pipe(ops.ref_count())
res = scheduler.start(create)
assert res.messages == [
on_next(210, 1),
on_next(220, 2),
on_next(230, 3),
on_next(240, 4),
on_completed(250),
]
assert subject.disposed
def test_ref_count_notconnected(self):
disconnected = [False]
count = [0]
def factory(scheduler):
count[0] += 1
def create(obs, scheduler=None):
def func():
disconnected[0] = True
return func
return reactivex.create(create)
xs = reactivex.defer(factory)
subject = MySubject()
conn = ConnectableObservable(xs, subject)
refd = conn.pipe(ops.ref_count())
dis1 = refd.subscribe()
self.assertEqual(1, count[0])
self.assertEqual(1, subject.subscribe_count)
assert not disconnected[0]
dis2 = refd.subscribe()
self.assertEqual(1, count[0])
self.assertEqual(2, subject.subscribe_count)
assert not disconnected[0]
dis1.dispose()
assert not disconnected[0]
dis2.dispose()
assert disconnected[0]
disconnected[0] = False
dis3 = refd.subscribe()
self.assertEqual(2, count[0])
self.assertEqual(3, subject.subscribe_count)
assert not disconnected[0]
dis3.dispose()
assert disconnected[0]
def test_publish_basic(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
)
ys = [None]
subscription = [None]
connection = [None]
results = scheduler.create_observer()
def action0(scheduler, state):
ys[0] = xs.pipe(ops.publish())
scheduler.schedule_absolute(created, action0)
def action1(scheduler, state):
subscription[0] = ys[0].subscribe(results)
scheduler.schedule_absolute(subscribed, action1)
def action2(scheduler, state):
subscription[0].dispose()
scheduler.schedule_absolute(disposed, action2)
def action3(scheduler, state):
connection[0] = ys[0].connect()
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state):
connection[0] = ys[0].connect()
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(550, action6)
def action7(scheduler, state):
connection[0] = ys[0].connect()
scheduler.schedule_absolute(650, action7)
def action8(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(800, action8)
scheduler.start()
assert results.messages == [
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(520, 11),
]
assert xs.subscriptions == [
subscribe(300, 400),
subscribe(500, 550),
subscribe(650, 800),
]
def test_publish_error(self):
ex = "ex"
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_error(600, ex),
)
ys = [None]
subscription = [None]
connection = [None]
results = scheduler.create_observer()
def action0(scheduler, state):
ys[0] = xs.pipe(ops.publish())
scheduler.schedule_absolute(created, action0)
def action1(scheduler, state):
subscription[0] = ys[0].subscribe(results)
scheduler.schedule_absolute(subscribed, action1)
def action2(scheduler, state):
subscription[0].dispose()
scheduler.schedule_absolute(disposed, action2)
def action3(scheduler, state):
connection[0] = ys[0].connect()
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state):
connection[0] = ys[0].connect()
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(800, action6)
scheduler.start()
assert results.messages == [
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(520, 11),
on_next(560, 20),
on_error(600, ex),
]
assert xs.subscriptions == [subscribe(300, 400), subscribe(500, 600)]
def test_publish_complete(self):
connection = [None]
subscription = [None]
ys = [None]
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
)
results = scheduler.create_observer()
def action0(scheduler, state):
ys[0] = xs.pipe(ops.publish())
scheduler.schedule_absolute(created, action0)
def action1(scheduler, state):
subscription[0] = ys[0].subscribe(results)
scheduler.schedule_absolute(subscribed, action1)
def action2(scheduler, state):
subscription[0].dispose()
scheduler.schedule_absolute(disposed, action2)
def action3(scheduler, state):
connection[0] = ys[0].connect()
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state):
connection[0] = ys[0].connect()
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(800, action6)
scheduler.start()
assert results.messages == [
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
]
assert xs.subscriptions == [subscribe(300, 400), subscribe(500, 600)]
def test_publish_dispose(self):
connection = [None]
subscription = [None]
ys = [None]
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
)
results = scheduler.create_observer()
def action0(scheduler, state):
ys[0] = xs.pipe(ops.publish())
scheduler.schedule_absolute(created, action0)
def action1(scheduler, state):
subscription[0] = ys[0].subscribe(results)
scheduler.schedule_absolute(subscribed, action1)
def action2(scheduler, state):
subscription[0].dispose()
scheduler.schedule_absolute(350, action2)
def action3(scheduler, state):
connection[0] = ys[0].connect()
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state):
connection[0] = ys[0].connect()
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(550, action6)
def action7(scheduler, state):
connection[0] = ys[0].connect()
scheduler.schedule_absolute(650, action7)
def action8(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(800, action8)
scheduler.start()
assert results.messages == [on_next(340, 8)]
assert xs.subscriptions == [
subscribe(300, 400),
subscribe(500, 550),
subscribe(650, 800),
]
def test_publish_multipleconnections(self):
xs = reactivex.never()
ys = xs.pipe(ops.publish())
connection1 = ys.connect()
connection2 = ys.connect()
assert connection1 == connection2
connection1.dispose()
connection2.dispose()
connection3 = ys.connect()
assert connection1 != connection3
connection3.dispose()
def test_publish_lambda_zip_complete(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
)
def create():
def mapper(_xs):
return _xs.pipe(
ops.zip(_xs.pipe(ops.skip(1))),
ops.map(sum),
)
return xs.pipe(ops.publish(mapper))
results = scheduler.start(create)
assert results.messages == [
on_next(280, 7),
on_next(290, 5),
on_next(340, 9),
on_next(360, 13),
on_next(370, 11),
on_next(390, 13),
on_next(410, 20),
on_next(430, 15),
on_next(450, 11),
on_next(520, 20),
on_next(560, 31),
on_completed(600),
]
assert xs.subscriptions == [subscribe(200, 600)]
def test_publish_lambda_zip_error(self):
ex = "ex"
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_error(600, ex),
)
def create():
def mapper(_xs):
return _xs.pipe(
ops.zip(_xs.pipe(ops.skip(1))),
ops.map(sum),
)
return xs.pipe(ops.publish(mapper))
results = scheduler.start(create)
assert results.messages == [
on_next(280, 7),
on_next(290, 5),
on_next(340, 9),
on_next(360, 13),
on_next(370, 11),
on_next(390, 13),
on_next(410, 20),
on_next(430, 15),
on_next(450, 11),
on_next(520, 20),
on_next(560, 31),
on_error(600, ex),
]
assert xs.subscriptions == [subscribe(200, 600)]
def test_publish_lambda_zip_dispose(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
)
def create():
def mapper(_xs):
return _xs.pipe(ops.zip(_xs.pipe(ops.skip(1))), ops.map(sum))
return xs.pipe(ops.publish(mapper))
results = scheduler.start(create, disposed=470)
assert results.messages == [
on_next(280, 7),
on_next(290, 5),
on_next(340, 9),
on_next(360, 13),
on_next(370, 11),
on_next(390, 13),
on_next(410, 20),
on_next(430, 15),
on_next(450, 11),
]
assert xs.subscriptions == [subscribe(200, 470)]
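# Illustrative sketch (editor's note, not part of the original tests): the
# connect semantics exercised above, on a concrete non-test source.
if __name__ == "__main__":
    published = reactivex.of(1, 2, 3).pipe(ops.publish())
    published.subscribe(print)  # nothing is emitted yet; not connected
    published.connect()         # connecting pushes 1, 2, 3 to the subscriber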
|
af93ff0841d7053ebcb62847e200d986201ec89e
|
0c141972d779ec44c1f252194514ad9c464aef2e
|
/mail/components/im/components.conf
|
c3153690f72be7e138bcd93e30f378bfd6b0b0aa
|
[] |
no_license
|
mozilla/releases-comm-central
|
6a47ae14f6e24b4ff22998ca957078b7956b04e2
|
d4838a15a8dbad6d5d9b7e6f486e43bf1511f87a
|
refs/heads/master
| 2022-02-12T03:24:52.518381
| 2022-02-12T00:34:15
| 2022-02-12T00:34:15
| 4,578,344
| 137
| 101
| null | 2023-07-25T20:56:40
| 2012-06-06T21:42:18
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 748
|
conf
|
components.conf
|
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
Classes = [
{
'cid': '{13118758-dad2-418c-a03d-1acbfed0cd01}',
'contract_ids': ['@mozilla.org/messenger/protocol/info;1?type=im'],
'jsm': 'resource:///modules/IMProtocolInfo.jsm',
'constructor': 'IMProtocolInfo',
},
{
'cid': '{9dd7f36b-5960-4f0a-8789-f5f516bd083d}',
'contract_ids': ['@mozilla.org/messenger/server;1?type=im'],
'jsm': 'resource:///modules/IMIncomingServer.jsm',
'constructor': 'IMIncomingServer',
},
]
|
63c837c96e2f677a216feb78da70497bc861f386
|
7efabf599aaf53728a681639bc57cadc3abe6bde
|
/python/cugraph/cugraph/dask/link_analysis/pagerank.py
|
2dfd25fa5224cefa97f86406e1ab25883cb37394
|
[
"Apache-2.0"
] |
permissive
|
rapidsai/cugraph
|
49b5378271c72c155f55d916a3c1cc1fbe05ceca
|
cafded113c9545e5e7211cc965f53c00939307c0
|
refs/heads/branch-23.10
| 2023-08-26T19:36:33.631587
| 2023-08-25T13:49:23
| 2023-08-25T13:49:23
| 157,752,451
| 1,403
| 310
|
Apache-2.0
| 2023-09-13T17:01:25
| 2018-11-15T18:07:11
|
Cuda
|
UTF-8
|
Python
| false
| false
| 16,345
|
py
|
pagerank.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import dask
from dask.distributed import wait, default_client
import dask_cudf
import cudf
import numpy as np
from pylibcugraph import (
pagerank as plc_pagerank,
personalized_pagerank as plc_p_pagerank,
exceptions as plc_exceptions,
ResourceHandle,
)
import cugraph.dask.comms.comms as Comms
from cugraph.dask.common.input_utils import get_distributed_data
from cugraph.exceptions import FailedToConvergeError
def convert_to_return_tuple(plc_pr_retval):
"""
Using the PLC pagerank return tuple, creates a cudf DataFrame from the cupy
    arrays and extracts the (optional) converged bool.
"""
if len(plc_pr_retval) == 3:
cupy_vertices, cupy_pagerank, converged = plc_pr_retval
else:
cupy_vertices, cupy_pagerank = plc_pr_retval
converged = True
df = cudf.DataFrame()
df["vertex"] = cupy_vertices
df["pagerank"] = cupy_pagerank
return (df, converged)
# FIXME: Move this function to the utility module so that it can be
# shared by other algos
def ensure_valid_dtype(input_graph, input_df, input_df_name):
if input_graph.properties.weighted is False:
# If the graph is not weighted, an artificial weight column
# of type 'float32' is added and it must match the user
# personalization/nstart values.
edge_attr_dtype = np.float32
else:
edge_attr_dtype = input_graph.input_df["value"].dtype
if "values" in input_df.columns:
input_df_values_dtype = input_df["values"].dtype
if input_df_values_dtype != edge_attr_dtype:
warning_msg = (
f"PageRank requires '{input_df_name}' values "
"to match the graph's 'edge_attr' type. "
f"edge_attr type is: {edge_attr_dtype} and got "
f"'{input_df_name}' values of type: "
f"{input_df_values_dtype}."
)
warnings.warn(warning_msg, UserWarning)
input_df = input_df.astype({"values": edge_attr_dtype})
vertex_dtype = input_graph.edgelist.edgelist_df.dtypes[0]
input_df_vertex_dtype = input_df["vertex"].dtype
if input_df_vertex_dtype != vertex_dtype:
warning_msg = (
f"PageRank requires '{input_df_name}' vertex "
"to match the graph's 'vertex' type. "
f"input graph's vertex type is: {vertex_dtype} and got "
f"'{input_df_name}' vertex of type: "
f"{input_df_vertex_dtype}."
)
warnings.warn(warning_msg, UserWarning)
input_df = input_df.astype({"vertex": vertex_dtype})
return input_df
def renumber_vertices(input_graph, input_df):
input_df = input_graph.add_internal_vertex_id(
input_df, "vertex", "vertex"
).compute()
return input_df
def _call_plc_pagerank(
sID,
mg_graph_x,
pre_vtx_o_wgt_vertices,
pre_vtx_o_wgt_sums,
initial_guess_vertices,
initial_guess_values,
alpha,
epsilon,
max_iterations,
do_expensive_check,
fail_on_nonconvergence,
):
try:
return plc_pagerank(
resource_handle=ResourceHandle(Comms.get_handle(sID).getHandle()),
graph=mg_graph_x,
precomputed_vertex_out_weight_vertices=pre_vtx_o_wgt_vertices,
precomputed_vertex_out_weight_sums=pre_vtx_o_wgt_sums,
initial_guess_vertices=initial_guess_vertices,
initial_guess_values=initial_guess_values,
alpha=alpha,
epsilon=epsilon,
max_iterations=max_iterations,
do_expensive_check=do_expensive_check,
fail_on_nonconvergence=fail_on_nonconvergence,
)
# Re-raise this as a cugraph exception so users trying to catch this do not
# have to know to import another package.
except plc_exceptions.FailedToConvergeError as exc:
raise FailedToConvergeError from exc
def _call_plc_personalized_pagerank(
sID,
mg_graph_x,
pre_vtx_o_wgt_vertices,
pre_vtx_o_wgt_sums,
data_personalization,
initial_guess_vertices,
initial_guess_values,
alpha,
epsilon,
max_iterations,
do_expensive_check,
fail_on_nonconvergence,
):
personalization_vertices = data_personalization["vertex"]
personalization_values = data_personalization["values"]
try:
return plc_p_pagerank(
resource_handle=ResourceHandle(Comms.get_handle(sID).getHandle()),
graph=mg_graph_x,
precomputed_vertex_out_weight_vertices=pre_vtx_o_wgt_vertices,
precomputed_vertex_out_weight_sums=pre_vtx_o_wgt_sums,
personalization_vertices=personalization_vertices,
personalization_values=personalization_values,
initial_guess_vertices=initial_guess_vertices,
initial_guess_values=initial_guess_values,
alpha=alpha,
epsilon=epsilon,
max_iterations=max_iterations,
do_expensive_check=do_expensive_check,
fail_on_nonconvergence=fail_on_nonconvergence,
)
# Re-raise this as a cugraph exception so users trying to catch this do not
# have to know to import another package.
except plc_exceptions.FailedToConvergeError as exc:
raise FailedToConvergeError from exc
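# Editor's note (not in the original file): the fixed point both PLC calls
# above iterate toward is the standard power-method update
#
#     PR(v) = (1 - alpha) / N + alpha * sum_{u -> v} PR(u) / outdegree(u)
#
# where the personalized variant replaces the uniform (1 - alpha) / N
# teleport term with the supplied personalization distribution; iteration
# stops when the change between sweeps falls below `epsilon` or
# `max_iterations` is reached.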
def pagerank(
input_graph,
alpha=0.85,
personalization=None,
precomputed_vertex_out_weight=None,
max_iter=100,
tol=1.0e-5,
nstart=None,
fail_on_nonconvergence=True,
):
"""
Find the PageRank values for each vertex in a graph using multiple GPUs.
cuGraph computes an approximation of the Pagerank using the power method.
    The input graph must contain an edge list as a dask-cudf dataframe with
one partition per GPU.
All edges will have an edge_attr value of 1.0 if not provided.
Parameters
----------
input_graph : cugraph.Graph
cuGraph graph descriptor, should contain the connectivity information
        as a dask cudf edge list dataframe (edge weights are not used for this
algorithm).
alpha : float, optional (default=0.85)
The damping factor alpha represents the probability to follow an
outgoing edge, standard value is 0.85.
Thus, 1.0-alpha is the probability to “teleport” to a random vertex.
Alpha should be greater than 0.0 and strictly lower than 1.0.
personalization : cudf.Dataframe, optional (default=None)
GPU Dataframe containing the personalization information.
(a performance optimization)
personalization['vertex'] : cudf.Series
Subset of vertices of graph for personalization
personalization['values'] : cudf.Series
Personalization values for vertices
precomputed_vertex_out_weight : cudf.Dataframe, optional (default=None)
GPU Dataframe containing the precomputed vertex out weight
(a performance optimization)
information.
precomputed_vertex_out_weight['vertex'] : cudf.Series
Subset of vertices of graph for precomputed_vertex_out_weight
precomputed_vertex_out_weight['sums'] : cudf.Series
Corresponding precomputed sum of outgoing vertices weight
max_iter : int, optional (default=100)
The maximum number of iterations before an answer is returned. This can
be used to limit the execution time and do an early exit before the
solver reaches the convergence tolerance.
If this value is lower or equal to 0 cuGraph will use the default
value, which is 100.
tol : float, optional (default=1e-05)
        Set the tolerance of the approximation; this parameter should be a small
magnitude value.
The lower the tolerance the better the approximation. If this value is
0.0f, cuGraph will use the default value which is 1.0E-5.
Setting too small a tolerance can lead to non-convergence due to
numerical roundoff. Usually values between 0.01 and 0.00001 are
acceptable.
nstart : cudf.Dataframe, optional (default=None)
GPU Dataframe containing the initial guess for pagerank.
(a performance optimization)
nstart['vertex'] : cudf.Series
Subset of vertices of graph for initial guess for pagerank values
nstart['values'] : cudf.Series
Pagerank values for vertices
fail_on_nonconvergence : bool (default=True)
If the solver does not reach convergence, raise an exception if
fail_on_nonconvergence is True. If fail_on_nonconvergence is False,
the return value is a tuple of (pagerank, converged) where pagerank is
a cudf.DataFrame as described below, and converged is a boolean
indicating if the solver converged (True) or not (False).
Returns
-------
The return value varies based on the value of the fail_on_nonconvergence
    parameter. If fail_on_nonconvergence is True:
PageRank : dask_cudf.DataFrame
GPU data frame containing two dask_cudf.Series of size V: the
vertex identifiers and the corresponding PageRank values.
NOTE: if the input cugraph.Graph was created using the renumber=False
option of any of the from_*_edgelist() methods, pagerank assumes that
the vertices in the edgelist are contiguous and start from 0.
If the actual set of vertices in the edgelist is not
contiguous (has gaps) or does not start from zero, pagerank will assume
the "missing" vertices are isolated vertices in the graph, and will
compute and return pagerank values for each. If this is not the desired
behavior, ensure the input cugraph.Graph is created from the
from_*_edgelist() functions with the renumber=True option (the default)
ddf['vertex'] : dask_cudf.Series
Contains the vertex identifiers
ddf['pagerank'] : dask_cudf.Series
Contains the PageRank score
If fail_on_nonconvergence is False:
(PageRank, converged) : tuple of (dask_cudf.DataFrame, bool)
PageRank is the GPU dataframe described above, converged is a bool
indicating if the solver converged (True) or not (False).
Examples
--------
>>> import cugraph.dask as dcg
>>> import dask_cudf
>>> # ... Init a DASK Cluster
>>> # see https://docs.rapids.ai/api/cugraph/stable/dask-cugraph.html
>>> # Download dataset from https://github.com/rapidsai/cugraph/datasets/..
>>> chunksize = dcg.get_chunksize(datasets_path / "karate.csv")
>>> ddf = dask_cudf.read_csv(datasets_path / "karate.csv",
... chunksize=chunksize, delimiter=" ",
... names=["src", "dst", "value"],
... dtype=["int32", "int32", "float32"])
>>> dg = cugraph.Graph(directed=True)
>>> dg.from_dask_cudf_edgelist(ddf, source='src', destination='dst')
>>> pr = dcg.pagerank(dg)
"""
# Initialize dask client
client = default_client()
if input_graph.store_transposed is False:
warning_msg = (
"Pagerank expects the 'store_transposed' flag "
"to be set to 'True' for optimal performance during "
"the graph creation"
)
warnings.warn(warning_msg, UserWarning)
initial_guess_vertices = None
initial_guess_values = None
precomputed_vertex_out_weight_vertices = None
precomputed_vertex_out_weight_sums = None
do_expensive_check = False
# FIXME: Distribute the 'precomputed_vertex_out_weight'
# across GPUs for performance optimization
if precomputed_vertex_out_weight is not None:
if input_graph.renumbered is True:
precomputed_vertex_out_weight = renumber_vertices(
input_graph, precomputed_vertex_out_weight
)
precomputed_vertex_out_weight = ensure_valid_dtype(
input_graph, precomputed_vertex_out_weight, "precomputed_vertex_out_weight"
)
precomputed_vertex_out_weight_vertices = precomputed_vertex_out_weight["vertex"]
precomputed_vertex_out_weight_sums = precomputed_vertex_out_weight["sums"]
# FIXME: Distribute the 'nstart' across GPUs for performance optimization
if nstart is not None:
if input_graph.renumbered is True:
nstart = renumber_vertices(input_graph, nstart)
nstart = ensure_valid_dtype(input_graph, nstart, "nstart")
initial_guess_vertices = nstart["vertex"]
initial_guess_values = nstart["values"]
if personalization is not None:
if input_graph.renumbered is True:
personalization = renumber_vertices(input_graph, personalization)
personalization = ensure_valid_dtype(
input_graph, personalization, "personalization"
)
personalization_ddf = dask_cudf.from_cudf(
personalization, npartitions=len(Comms.get_workers())
)
data_prsztn = get_distributed_data(personalization_ddf)
result = [
client.submit(
_call_plc_personalized_pagerank,
Comms.get_session_id(),
input_graph._plc_graph[w],
precomputed_vertex_out_weight_vertices,
precomputed_vertex_out_weight_sums,
data_personalization[0],
initial_guess_vertices,
initial_guess_values,
alpha,
tol,
max_iter,
do_expensive_check,
fail_on_nonconvergence,
workers=[w],
allow_other_workers=False,
)
for w, data_personalization in data_prsztn.worker_to_parts.items()
]
else:
result = [
client.submit(
_call_plc_pagerank,
Comms.get_session_id(),
input_graph._plc_graph[w],
precomputed_vertex_out_weight_vertices,
precomputed_vertex_out_weight_sums,
initial_guess_vertices,
initial_guess_values,
alpha,
tol,
max_iter,
do_expensive_check,
fail_on_nonconvergence,
workers=[w],
allow_other_workers=False,
)
for w in Comms.get_workers()
]
wait(result)
vertex_dtype = input_graph.edgelist.edgelist_df.dtypes[0]
# Have each worker convert tuple of arrays and bool from PLC to cudf
# DataFrames and bools. This will be a list of futures.
result_tuples = [
client.submit(convert_to_return_tuple, cp_arrays) for cp_arrays in result
]
# Convert the futures to dask delayed objects so the tuples can be
# split. nout=2 is passed since each tuple/iterable is a fixed length of 2.
result_tuples = [dask.delayed(r, nout=2) for r in result_tuples]
# Create the ddf and get the converged bool from the delayed objs. Use a
# meta DataFrame to pass the expected dtypes for the DataFrame to prevent
# another compute to determine them automatically.
meta = cudf.DataFrame(columns=["vertex", "pagerank"])
meta = meta.astype({"pagerank": "float64", "vertex": vertex_dtype})
ddf = dask_cudf.from_delayed([t[0] for t in result_tuples], meta=meta).persist()
converged = all(dask.compute(*[t[1] for t in result_tuples]))
wait(ddf)
# Wait until the inactive futures are released
wait([(r.release(), c_r.release()) for r, c_r in zip(result, result_tuples)])
if input_graph.renumbered:
ddf = input_graph.unrenumber(ddf, "vertex")
if fail_on_nonconvergence:
return ddf
else:
return (ddf, converged)
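# Usage sketch (editor's note, not part of the original file): consuming the
# tuple return when fail_on_nonconvergence=False. `dg` is assumed to be a
# graph built as in the docstring example above.
#
#     ddf, converged = pagerank(dg, max_iter=20, fail_on_nonconvergence=False)
#     if not converged:
#         # retry with a larger iteration budget before trusting the scores
#         ddf, converged = pagerank(dg, max_iter=200,
#                                   fail_on_nonconvergence=False)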
|
19f1fa8ef40979ad6061936afb8588d6aa1d887b
|
ccc00ce0041bd050a33880a84e68d6231610bfe5
|
/elasticdl/python/ps/parameter_server.py
|
85399b9c39b9c540399afa6928abb76cd401200c
|
[
"MIT"
] |
permissive
|
sql-machine-learning/elasticdl
|
009bfa5bcd0c2e8ffd032c559af5d3b0d13e2ed2
|
86cd6ff7e175939a4bcd7938e334c4865acacb6d
|
refs/heads/develop
| 2023-08-31T06:13:50.904537
| 2023-08-15T10:39:31
| 2023-08-15T10:39:31
| 154,232,678
| 724
| 122
|
MIT
| 2023-08-15T10:39:33
| 2018-10-22T23:53:10
|
Python
|
UTF-8
|
Python
| false
| false
| 6,423
|
py
|
parameter_server.py
|
# Copyright 2020 The ElasticDL Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from concurrent import futures
import grpc
from kubernetes import client, config
from elasticai_api.common.constants import GRPC
from elasticai_api.util.grpc_utils import build_channel
from elasticdl.proto import elasticdl_pb2_grpc
from elasticdl.python.common.constants import PodStatus
from elasticdl.python.common.log_utils import get_logger
from elasticdl.python.common.model_utils import (
get_module_file_path,
load_module,
)
from elasticdl.python.common.save_utils import CheckpointSaver
from elasticdl.python.ps.parameters import Parameters
from elasticdl.python.ps.servicer import PserverServicer
from elasticdl_client.common.k8s_client import get_master_pod_name
class ParameterServer(object):
def __init__(self, args):
self.logger = get_logger("PS", level=args.log_level.upper())
self.grads_to_wait = args.grads_to_wait
self.lr_staleness_modulation = args.lr_staleness_modulation
self.sync_version_tolerance = args.sync_version_tolerance
self.use_async = args.use_async
self.port = args.port
model_module = load_module(
get_module_file_path(args.model_zoo, args.model_def)
).__dict__
self.optimizer = model_module[args.optimizer]()
self.ps_id = args.ps_id
self.num_ps_pods = args.num_ps_pods
self.num_workers = args.num_workers
# Create Parameters instance
self.parameters = Parameters()
if args.master_addr is None:
raise ValueError("master_addr is missing for parameter servers")
self.master_channel = build_channel(args.master_addr)
self.evaluation_steps = args.evaluation_steps
self.master_name = get_master_pod_name(args.job_name)
self.namespace = args.namespace
self._init_checkpoint_saver(args)
self._restore_params_from_checkpoint(args.checkpoint_dir_for_init)
self._debug_info_needed = args.log_level.upper() == "DEBUG"
def _restore_params_from_checkpoint(self, checkpoint_dir_for_init):
"""Restore parameters from a checkpint directory for the PS instance
"""
if not checkpoint_dir_for_init:
self.logger.info("checkpoint directory for init is None")
return
if not CheckpointSaver.check_checkpoint_valid(checkpoint_dir_for_init):
raise ValueError("Invalid checkpoint directory")
self.parameters = CheckpointSaver.restore_params_from_checkpoint(
checkpoint_dir_for_init, self.ps_id, self.num_ps_pods
)
self.parameters.initialized = True
self.logger.info(
"The version of restored parameters is %d"
% self.parameters.version
)
def _init_checkpoint_saver(self, args):
if all([args.checkpoint_dir, args.checkpoint_steps]):
self.checkpoint_saver = CheckpointSaver(
args.checkpoint_dir,
args.checkpoint_steps,
args.keep_checkpoint_max,
include_evaluation=False,
)
else:
self.checkpoint_saver = None
self.logger.warning(
"Invalid checkpoint config and no model will be saved"
)
def prepare(self):
max_workers = min(self.num_workers, 64)
self.logger.info("The max threads in PS servers is %d" % max_workers)
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=max_workers),
options=[
("grpc.max_send_message_length", GRPC.MAX_SEND_MESSAGE_LENGTH),
(
"grpc.max_receive_message_length",
GRPC.MAX_RECEIVE_MESSAGE_LENGTH,
),
],
)
pserver_servicer = PserverServicer(
self.parameters,
self.grads_to_wait,
self.optimizer,
lr_staleness_modulation=self.lr_staleness_modulation,
sync_version_tolerance=self.sync_version_tolerance,
use_async=self.use_async,
evaluation_steps=self.evaluation_steps,
master_channel=self.master_channel,
checkpoint_saver=self.checkpoint_saver,
ps_id=self.ps_id,
num_ps_pods=self.num_ps_pods,
)
elasticdl_pb2_grpc.add_PserverServicer_to_server(
pserver_servicer, server
)
server.add_insecure_port("[::]:{}".format(self.port))
server.start()
self.server = server
self.logger.info("RPC Server started at port: %d", self.port)
def run(self):
config.load_incluster_config()
api = client.CoreV1Api()
try:
while True:
time.sleep(30)
master_pod = api.read_namespaced_pod(
namespace=self.namespace, name=self.master_name
)
if master_pod.status.phase == PodStatus.SUCCEEDED:
self.logger.info("Master pod is Succeeded")
break
elif master_pod.status.phase == PodStatus.FAILED:
self.logger.info("Master pod is Failed")
break
elif (
master_pod.status.phase == PodStatus.RUNNING
and master_pod.metadata.labels["status"]
== PodStatus.FINISHED
):
self.logger.info("Task is finished")
break
if self._debug_info_needed:
self.logger.debug(
"Parameters info:\n%s" % self.parameters.debug_info()
)
except KeyboardInterrupt:
self.logger.warning("Server stopping")
self.server.stop(0)
self.logger.info("RPC server stopped")
|
c23c6200bc7d4b725feb31cca246f2a23608cc37
|
93a116442c782730ef774688b8f41a8c4bdc2a59
|
/lldbhelpers/break_here.py
|
4cc21257d34c2a6be79d928341a582bd1928a5a2
|
[
"MIT"
] |
permissive
|
keith/dotfiles
|
016dddc45d194bd3f2b3224b3d830c833b0c9175
|
7ca92029cd03905159fa6433de915666c340d491
|
refs/heads/main
| 2023-08-04T00:49:38.487154
| 2023-07-26T04:05:51
| 2023-07-26T04:05:51
| 5,795,829
| 226
| 29
|
MIT
| 2022-07-25T15:00:22
| 2012-09-13T14:26:33
|
Shell
|
UTF-8
|
Python
| false
| false
| 174
|
py
|
break_here.py
|
import lldb
@lldb.command()
def bhere(debugger, _ignored, context, result, _):
    pc = context.frame.GetPC()
debugger.HandleCommand("breakpoint set -a {}".format(pc))
|
06b8bf3982ba6113ea73b98e7888ababfae4b079
|
cce6234bf89b91bfbe00abaf320e3ae45d7f8bc8
|
/tests/test_utils.py
|
13e2501f13139ca139ccb43bb03827d9b5be5504
|
[
"MIT"
] |
permissive
|
uuazed/numerapi
|
334e2eb1ae08073998956fb0333d53fac5d6e54b
|
9badfe023d5eaa557d4b01cae02698392a7828df
|
refs/heads/master
| 2023-08-18T05:14:30.061420
| 2023-08-07T07:13:56
| 2023-08-07T07:13:56
| 114,936,786
| 175
| 77
|
MIT
| 2023-07-05T13:53:16
| 2017-12-20T22:25:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,097
|
py
|
test_utils.py
|
import pytest
import datetime
import os
from dateutil.tz import tzutc
import responses
import requests
import decimal
from numerapi import utils
def test_parse_datetime_string():
s = "2017-12-24T20:48:25.90349Z"
t = datetime.datetime(2017, 12, 24, 20, 48, 25, 903490, tzinfo=tzutc())
assert utils.parse_datetime_string(s) == t
assert utils.parse_datetime_string(None) is None
def test_parse_float_string():
assert utils.parse_float_string(None) is None
assert utils.parse_float_string("") is None
assert utils.parse_float_string("1.23") == decimal.Decimal("1.23")
assert utils.parse_float_string("12") == decimal.Decimal("12.0")
assert utils.parse_float_string("1,000.0") == decimal.Decimal("1000.0")
assert utils.parse_float_string("0.4") == decimal.Decimal("0.4")
def test_replace():
d = None
assert utils.replace(d, "a", float) is None
# empty dict
d = {}
assert not utils.replace(d, "a", float)
# normal case
d = {"a": "1"}
utils.replace(d, "a", float)
assert d["a"] == 1.0
@responses.activate
def test_download_file(tmpdir):
url = "https://someurl"
responses.add(responses.GET, url)
# convert to string to make python<3.6 happy
path = str(tmpdir.join("somefilepath"))
utils.download_file("https://someurl", path)
assert os.path.exists(path)
@responses.activate
def test_post_with_err_handling(caplog):
# unreachable
responses.add(responses.POST, "https://someurl1", status=404)
utils.post_with_err_handling("https://someurl1", None, None)
assert 'Http Error' in caplog.text
caplog.clear()
    # invalid response type
responses.add(responses.POST, "https://someurl2")
utils.post_with_err_handling("https://someurl2", None, None)
assert 'Did not receive a valid JSON' in caplog.text
caplog.clear()
# timeout
responses.add(responses.POST, "https://someurl3",
body=requests.exceptions.Timeout())
utils.post_with_err_handling("https://someurl3", None, None)
assert 'Timeout Error' in caplog.text
caplog.clear()
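# Editor's sketch (not the actual numerapi implementation): the behaviour of
# utils.replace as inferred from test_replace above -- convert d[key] in
# place when the dict is truthy and the key is present.
def _replace_sketch(d, key, fn):
    if d and key in d:
        d[key] = fn(d[key])
    return d
def test_replace_sketch_matches_expectations():
    assert _replace_sketch(None, "a", float) is None
    assert not _replace_sketch({}, "a", float)
    d = {"a": "1"}
    _replace_sketch(d, "a", float)
    assert d["a"] == 1.0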
|
6bc88e2aaf24394432ede81baf698d5e32338682
|
61b95ee2aefbcfbd6c4abf9511d976d0b9d0e100
|
/faker/providers/address/pt_BR/__init__.py
|
91ba030c0e778e9580822d747898ff69e520e856
|
[
"MIT"
] |
permissive
|
joke2k/faker
|
fed7472580ced2bce326fe4ea0c3d1c810853d5e
|
33e36b1b6cc9c6f039fe387988853771bab60624
|
refs/heads/master
| 2023-09-04T00:43:33.599705
| 2023-08-31T16:15:04
| 2023-08-31T16:15:04
| 6,662,075
| 14,544
| 2,215
|
MIT
| 2023-09-11T16:06:14
| 2012-11-12T23:00:09
|
Python
|
UTF-8
|
Python
| false
| false
| 23,003
|
py
|
__init__.py
|
from typing import Tuple
from .. import Provider as AddressProvider
class Provider(AddressProvider):
city_suffixes = (
"do Sul",
"do Norte",
"de Minas",
"do Campo",
"Grande",
"da Serra",
"do Oeste",
"de Goiás",
"Paulista",
"da Mata",
"Alegre",
"da Praia",
"das Flores",
"das Pedras",
"dos Dourados",
"do Amparo",
"do Galho",
"da Prata",
"Verde",
)
street_prefixes = (
"Aeroporto",
"Alameda",
"Área",
"Avenida",
"Campo",
"Chácara",
"Colônia",
"Condomínio",
"Conjunto",
"Distrito",
"Esplanada",
"Estação",
"Estrada",
"Favela",
"Fazenda",
"Feira",
"Jardim",
"Ladeira",
"Lago",
"Lagoa",
"Largo",
"Loteamento",
"Morro",
"Núcleo",
"Parque",
"Passarela",
"Pátio",
"Praça",
"Praia",
"Quadra",
"Recanto",
"Residencial",
"Rodovia",
"Rua",
"Setor",
"Sítio",
"Travessa",
"Trecho",
"Trevo",
"Vale",
"Vereda",
"Via",
"Viaduto",
"Viela",
"Vila",
)
city_formats = (
"{{last_name}}",
"{{last_name}}",
"{{last_name}}",
"{{last_name}}",
"{{last_name}} {{city_suffix}}",
"{{last_name}} {{city_suffix}}",
"{{last_name}} {{city_suffix}}",
"{{last_name}} de {{last_name}}",
)
street_name_formats = (
"{{street_prefix}} {{last_name}}",
"{{street_prefix}} {{first_name}} {{last_name}}",
"{{street_prefix}} de {{last_name}}",
)
street_address_formats = (
"{{street_name}}",
"{{street_name}}, {{building_number}}",
"{{street_name}}, {{building_number}}",
"{{street_name}}, {{building_number}}",
"{{street_name}}, {{building_number}}",
"{{street_name}}, {{building_number}}",
"{{street_name}}, {{building_number}}",
)
address_formats = ("{{street_address}}\n{{bairro}}\n{{postcode}} {{city}} / {{estado_sigla}}",)
building_number_formats = ("%", "%#", "%#", "%#", "%##")
postcode_raw_formats = ("########",)
postcode_all_formats = postcode_raw_formats + ("#####-###",)
bairros = (
"Aarão Reis",
"Acaba Mundo",
"Acaiaca",
"Ademar Maldonado",
"Aeroporto",
"Aguas Claras",
"Alípio De Melo",
"Alpes",
"Alta Tensão 1ª Seção",
"Alta Tensão 2ª Seção",
"Alto Caiçaras",
"Alto Das Antenas",
"Alto Dos Pinheiros",
"Alto Vera Cruz",
"Álvaro Camargos",
"Ambrosina",
"Andiroba",
"Antonio Ribeiro De Abreu 1ª Seção",
"Aparecida 7ª Seção",
"Ápia",
"Apolonia",
"Araguaia",
"Atila De Paiva",
"Bacurau",
"Bairro Das Indústrias Ii",
"Baleia",
"Barão Homem De Melo 1ª Seção",
"Barão Homem De Melo 2ª Seção",
"Barão Homem De Melo 3ª Seção",
"Barreiro",
"Beija Flor",
"Beira Linha",
"Bela Vitoria",
"Belmonte",
"Bernadete",
"Betânia",
"Biquinhas",
"Boa Esperança",
"Boa União 1ª Seção",
"Boa União 2ª Seção",
"Boa Viagem",
"Boa Vista",
"Bom Jesus",
"Bonfim",
"Bonsucesso",
"Brasil Industrial",
"Braúnas",
"Buraco Quente",
"Cabana Do Pai Tomás",
"Cachoeirinha",
"Caetano Furquim",
"Caiçara - Adelaide",
"Calafate",
"Califórnia",
"Camargos",
"Campo Alegre",
"Camponesa 1ª Seção",
"Camponesa 2ª Seção",
"Canaa",
"Canadá",
"Candelaria",
"Capitão Eduardo",
"Cardoso",
"Casa Branca",
"Castanheira",
"Cdi Jatoba",
"Cenaculo",
"Céu Azul",
"Chácara Leonina",
"Cidade Jardim Taquaril",
"Cinquentenário",
"Colégio Batista",
"Comiteco",
"Concórdia",
"Cônego Pinheiro 1ª Seção",
"Cônego Pinheiro 2ª Seção",
"Confisco",
"Conjunto Bonsucesso",
"Conjunto Califórnia I",
"Conjunto Califórnia Ii",
"Conjunto Capitão Eduardo",
"Conjunto Celso Machado",
"Conjunto Floramar",
"Conjunto Jardim Filadélfia",
"Conjunto Jatoba",
"Conjunto Lagoa",
"Conjunto Minas Caixa",
"Conjunto Novo Dom Bosco",
"Conjunto Paulo Vi",
"Conjunto Providencia",
"Conjunto Santa Maria",
"Conjunto São Francisco De Assis",
"Conjunto Serra Verde",
"Conjunto Taquaril",
"Copacabana",
"Coqueiros",
"Corumbiara",
"Custodinha",
"Das Industrias I",
"Delta",
"Diamante",
"Distrito Industrial Do Jatoba",
"Dom Bosco",
"Dom Cabral",
"Dom Joaquim",
"Dom Silverio",
"Dona Clara",
"Embaúbas",
"Engenho Nogueira",
"Ermelinda",
"Ernesto Nascimento",
"Esperança",
"Estrela",
"Estrela Do Oriente",
"Etelvina Carneiro",
"Europa",
"Eymard",
"Fazendinha",
"Flamengo",
"Flavio De Oliveira",
"Flavio Marques Lisboa",
"Floramar",
"Frei Leopoldo",
"Gameleira",
"Garças",
"Glória",
"Goiania",
"Graça",
"Granja De Freitas",
"Granja Werneck",
"Grota",
"Grotinha",
"Guarani",
"Guaratã",
"Havaí",
"Heliopolis",
"Horto Florestal",
"Inconfidência",
"Indaiá",
"Independência",
"Ipe",
"Itapoa",
"Itatiaia",
"Jaqueline",
"Jaraguá",
"Jardim Alvorada",
"Jardim Atlântico",
"Jardim Do Vale",
"Jardim Dos Comerciarios",
"Jardim Felicidade",
"Jardim Guanabara",
"Jardim Leblon",
"Jardim Montanhês",
"Jardim São José",
"Jardim Vitoria",
"Jardinópolis",
"Jatobá",
"João Alfredo",
"João Paulo Ii",
"Jonas Veiga",
"Juliana",
"Lagoa",
"Lagoinha",
"Lagoinha Leblon",
"Lajedo",
"Laranjeiras",
"Leonina",
"Leticia",
"Liberdade",
"Lindéia",
"Lorena",
"Madre Gertrudes",
"Madri",
"Mala E Cuia",
"Manacas",
"Mangueiras",
"Mantiqueira",
"Marajó",
"Maravilha",
"Marçola",
"Maria Goretti",
"Maria Helena",
"Maria Tereza",
"Maria Virgínia",
"Mariano De Abreu",
"Marieta 1ª Seção",
"Marieta 2ª Seção",
"Marieta 3ª Seção",
"Marilandia",
"Mariquinhas",
"Marmiteiros",
"Milionario",
"Minas Brasil",
"Minas Caixa",
"Minaslandia",
"Mineirão",
"Miramar",
"Mirante",
"Mirtes",
"Monsenhor Messias",
"Monte Azul",
"Monte São José",
"Morro Dos Macacos",
"Nazare",
"Nossa Senhora Aparecida",
"Nossa Senhora Da Aparecida",
"Nossa Senhora Da Conceição",
"Nossa Senhora De Fátima",
"Nossa Senhora Do Rosário",
"Nova America",
"Nova Cachoeirinha",
"Nova Cintra",
"Nova Esperança",
"Nova Floresta",
"Nova Gameleira",
"Nova Pampulha",
"Novo Aarão Reis",
"Novo Das Industrias",
"Novo Glória",
"Novo Santa Cecilia",
"Novo Tupi",
"Oeste",
"Olaria",
"Olhos D'água",
"Ouro Minas",
"Pantanal",
"Paquetá",
"Paraíso",
"Parque São José",
"Parque São Pedro",
"Paulo Vi",
"Pedreira Padro Lopes",
"Penha",
"Petropolis",
"Pilar",
"Pindorama",
"Pindura Saia",
"Piraja",
"Piratininga",
"Pirineus",
"Pompéia",
"Pongelupe",
"Pousada Santo Antonio",
"Primeiro De Maio",
"Providencia",
"Ribeiro De Abreu",
"Rio Branco",
"Salgado Filho",
"Santa Amelia",
"Santa Branca",
"Santa Cecilia",
"Santa Cruz",
"Santa Helena",
"Santa Inês",
"Santa Isabel",
"Santa Margarida",
"Santa Maria",
"Santa Rita",
"Santa Rita De Cássia",
"Santa Sofia",
"Santa Terezinha",
"Santana Do Cafezal",
"Santo André",
"São Benedito",
"São Bernardo",
"São Cristóvão",
"São Damião",
"São Francisco",
"São Francisco Das Chagas",
"São Gabriel",
"São Geraldo",
"São Gonçalo",
"São João",
"São João Batista",
"São Jorge 1ª Seção",
"São Jorge 2ª Seção",
"São Jorge 3ª Seção",
"São José",
"São Marcos",
"São Paulo",
"São Salvador",
"São Sebastião",
"São Tomaz",
"São Vicente",
"Satelite",
"Saudade",
"Senhor Dos Passos",
"Serra Do Curral",
"Serra Verde",
"Serrano",
"Solar Do Barreiro",
"Solimoes",
"Sport Club",
"Suzana",
"Taquaril",
"Teixeira Dias",
"Tiradentes",
"Tirol",
"Tres Marias",
"Trevo",
"Túnel De Ibirité",
"Tupi A",
"Tupi B",
"União",
"Unidas",
"Universitário",
"Universo",
"Urca",
"Vale Do Jatoba",
"Varzea Da Palma",
"Venda Nova",
"Ventosa",
"Vera Cruz",
"Vila Aeroporto",
"Vila Aeroporto Jaraguá",
"Vila Antena",
"Vila Antena Montanhês",
"Vila Atila De Paiva",
"Vila Bandeirantes",
"Vila Barragem Santa Lúcia",
"Vila Batik",
"Vila Betânia",
"Vila Boa Vista",
"Vila Calafate",
"Vila Califórnia",
"Vila Canto Do Sabiá",
"Vila Cemig",
"Vila Cloris",
"Vila Copacabana",
"Vila Copasa",
"Vila Coqueiral",
"Vila Da Amizade",
"Vila Da Ária",
"Vila Da Luz",
"Vila Da Paz",
"Vila Das Oliveiras",
"Vila Do Pombal",
"Vila Dos Anjos",
"Vila Ecológica",
"Vila Engenho Nogueira",
"Vila Esplanada",
"Vila Formosa",
"Vila Fumec",
"Vila Havaí",
"Vila Independencia 1ª Seção",
"Vila Independencia 2ª Seção",
"Vila Independencia 3ª Seção",
"Vila Inestan",
"Vila Ipiranga",
"Vila Jardim Alvorada",
"Vila Jardim Leblon",
"Vila Jardim São José",
"Vila Madre Gertrudes 1ª Seção",
"Vila Madre Gertrudes 2ª Seção",
"Vila Madre Gertrudes 3ª Seção",
"Vila Madre Gertrudes 4ª Seção",
"Vila Maloca",
"Vila Mangueiras",
"Vila Mantiqueira",
"Vila Maria",
"Vila Minaslandia",
"Vila Nossa Senhora Do Rosário",
"Vila Nova",
"Vila Nova Cachoeirinha 1ª Seção",
"Vila Nova Cachoeirinha 2ª Seção",
"Vila Nova Cachoeirinha 3ª Seção",
"Vila Nova Dos Milionarios",
"Vila Nova Gameleira 1ª Seção",
"Vila Nova Gameleira 2ª Seção",
"Vila Nova Gameleira 3ª Seção",
"Vila Nova Paraíso",
"Vila Novo São Lucas",
"Vila Oeste",
"Vila Olhos D'água",
"Vila Ouro Minas",
"Vila Paquetá",
"Vila Paraíso",
"Vila Petropolis",
"Vila Pilar",
"Vila Pinho",
"Vila Piratininga",
"Vila Piratininga Venda Nova",
"Vila Primeiro De Maio",
"Vila Puc",
"Vila Real 1ª Seção",
"Vila Real 2ª Seção",
"Vila Rica",
"Vila Santa Monica 1ª Seção",
"Vila Santa Monica 2ª Seção",
"Vila Santa Rosa",
"Vila Santo Antônio",
"Vila Santo Antônio Barroquinha",
"Vila São Dimas",
"Vila São Francisco",
"Vila São Gabriel",
"Vila São Gabriel Jacui",
"Vila São Geraldo",
"Vila São João Batista",
"Vila São Paulo",
"Vila São Rafael",
"Vila Satélite",
"Vila Sesc",
"Vila Sumaré",
"Vila Suzana Primeira Seção",
"Vila Suzana Segunda Seção",
"Vila Tirol",
"Vila Trinta E Um De Março",
"Vila União",
"Vila Vista Alegre",
"Virgínia",
"Vista Alegre",
"Vista Do Sol",
"Vitoria",
"Vitoria Da Conquista",
"Xangri-Lá",
"Xodo-Marize",
"Zilah Sposito",
"Outro",
"Novo São Lucas",
"Esplanada",
"Estoril",
"Novo Ouro Preto",
"Ouro Preto",
"Padre Eustáquio",
"Palmares",
"Palmeiras",
"Vila De Sá",
"Floresta",
"Anchieta",
"Aparecida",
"Grajaú",
"Planalto",
"Bandeirantes",
"Gutierrez",
"Jardim América",
"Renascença",
"Barro Preto",
"Barroca",
"Sagrada Família",
"Ipiranga",
"Belvedere",
"Santa Efigênia",
"Santa Lúcia",
"Santa Monica",
"Vila Jardim Montanhes",
"Santa Rosa",
"Santa Tereza",
"Buritis",
"Vila Paris",
"Santo Agostinho",
"Santo Antônio",
"Caiçaras",
"São Bento",
"Prado",
"Lourdes",
"Fernão Dias",
"Carlos Prates",
"Carmo",
"Luxemburgo",
"São Lucas",
"São Luiz",
"Mangabeiras",
"São Pedro",
"Horto",
"Cidade Jardim",
"Castelo",
"Cidade Nova",
"Savassi",
"Serra",
"Silveira",
"Sion",
"Centro",
"Alto Barroca",
"Nova Vista",
"Coração De Jesus",
"Coração Eucarístico",
"Funcionários",
"Cruzeiro",
"João Pinheiro",
"Nova Granada",
"Nova Suíça",
"Itaipu",
)
countries = (
"Afeganistão",
"África do Sul",
"Akrotiri",
"Albânia",
"Alemanha",
"Andorra",
"Angola",
"Anguila",
"Antártica",
"Antígua e Barbuda",
"Antilhas Holandesas",
"Arábia Saudita",
"Argélia",
"Argentina",
"Armênia",
"Aruba",
"Ashmore and Cartier Islands",
"Austrália",
"Áustria",
"Azerbaijão",
"Bahamas",
"Bangladesh",
"Barbados",
"Barein",
"Bélgica",
"Belize",
"Benim",
"Bermudas",
"Bielorrússia",
"Birmânia",
"Bolívia",
"Bósnia e Herzegovina",
"Botsuana",
"Brasil",
"Brunei",
"Bulgária",
"Burquina Faso",
"Burundi",
"Butão",
"Cabo Verde",
"Camarões",
"Camboja",
"Canadá",
"Catar",
"Cazaquistão",
"Chade",
"Chile",
"China",
"Chipre",
"Clipperton Island",
"Colômbia",
"Comores",
"Congo-Brazzaville",
"Congo-Kinshasa",
"Coral Sea Islands",
"Coreia do Norte",
"Coreia do Sul",
"Costa do Marfim",
"Costa Rica",
"Croácia",
"Cuba",
"Dhekelia",
"Dinamarca",
"Domínica",
"Egito",
"Costa do Marfim",
"Costa Rica",
"Croácia",
"Cuba",
"Dhekelia",
"Dinamarca",
"Domínica",
"Egito",
"Emirados Árabes Unidos",
"Equador",
"Eritreia",
"Eslováquia",
"Eslovênia",
"Espanha",
"Estados Unidos",
"Estônia",
"Etiópia",
"Faroé",
"Fiji",
"Filipinas",
"Finlândia",
"França",
"Gabão",
"Gâmbia",
"Gana",
"Geórgia",
"Geórgia do Sul e Sandwich do Sul",
"Gibraltar",
"Granada",
"Grécia",
"Gronelândia",
"Guam",
"Guatemala",
"Guernsey",
"Guiana",
"Guiné",
"Guiné Equatorial",
"Guiné-Bissau",
"Haiti",
"Honduras",
"Hong Kong",
"Hungria",
"Iêmen",
"Ilha Bouvet",
"Ilha do Natal",
"Ilha Norfolk",
"Ilhas Caiman",
"Ilhas Cook",
"Ilhas dos Cocos",
"Ilhas Falkland",
"Ilhas Heard e McDonald",
"Ilhas Marshall",
"Ilhas Salomão",
"Ilhas Turcas e Caicos",
"Ilhas Virgens Americanas",
"Ilhas Virgens Britânicas",
"Índia",
"Indonésia",
"Iran",
"Iraque",
"Irlanda",
"Islândia",
"Israel",
"Itália",
"Jamaica",
"Jan Mayen",
"Japão",
"Jersey",
"Jibuti",
"Jordânia",
"Kuwait",
"Laos",
"Lesoto",
"Letônia",
"Líbano",
"Libéria",
"Líbia",
"Liechtenstein",
"Lituânia",
"Luxemburgo",
"Macau",
"Macedónia do Norte",
"Madagáscar",
"Malásia",
"Malávi",
"Maldivas",
"Mali",
"Malta",
"Man, Isle of",
"Marianas do Norte",
"Marrocos",
"Maurícia",
"Mauritânia",
"Mayotte",
"México",
"Micronésia",
"Moçambique",
"Moldávia",
"Mônaco",
"Mongólia",
"Monserrate",
"Montenegro",
"Namíbia",
"Nauru",
"Navassa Island",
"Nepal",
"Nicarágua",
"Níger",
"Nigéria",
"Niue",
"Noruega",
"Nova Caledónia",
"Nova Zelândia",
"Omã",
"Países Baixos",
"Palau",
"Panamá",
"Papua-Nova Guiné",
"Paquistão",
"Paracel Islands",
"Paraguai",
"Peru",
"Pitcairn",
"Polinésia Francesa",
"Polônia",
"Porto Rico",
"Portugal",
"Quênia",
"Quirguizistão",
"Quiribáti",
"Reino Unido",
"República Centro-Africana",
"República Checa",
"República Dominicana",
"Roménia",
"Ruanda",
"Rússia",
"Salvador",
"Samoa",
"Samoa Americana",
"Santa Helena",
"Santa Lúcia",
"São Cristóvão e Neves",
"São Marinho",
"São Pedro e Miquelon",
"São Tomé e Príncipe",
"São Vicente e Granadinas",
"Sara Ocidental",
"Seicheles",
"Senegal",
"Serra Leoa",
"Sérvia",
"Singapura",
"Síria",
"Somália",
"Sri Lanka",
"Suazilândia",
"Sudão",
"Suécia",
"Suíça",
"Suriname",
"Svalbard e Jan Mayen",
"Tailândia",
"Taiwan",
"Tajiquistão",
"Tanzânia",
"Território Britânico do Oceano Índico",
"Territórios Austrais Franceses",
"Timor Leste",
"Togo",
"Tokelau",
"Tonga",
"Trindade e Tobago",
"Tunísia",
"Turquemenistão",
"Turquia",
"Tuvalu",
"Ucrânia",
"Uganda",
"União Europeia",
"Uruguai",
"Usbequistão",
"Vanuatu",
"Vaticano",
"Venezuela",
"Vietnam",
"Wake Island",
"Wallis e Futuna",
"Zâmbia",
"Zimbabué",
)
estados = (
("AC", "Acre"),
("AL", "Alagoas"),
("AP", "Amapá"),
("AM", "Amazonas"),
("BA", "Bahia"),
("CE", "Ceará"),
("DF", "Distrito Federal"),
("ES", "Espírito Santo"),
("GO", "Goiás"),
("MA", "Maranhão"),
("MT", "Mato Grosso"),
("MS", "Mato Grosso do Sul"),
("MG", "Minas Gerais"),
("PA", "Pará"),
("PB", "Paraíba"),
("PR", "Paraná"),
("PE", "Pernambuco"),
("PI", "Piauí"),
("RJ", "Rio de Janeiro"),
("RN", "Rio Grande do Norte"),
("RS", "Rio Grande do Sul"),
("RO", "Rondônia"),
("RR", "Roraima"),
("SC", "Santa Catarina"),
("SP", "São Paulo"),
("SE", "Sergipe"),
("TO", "Tocantins"),
)
def street_prefix(self) -> str:
"""
:example: 'rua'
"""
return self.random_element(self.street_prefixes)
def estado(self) -> Tuple[str, str]:
"""
        Randomly returns a Brazilian state as a ('sigla', 'nome') tuple.
        :example: ('MG', 'Minas Gerais')
"""
return self.random_element(self.estados)
def estado_nome(self) -> str:
"""
        Randomly returns a Brazilian state name.
:example: 'Minas Gerais'
"""
return self.estado()[1]
def estado_sigla(self) -> str:
"""
        Randomly returns the abbreviation (sigla) of a Brazilian state.
:example: 'MG'
"""
return self.estado()[0]
def bairro(self) -> str:
"""
Randomly returns a bairro (neighborhood) name.
        The names were taken from the city of Belo Horizonte, Minas Gerais.
:example: 'Serra'
"""
return self.random_element(self.bairros)
def postcode(self, formatted: bool = True) -> str:
"""
Randomly returns a postcode.
        :param formatted: True to allow formatted and raw postcodes, False for raw digits only (default True)
:example formatted: '41224-212' '83992-291' '12324322'
:example raw: '43920231' '34239530'
"""
template = self.postcode_all_formats if formatted else self.postcode_raw_formats
return self.bothify(self.random_element(template))
# aliases
def neighborhood(self) -> str:
return self.bairro()
def administrative_unit(self) -> str:
return self.estado_nome()
state = administrative_unit
def state_abbr(self) -> str:
return self.estado_sigla()
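# A minimal usage sketch (illustrative only; it assumes Faker is installed and
# that this class is the registered pt_BR address provider):
if __name__ == "__main__":
    from faker import Faker

    fake = Faker("pt_BR")
    print(fake.estado())        # e.g. ('MG', 'Minas Gerais')
    print(fake.estado_sigla())  # e.g. 'MG'
    print(fake.bairro())        # e.g. 'Serra'
    print(fake.postcode())      # e.g. '41224-212'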
|
004907ac5b11b148f06baf0d363869f1eec67493
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tools/mo/openvino/tools/mo/front/mxnet/activation.py
|
8efd34633faaf309d30cb69428b3f029f84bba91
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 1,864
|
py
|
activation.py
|
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.activation_ops import SoftPlus, Sigmoid, Tanh, ReLU, \
Asinh, Acosh, Atanh, SoftSign
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.utils.error import Error
from openvino.tools.mo.utils.utils import refer_to_faq_msg
class ActivationFrontExtractor(FrontExtractorOp):
op = 'Activation'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
act_type = attrs.str('act_type', 'relu')
if act_type == 'sigmoid':
act_class = Sigmoid
elif act_type == 'tanh':
act_class = Tanh
elif act_type == 'relu':
act_class = ReLU
elif act_type == 'softrelu':
act_class = SoftPlus
elif act_type == 'softsign':
act_class = SoftSign
else:
raise Error(
"Operation '{}' not supported. Please register it as custom op. " +
refer_to_faq_msg(86),
act_type)
act_class.update_node_stat(node)
return cls.enabled
class AsinhFrontExtractor(FrontExtractorOp):
op = 'arcsinh'
enabled = True
@classmethod
def extract(cls, node):
Asinh.update_node_stat(node)
return cls.enabled
class AcoshFrontExtractor(FrontExtractorOp):
op = 'arccosh'
enabled = True
@classmethod
def extract(cls, node):
Acosh.update_node_stat(node)
return cls.enabled
class AtanhFrontExtractor(FrontExtractorOp):
op = 'arctanh'
enabled = True
@classmethod
def extract(cls, node):
Atanh.update_node_stat(node)
return cls.enabled
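# Mapping summary (a descriptive reading of the extractors above): MXNet's
# Activation(act_type=...) symbol maps to sigmoid -> Sigmoid, tanh -> Tanh,
# relu -> ReLU, softrelu -> SoftPlus, softsign -> SoftSign; the standalone
# arcsinh / arccosh / arctanh symbols map to Asinh / Acosh / Atanh.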
|
6ace4da478db23e4f466b6d42b378e8e56ef70c9
|
9f73d653197b5218f1a5a02e06cb7f56d858a572
|
/scripts/download_dump.py
|
9993e4ff2f377dbe302693ddcbeecbf023e5f3d7
|
[
"MIT"
] |
permissive
|
wikimedia/pywikibot
|
b32fbc2eb3d688f57668aed4dc488b4055196e8f
|
5c01e6bfcd328bc6eae643e661f1a0ae57612808
|
refs/heads/master
| 2023-09-03T19:22:13.926740
| 2023-09-03T14:56:01
| 2023-09-03T14:59:45
| 10,798,864
| 432
| 166
|
MIT
| 2023-08-10T23:36:48
| 2013-06-19T16:18:45
|
Python
|
UTF-8
|
Python
| false
| false
| 8,728
|
py
|
download_dump.py
|
#!/usr/bin/env python3
"""
This bot downloads dump from dumps.wikimedia.org.
This script supports the following command line parameters:
-filename:# The name of the file (e.g. abstract.xml)
-storepath:# The stored file's path.
-dumpdate:# The date of the dump (defaults to `latest`)
formatted as YYYYMMDD.
.. note:: This script is a
:py:obj:`ConfigParserBot <bot.ConfigParserBot>`. All options
can be set within a settings file which is scripts.ini by default.
"""
#
# (C) Pywikibot team, 2017-2022
#
# Distributed under the terms of the MIT license.
#
import binascii
import os.path
from http import HTTPStatus
from os import remove, replace, symlink, urandom
import pywikibot
from pywikibot.bot import Bot, ConfigParserBot
from pywikibot.comms.http import fetch
class DownloadDumpBot(Bot, ConfigParserBot):
"""Download dump bot.
.. versionchanged:: 7.0
DownloadDumpBot is a ConfigParserBot
"""
available_options = {
'wikiname': '',
'filename': '',
'storepath': './',
'dumpdate': 'latest',
}
@staticmethod
def get_dump_name(db_name, typ, dumpdate):
"""Check if dump file exists locally in a Toolforge server."""
db_path = f'/public/dumps/public/{db_name}/'
if os.path.isdir(db_path):
dump_filepath_template = (
'/public/dumps/public/{db_name}/{date}/{db_name}-{date}-{typ}')
if dumpdate != 'latest':
dump_filepath = dump_filepath_template.format(
db_name=db_name, date=dumpdate, typ=typ)
if os.path.isfile(dump_filepath):
return dump_filepath
else:
# Search for the "latest" dump
dirs = [directory for directory in os.listdir(db_path) if
directory.isdigit()]
dates = map(int, dirs)
dates = sorted(dates, reverse=True)
for date in dates:
dump_filepath = dump_filepath_template.format(
db_name=db_name, date=date, typ=typ)
if os.path.isfile(dump_filepath):
return dump_filepath
return None
def run(self) -> None:
"""Run bot."""
def convert_from_bytes(total_bytes):
for unit in ['B', 'K', 'M', 'G', 'T']:
if abs(total_bytes) < 1024:
return str(total_bytes) + unit
total_bytes = float(format(total_bytes / 1024.0, '.2f'))
return str(total_bytes) + 'P'
pywikibot.info('Downloading dump from ' + self.opt.wikiname)
download_filename = '{wikiname}-{dumpdate}-{filename}'.format_map(
self.opt)
temp_filename = download_filename + '-' \
+ binascii.b2a_hex(urandom(8)).decode('ascii') + '.part'
file_final_storepath = os.path.join(
self.opt.storepath, download_filename)
file_current_storepath = os.path.join(
self.opt.storepath, temp_filename)
# https://wikitech.wikimedia.org/wiki/Help:Toolforge/Dumps
toolforge_dump_filepath = self.get_dump_name(
self.opt.wikiname, self.opt.filename, self.opt.dumpdate)
# First iteration for atomic download with temporary file
# Second iteration for fallback non-atomic download
for non_atomic in range(2):
try:
if toolforge_dump_filepath:
pywikibot.info('Symlinking file from '
+ toolforge_dump_filepath)
if non_atomic and os.path.exists(file_final_storepath):
remove(file_final_storepath)
symlink(toolforge_dump_filepath, file_current_storepath)
else:
url = 'https://dumps.wikimedia.org/{}/{}/{}'.format(
self.opt.wikiname, self.opt.dumpdate,
download_filename)
pywikibot.info('Downloading file from ' + url)
response = fetch(url, stream=True)
if response.status_code != HTTPStatus.OK:
if response.status_code == HTTPStatus.NOT_FOUND:
pywikibot.info(
'File with name {filename!r}, from dumpdate '
'{dumpdate!r}, and wiki {wikiname!r} ({url}) '
"isn't available in the Wikimedia Dumps"
.format(url=url, **self.opt))
else:
pywikibot.info(
HTTPStatus(response.status_code).description)
return
with open(file_current_storepath, 'wb') as result_file:
                        total = int(response.headers.get('content-length', -1))
if total == -1:
pywikibot.warning("'content-length' missing in "
'response headers')
downloaded = 0
parts = 50
display_string = ''
pywikibot.info()
for data in response.iter_content(100 * 1024):
result_file.write(data)
if total <= 0:
continue
downloaded += len(data)
done = int(parts * downloaded / total)
display = map(convert_from_bytes,
(downloaded, total))
prior_display = display_string
display_string = '\r|{}{}|{}{}/{}'.format(
'=' * done,
'-' * (parts - done),
' ' * 5,
*display)
# Add whitespace to cover up prior bar
display_string += ' ' * (
len(prior_display.rstrip())
- len(display_string.rstrip()))
pywikibot.info(display_string, newline=False)
pywikibot.info()
# Rename the temporary file to the target file
# if the download completes successfully
if not non_atomic:
replace(file_current_storepath, file_final_storepath)
break
except OSError as e:
pywikibot.error(e)
try:
remove(file_current_storepath)
except OSError as e:
pywikibot.error(e)
# If the atomic download fails, try without a temporary file
# If the non-atomic download also fails, exit the script
if non_atomic:
return
pywikibot.info('Cannot make temporary file, '
'falling back to non-atomic download')
file_current_storepath = file_final_storepath
pywikibot.info('Done! File stored as ' + file_final_storepath)
def main(*args: str) -> None:
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
:param args: command line arguments
"""
opts = {}
unknown_args = []
local_args = pywikibot.handle_args(args)
for arg in local_args:
option, _, value = arg.partition(':')
if option.startswith('-'):
option = option[1:]
if option == 'filename':
opts[option] = value or pywikibot.input('Enter the filename: ')
continue
if option == 'storepath':
            opts[option] = os.path.abspath(
                value or pywikibot.input('Enter the store path: '))
continue
if option == 'dumpdate':
opts[option] = value or pywikibot.input(
'Enter the dumpdate of the dump: ')
continue
unknown_args.append(arg)
missing = []
if 'filename' not in opts:
missing.append('-filename')
if pywikibot.bot.suggest_help(missing_parameters=missing,
unknown_parameters=unknown_args):
return
site = pywikibot.Site()
opts['wikiname'] = site.dbName()
bot = DownloadDumpBot(**opts)
bot.run()
if __name__ == '__main__':
main()
|
87ce99706741faaa7972a3d79ed73fc3f07ac4aa
|
90b974771cff3addd43ded8c62c3667c64045976
|
/tensorflow_data_validation/skew/feature_skew_detector.py
|
4b80f5e63303731f622a755ad426926bdb0d7a1f
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/data-validation
|
84066c9c8db76d6b49bab0cec95113508d86dbdb
|
606cd26b69f648e1f151c024d38baa6ab1d7d0c8
|
refs/heads/master
| 2023-09-01T11:54:32.797061
| 2023-08-21T19:21:20
| 2023-08-21T19:21:48
| 139,463,182
| 736
| 180
|
Apache-2.0
| 2023-08-14T06:28:47
| 2018-07-02T15:47:02
|
Python
|
UTF-8
|
Python
| false
| false
| 29,710
|
py
|
feature_skew_detector.py
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finds feature skew between baseline and test examples.
Feature skew is detected by joining baseline and test examples on a
fingerprint computed based on the provided identifier features. For each pair,
the feature skew detector compares the fingerprint of each baseline feature
value to the fingerprint of the corresponding test feature value.
If there is a mismatch in feature values, if the feature is only in the baseline
example, or if the feature is only in the test example, feature skew is
reported in the skew results and (optionally) a skew sample is output with
baseline-test example pairs that exhibit the feature skew.
For example, given the following examples with an identifier feature of 'id':
Baseline
features {
feature {
key: "id"
value { bytes_list {
value: "id_1"
      }}
    }
feature {
key: "float_values"
value { float_list {
value: 1.0
value: 2.0
}}
}
}
Test
features {
feature {
key: "id"
value { bytes_list {
value: "id_1"
      }}
    }
feature {
key: "float_values"
value { float_list {
value: 1.0
value: 3.0
}}
}
}
The following feature skew will be detected:
feature_name: "float_values"
baseline_count: 1
test_count: 1
mismatch_count: 1
diff_count: 1
In addition to feature-level skew information, the pipeline also produces
overall metadata about the matching process. See
feature_skew_results_pb2.MatchStats.
Confusion counts can also be generated by passing a list of ConfusionConfig
objects specifying features for analysis. If enabled for a feature, the output
will include a collection of counts in the form (base-value, test-value, count).
This represents a count across baseline feature value and test feature value
tuples for matched (by id) examples.
"""
from typing import Any, Dict, Iterable, Iterator, List, Optional, Tuple, Union
import apache_beam as beam
import farmhash
import numpy as np
import tensorflow as tf
from tensorflow_data_validation import constants
from tensorflow_data_validation import types
from tensorflow_data_validation.utils import artifacts_io_impl
from tensorflow_data_validation.skew.protos import feature_skew_results_pb2
_BASELINE_KEY = "base"
_TEST_KEY = "test"
SKEW_RESULTS_KEY = "skew_results"
SKEW_PAIRS_KEY = "skew_pairs"
CONFUSION_KEY = "confusion_pairs"
MATCH_STATS_KEY = "match_stats"
_KEYED_EXAMPLE_KEY = "keyed_example"
_MISSING_IDS_KEY = "missing_ids"
_EXAMPLES_WITH_MISSING_IDENTIFIER_COUNTER = beam.metrics.Metrics.counter(
constants.METRICS_NAMESPACE, "examples_with_missing_identifier_features")
_PerFeatureSkew = List[Tuple[str, feature_skew_results_pb2.FeatureSkew]]
_PairOrFeatureSkew = Union[feature_skew_results_pb2.SkewPair,
Tuple[str, feature_skew_results_pb2.FeatureSkew]]
def _get_serialized_feature(feature: tf.train.Feature,
float_round_ndigits: Optional[int]) -> str:
"""Gets serialized feature, rounding floats as specified.
Args:
feature: The feature to serialize.
float_round_ndigits: Number of digits of precision after the decimal point
to which to round float values before serializing the feature.
Returns:
The serialized feature.
"""
kind = feature.WhichOneof("kind")
if (kind == "bytes_list" or kind == "int64_list"):
return str(feature.SerializePartialToString(deterministic=True))
elif kind == "float_list":
if float_round_ndigits is None:
return str(feature.SerializePartialToString(deterministic=True))
else:
rounded_feature = tf.train.Feature()
for value in feature.float_list.value:
rounded_feature.float_list.value.append(
round(value, float_round_ndigits))
return str(rounded_feature.SerializePartialToString(deterministic=True))
else:
raise ValueError("Unknown feature type detected: %s" % kind)
def _compute_skew_for_features(
base_feature: tf.train.Feature, test_feature: tf.train.Feature,
float_round_ndigits: Optional[int],
feature_name: str) -> feature_skew_results_pb2.FeatureSkew:
"""Computes feature skew for a pair of baseline and test features.
Args:
base_feature: The feature to compare from the baseline example.
test_feature: The feature to compare from the test example.
    float_round_ndigits: Number of digits of precision after the decimal point
      to which to round float values before comparison.
feature_name: The name of the feature for which to compute skew between the
examples.
Returns:
A FeatureSkew proto containing information about skew for the specified
feature.
"""
skew_results = feature_skew_results_pb2.FeatureSkew()
skew_results.feature_name = feature_name
if not _empty_or_null(base_feature) and not _empty_or_null(test_feature):
skew_results.base_count = 1
skew_results.test_count = 1
if (farmhash.fingerprint64(
_get_serialized_feature(base_feature,
float_round_ndigits)) == farmhash.fingerprint64(
_get_serialized_feature(
test_feature, float_round_ndigits))):
skew_results.match_count = 1
else:
skew_results.mismatch_count = 1
elif not _empty_or_null(base_feature):
skew_results.base_count = 1
skew_results.base_only = 1
elif not _empty_or_null(test_feature):
skew_results.test_count = 1
skew_results.test_only = 1
elif (test_feature is None) == (base_feature is None):
# Both features are None, or present with zero values.
skew_results.match_count = 1
return skew_results
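# Per-feature outcomes of _compute_skew_for_features (a descriptive summary of
# the branches above):
#   both present, equal fingerprints     -> base_count=1, test_count=1, match_count=1
#   both present, different fingerprints -> base_count=1, test_count=1, mismatch_count=1
#   only in baseline                     -> base_count=1, base_only=1
#   only in test                         -> test_count=1, test_only=1
#   absent in both, or empty in both     -> match_count=1
#   one absent, the other present but empty -> no counts are set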
def _compute_skew_for_examples(
base_example: tf.train.Example, test_example: tf.train.Example,
features_to_ignore: List[tf.train.Feature],
float_round_ndigits: Optional[int]) -> Tuple[_PerFeatureSkew, bool]:
"""Computes feature skew for a pair of baseline and test examples.
Args:
base_example: The baseline example to compare.
test_example: The test example to compare.
features_to_ignore: The features not to compare.
    float_round_ndigits: Number of digits of precision after the decimal point
      to which to round float values before comparison.
Returns:
A tuple containing a list of the skew information for each feature
and a boolean indicating whether skew was found in any feature, in which
case the examples are considered skewed.
"""
all_feature_names = set()
all_feature_names.update(base_example.features.feature.keys())
all_feature_names.update(test_example.features.feature.keys())
feature_names = all_feature_names.difference(set(features_to_ignore))
result = list()
is_skewed = False
for name in feature_names:
base_feature = base_example.features.feature.get(name)
test_feature = test_example.features.feature.get(name)
skew = _compute_skew_for_features(base_feature, test_feature,
float_round_ndigits, name)
if skew.match_count == 0:
# If any features have a mismatch or are found only in the baseline or
# test example, the examples are considered skewed.
is_skewed = True
result.append((name, skew))
return result, is_skewed
def _merge_feature_skew_results(
skew_results: Iterable[feature_skew_results_pb2.FeatureSkew]
) -> feature_skew_results_pb2.FeatureSkew:
"""Merges multiple FeatureSkew protos into a single FeatureSkew proto.
Args:
skew_results: An iterable of FeatureSkew protos.
Returns:
A FeatureSkew proto containing the result of merging the inputs.
"""
result = feature_skew_results_pb2.FeatureSkew()
for skew_result in skew_results:
if not result.feature_name:
result.feature_name = skew_result.feature_name
elif result.feature_name != skew_result.feature_name:
raise ValueError("Attempting to merge skew results with different names.")
result.base_count += skew_result.base_count
result.test_count += skew_result.test_count
result.match_count += skew_result.match_count
result.base_only += skew_result.base_only
result.test_only += skew_result.test_only
result.mismatch_count += skew_result.mismatch_count
result.diff_count = (
result.base_only + result.test_only + result.mismatch_count)
return result
def _construct_skew_pair(
per_feature_skew: List[Tuple[str, feature_skew_results_pb2.FeatureSkew]],
base_example: tf.train.Example,
test_example: tf.train.Example) -> feature_skew_results_pb2.SkewPair:
"""Constructs a SkewPair from baseline and test examples.
Args:
per_feature_skew: Skew results for each feature in the input examples.
base_example: The baseline example to include.
test_example: The test example to include.
Returns:
A SkewPair containing examples that exhibit some skew.
"""
skew_pair = feature_skew_results_pb2.SkewPair()
skew_pair.base = base_example.SerializeToString()
skew_pair.test = test_example.SerializeToString()
for feature_name, skew_result in per_feature_skew:
if skew_result.match_count == 1:
skew_pair.matched_features.append(feature_name)
elif skew_result.base_only == 1:
skew_pair.base_only_features.append(feature_name)
elif skew_result.test_only == 1:
skew_pair.test_only_features.append(feature_name)
elif skew_result.mismatch_count == 1:
skew_pair.mismatched_features.append(feature_name)
return skew_pair
def _empty_or_null(feature: Optional[tf.train.Feature]) -> bool:
"""True if feature is None or holds no values."""
if feature is None:
return True
if len(feature.bytes_list.value) + len(feature.int64_list.value) + len(
feature.float_list.value) == 0:
return True
return False
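# _empty_or_null in a nutshell (illustrative):
#     _empty_or_null(None)               -> True
#     _empty_or_null(tf.train.Feature()) -> True   (present but holds no values)
#     f = tf.train.Feature(int64_list=tf.train.Int64List(value=[7]))
#     _empty_or_null(f)                  -> False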
class _ExtractIdentifiers(beam.DoFn):
"""DoFn that extracts a unique fingerprint for each example.
This class computes fingerprints by combining the identifier features.
"""
def __init__(self, identifier_features: List[types.FeatureName],
float_round_ndigits: Optional[int]) -> None:
"""Initializes _ExtractIdentifiers.
Args:
identifier_features: The names of the features to use to compute a
fingerprint for the example.
      float_round_ndigits: Number of digits of precision after the decimal point
        to which to round float values before generating the fingerprint.
"""
self._identifier_features = sorted(identifier_features)
self._float_round_ndigits = float_round_ndigits
def process(self, example: tf.train.Example):
serialized_feature_values = []
for identifier_feature in self._identifier_features:
feature = example.features.feature.get(identifier_feature)
if _empty_or_null(feature):
_EXAMPLES_WITH_MISSING_IDENTIFIER_COUNTER.inc()
yield beam.pvalue.TaggedOutput(_MISSING_IDS_KEY, 1)
return
else:
serialized_feature_values.append(
_get_serialized_feature(feature, self._float_round_ndigits))
keyed_example = (str(
farmhash.fingerprint64("".join(serialized_feature_values))), example)
yield beam.pvalue.TaggedOutput(_KEYED_EXAMPLE_KEY, keyed_example)
class ConfusionConfig(object):
"""Configures confusion analysis."""
def __init__(self, name: types.FeatureName):
self.name = name
_ConfusionFeatureValue = bytes
_MISSING_VALUE_PLACEHOLDER = b"__MISSING_VALUE__"
def _get_confusion_feature_value(
ex: tf.train.Example,
name: types.FeatureName) -> Optional[_ConfusionFeatureValue]:
"""Returns a value for a named feature for confusion analysis."""
f = ex.features.feature.get(name, None)
if f is None:
return _MISSING_VALUE_PLACEHOLDER
if f.int64_list.value:
raise ValueError("int64 features unsupported for confusion analysis.")
if f.float_list.value:
raise ValueError("float features unsupported for confusion analysis.")
if len(f.bytes_list.value) > 1:
raise ValueError("multivalent features unsupported for confusion analysis.")
if not f.bytes_list.value:
return _MISSING_VALUE_PLACEHOLDER
return f.bytes_list.value[0]
def _yield_confusion_pairs(
ex_base: tf.train.Example, ex_test: tf.train.Example,
configs: List[ConfusionConfig]
) -> Iterator[Tuple[_ConfusionFeatureValue, _ConfusionFeatureValue,
types.FeatureName]]:
"""Yield base/test value pairs from a matching pair of examples."""
for config in configs:
base_val = _get_confusion_feature_value(ex_base, config.name)
test_val = _get_confusion_feature_value(ex_test, config.name)
if base_val is not None and test_val is not None:
yield base_val, test_val, config.name
def _confusion_count_to_proto(
values_count: Tuple[Tuple[_ConfusionFeatureValue, _ConfusionFeatureValue,
types.FeatureName], int]
) -> feature_skew_results_pb2.ConfusionCount:
"""Convert a confusion count tuple and count to string."""
(base_val, test_val, feature_name), count = values_count
cc = feature_skew_results_pb2.ConfusionCount(
feature_name=feature_name, count=count)
cc.base.bytes_value = base_val
cc.test.bytes_value = test_val
return cc
def _make_match_stats_counter(base_with_id_count=0,
test_with_id_count=0,
id_count=0,
missing_base_count=0,
missing_test_count=0,
pairs_count=0,
duplicate_id_count=0,
ids_missing_in_base_count=0,
ids_missing_in_test_count=0) -> np.ndarray:
return np.array([
base_with_id_count, test_with_id_count, id_count, missing_base_count,
missing_test_count, pairs_count, duplicate_id_count,
ids_missing_in_base_count, ids_missing_in_test_count
],
dtype=np.int64)
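# How the nine slots are used by callers and by _MergeMatchStatsFn below
# (a descriptive reading of the code, not upstream documentation):
#   [0] base examples with an id        [1] test examples with an id
#   [2] distinct identifiers seen       [3] ids with no base example
#   [4] ids with no test example        [5] matching (base, test) pairs
#   [6] ids with duplicate examples     [7] base examples missing id features
#   [8] test examples missing id features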
class _MergeMatchStatsFn(beam.CombineFn):
"""CombineFn to generate MatchStats."""
def create_accumulator(self):
return _make_match_stats_counter()
def add_input(self, mutable_accumulator: np.ndarray,
element: np.ndarray) -> np.ndarray:
mutable_accumulator += element
return mutable_accumulator
def merge_accumulators(self,
accumulators: Iterable[np.ndarray]) -> np.ndarray:
it = iter(accumulators)
acc = next(it)
for a in it:
acc += a
return acc
def extract_output(
self, accumulator: np.ndarray) -> feature_skew_results_pb2.MatchStats:
return feature_skew_results_pb2.MatchStats(
base_with_id_count=accumulator[0],
test_with_id_count=accumulator[1],
identifiers_count=accumulator[2],
ids_missing_in_base_count=accumulator[3],
ids_missing_in_test_count=accumulator[4],
matching_pairs_count=accumulator[5],
duplicate_id_count=accumulator[6],
base_missing_id_count=accumulator[7],
test_missing_id_count=accumulator[8])
class _ComputeSkew(beam.DoFn):
"""DoFn that computes skew for each pair of examples."""
def __init__(self, features_to_ignore: List[tf.train.Feature],
float_round_ndigits: Optional[int], allow_duplicate_identifiers,
confusion_configs: List[ConfusionConfig]) -> None:
"""Initializes _ComputeSkew.
Args:
features_to_ignore: Names of features that are ignored in skew detection.
      float_round_ndigits: Number of digits of precision after the decimal point
        to which to round float values before detecting skew.
allow_duplicate_identifiers: If set, skew detection will be done on
examples for which there are duplicate identifier feature values. In
this case, the counts in the FeatureSkew result are based on each
baseline-test example pair analyzed. Examples with given identifier
feature values must all fit in memory.
confusion_configs: Optional list of ConfusionConfig objects describing
per-feature config for value confusion analysis.
"""
self._features_to_ignore = features_to_ignore
self._float_round_ndigits = float_round_ndigits
self._allow_duplicate_identifiers = allow_duplicate_identifiers
self._skipped_duplicate_identifiers_counter = beam.metrics.Metrics.counter(
constants.METRICS_NAMESPACE, "examplediff_skip_dupe_id")
self._ids_counter = beam.metrics.Metrics.counter(
constants.METRICS_NAMESPACE, "examplediff_ids_counter")
self._pairs_counter = beam.metrics.Metrics.counter(
constants.METRICS_NAMESPACE, "examplediff_pairs_counter")
self._confusion_configs = confusion_configs
def process(
self, element: Tuple[str, Dict[str, Iterable[Any]]]
) -> Iterable[_PairOrFeatureSkew]:
(_, examples) = element
base_examples = list(examples.get(_BASELINE_KEY))
test_examples = list(examples.get(_TEST_KEY))
match_stats = _make_match_stats_counter(
len(base_examples),
len(test_examples),
1,
0 if base_examples else 1,
0 if test_examples else 1,
len(base_examples) * len(test_examples),
1 if len(base_examples) > 1 or len(test_examples) > 1 else 0,
)
yield beam.pvalue.TaggedOutput(MATCH_STATS_KEY, match_stats)
self._ids_counter.inc(1)
self._pairs_counter.inc(len(base_examples) * len(test_examples))
if not self._allow_duplicate_identifiers:
if len(base_examples) > 1 or len(test_examples) > 1:
self._skipped_duplicate_identifiers_counter.inc(1)
return
if base_examples and test_examples:
for base_example in base_examples:
for test_example in test_examples:
result, is_skewed = _compute_skew_for_examples(
base_example, test_example, self._features_to_ignore,
self._float_round_ndigits)
if is_skewed:
skew_pair = _construct_skew_pair(result, base_example,
test_example)
yield beam.pvalue.TaggedOutput(SKEW_PAIRS_KEY, skew_pair)
for each in result:
yield beam.pvalue.TaggedOutput(SKEW_RESULTS_KEY, each)
if self._confusion_configs is not None:
for pair in _yield_confusion_pairs(base_example, test_example,
self._confusion_configs):
yield beam.pvalue.TaggedOutput(CONFUSION_KEY, pair)
def _extract_compute_skew_result(
results: beam.pvalue.DoOutputsTuple
) -> Tuple[beam.PCollection[Tuple[str, feature_skew_results_pb2.FeatureSkew]],
beam.PCollection[feature_skew_results_pb2.SkewPair],
beam.PCollection[np.ndarray], Optional[beam.PCollection[Tuple[
_ConfusionFeatureValue, _ConfusionFeatureValue, str]]]]:
"""Extracts results of _ComputeSkew and fixes type hints."""
# Fix output type hints.
# TODO(b/211806179): Revert this hack.
results_skew_results = (
results[SKEW_RESULTS_KEY]
| "FixSkewResultsTypeHints" >> beam.Map(lambda x: x).with_output_types(
Tuple[str, feature_skew_results_pb2.FeatureSkew]))
results_skew_pairs = (
results[SKEW_PAIRS_KEY]
| "FixSkewPairsTypeHints" >> beam.Map(lambda x: x).with_output_types(
feature_skew_results_pb2.SkewPair))
results_match_stats = (
results[MATCH_STATS_KEY]
| "FixMatchStatsTypeHints" >> beam.Map(lambda x: x).with_output_types(
np.ndarray))
try:
results_confusion_tuples = (
results[CONFUSION_KEY]
| "FixConfusionTypeHints" >> beam.Map(lambda x: x).with_output_types(
Tuple[_ConfusionFeatureValue, _ConfusionFeatureValue, str]))
except ValueError:
results_confusion_tuples = None
return (results_skew_results, results_skew_pairs, results_match_stats,
results_confusion_tuples)
def _extract_extract_identifiers_result(
results_base: beam.pvalue.DoOutputsTuple,
results_test: beam.pvalue.DoOutputsTuple
) -> Tuple[beam.PCollection[Tuple[str, tf.train.Example]],
beam.PCollection[np.ndarray], beam.PCollection[Tuple[
str, tf.train.Example]], beam.PCollection[np.ndarray]]:
"""Extracts results of _ExtractIdentifiers and fixes type hints."""
keyed_base_examples = (
results_base[_KEYED_EXAMPLE_KEY] | "FixKeyedBaseType" >>
beam.Map(lambda x: x).with_output_types(Tuple[str, tf.train.Example]))
missing_id_base_examples = (
results_base[_MISSING_IDS_KEY]
| "BaseMissingCountsToMatchCounter" >>
beam.Map(lambda x: _make_match_stats_counter(ids_missing_in_base_count=x))
)
keyed_test_examples = (
results_test[_KEYED_EXAMPLE_KEY] | "FixKeyedTestType" >>
beam.Map(lambda x: x).with_output_types(Tuple[str, tf.train.Example]))
missing_id_test_examples = (
results_test[_MISSING_IDS_KEY]
| "TestMissingCountsToMatchCounter" >>
beam.Map(lambda x: _make_match_stats_counter(ids_missing_in_test_count=x))
)
return (keyed_base_examples, missing_id_base_examples, keyed_test_examples,
missing_id_test_examples)
class DetectFeatureSkewImpl(beam.PTransform):
"""Identifies feature skew in baseline and test examples.
This PTransform returns a dict of PCollections containing:
SKEW_RESULTS_KEY: Aggregated skew statistics (containing, e.g., mismatch
count, baseline only, test only) for each feature; and
SKEW_PAIRS_KEY: A sample of skewed example pairs (if sample_size is > 0).
MATCH_STATS: A PCollection containing a single MatchStats proto.
CONFUSION_KEY: (if configured) counts of paired feature values.
"""
def __init__(
self,
identifier_features: List[types.FeatureName],
features_to_ignore: Optional[List[types.FeatureName]] = None,
sample_size: int = 0,
float_round_ndigits: Optional[int] = None,
allow_duplicate_identifiers: bool = False,
confusion_configs: Optional[List[ConfusionConfig]] = None) -> None:
"""Initializes DetectFeatureSkewImpl.
Args:
identifier_features: The names of the features to use to identify an
example.
features_to_ignore: The names of the features for which skew detection is
not done.
sample_size: Size of the sample of baseline-test example pairs that
exhibit skew to include in the skew results.
float_round_ndigits: Number of digits of precision after the decimal point
to which to round float values before detecting skew.
allow_duplicate_identifiers: If set, skew detection will be done on
examples for which there are duplicate identifier feature values. In
this case, the counts in the FeatureSkew result are based on each
baseline-test example pair analyzed. Examples with given identifier
feature values must all fit in memory.
confusion_configs: Optional list of ConfusionConfig objects describing
per-feature config for value confusion analysis. If provided, the result
will contain a value keyed under CONFUSION_KEY containing a PCollection
of ConfusionCount protos.
"""
if not identifier_features:
raise ValueError("At least one feature name must be specified in "
"identifier_features.")
self._identifier_features = identifier_features
self._sample_size = sample_size
self._float_round_ndigits = float_round_ndigits
if features_to_ignore is not None:
self._features_to_ignore = features_to_ignore + identifier_features
else:
self._features_to_ignore = identifier_features
self._allow_duplicate_identifiers = allow_duplicate_identifiers
self._confusion_configs = ([] if confusion_configs is None else
confusion_configs)
def expand(
self, pcollections: Tuple[beam.pvalue.PCollection,
beam.pvalue.PCollection]
) -> Dict[str, beam.pvalue.PCollection]:
base_examples, test_examples = pcollections
# Extract keyed base examples and counts of missing keys.
keyed_base_examples_result = (
base_examples | "ExtractBaseIdentifiers" >> beam.ParDo(
_ExtractIdentifiers(self._identifier_features,
self._float_round_ndigits)).with_outputs(
_KEYED_EXAMPLE_KEY, _MISSING_IDS_KEY))
# Extract keyed test examples and counts of missing keys.
keyed_test_examples_result = (
test_examples | "ExtractTestIdentifiers" >> beam.ParDo(
_ExtractIdentifiers(self._identifier_features,
self._float_round_ndigits)).with_outputs(
_KEYED_EXAMPLE_KEY, _MISSING_IDS_KEY))
(keyed_base_examples, missing_id_base_examples, keyed_test_examples,
missing_id_test_examples) = _extract_extract_identifiers_result(
keyed_base_examples_result, keyed_test_examples_result)
outputs = [SKEW_RESULTS_KEY, SKEW_PAIRS_KEY, MATCH_STATS_KEY]
if self._confusion_configs:
outputs.append(CONFUSION_KEY)
results = (
{
"base": keyed_base_examples,
"test": keyed_test_examples
} | "JoinExamples" >> beam.CoGroupByKey()
| "ComputeSkew" >> beam.ParDo(
_ComputeSkew(self._features_to_ignore, self._float_round_ndigits,
self._allow_duplicate_identifiers,
self._confusion_configs)).with_outputs(*outputs))
(results_skew_results, results_skew_pairs, results_match_stats,
results_confusion_tuples) = _extract_compute_skew_result(results)
outputs = {}
# Merge skew results.
skew_results = (
results_skew_results
| "MergeSkewResultsPerFeature" >> # pytype: disable=attribute-error
beam.CombinePerKey(_merge_feature_skew_results)
| "DropKeys" >> beam.Values())
outputs[SKEW_RESULTS_KEY] = skew_results
# Merge match stats.
match_stats = (
[
results_match_stats, missing_id_test_examples,
missing_id_base_examples
]
| "FlattenMatchStats" >> beam.Flatten()
| "MergeMatchStats" >> beam.CombineGlobally(_MergeMatchStatsFn()))
outputs[MATCH_STATS_KEY] = match_stats
# Sample skew pairs.
skew_pairs = (
results_skew_pairs | "SampleSkewPairs" >> # pytype: disable=attribute-error
beam.combiners.Sample.FixedSizeGlobally(self._sample_size)
# Sampling results in a pcollection with a single element consisting of
# a list of the samples. Convert this to a pcollection of samples.
| "Flatten" >> beam.FlatMap(lambda x: x))
outputs[SKEW_PAIRS_KEY] = skew_pairs
if results_confusion_tuples is not None:
confusion_counts = (
results_confusion_tuples
| "CountConfusion" >> beam.combiners.Count.PerElement()
| "MakeConfusionProto" >> beam.Map(_confusion_count_to_proto))
outputs[CONFUSION_KEY] = confusion_counts
return outputs
def skew_results_sink(output_path_prefix: str) -> beam.PTransform:
"""Record based PSink for FeatureSkew protos."""
return artifacts_io_impl.feature_skew_sink(
output_path_prefix,
feature_skew_results_pb2.FeatureSkew)
def skew_pair_sink(output_path_prefix: str) -> beam.PTransform:
"""Record based PSink for SkewPair protos."""
return artifacts_io_impl.feature_skew_sink(
output_path_prefix,
feature_skew_results_pb2.SkewPair)
def confusion_count_sink(output_path_prefix: str) -> beam.PTransform:
"""Record based PSink for ConfusionCount protos."""
return artifacts_io_impl.feature_skew_sink(
output_path_prefix,
feature_skew_results_pb2.ConfusionCount)
def match_stats_sink(output_path_prefix: str) -> beam.PTransform:
"""Record based PSink for MatchStats protos."""
return artifacts_io_impl.feature_skew_sink(
output_path_prefix,
feature_skew_results_pb2.MatchStats)
def skew_results_iterator(
input_pattern_prefix) -> Iterator[feature_skew_results_pb2.FeatureSkew]:
"""Reads records written by skew_results_sink."""
return artifacts_io_impl.default_record_reader(
input_pattern_prefix + "*-of-*", feature_skew_results_pb2.FeatureSkew)
def skew_pair_iterator(
input_pattern_prefix) -> Iterator[feature_skew_results_pb2.SkewPair]:
"""Reads records written by skew_pair_sink."""
return artifacts_io_impl.default_record_reader(
input_pattern_prefix + "*-of-*", feature_skew_results_pb2.SkewPair)
def match_stats_iterator(
input_pattern_prefix) -> Iterator[feature_skew_results_pb2.MatchStats]:
"""Reads records written by match_stats_sink."""
return artifacts_io_impl.default_record_reader(
input_pattern_prefix + "*-of-*", feature_skew_results_pb2.MatchStats)
def confusion_count_iterator(
input_pattern_prefix) -> Iterator[feature_skew_results_pb2.ConfusionCount]:
"""Reads records written by confusion_count_sink."""
return artifacts_io_impl.default_record_reader(
input_pattern_prefix + "*-of-*", feature_skew_results_pb2.ConfusionCount)
|
cd98995f07a9578619cba93d4fdd0ff7a5404f48
|
01b9d7d2da922589906d755e4c5d63799281d4c8
|
/porcupine/pluginloader.py
|
deaccf938d42497f6b8cf8ef3da1c36d7647c2d4
|
[
"MIT"
] |
permissive
|
Akuli/porcupine
|
a2c3c0b3263a8b16649d5007f7c1d96cbec7dfc0
|
2f705481ef7bd9ed4516743ad3fd6469c5f2b276
|
refs/heads/main
| 2023-08-25T00:09:19.294259
| 2023-08-05T10:28:03
| 2023-08-05T10:28:03
| 81,675,927
| 131
| 59
|
MIT
| 2023-08-11T21:46:05
| 2017-02-11T19:17:09
|
Python
|
UTF-8
|
Python
| false
| false
| 12,798
|
py
|
pluginloader.py
|
"""Loads plugins from ``porcupine.plugins``."""
# Many things are wrapped in try/except here to allow writing Porcupine
# plugins using Porcupine itself, so Porcupine must keep running even if the
# plugins are broken.
from __future__ import annotations
import argparse
import dataclasses
import enum
import importlib.machinery
import logging
import pkgutil
import random
import time
import traceback
from typing import Any, Iterable, List, Sequence
import toposort
from porcupine import get_main_window
from porcupine.plugins import __path__ as plugin_paths
from porcupine.settings import global_settings
log = logging.getLogger(__name__)
class Status(enum.Enum):
"""
This :mod:`enum` represents the status of the plugin in the
currently running Porcupine process.
.. data:: LOADING
The plugin hasn't been set up successfully yet, but no errors
preventing the setup have occurred.
.. data:: ACTIVE
The plugin was imported and its ``setup()`` function was called successfully.
.. data:: DISABLED_BY_SETTINGS
The plugin wasn't loaded because it's in the ``disabled_plugins``
setting. See :mod:`porcupine.settings`.
.. data:: DISABLED_ON_COMMAND_LINE
The plugin wasn't loaded because it was listed in a
``--without-plugins`` argument given to Porcupine.
.. data:: IMPORT_FAILED
Importing the plugin raised an error.
.. data:: SETUP_FAILED
The plugin was imported successfully, but its ``setup()`` function
raised an exception or logged an error.
In a plugin named ``foo``, any message logged with severity ``ERROR``
or ``CRITICAL`` to the logger named ``porcupine.plugins.foo`` counts as
logging an error. Therefore you can do this::
import logging
log = logging.getLogger(__name__) # __name__ == "porcupine.plugins.foo"
def setup() -> None:
if bar_is_installed:
...
else:
log.error("bar is not installed")
When bar is not installed, this plugin will show a one-line error
message in the plugin manager and the terminal. If an exception is
raised, the full traceback is shown instead.
.. data:: CIRCULAR_DEPENDENCY_ERROR
Plugins with this status were imported, but their ``setup_before`` and
``setup_after`` lists make it impossible to determine the correct order
for calling their ``setup()`` function. For example, if plugin *A*
should be set up before *B*, *B* should be set up before *C* and *C*
should be set up before *A*, then *A*, *B* and *C* will all fail with
``CIRCULAR_DEPENDENCY_ERROR``.
"""
LOADING = enum.auto()
ACTIVE = enum.auto()
DISABLED_BY_SETTINGS = enum.auto()
DISABLED_ON_COMMAND_LINE = enum.auto()
IMPORT_FAILED = enum.auto()
SETUP_FAILED = enum.auto()
CIRCULAR_DEPENDENCY_ERROR = enum.auto()
@dataclasses.dataclass(eq=False)
class PluginInfo:
"""
This :mod:`dataclass <dataclasses>` represents a plugin.
It's usually better to use ``info.setup_before``
instead of accessing ``info.module.setup_before`` directly.
Not all plugins define a ``setup_before`` variable, and if it's not present,
then ``info.setup_before`` is an empty set.
This also applies to ``setup_after``.
The value of *error* depends on *status*:
* If *status* is ``LOADING``, ``ACTIVE``, ``DISABLED_BY_SETTINGS`` or
``DISABLED_ON_COMMAND_LINE``, then *error* is ``None``.
* If *status* is ``IMPORT_FAILED`` or ``SETUP_FAILED``, then *error*
is a Python error message, starting with
``Traceback (most recent call last):``.
* If *status* is ``CIRCULAR_DEPENDENCY_ERROR``, then *error* is a
user-readable one-line message.
"""
name: str
came_with_porcupine: bool
status: Status
module: Any | None # you have to check for None, otherwise mypy won't complain
error: str | None
_mutable_plugin_infos: list[PluginInfo] = []
plugin_infos: Sequence[PluginInfo] = _mutable_plugin_infos # changing content is mypy error
_dependencies: dict[PluginInfo, set[PluginInfo]] = {}
def _run_setup_argument_parser_function(info: PluginInfo, parser: argparse.ArgumentParser) -> None:
assert info.status == Status.LOADING
assert info.module is not None
if hasattr(info.module, "setup_argument_parser"):
start = time.perf_counter()
try:
info.module.setup_argument_parser(parser)
except Exception:
log.exception(f"{info.name}.setup_argument_parser() doesn't work")
info.status = Status.SETUP_FAILED
info.error = traceback.format_exc()
duration = time.perf_counter() - start
log.debug("ran %s.setup_argument_parser() in %.3f milliseconds", info.name, duration * 1000)
def _import_plugin(info: PluginInfo) -> None:
assert info.status == Status.LOADING
assert info.module is None
log.debug(f"trying to import porcupine.plugins.{info.name}")
start = time.perf_counter()
try:
info.module = importlib.import_module(f"porcupine.plugins.{info.name}")
setup_before = set(getattr(info.module, "setup_before", []))
setup_after = set(getattr(info.module, "setup_after", []))
except Exception:
log.exception(f"can't import porcupine.plugins.{info.name}")
info.status = Status.IMPORT_FAILED
info.error = traceback.format_exc()
return
for dep_info in plugin_infos:
if dep_info.name in setup_after:
_dependencies[info].add(dep_info)
if dep_info.name in setup_before:
_dependencies[dep_info].add(info)
duration = time.perf_counter() - start
log.debug("imported porcupine.plugins.%s in %.3f milliseconds", info.name, duration * 1000)
# Remember to generate <<PluginsLoaded>> when this succeeds
def _run_setup_and_set_status(info: PluginInfo) -> None:
assert info.status == Status.LOADING
assert info.module is not None
error_log: list[logging.LogRecord] = []
logger = logging.getLogger(f"porcupine.plugins.{info.name}")
handler = logging.Handler()
handler.setLevel(logging.ERROR)
handler.emit = error_log.append # type: ignore
logger.addHandler(handler)
if hasattr(info.module, "setup"):
start = time.perf_counter()
try:
log.debug(f"calling porcupine.plugins.{info.name}.setup()")
info.module.setup()
except Exception:
log.exception(f"{info.name}.setup() doesn't work")
info.status = Status.SETUP_FAILED
info.error = traceback.format_exc()
else:
if error_log:
info.status = Status.SETUP_FAILED
info.error = "".join(
f"{record.levelname}: {record.message}\n" for record in error_log
)
else:
info.status = Status.ACTIVE
duration = time.perf_counter() - start
logger.debug("ran %s.setup() in %.3f milliseconds", info.name, duration * 1000)
else:
info.status = Status.SETUP_FAILED
info.error = (
"There is no setup() function. Make sure to include a setup function into your"
" plugin.\nTo learn more about Porcupine's plugin API, visit"
" https://akuli.github.io/porcupine/plugin-intro.html"
)
log.warning(f"Calling {info.name!r} plugin's setup() function failed.\n{info.error}")
logger.removeHandler(handler)
def _did_plugin_come_with_porcupine(finder: object) -> bool:
return isinstance(finder, importlib.machinery.FileFinder) and finder.path == plugin_paths[-1]
# undocumented on purpose, don't use in plugins
def import_plugins(disabled_on_command_line: list[str]) -> None:
assert not _mutable_plugin_infos and not _dependencies
_mutable_plugin_infos.extend(
PluginInfo(
name=name,
came_with_porcupine=_did_plugin_come_with_porcupine(finder),
status=Status.LOADING,
module=None,
error=None,
)
for finder, name, is_pkg in pkgutil.iter_modules(plugin_paths)
if not name.startswith("_")
)
_dependencies.update({info: set() for info in plugin_infos})
for info in _mutable_plugin_infos:
# If it's disabled in settings and on command line, then status is set
# to DISABLED_BY_SETTINGS. This makes more sense for the user of the
# plugin manager dialog.
if info.name in global_settings.get("disabled_plugins", List[str]):
info.status = Status.DISABLED_BY_SETTINGS
continue
if info.name in disabled_on_command_line:
info.status = Status.DISABLED_ON_COMMAND_LINE
continue
_import_plugin(info)
# undocumented on purpose, don't use in plugins
# TODO: document what setup_argument_parser() function in a plugin does
def run_setup_argument_parser_functions(parser: argparse.ArgumentParser) -> None:
for info in plugin_infos:
if info.status == Status.LOADING:
_run_setup_argument_parser_function(info, parser)
# undocumented on purpose, don't use in plugins
def run_setup_functions(shuffle: bool) -> None:
imported_infos = [info for info in plugin_infos if info.status == Status.LOADING]
    # The toposort will partially work even if there's a circular
    # dependency; the CircularDependencyError is raised only after doing
    # everything possible (see its source code).
loading_order = []
try:
toposort_result: Iterable[Iterable[PluginInfo]] = toposort.toposort(_dependencies)
for infos in toposort_result:
load_list = [info for info in infos if info.status == Status.LOADING]
if shuffle:
# for plugin developers wanting to make sure that the
# dependencies specified in setup_before and setup_after
# are correct
random.shuffle(load_list)
else:
# for consistency in UI (e.g. always same order of menu items)
load_list.sort(key=(lambda info: info.name))
loading_order.extend(load_list)
except toposort.CircularDependencyError as e:
log.exception("circular dependency")
for info in set(imported_infos) - set(loading_order):
info.status = Status.CIRCULAR_DEPENDENCY_ERROR
parts = ", ".join(f"{a} depends on {b}" for a, b in e.data.items())
info.error = f"Circular dependency error: {parts}"
for info in loading_order:
assert info.status == Status.LOADING
_run_setup_and_set_status(info)
get_main_window().event_generate("<<PluginsLoaded>>")
def can_setup_while_running(info: PluginInfo) -> bool:
"""
Returns whether the plugin can be set up now, without having to
restart Porcupine.
"""
if info.status not in {Status.DISABLED_BY_SETTINGS, Status.DISABLED_ON_COMMAND_LINE}:
return False
if info.module is None:
# Importing may give more information about dependencies, needed below
old_status = info.status
info.status = Status.LOADING
_import_plugin(info)
if info.status != Status.LOADING: # error
return False
info.status = old_status
# If a plugin defines setup_argument_parser, it likely wants it to run on
# startup, and now it's too late.
if hasattr(info.module, "setup_argument_parser"):
return False
# Check whether no other active plugin depends on loading after this plugin
setup_preventors = [
other.name
for other, other_must_setup_after_these in _dependencies.items()
if other.status == Status.ACTIVE and info in other_must_setup_after_these
]
if setup_preventors:
log.info(
f"can't setup {info.name} now because it must be done before setting up the following"
" plugins, which are already active: " + "\n".join(setup_preventors)
)
return False
return True
def setup_while_running(info: PluginInfo) -> None:
"""Run the ``setup_argument_parser()`` and ``setup()`` functions now.
Before calling this function, make sure that
:func:`can_setup_while_running` returns ``True``.
"""
info.status = Status.LOADING
dummy_parser = argparse.ArgumentParser()
_run_setup_argument_parser_function(info, dummy_parser)
if info.status != Status.LOADING: # error
return
_run_setup_and_set_status(info)
assert info.status != Status.LOADING
if info.status == Status.ACTIVE:
get_main_window().event_generate("<<PluginsLoaded>>")
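# What a plugin driven by this loader typically looks like (a sketch; "foo"
# and "bar" are hypothetical plugin names):
#
#     # porcupine/plugins/foo.py
#     setup_after = ["bar"]  # call bar's setup() before foo's
#
#     def setup() -> None:
#         ...  # add menu entries, bind virtual events, etc.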
|
9934dda70020d385f9cb8768a3835314c0f4174c
|
2f1e3f24f2798507c9eb73185a955c9bfb735140
|
/libreco/feature/multi_sparse.py
|
f411b2197aff9d8d1d9a72ffd68f946dd20c3c28
|
[
"MIT"
] |
permissive
|
massquantity/LibRecommender
|
e4f55b06b2208c794a3f97f7ff89413fa9beaffa
|
8d5fbe9c177f5b91c2b6f19a155a83320dd0e20c
|
refs/heads/master
| 2023-08-31T23:48:37.634663
| 2023-08-20T11:58:15
| 2023-08-20T11:58:15
| 174,493,761
| 251
| 55
|
MIT
| 2023-08-20T11:58:16
| 2019-03-08T07:58:27
|
Python
|
UTF-8
|
Python
| false
| false
| 5,187
|
py
|
multi_sparse.py
|
import itertools
import numpy as np
def get_multi_sparse_indices_matrix(
data, multi_sparse_col, multi_sparse_unique, is_train, is_ordered
):
"""Get all multi_sparse indices for all samples in data.
The function will consider each sub-feature of multi_sparse columns.
Parameters
----------
data : pandas.DataFrame
The original data.
    multi_sparse_col : list of [list of str]
        All multi_sparse feature names, grouped by field.
    multi_sparse_unique : dict of {str : numpy.ndarray}
        Unique values of all multi_sparse features, keyed by each field's
        first sub-feature, which serves as the field's representative.
is_train : bool
Whether the data is training data.
is_ordered : bool
Whether the `unique` values are sorted.
Returns
-------
multi_sparse_indices : numpy.ndarray
Multi_sparse indices of all multi_sparse features.
"""
from .sparse import column_sparse_indices
n_samples = len(data)
n_features = len(list(itertools.chain.from_iterable(multi_sparse_col)))
multi_sparse_indices = np.zeros((n_samples, n_features), dtype=np.int32)
i = 0
while i < n_features:
for field in multi_sparse_col:
unique_values = multi_sparse_unique[field[0]]
for col in field:
col_values = data[col].to_numpy()
multi_sparse_indices[:, i] = column_sparse_indices(
col_values, unique_values, is_train, is_ordered, multi_sparse=True
)
i += 1
return multi_sparse_indices
def get_multi_sparse_offset(multi_sparse_col, multi_sparse_unique):
unique_values = [
len(multi_sparse_unique[field[0]]) + 1 for field in multi_sparse_col
]
field_offset = np.cumsum(np.array([0, *unique_values])).tolist()[:-1]
offset = []
# each sub-feature will use same offset
for i, field in enumerate(multi_sparse_col):
offset.extend([field_offset[i]] * len(field))
return np.array(offset)
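# Worked example (added; column names are placeholders): with two fields whose
# representative columns have 3 and 5 unique values, each field reserves
# len(unique) + 1 slots (the extra slot is for out-of-vocabulary values), so
# the second field starts at offset 4 and every sub-feature shares its
# field's offset.
# >>> unique = {"g1": np.arange(3), "a1": np.arange(5)}
# >>> get_multi_sparse_offset([["g1", "g2"], ["a1", "a2", "a3"]], unique)
# array([0, 0, 4, 4, 4])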
def multi_sparse_oov(multi_sparse_col, multi_sparse_unique, extend=True):
unique_values = [
len(multi_sparse_unique[field[0]]) + 1 for field in multi_sparse_col
]
field_oov = np.cumsum(unique_values) - 1
if extend:
oov = []
for i, field in enumerate(multi_sparse_col):
oov.extend([field_oov[i]] * len(field))
return np.array(oov)
else:
return field_oov
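# Worked example (added), continuing the placeholder columns above: each
# field's OOV index is the last slot of its range, i.e. cumsum([4, 6]) - 1.
# >>> cols = [["g1", "g2"], ["a1", "a2", "a3"]]
# >>> multi_sparse_oov(cols, unique, extend=True)
# array([3, 3, 9, 9, 9])
# >>> multi_sparse_oov(cols, unique, extend=False)
# array([3, 9])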
def get_multi_sparse_info(
all_sparse_cols,
sparse_col,
multi_sparse_col,
sparse_unique,
multi_sparse_unique,
pad_val,
):
from .sparse import get_last_offset
from ..data import MultiSparseInfo
if not multi_sparse_col:
return
field_offset = [all_sparse_cols.index(field[0]) for field in multi_sparse_col]
field_length = [len(col) for col in multi_sparse_col]
feat_oov = multi_sparse_oov(multi_sparse_col, multi_sparse_unique, extend=False)
if sparse_col:
sparse_last_offset = get_last_offset(sparse_col, sparse_unique)
feat_oov += sparse_last_offset
return MultiSparseInfo(field_offset, field_length, feat_oov, pad_val)
def multi_sparse_col_map(multi_sparse_col):
"""Map sub-features in multi-sparse features to main-features.
    Each multi-sparse feature uses its first sub-feature as the representative.
    This function maps the remaining sub-features to that representative.
Parameters
----------
multi_sparse_col : list of [list of str]
All multi_sparse feature names.
Returns
-------
dict of {str : str}
"""
multi_sparse_map = dict()
for field in multi_sparse_col:
if len(field) > 1:
for col in field[1:]:
multi_sparse_map[col] = field[0]
return multi_sparse_map
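# Worked example (added, placeholder names): every non-leading sub-feature is
# mapped back to its field's first column; single-column fields are skipped.
# >>> multi_sparse_col_map([["g1", "g2", "g3"], ["solo"]])
# {'g2': 'g1', 'g3': 'g1'}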
def recover_sparse_cols(data_info):
"""Get the original nested multi_sparse columns from data_info."""
total_sparse_cols = data_info.sparse_col.name
sparse_cols, multi_sparse_cols = None, None
if data_info.sparse_unique_vals:
sparse_cols = [
col for col in total_sparse_cols if col in data_info.sparse_unique_vals
]
if data_info.multi_sparse_unique_vals:
multi_sparse_cols = []
i, field = 0, 0
while i < len(total_sparse_cols):
col = total_sparse_cols[i]
if col in data_info.multi_sparse_unique_vals:
field_len = data_info.multi_sparse_combine_info.field_len[field]
multi_sparse_cols.append(
[total_sparse_cols[k] for k in range(i, i + field_len)]
)
i += field_len
field += 1
else:
i += 1
return sparse_cols, multi_sparse_cols
def true_sparse_field_size(data_info, sparse_field_size, combiner):
"""Get the real sparse field size.
When using multi_sparse_combiner, field size will decrease.
"""
if data_info.multi_sparse_combine_info and combiner in ("sum", "mean", "sqrtn"):
field_length = data_info.multi_sparse_combine_info.field_len
return sparse_field_size - (sum(field_length) - len(field_length))
else:
return sparse_field_size
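# Worked example (added): combining collapses each multi-sparse field into a
# single embedding slot, so field lengths [2, 3] remove (2 + 3) - 2 = 3 slots.
# A throwaway SimpleNamespace stands in for the real data_info object here.
# >>> from types import SimpleNamespace
# >>> info = SimpleNamespace(
# ...     multi_sparse_combine_info=SimpleNamespace(field_len=[2, 3]))
# >>> true_sparse_field_size(info, 7, "sum")
# 4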
# ---- ContextLab/hypertools :: /hypertools/_shared/helpers.py (MIT) ----
#!/usr/bin/env python
"""
Helper functions
"""
##PACKAGES##
import functools
import sys
import numpy as np
import copy
from scipy.interpolate import PchipInterpolator as pchip
import seaborn as sns
import itertools
import pandas as pd
from matplotlib.lines import Line2D
np.seterr(divide='ignore', invalid='ignore')
def center(x):
assert type(x) is list, "Input data to center must be list"
x_stacked = np.vstack(x)
return [i - np.mean(x_stacked, 0) for i in x]
def scale(x):
assert type(x) is list, "Input data to scale must be list"
x_stacked = np.vstack(x)
m1 = np.min(x_stacked)
m2 = np.max(x_stacked - m1)
f = lambda x: 2*(np.divide(x - m1, m2)) - 1
return [f(i) for i in x]
def group_by_category(vals):
if any(isinstance(el, list) for el in vals):
vals = list(itertools.chain(*vals))
val_set = list(sorted(set(vals), key=list(vals).index))
return [val_set.index(val) for val in vals]
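# Illustrative example (added): categories are numbered by first appearance,
# so repeated values map back to the same index.
# >>> group_by_category(['dog', 'cat', 'dog'])
# [0, 1, 0]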
def vals2colors(vals, cmap='GnBu',res=100):
"""Maps values to colors
Args:
values (list or list of lists) - list of values to map to colors
cmap (str) - color map (default is 'GnBu')
res (int) - resolution of the color map (default: 100)
Returns:
list of rgb tuples
"""
# flatten if list of lists
if any(isinstance(el, list) for el in vals):
vals = list(itertools.chain(*vals))
# get palette from seaborn
palette = np.array(sns.color_palette(cmap, res))
ranks = np.digitize(vals, np.linspace(np.min(vals), np.max(vals)+1, res+1)) - 1
return [tuple(i) for i in palette[ranks, :]]
def vals2bins(vals,res=100):
"""Maps values to bins
Args:
values (list or list of lists) - list of values to map to colors
res (int) - resolution of the color map (default: 100)
Returns:
list of numbers representing bins
"""
# flatten if list of lists
if any(isinstance(el, list) for el in vals):
vals = list(itertools.chain(*vals))
return list(np.digitize(vals, np.linspace(np.min(vals), np.max(vals)+1, res+1)) - 1)
def interp_array(arr, interp_val=10):
    x = np.arange(0, len(arr), 1)
    xx = np.arange(0, len(arr) - 1, 1 / interp_val)
    q = pchip(x, arr)
    return q(xx)
def interp_array_list(arr_list, interp_val=10):
    smoothed = [np.zeros(arr_list[0].shape) for item in arr_list]
    for idx, arr in enumerate(arr_list):
        smoothed[idx] = interp_array(arr, interp_val)
    return smoothed
def parse_args(x,args):
args_list = []
for i,item in enumerate(x):
tmp = []
for ii, arg in enumerate(args):
if isinstance(arg, (tuple, list)):
if len(arg) == len(x):
tmp.append(arg[i])
else:
print('Error: arguments must be a list of the same length as x')
sys.exit(1)
else:
tmp.append(arg)
args_list.append(tuple(tmp))
return args_list
def parse_kwargs(x, kwargs):
kwargs_list = []
for i,item in enumerate(x):
tmp = {}
for kwarg in kwargs:
if isinstance(kwargs[kwarg], (tuple, list)):
if len(kwargs[kwarg]) == len(x):
tmp[kwarg]=kwargs[kwarg][i]
else:
tmp[kwarg] = None
else:
tmp[kwarg]=kwargs[kwarg]
kwargs_list.append(tmp)
return kwargs_list
def reshape_data(x, hue, labels):
categories = list(sorted(set(hue), key=list(hue).index))
x_stacked = np.vstack(x)
x_reshaped = [[] for _ in categories]
labels_reshaped = [[] for _ in categories]
if labels is None:
labels = [None]*len(hue)
for idx, (point, label) in enumerate(zip(hue, labels)):
x_reshaped[categories.index(point)].append(x_stacked[idx])
labels_reshaped[categories.index(point)].append(labels[idx])
return [np.vstack(i) for i in x_reshaped], labels_reshaped
def patch_lines(x):
"""
Draw lines between groups
"""
for idx in range(len(x)-1):
x[idx] = np.vstack([x[idx], x[idx+1][0,:]])
return x
def is_line(format_str):
if isinstance(format_str, np.bytes_):
format_str = format_str.decode('utf-8')
markers = list(map(lambda x: str(x), Line2D.markers.keys()))
return (format_str is None) or (all([str(symbol) not in format_str for symbol in markers]))
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
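# Illustrative usage (added): results are cached under the stringified
# arguments, so a repeated call is served from obj.cache instead of being
# recomputed.
# >>> @memoize
# ... def square(n):
# ...     return n * n
# >>> square(4)
# 16
# >>> square.cache
# {'(4,){}': 16}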
def get_type(data):
"""
Checks what the data type is and returns it as a string label
"""
from ..datageometry import DataGeometry
if isinstance(data, list):
if isinstance(data[0], (str, bytes)):
return 'list_str'
elif isinstance(data[0], (int, float)):
return 'list_num'
elif isinstance(data[0], np.ndarray):
return 'list_arr'
else:
raise TypeError('Unsupported data type passed. Supported types: '
'Numpy Array, Pandas DataFrame, String, List of strings'
', List of numbers')
elif isinstance(data, np.ndarray):
if isinstance(data[0][0], (str, bytes)):
return 'arr_str'
else:
return 'arr_num'
elif isinstance(data, pd.DataFrame):
return 'df'
elif isinstance(data, (str, bytes)):
return 'str'
elif isinstance(data, DataGeometry):
return 'geo'
else:
raise TypeError('Unsupported data type passed. Supported types: '
'Numpy Array, Pandas DataFrame, String, List of strings'
', List of numbers')
def convert_text(data):
dtype = get_type(data)
if dtype in ['list_str', 'str']:
data = np.array(data).reshape(-1, 1)
return data
def check_geo(geo):
""" Checks a geo and makes sure the text fields are not binary """
geo = copy.copy(geo)
def fix_item(item):
if isinstance(item, bytes):
return item.decode()
return item
def fix_list(lst):
return [fix_item(i) for i in lst]
if isinstance(geo.reduce, bytes):
geo.reduce = geo.reduce.decode()
for key in geo.kwargs.keys():
if geo.kwargs[key] is not None:
if isinstance(geo.kwargs[key], (list, np.ndarray)):
geo.kwargs[key] = fix_list(geo.kwargs[key])
elif isinstance(geo.kwargs[key], bytes):
geo.kwargs[key] = fix_item(geo.kwargs[key])
return geo
def get_dtype(data):
"""
Checks what the data type is and returns it as a string label
"""
from ..datageometry import DataGeometry
if isinstance(data, list):
return 'list'
elif isinstance(data, np.ndarray):
return 'arr'
elif isinstance(data, pd.DataFrame):
return 'df'
elif isinstance(data, (str, bytes)):
return 'str'
elif isinstance(data, DataGeometry):
return 'geo'
else:
raise TypeError('Unsupported data type passed. Supported types: '
'Numpy Array, Pandas DataFrame, String, List of strings'
', List of numbers')
# ---- aws-samples/deep-learning-models :: /models/vision/detection/awsdet/utils/keras/__init__.py (Apache-2.0 / MIT / BSD-2-Clause) ----
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
from .backbone import get_base_model, get_outputs
from .freeze import *
from . import optimizers
__all__ = ['get_base_model', 'get_outputs', 'freeze_model_layers', 'optimizers']
# ---- aturtur/cinema4d-scripts :: /AR_Scripts_1.74/MoGraph/AR_MoSelectionMerge.py (no license) ----
"""
AR_MoSelectionMerge
Author: Arttu Rautio (aturtur)
Website: http://aturtur.com/
Name-US: AR_MoSelectionMerge
Version: 1.0.1
Description-US: Merges selected MoGraph Selection Tag into one tag
Note: If you have nested MoGraph Generator, disable parent generators before running this script
Written for Maxon Cinema 4D R25.010
Python version 3.9.1
To Do:
- Option to keep old tags
Change log:
1.0.1 (29.03.2022) - Support for R25
"""
# Libraries
import c4d
from c4d.modules import mograph as mo
# Functions
def SortTags(items):
msTags = [] # Init list for polygon selection tags
objects = [] # Init list for objects
# Sort
for t in items:
if t.GetType() == 1021338: # MoGraph selection tag
msTags.append(t)
objects.append(t.GetObject().GetGUID())
objects = list(dict.fromkeys(objects)) # Remove duplicates
return msTags, objects
def MergeMoGraphSelectionTags(msTags, objects):
doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document
nameSet = False # If name is not set
for o in objects:
collectedMsTags = []
for t in msTags:
k = t.GetObject()
            if k is not None:
if k.GetGUID() == o:
collectedMsTags.append(t)
if len(collectedMsTags) >= 2:
mgSelTag = c4d.BaseTag(1021338) # Initialize MoGraph selection tag
selection = c4d.BaseSelect() # Initialize a base select
nameSet = False # If name is not set
for i in collectedMsTags: # Iterate through tags
if not nameSet: # If name is not set
mgSelTag.SetName(i.GetName()) # Set name
nameSet = True
selection.Merge(mo.GeGetMoDataSelection(i)) # Merge selections
i.GetObject().InsertTag(mgSelTag, i.GetObject().GetLastTag()) # Insert tag to object
doc.AddUndo(c4d.UNDOTYPE_NEWOBJ, mgSelTag) # Record undo for inserting a new tag
mo.GeSetMoDataSelection(mgSelTag, selection) # Set MoGraph selection
mgSelTag.SetBit(c4d.BIT_ACTIVE) # Select tag
for r in msTags:
doc.AddUndo(c4d.UNDOTYPE_DELETEOBJ, r) # Record undo for deleting a tag
                r.Remove() # Delete the tag
def main():
doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document
doc.StartUndo() # Start recording undos
collectedObjects = [] # Collect objects
collectedTags = [] # Collect tags
selection = doc.GetSelection() # Get selected items
for s in selection: # Iterate through selection
if (type(s).__name__ == "BaseObject"):
collectedObjects.append(s)
elif s.GetType() == 1021338:
collectedTags.append(s)
if len(collectedTags) == 0: # If no tags selected
for i, obj in enumerate(collectedObjects):
collectedTags = []
tags = obj.GetTags()
for t in tags:
if t.GetType() == 1021338:
collectedTags.append(t)
msTags, objects = SortTags(collectedTags)
            MergeMoGraphSelectionTags(msTags, objects) # Run the merge function
else: # If tags selected
msTags, objects = SortTags(selection)
        MergeMoGraphSelectionTags(msTags, objects) # Run the merge function
doc.EndUndo() # Stop recording undos
c4d.EventAdd() # Refresh Cinema 4D
# Execute main()
if __name__=='__main__':
main()
# ---- thorn-oss/perception :: /perception/hashers/__init__.py (Apache-2.0) ----
from .hasher import Hasher, ImageHasher, VideoHasher
from .image.average import AverageHash
from .image.phash import PHash, PHashU8, PHashF
from .image.wavelet import WaveletHash
from .image.opencv import MarrHildreth, BlockMean, ColorMoment
from .image.dhash import DHash
from .video.framewise import FramewiseHasher
from .video.scenes import SimpleSceneDetection
from .video.tmk import TMKL1, TMKL2
__all__ = [
"AverageHash",
"PHash",
"WaveletHash",
"MarrHildreth",
"BlockMean",
"ColorMoment",
"DHash",
"FramewiseHasher",
"TMKL1",
"TMKL2",
"PHashU8",
"PHashF",
"SimpleSceneDetection",
]
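# Illustrative usage (added): hashers are instantiated directly and expose a
# compute() method, which is assumed here to accept a file path and return a
# base64 string by default; the image path is a made-up placeholder.
# >>> from perception.hashers import PHash
# >>> hasher = PHash()
# >>> hasher.compute("example.jpg")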
# ---- GoogleCloudPlatform/professional-services :: /examples/dataproc-lifecycle-via-composer/include/generate_dag_files.py (Apache-2.0) ----
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import fileinput
config_filepath = '../include/dag_config/'
dag_template_filename = '../include/dag_template.py'
for filename in os.listdir(config_filepath):
with open(config_filepath + filename) as f:
config = json.load(f)
new_filename = '../dags/' + config['DagId'] + '.py'
shutil.copyfile(dag_template_filename, new_filename)
with fileinput.input(new_filename, inplace=True) as file:
for line in file:
new_line = line.replace('\'dag_id_to_replace\'', "'" + config['DagId'] + "'") \
.replace('\'cluster_name_to_replace\'', "'" + config['ClusterName'] + "'") \
.replace('\'spark_job_name_to_replace\'', "'" + config['SparkJob'] + "'") \
.replace('\'year_to_replace\'', config['StartYear'] ) \
.replace('\'month_to_replace\'', config['StartMonth'] ) \
.replace('\'day_to_replace\'', config['StartDay'] ) \
.replace('\'machine_type_to_replace\'', "'" + config['ClusterMachineType'] + "'") \
.replace('\'idle_delete_ttl_to_replace\'', config['ClusterIdleDeleteTtl']) \
.replace('\'catchup_to_replace\'', config['Catchup']) \
.replace('\'schedule_to_replace\'', config['Schedule'])
print(new_line, end='')
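# Illustrative sketch (added): each JSON file under dag_config/ must supply
# every key referenced in the replace() chain above. All values below are
# made-up placeholders, and they must be strings because they are spliced
# into the template as raw text:
# {
#     "DagId": "example_dag",
#     "ClusterName": "example-cluster",
#     "SparkJob": "example_spark_job",
#     "StartYear": "2022", "StartMonth": "1", "StartDay": "1",
#     "ClusterMachineType": "n1-standard-4",
#     "ClusterIdleDeleteTtl": "3600",
#     "Catchup": "False",
#     "Schedule": "'@daily'"
# }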
# ---- seanie12/neural-question-generation :: /model.py (MIT) ----
import config
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from torch_scatter import scatter_max
from data_utils import UNK_ID
INF = 1e12
class Encoder(nn.Module):
def __init__(self, embeddings, vocab_size, embedding_size, hidden_size, num_layers, dropout):
super(Encoder, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_size)
self.tag_embedding = nn.Embedding(3, 3)
lstm_input_size = embedding_size + 3
if embeddings is not None:
self.embedding = nn.Embedding(vocab_size, embedding_size). \
from_pretrained(embeddings, freeze=config.freeze_embedding)
self.num_layers = num_layers
if self.num_layers == 1:
dropout = 0.0
self.lstm = nn.LSTM(lstm_input_size, hidden_size, dropout=dropout,
num_layers=num_layers, bidirectional=True, batch_first=True)
self.linear_trans = nn.Linear(2 * hidden_size, 2 * hidden_size)
self.update_layer = nn.Linear(
4 * hidden_size, 2 * hidden_size, bias=False)
self.gate = nn.Linear(4 * hidden_size, 2 * hidden_size, bias=False)
def gated_self_attn(self, queries, memories, mask):
# queries: [b,t,d]
# memories: [b,t,d]
# mask: [b,t]
energies = torch.matmul(queries, memories.transpose(1, 2)) # [b, t, t]
mask = mask.unsqueeze(1)
energies = energies.masked_fill(mask == 0, value=-1e12)
scores = F.softmax(energies, dim=2)
context = torch.matmul(scores, queries)
inputs = torch.cat([queries, context], dim=2)
f_t = torch.tanh(self.update_layer(inputs))
g_t = torch.sigmoid(self.gate(inputs))
updated_output = g_t * f_t + (1 - g_t) * queries
return updated_output
def forward(self, src_seq, src_len, tag_seq):
total_length = src_seq.size(1)
embedded = self.embedding(src_seq)
tag_embedded = self.tag_embedding(tag_seq)
embedded = torch.cat((embedded, tag_embedded), dim=2)
packed = pack_padded_sequence(embedded,
src_len,
batch_first=True,
enforce_sorted=False)
outputs, states = self.lstm(packed) # states : tuple of [4, b, d]
outputs, _ = pad_packed_sequence(outputs,
batch_first=True,
total_length=total_length) # [b, t, d]
h, c = states
# self attention
mask = torch.sign(src_seq)
memories = self.linear_trans(outputs)
outputs = self.gated_self_attn(outputs, memories, mask)
_, b, d = h.size()
h = h.view(2, 2, b, d) # [n_layers, bi, b, d]
h = torch.cat((h[:, 0, :, :], h[:, 1, :, :]), dim=-1)
c = c.view(2, 2, b, d)
c = torch.cat((c[:, 0, :, :], c[:, 1, :, :]), dim=-1)
concat_states = (h, c)
return outputs, concat_states
class Decoder(nn.Module):
def __init__(self, embeddings, vocab_size,
embedding_size, hidden_size, num_layers, dropout):
super(Decoder, self).__init__()
self.vocab_size = vocab_size
self.embedding = nn.Embedding(vocab_size, embedding_size)
if embeddings is not None:
self.embedding = nn.Embedding(vocab_size, embedding_size). \
from_pretrained(embeddings, freeze=config.freeze_embedding)
if num_layers == 1:
dropout = 0.0
self.encoder_trans = nn.Linear(hidden_size, hidden_size)
self.reduce_layer = nn.Linear(
embedding_size + hidden_size, embedding_size)
self.lstm = nn.LSTM(embedding_size, hidden_size, batch_first=True,
num_layers=num_layers, bidirectional=False, dropout=dropout)
self.concat_layer = nn.Linear(2 * hidden_size, hidden_size)
self.logit_layer = nn.Linear(hidden_size, vocab_size)
@staticmethod
def attention(query, memories, mask):
# query : [b, 1, d]
energy = torch.matmul(query, memories.transpose(1, 2)) # [b, 1, t]
energy = energy.squeeze(1).masked_fill(mask == 0, value=-1e12)
attn_dist = F.softmax(energy, dim=1).unsqueeze(dim=1) # [b, 1, t]
context_vector = torch.matmul(attn_dist, memories) # [b, 1, d]
return context_vector, energy
def get_encoder_features(self, encoder_outputs):
return self.encoder_trans(encoder_outputs)
def forward(self, trg_seq, ext_src_seq, init_states, encoder_outputs, encoder_mask):
# trg_seq : [b,t]
# init_states : [2,b,d]
# encoder_outputs : [b,t,d]
# init_states : a tuple of [2, b, d]
device = trg_seq.device
batch_size, max_len = trg_seq.size()
hidden_size = encoder_outputs.size(-1)
memories = self.get_encoder_features(encoder_outputs)
logits = []
# init decoder hidden states and context vector
prev_states = init_states
prev_context = torch.zeros((batch_size, 1, hidden_size))
prev_context = prev_context.to(device)
for i in range(max_len):
y_i = trg_seq[:, i].unsqueeze(1) # [b, 1]
embedded = self.embedding(y_i) # [b, 1, d]
lstm_inputs = self.reduce_layer(
torch.cat([embedded, prev_context], 2))
output, states = self.lstm(lstm_inputs, prev_states)
# encoder-decoder attention
context, energy = self.attention(output, memories, encoder_mask)
concat_input = torch.cat((output, context), dim=2).squeeze(dim=1)
logit_input = torch.tanh(self.concat_layer(concat_input))
logit = self.logit_layer(logit_input) # [b, |V|]
# maxout pointer network
if config.use_pointer:
num_oov = max(torch.max(ext_src_seq - self.vocab_size + 1), 0)
zeros = torch.zeros((batch_size, num_oov),
device=config.device)
extended_logit = torch.cat([logit, zeros], dim=1)
out = torch.zeros_like(extended_logit) - INF
out, _ = scatter_max(energy, ext_src_seq, out=out)
out = out.masked_fill(out == -INF, 0)
logit = extended_logit + out
logit = logit.masked_fill(logit == 0, -INF)
logits.append(logit)
# update prev state and context
prev_states = states
prev_context = context
logits = torch.stack(logits, dim=1) # [b, t, |V|]
return logits
def decode(self, y, ext_x, prev_states, prev_context, encoder_features, encoder_mask):
# forward one step lstm
# y : [b]
embedded = self.embedding(y.unsqueeze(1))
lstm_inputs = self.reduce_layer(torch.cat([embedded, prev_context], 2))
output, states = self.lstm(lstm_inputs, prev_states)
context, energy = self.attention(output,
encoder_features,
encoder_mask)
concat_input = torch.cat((output, context), 2).squeeze(1)
logit_input = torch.tanh(self.concat_layer(concat_input))
logit = self.logit_layer(logit_input) # [b, |V|]
if config.use_pointer:
batch_size = y.size(0)
num_oov = max(torch.max(ext_x - self.vocab_size + 1), 0)
zeros = torch.zeros((batch_size, num_oov), device=config.device)
extended_logit = torch.cat([logit, zeros], dim=1)
out = torch.zeros_like(extended_logit) - INF
out, _ = scatter_max(energy, ext_x, out=out)
out = out.masked_fill(out == -INF, 0)
logit = extended_logit + out
logit = logit.masked_fill(logit == -INF, 0)
# forcing UNK prob 0
logit[:, UNK_ID] = -INF
return logit, states, context
class Seq2seq(nn.Module):
def __init__(self, embedding=None):
super(Seq2seq, self).__init__()
self.encoder = Encoder(embedding,
config.vocab_size,
config.embedding_size,
config.hidden_size,
config.num_layers,
config.dropout)
self.decoder = Decoder(embedding, config.vocab_size,
config.embedding_size,
2 * config.hidden_size,
config.num_layers,
config.dropout)
def forward(self, src_seq, tag_seq, ext_src_seq, trg_seq):
enc_mask = torch.sign(src_seq)
src_len = torch.sum(enc_mask, 1)
enc_outputs, enc_states = self.encoder(src_seq, src_len, tag_seq)
sos_trg = trg_seq[:, :-1].contiguous()
logits = self.decoder(sos_trg, ext_src_seq,
enc_states, enc_outputs, enc_mask)
return logits
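# Illustrative smoke test (added): checks tensor shapes only, with made-up
# sizes. It assumes the config module defines vocab_size etc. as used above
# and that config.num_layers == 2, which the encoder's h.view(2, 2, b, d)
# hard-codes.
# >>> model = Seq2seq()
# >>> b, t = 2, 12
# >>> src = torch.randint(1, config.vocab_size, (b, t))
# >>> tag = torch.randint(0, 3, (b, t))
# >>> trg = torch.randint(1, config.vocab_size, (b, t))
# >>> logits = model(src, tag, src, trg)  # ext_src_seq == src when no OOVs
# >>> logits.shape[:2]  # one step per target token except the last
# torch.Size([2, 11])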
# ---- PacktPublishing/Python-GUI-Programming-with-Tkinter :: /Chapter08/ttk_combobox_info.py (MIT) ----
import tkinter as tk
from tkinter import ttk
from pprint import pprint
root = tk.Tk()
style = ttk.Style()
print('TTK Combobox\n')
cb = ttk.Combobox(root)
cb_stylename = cb.winfo_class()
print("Style name: ", cb_stylename)
print("Starting state:", cb.state())
cb.state(['active', 'invalid'])
print("New state:", cb.state())
cb.state(['!invalid'])
print("Newer state: ", cb.state())
cb_layout = style.layout(cb_stylename)
print("\nLayout: ")
pprint(cb_layout)
def walk_layout(layout):
for element, subelements in layout:
print("\nOptions for {}:".format(element))
pprint(style.element_options(element))
if subelements.get("children"):
walk_layout(subelements.get("children"))
walk_layout(cb_layout)
cb_map = style.map(cb_stylename)
print("\nDefault Map:")
pprint(cb_map)
style.map(cb_stylename,
fieldbackground=[
('!invalid', 'blue'),
('invalid', 'red')
],
font=[
('!invalid', 'Helvetica 20 normal'),
('invalid', 'Helvetica 20 bold')
])
cb_map = style.map(cb_stylename)
print("\nNew Map:")
pprint(cb_map)
print('\nAvailable Themes:')
pprint(style.theme_names())
print('\nCurrent Theme:', style.theme_use())
pprint(style.element_names())
# ---- vgteam/vg :: /vgci/blockify.py (MIT / BSL-1.0) ----
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
blockify.py: make sure input and output FDs are in blocking mode before running a command.
"""
import os
import sys
import fcntl
# Check input
if len(sys.argv) < 2:
raise RuntimeError("A program to execute is required")
# Fix up standard file descriptors (0, 1, 2) by clearing the nonblocking flag.
# See https://stackoverflow.com/a/30172682
for fd in range(3):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) & ~os.O_NONBLOCK)
# Become the first argument running with the rest of the arguments.
# Make sure to pass along the program name.
os.execvp(sys.argv[1], sys.argv[1:])
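# Illustrative usage (added): prefix any command whose inherited stdio may
# have been left non-blocking by a parent process, e.g. (the program and its
# arguments below are placeholders):
#
#     ./blockify.py some_program --with args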
# ---- facebookresearch/ParlAI :: /parlai/zoo/sensitive_topics_classifier/build.py (MIT) ----
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Pretrained Transformer-based classifier for classification of sensitive topics.
Example command: ``` parlai eval_model -mf zoo:sensitive_topics_classifier/model -t
sensitive_topics_evaluation -dt valid -bs 16 ```
"""
from parlai.core.build_data import built, download_models, get_model_dir
import os
import os.path
def download(datapath):
model_name = 'sensitive_topics_classifier'
mdir = os.path.join(get_model_dir(datapath), model_name)
version = 'v1'
if not built(mdir, version):
opt = {'datapath': datapath}
fnames = ['sensitive_topics_classifier2.tgz']
download_models(
opt,
fnames,
model_name,
version=version,
use_model_type=False,
flatten_tar=True,
)
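# Illustrative usage (added): ParlAI calls download() itself the first time
# the zoo:sensitive_topics_classifier/model path is used, so invoking it by
# hand only needs a datapath (the path below is a placeholder).
# >>> download("/tmp/parlai_data")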
# ---- pulumi/pulumi-azure-native :: /sdk/python/pulumi_azure_native/storage/blob_service_properties.py (BSD-3-Clause / Apache-2.0) ----
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['BlobServicePropertiesArgs', 'BlobServiceProperties']
@pulumi.input_type
class BlobServicePropertiesArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
automatic_snapshot_policy_enabled: Optional[pulumi.Input[bool]] = None,
blob_services_name: Optional[pulumi.Input[str]] = None,
change_feed: Optional[pulumi.Input['ChangeFeedArgs']] = None,
container_delete_retention_policy: Optional[pulumi.Input['DeleteRetentionPolicyArgs']] = None,
cors: Optional[pulumi.Input['CorsRulesArgs']] = None,
default_service_version: Optional[pulumi.Input[str]] = None,
delete_retention_policy: Optional[pulumi.Input['DeleteRetentionPolicyArgs']] = None,
is_versioning_enabled: Optional[pulumi.Input[bool]] = None,
last_access_time_tracking_policy: Optional[pulumi.Input['LastAccessTimeTrackingPolicyArgs']] = None,
restore_policy: Optional[pulumi.Input['RestorePolicyPropertiesArgs']] = None):
"""
The set of arguments for constructing a BlobServiceProperties resource.
:param pulumi.Input[str] account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input[bool] automatic_snapshot_policy_enabled: Deprecated in favor of isVersioningEnabled property.
:param pulumi.Input[str] blob_services_name: The name of the blob Service within the specified storage account. Blob Service Name must be 'default'
:param pulumi.Input['ChangeFeedArgs'] change_feed: The blob service properties for change feed events.
:param pulumi.Input['DeleteRetentionPolicyArgs'] container_delete_retention_policy: The blob service properties for container soft delete.
:param pulumi.Input['CorsRulesArgs'] cors: Specifies CORS rules for the Blob service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Blob service.
:param pulumi.Input[str] default_service_version: DefaultServiceVersion indicates the default version to use for requests to the Blob service if an incoming request’s version is not specified. Possible values include version 2008-10-27 and all more recent versions.
:param pulumi.Input['DeleteRetentionPolicyArgs'] delete_retention_policy: The blob service properties for blob soft delete.
:param pulumi.Input[bool] is_versioning_enabled: Versioning is enabled if set to true.
:param pulumi.Input['LastAccessTimeTrackingPolicyArgs'] last_access_time_tracking_policy: The blob service property to configure last access time based tracking policy.
:param pulumi.Input['RestorePolicyPropertiesArgs'] restore_policy: The blob service properties for blob restore policy.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if automatic_snapshot_policy_enabled is not None:
pulumi.set(__self__, "automatic_snapshot_policy_enabled", automatic_snapshot_policy_enabled)
if blob_services_name is not None:
pulumi.set(__self__, "blob_services_name", blob_services_name)
if change_feed is not None:
pulumi.set(__self__, "change_feed", change_feed)
if container_delete_retention_policy is not None:
pulumi.set(__self__, "container_delete_retention_policy", container_delete_retention_policy)
if cors is not None:
pulumi.set(__self__, "cors", cors)
if default_service_version is not None:
pulumi.set(__self__, "default_service_version", default_service_version)
if delete_retention_policy is not None:
pulumi.set(__self__, "delete_retention_policy", delete_retention_policy)
if is_versioning_enabled is not None:
pulumi.set(__self__, "is_versioning_enabled", is_versioning_enabled)
if last_access_time_tracking_policy is not None:
pulumi.set(__self__, "last_access_time_tracking_policy", last_access_time_tracking_policy)
if restore_policy is not None:
pulumi.set(__self__, "restore_policy", restore_policy)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group within the user's subscription. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="automaticSnapshotPolicyEnabled")
def automatic_snapshot_policy_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Deprecated in favor of isVersioningEnabled property.
"""
return pulumi.get(self, "automatic_snapshot_policy_enabled")
@automatic_snapshot_policy_enabled.setter
def automatic_snapshot_policy_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "automatic_snapshot_policy_enabled", value)
@property
@pulumi.getter(name="blobServicesName")
def blob_services_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the blob Service within the specified storage account. Blob Service Name must be 'default'
"""
return pulumi.get(self, "blob_services_name")
@blob_services_name.setter
def blob_services_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "blob_services_name", value)
@property
@pulumi.getter(name="changeFeed")
def change_feed(self) -> Optional[pulumi.Input['ChangeFeedArgs']]:
"""
The blob service properties for change feed events.
"""
return pulumi.get(self, "change_feed")
@change_feed.setter
def change_feed(self, value: Optional[pulumi.Input['ChangeFeedArgs']]):
pulumi.set(self, "change_feed", value)
@property
@pulumi.getter(name="containerDeleteRetentionPolicy")
def container_delete_retention_policy(self) -> Optional[pulumi.Input['DeleteRetentionPolicyArgs']]:
"""
The blob service properties for container soft delete.
"""
return pulumi.get(self, "container_delete_retention_policy")
@container_delete_retention_policy.setter
def container_delete_retention_policy(self, value: Optional[pulumi.Input['DeleteRetentionPolicyArgs']]):
pulumi.set(self, "container_delete_retention_policy", value)
@property
@pulumi.getter
def cors(self) -> Optional[pulumi.Input['CorsRulesArgs']]:
"""
Specifies CORS rules for the Blob service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Blob service.
"""
return pulumi.get(self, "cors")
@cors.setter
def cors(self, value: Optional[pulumi.Input['CorsRulesArgs']]):
pulumi.set(self, "cors", value)
@property
@pulumi.getter(name="defaultServiceVersion")
def default_service_version(self) -> Optional[pulumi.Input[str]]:
"""
DefaultServiceVersion indicates the default version to use for requests to the Blob service if an incoming request’s version is not specified. Possible values include version 2008-10-27 and all more recent versions.
"""
return pulumi.get(self, "default_service_version")
@default_service_version.setter
def default_service_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_service_version", value)
@property
@pulumi.getter(name="deleteRetentionPolicy")
def delete_retention_policy(self) -> Optional[pulumi.Input['DeleteRetentionPolicyArgs']]:
"""
The blob service properties for blob soft delete.
"""
return pulumi.get(self, "delete_retention_policy")
@delete_retention_policy.setter
def delete_retention_policy(self, value: Optional[pulumi.Input['DeleteRetentionPolicyArgs']]):
pulumi.set(self, "delete_retention_policy", value)
@property
@pulumi.getter(name="isVersioningEnabled")
def is_versioning_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Versioning is enabled if set to true.
"""
return pulumi.get(self, "is_versioning_enabled")
@is_versioning_enabled.setter
def is_versioning_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_versioning_enabled", value)
@property
@pulumi.getter(name="lastAccessTimeTrackingPolicy")
def last_access_time_tracking_policy(self) -> Optional[pulumi.Input['LastAccessTimeTrackingPolicyArgs']]:
"""
The blob service property to configure last access time based tracking policy.
"""
return pulumi.get(self, "last_access_time_tracking_policy")
@last_access_time_tracking_policy.setter
def last_access_time_tracking_policy(self, value: Optional[pulumi.Input['LastAccessTimeTrackingPolicyArgs']]):
pulumi.set(self, "last_access_time_tracking_policy", value)
@property
@pulumi.getter(name="restorePolicy")
def restore_policy(self) -> Optional[pulumi.Input['RestorePolicyPropertiesArgs']]:
"""
The blob service properties for blob restore policy.
"""
return pulumi.get(self, "restore_policy")
@restore_policy.setter
def restore_policy(self, value: Optional[pulumi.Input['RestorePolicyPropertiesArgs']]):
pulumi.set(self, "restore_policy", value)
class BlobServiceProperties(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
automatic_snapshot_policy_enabled: Optional[pulumi.Input[bool]] = None,
blob_services_name: Optional[pulumi.Input[str]] = None,
change_feed: Optional[pulumi.Input[pulumi.InputType['ChangeFeedArgs']]] = None,
container_delete_retention_policy: Optional[pulumi.Input[pulumi.InputType['DeleteRetentionPolicyArgs']]] = None,
cors: Optional[pulumi.Input[pulumi.InputType['CorsRulesArgs']]] = None,
default_service_version: Optional[pulumi.Input[str]] = None,
delete_retention_policy: Optional[pulumi.Input[pulumi.InputType['DeleteRetentionPolicyArgs']]] = None,
is_versioning_enabled: Optional[pulumi.Input[bool]] = None,
last_access_time_tracking_policy: Optional[pulumi.Input[pulumi.InputType['LastAccessTimeTrackingPolicyArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restore_policy: Optional[pulumi.Input[pulumi.InputType['RestorePolicyPropertiesArgs']]] = None,
__props__=None):
"""
The properties of a storage account’s Blob service.
Azure REST API version: 2022-09-01. Prior API version in Azure Native 1.x: 2021-02-01
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
:param pulumi.Input[bool] automatic_snapshot_policy_enabled: Deprecated in favor of isVersioningEnabled property.
:param pulumi.Input[str] blob_services_name: The name of the blob Service within the specified storage account. Blob Service Name must be 'default'
:param pulumi.Input[pulumi.InputType['ChangeFeedArgs']] change_feed: The blob service properties for change feed events.
:param pulumi.Input[pulumi.InputType['DeleteRetentionPolicyArgs']] container_delete_retention_policy: The blob service properties for container soft delete.
:param pulumi.Input[pulumi.InputType['CorsRulesArgs']] cors: Specifies CORS rules for the Blob service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Blob service.
:param pulumi.Input[str] default_service_version: DefaultServiceVersion indicates the default version to use for requests to the Blob service if an incoming request’s version is not specified. Possible values include version 2008-10-27 and all more recent versions.
:param pulumi.Input[pulumi.InputType['DeleteRetentionPolicyArgs']] delete_retention_policy: The blob service properties for blob soft delete.
:param pulumi.Input[bool] is_versioning_enabled: Versioning is enabled if set to true.
:param pulumi.Input[pulumi.InputType['LastAccessTimeTrackingPolicyArgs']] last_access_time_tracking_policy: The blob service property to configure last access time based tracking policy.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input[pulumi.InputType['RestorePolicyPropertiesArgs']] restore_policy: The blob service properties for blob restore policy.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BlobServicePropertiesArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The properties of a storage account’s Blob service.
Azure REST API version: 2022-09-01. Prior API version in Azure Native 1.x: 2021-02-01
:param str resource_name: The name of the resource.
:param BlobServicePropertiesArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BlobServicePropertiesArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
automatic_snapshot_policy_enabled: Optional[pulumi.Input[bool]] = None,
blob_services_name: Optional[pulumi.Input[str]] = None,
change_feed: Optional[pulumi.Input[pulumi.InputType['ChangeFeedArgs']]] = None,
container_delete_retention_policy: Optional[pulumi.Input[pulumi.InputType['DeleteRetentionPolicyArgs']]] = None,
cors: Optional[pulumi.Input[pulumi.InputType['CorsRulesArgs']]] = None,
default_service_version: Optional[pulumi.Input[str]] = None,
delete_retention_policy: Optional[pulumi.Input[pulumi.InputType['DeleteRetentionPolicyArgs']]] = None,
is_versioning_enabled: Optional[pulumi.Input[bool]] = None,
last_access_time_tracking_policy: Optional[pulumi.Input[pulumi.InputType['LastAccessTimeTrackingPolicyArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restore_policy: Optional[pulumi.Input[pulumi.InputType['RestorePolicyPropertiesArgs']]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BlobServicePropertiesArgs.__new__(BlobServicePropertiesArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["automatic_snapshot_policy_enabled"] = automatic_snapshot_policy_enabled
__props__.__dict__["blob_services_name"] = blob_services_name
__props__.__dict__["change_feed"] = change_feed
__props__.__dict__["container_delete_retention_policy"] = container_delete_retention_policy
__props__.__dict__["cors"] = cors
__props__.__dict__["default_service_version"] = default_service_version
__props__.__dict__["delete_retention_policy"] = delete_retention_policy
__props__.__dict__["is_versioning_enabled"] = is_versioning_enabled
__props__.__dict__["last_access_time_tracking_policy"] = last_access_time_tracking_policy
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["restore_policy"] = restore_policy
__props__.__dict__["name"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:storage/v20180701:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20181101:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20190401:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20190601:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20200801preview:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20210101:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20210201:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20210401:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20210601:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20210801:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20210901:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20220501:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20220901:BlobServiceProperties"), pulumi.Alias(type_="azure-native:storage/v20230101:BlobServiceProperties")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(BlobServiceProperties, __self__).__init__(
'azure-native:storage:BlobServiceProperties',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'BlobServiceProperties':
"""
Get an existing BlobServiceProperties resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = BlobServicePropertiesArgs.__new__(BlobServicePropertiesArgs)
__props__.__dict__["automatic_snapshot_policy_enabled"] = None
__props__.__dict__["change_feed"] = None
__props__.__dict__["container_delete_retention_policy"] = None
__props__.__dict__["cors"] = None
__props__.__dict__["default_service_version"] = None
__props__.__dict__["delete_retention_policy"] = None
__props__.__dict__["is_versioning_enabled"] = None
__props__.__dict__["last_access_time_tracking_policy"] = None
__props__.__dict__["name"] = None
__props__.__dict__["restore_policy"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["type"] = None
return BlobServiceProperties(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="automaticSnapshotPolicyEnabled")
def automatic_snapshot_policy_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Deprecated in favor of isVersioningEnabled property.
"""
return pulumi.get(self, "automatic_snapshot_policy_enabled")
@property
@pulumi.getter(name="changeFeed")
def change_feed(self) -> pulumi.Output[Optional['outputs.ChangeFeedResponse']]:
"""
The blob service properties for change feed events.
"""
return pulumi.get(self, "change_feed")
@property
@pulumi.getter(name="containerDeleteRetentionPolicy")
def container_delete_retention_policy(self) -> pulumi.Output[Optional['outputs.DeleteRetentionPolicyResponse']]:
"""
The blob service properties for container soft delete.
"""
return pulumi.get(self, "container_delete_retention_policy")
@property
@pulumi.getter
def cors(self) -> pulumi.Output[Optional['outputs.CorsRulesResponse']]:
"""
Specifies CORS rules for the Blob service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Blob service.
"""
return pulumi.get(self, "cors")
@property
@pulumi.getter(name="defaultServiceVersion")
def default_service_version(self) -> pulumi.Output[Optional[str]]:
"""
DefaultServiceVersion indicates the default version to use for requests to the Blob service if an incoming request’s version is not specified. Possible values include version 2008-10-27 and all more recent versions.
"""
return pulumi.get(self, "default_service_version")
@property
@pulumi.getter(name="deleteRetentionPolicy")
def delete_retention_policy(self) -> pulumi.Output[Optional['outputs.DeleteRetentionPolicyResponse']]:
"""
The blob service properties for blob soft delete.
"""
return pulumi.get(self, "delete_retention_policy")
@property
@pulumi.getter(name="isVersioningEnabled")
def is_versioning_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Versioning is enabled if set to true.
"""
return pulumi.get(self, "is_versioning_enabled")
@property
@pulumi.getter(name="lastAccessTimeTrackingPolicy")
def last_access_time_tracking_policy(self) -> pulumi.Output[Optional['outputs.LastAccessTimeTrackingPolicyResponse']]:
"""
The blob service property to configure last access time based tracking policy.
"""
return pulumi.get(self, "last_access_time_tracking_policy")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="restorePolicy")
def restore_policy(self) -> pulumi.Output[Optional['outputs.RestorePolicyPropertiesResponse']]:
"""
The blob service properties for blob restore policy.
"""
return pulumi.get(self, "restore_policy")
@property
@pulumi.getter
def sku(self) -> pulumi.Output['outputs.SkuResponse']:
"""
Sku name and tier.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
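# Illustrative usage (added): enabling blob soft delete on an existing
# account inside a Pulumi program. The resource names are placeholders;
# DeleteRetentionPolicyArgs is one of the input types imported above.
# >>> props = BlobServiceProperties(
# ...     "blobProps",
# ...     account_name="examplestorageacct",
# ...     resource_group_name="example-rg",
# ...     blob_services_name="default",
# ...     delete_retention_policy=DeleteRetentionPolicyArgs(
# ...         enabled=True,
# ...         days=7))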
# ---- firsttris/plugin.video.sendtokodi :: /lib/youtube_dl/extractor/mlb.py (MIT) ----
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
parse_duration,
parse_iso8601,
try_get,
)
class MLBBaseIE(InfoExtractor):
def _real_extract(self, url):
display_id = self._match_id(url)
video = self._download_video_data(display_id)
video_id = video['id']
title = video['title']
feed = self._get_feed(video)
formats = []
for playback in (feed.get('playbacks') or []):
playback_url = playback.get('url')
if not playback_url:
continue
name = playback.get('name')
ext = determine_ext(playback_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
playback_url, video_id, 'mp4',
'm3u8_native', m3u8_id=name, fatal=False))
else:
f = {
'format_id': name,
'url': playback_url,
}
mobj = re.search(r'_(\d+)K_(\d+)X(\d+)', name)
if mobj:
f.update({
'height': int(mobj.group(3)),
'tbr': int(mobj.group(1)),
'width': int(mobj.group(2)),
})
mobj = re.search(r'_(\d+)x(\d+)_(\d+)_(\d+)K\.mp4', playback_url)
if mobj:
f.update({
'fps': int(mobj.group(3)),
'height': int(mobj.group(2)),
'tbr': int(mobj.group(4)),
'width': int(mobj.group(1)),
})
formats.append(f)
self._sort_formats(formats)
thumbnails = []
for cut in (try_get(feed, lambda x: x['image']['cuts'], list) or []):
src = cut.get('src')
if not src:
continue
thumbnails.append({
'height': int_or_none(cut.get('height')),
'url': src,
'width': int_or_none(cut.get('width')),
})
language = (video.get('language') or 'EN').lower()
return {
'id': video_id,
'title': title,
'formats': formats,
'description': video.get('description'),
'duration': parse_duration(feed.get('duration')),
'thumbnails': thumbnails,
'timestamp': parse_iso8601(video.get(self._TIMESTAMP_KEY)),
'subtitles': self._extract_mlb_subtitles(feed, language),
}
class MLBIE(MLBBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:[\da-z_-]+\.)*mlb\.com/
(?:
(?:
(?:[^/]+/)*video/[^/]+/c-|
(?:
shared/video/embed/(?:embed|m-internal-embed)\.html|
(?:[^/]+/)+(?:play|index)\.jsp|
)\?.*?\bcontent_id=
)
(?P<id>\d+)
)
'''
_TESTS = [
{
'url': 'https://www.mlb.com/mariners/video/ackleys-spectacular-catch/c-34698933',
'md5': '632358dacfceec06bad823b83d21df2d',
'info_dict': {
'id': '34698933',
'ext': 'mp4',
'title': "Ackley's spectacular catch",
'description': 'md5:7f5a981eb4f3cbc8daf2aeffa2215bf0',
'duration': 66,
'timestamp': 1405995000,
'upload_date': '20140722',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
'url': 'https://www.mlb.com/video/stanton-prepares-for-derby/c-34496663',
'md5': 'bf2619bf9cacc0a564fc35e6aeb9219f',
'info_dict': {
'id': '34496663',
'ext': 'mp4',
'title': 'Stanton prepares for Derby',
'description': 'md5:d00ce1e5fd9c9069e9c13ab4faedfa57',
'duration': 46,
'timestamp': 1405120200,
'upload_date': '20140711',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
'url': 'https://www.mlb.com/video/cespedes-repeats-as-derby-champ/c-34578115',
'md5': '99bb9176531adc600b90880fb8be9328',
'info_dict': {
'id': '34578115',
'ext': 'mp4',
'title': 'Cespedes repeats as Derby champ',
'description': 'md5:08df253ce265d4cf6fb09f581fafad07',
'duration': 488,
'timestamp': 1405414336,
'upload_date': '20140715',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
'url': 'https://www.mlb.com/video/bautista-on-home-run-derby/c-34577915',
'md5': 'da8b57a12b060e7663ee1eebd6f330ec',
'info_dict': {
'id': '34577915',
'ext': 'mp4',
'title': 'Bautista on Home Run Derby',
'description': 'md5:b80b34031143d0986dddc64a8839f0fb',
'duration': 52,
'timestamp': 1405405122,
'upload_date': '20140715',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
'url': 'https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694',
'only_matching': True,
},
{
'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb',
'only_matching': True,
},
{
'url': 'http://mlb.mlb.com/shared/video/embed/embed.html?content_id=36599553',
'only_matching': True,
},
{
'url': 'http://mlb.mlb.com/es/video/play.jsp?content_id=36599553',
'only_matching': True,
},
{
'url': 'https://www.mlb.com/cardinals/video/piscottys-great-sliding-catch/c-51175783',
'only_matching': True,
},
{
# From http://m.mlb.com/news/article/118550098/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer
'url': 'http://mlb.mlb.com/shared/video/embed/m-internal-embed.html?content_id=75609783&property=mlb&autoplay=true&hashmode=false&siteSection=mlb/multimedia/article_118550098/article_embed&club=mlb',
'only_matching': True,
},
]
_TIMESTAMP_KEY = 'date'
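
    # In the legacy details JSON the video object serves as its own feed.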
@staticmethod
def _get_feed(video):
return video
@staticmethod
def _extract_mlb_subtitles(feed, language):
subtitles = {}
for keyword in (feed.get('keywordsAll') or []):
keyword_type = keyword.get('type')
if keyword_type and keyword_type.startswith('closed_captions_location_'):
cc_location = keyword.get('value')
if cc_location:
subtitles.setdefault(language, []).append({
'url': cc_location,
})
return subtitles
def _download_video_data(self, display_id):
return self._download_json(
'http://content.mlb.com/mlb/item/id/v1/%s/details/web-v1.json' % display_id,
display_id)


class MLBVideoIE(MLBBaseIE):
_VALID_URL = r'https?://(?:www\.)?mlb\.com/(?:[^/]+/)*video/(?P<id>[^/?&#]+)'
_TEST = {
'url': 'https://www.mlb.com/mariners/video/ackley-s-spectacular-catch-c34698933',
'md5': '632358dacfceec06bad823b83d21df2d',
'info_dict': {
'id': 'c04a8863-f569-42e6-9f87-992393657614',
'ext': 'mp4',
'title': "Ackley's spectacular catch",
'description': 'md5:7f5a981eb4f3cbc8daf2aeffa2215bf0',
'duration': 66,
'timestamp': 1405995000,
'upload_date': '20140722',
'thumbnail': r're:^https?://.+',
},
}
_TIMESTAMP_KEY = 'timestamp'
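
    # URLs already matched by MLBIE (legacy /c-<id> pages) are left to it.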
@classmethod
def suitable(cls, url):
return False if MLBIE.suitable(url) else super(MLBVideoIE, cls).suitable(url)
@staticmethod
def _get_feed(video):
return video['feeds'][0]
@staticmethod
def _extract_mlb_subtitles(feed, language):
subtitles = {}
for cc_location in (feed.get('closedCaptions') or []):
subtitles.setdefault(language, []).append({
'url': cc_location,
            })
        return subtitles
def _download_video_data(self, display_id):
# https://www.mlb.com/data-service/en/videos/[SLUG]
return self._download_json(
'https://fastball-gateway.mlb.com/graphql',
display_id, query={
'query': '''{
mediaPlayback(ids: "%s") {
description
feeds(types: CMS) {
closedCaptions
duration
image {
cuts {
width
height
src
}
}
playbacks {
name
url
}
}
id
timestamp
title
}
}''' % display_id,
})['data']['mediaPlayback'][0]
|
59197a7b840db75cf779e2badb37ecd3702557f2
|
39d9c170b18aae1e55624002b4d3d1b9e0ae2b1e
|
/amulet_map_editor/programs/edit/api/events.py
|
ff68244de99005daa763f001b57346fe8b8a1efe
|
[] |
no_license
|
Amulet-Team/Amulet-Map-Editor
|
514fd8c46adfe7177b4241f8dc8fe1494cd25d85
|
52991d26cb11e93c5392427377489820ae762094
|
refs/heads/0.10
| 2023-08-25T08:34:59.371594
| 2023-08-12T09:55:08
| 2023-08-12T09:55:08
| 222,710,374
| 1,514
| 162
| null | 2023-09-14T13:02:18
| 2019-11-19T14:06:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
events.py
|
import wx
from wx.lib import newevent
from amulet_map_editor.api.opengl.camera import (
CameraMovedEvent,
EVT_CAMERA_MOVED,
ProjectionChangedEvent,
EVT_PROJECTION_CHANGED,
SpeedChangedEvent,
EVT_SPEED_CHANGED,
)
from amulet_map_editor.api.opengl.events import (
PreDrawEvent,
EVT_PRE_DRAW,
DrawEvent,
EVT_DRAW,
PostDrawEvent,
EVT_POST_DRAW,
)
from amulet_map_editor.api.wx.util.button_input import (
InputPressEvent,
InputHeldEvent,
InputReleaseEvent,
EVT_INPUT_PRESS,
EVT_INPUT_HELD,
EVT_INPUT_RELEASE,
)
from .selection import SelectionChangeEvent, EVT_SELECTION_CHANGE
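
# the displayed dimension changed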
DimensionChangeEvent, EVT_DIMENSION_CHANGE = newevent.NewEvent()


# the active tool changed
_ToolChangeEventType = wx.NewEventType()
EVT_TOOL_CHANGE = wx.PyEventBinder(_ToolChangeEventType)


class ToolChangeEvent(wx.PyEvent):
"""Run when the camera has moved or rotated."""
def __init__(self, tool: str, state=None):
wx.PyEvent.__init__(self, eventType=_ToolChangeEventType)
self.tool = tool
self.state = state
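
# undo/redo history, save and close events for the edit program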
UndoEvent, EVT_UNDO = newevent.NewEvent()
RedoEvent, EVT_REDO = newevent.NewEvent()
CreateUndoEvent, EVT_CREATE_UNDO = newevent.NewEvent()
SaveEvent, EVT_SAVE = newevent.NewEvent()
EditCloseEvent, EVT_EDIT_CLOSE = newevent.NewEvent()
|
05625e541b6ad1e17c77c41ea18511f935b57f9d
|
8a80f95f6b122257165cd0c1ce500b9be5d9cc4b
|
/data/dataset_blindsr.py
|
3d16ae3418b45d3550f70c43cd56ac0491fe87b6
|
[
"MIT"
] |
permissive
|
cszn/KAIR
|
841b0cd97241d5c7203cdf9496d98be6aec61145
|
047dc42c4649b8488653facaace592518da91a74
|
refs/heads/master
| 2023-06-24T21:23:19.056481
| 2023-06-10T20:04:14
| 2023-06-10T20:04:14
| 228,241,233
| 2,440
| 609
|
MIT
| 2023-08-31T08:50:15
| 2019-12-15T19:42:25
|
Python
|
UTF-8
|
Python
| false
| false
| 3,952
|
py
|
dataset_blindsr.py
|
import random
import numpy as np
import torch.utils.data as data
import utils.utils_image as util
import os
from utils import utils_blindsr as blindsr


class DatasetBlindSR(data.Dataset):
'''
# -----------------------------------------
# dataset for BSRGAN
# -----------------------------------------
'''
def __init__(self, opt):
super(DatasetBlindSR, self).__init__()
self.opt = opt
self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
self.sf = opt['scale'] if opt['scale'] else 4
self.shuffle_prob = opt['shuffle_prob'] if opt['shuffle_prob'] else 0.1
self.use_sharp = opt['use_sharp'] if opt['use_sharp'] else False
self.degradation_type = opt['degradation_type'] if opt['degradation_type'] else 'bsrgan'
self.lq_patchsize = self.opt['lq_patchsize'] if self.opt['lq_patchsize'] else 64
self.patch_size = self.opt['H_size'] if self.opt['H_size'] else self.lq_patchsize*self.sf
self.paths_H = util.get_image_paths(opt['dataroot_H'])
print(len(self.paths_H))
# for n, v in enumerate(self.paths_H):
# if 'face' in v:
# del self.paths_H[n]
# time.sleep(1)
assert self.paths_H, 'Error: H path is empty.'
def __getitem__(self, index):
L_path = None
# ------------------------------------
# get H image
# ------------------------------------
H_path = self.paths_H[index]
img_H = util.imread_uint(H_path, self.n_channels)
img_name, ext = os.path.splitext(os.path.basename(H_path))
H, W, C = img_H.shape
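        # Images smaller than the patch size are replaced with a flat patch of
        # one random colour so the crop below always succeeds.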
if H < self.patch_size or W < self.patch_size:
img_H = np.tile(np.random.randint(0, 256, size=[1, 1, self.n_channels], dtype=np.uint8), (self.patch_size, self.patch_size, 1))
# ------------------------------------
# if train, get L/H patch pair
# ------------------------------------
if self.opt['phase'] == 'train':
H, W, C = img_H.shape
rnd_h_H = random.randint(0, max(0, H - self.patch_size))
rnd_w_H = random.randint(0, max(0, W - self.patch_size))
img_H = img_H[rnd_h_H:rnd_h_H + self.patch_size, rnd_w_H:rnd_w_H + self.patch_size, :]
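            # Face crops are limited to two augmentation modes; other images
            # use all eight flip/rotate variants.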
if 'face' in img_name:
mode = random.choice([0, 4])
img_H = util.augment_img(img_H, mode=mode)
else:
mode = random.randint(0, 7)
img_H = util.augment_img(img_H, mode=mode)
img_H = util.uint2single(img_H)
if self.degradation_type == 'bsrgan':
img_L, img_H = blindsr.degradation_bsrgan(img_H, self.sf, lq_patchsize=self.lq_patchsize, isp_model=None)
elif self.degradation_type == 'bsrgan_plus':
img_L, img_H = blindsr.degradation_bsrgan_plus(img_H, self.sf, shuffle_prob=self.shuffle_prob, use_sharp=self.use_sharp, lq_patchsize=self.lq_patchsize)
else:
img_H = util.uint2single(img_H)
if self.degradation_type == 'bsrgan':
img_L, img_H = blindsr.degradation_bsrgan(img_H, self.sf, lq_patchsize=self.lq_patchsize, isp_model=None)
elif self.degradation_type == 'bsrgan_plus':
img_L, img_H = blindsr.degradation_bsrgan_plus(img_H, self.sf, shuffle_prob=self.shuffle_prob, use_sharp=self.use_sharp, lq_patchsize=self.lq_patchsize)
# ------------------------------------
# L/H pairs, HWC to CHW, numpy to tensor
# ------------------------------------
img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)
if L_path is None:
L_path = H_path
return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}
def __len__(self):
return len(self.paths_H)
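

# A minimal usage sketch (hypothetical option values; 'dataroot_H' must point
# at a folder of high-resolution training images):
#   opt = {'n_channels': 3, 'scale': 4, 'shuffle_prob': 0.1, 'use_sharp': False,
#          'degradation_type': 'bsrgan', 'lq_patchsize': 64, 'H_size': 256,
#          'dataroot_H': '/path/to/HR', 'phase': 'train'}
#   loader = data.DataLoader(DatasetBlindSR(opt), batch_size=16, shuffle=True)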
|
dc5b6da4e56a278aaff5306e40080edcfa1316cb
|
1679c239b02b7412ed3a62f01530a7519563f193
|
/experiments/lending.py
|
4c08fcd44cbc1b4f4eb3fb45ed7929a8f661fb7a
|
[
"Apache-2.0"
] |
permissive
|
google/ml-fairness-gym
|
8b8b4d223c3be91aeab74988ce1a42f996fe633a
|
38eaf4514062892e0c3ce5d7cff4b4c1a7e49242
|
refs/heads/master
| 2023-08-31T12:24:32.720046
| 2022-02-04T16:10:44
| 2022-02-04T16:57:55
| 200,863,357
| 310
| 82
|
Apache-2.0
| 2023-03-08T12:27:55
| 2019-08-06T14:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 5,635
|
py
|
lending.py
|
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Code to run lending experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import attr
import core
import params
import rewards
import run_util
from agents import classifier_agents
from agents import oracle_lending_agent
from agents import threshold_policies
from environments import lending
from environments import lending_params
from metrics import error_metrics
from metrics import lending_metrics
from metrics import value_tracking_metrics
MAXIMIZE_REWARD = threshold_policies.ThresholdPolicy.MAXIMIZE_REWARD
EQUALIZE_OPPORTUNITY = threshold_policies.ThresholdPolicy.EQUALIZE_OPPORTUNITY


@attr.s
class Experiment(core.Params):
"""Specifies the parameters of an experiment to run."""
######################
# Environment params #
######################
group_0_prob = attr.ib(default=0.5)
bank_starting_cash = attr.ib(default=100)
interest_rate = attr.ib(default=1.0)
cluster_shift_increment = attr.ib(default=0.01)
cluster_probabilities = attr.ib(
default=lending_params.DELAYED_IMPACT_CLUSTER_PROBS)
################
# Agent params #
################
# Policy the agent uses when setting thresholds.
threshold_policy = attr.ib(default=MAXIMIZE_REWARD)
# Number of steps before applying the threshold policy.
burnin = attr.ib(default=50)
##############
# Run params #
##############
seed = attr.ib(default=27) # Random seed.
num_steps = attr.ib(default=10000) # Number of steps in the experiment.
return_json = attr.ib(default=True) # Return the results as a json string.
include_cumulative_loans = attr.ib(default=False)
def scenario_builder(self):
"""Returns an agent and environment pair."""
env_params = lending_params.DelayedImpactParams(
applicant_distribution=lending_params.two_group_credit_clusters(
cluster_probabilities=self.cluster_probabilities,
group_likelihoods=[self.group_0_prob, 1 - self.group_0_prob]),
bank_starting_cash=self.bank_starting_cash,
interest_rate=self.interest_rate,
cluster_shift_increment=self.cluster_shift_increment,
)
env = lending.DelayedImpactEnv(env_params)
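    # Cost matrix: a repaid loan earns the interest rate (tp), a default loses
    # the principal (fp=-1), and rejections cost nothing.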
agent_params = classifier_agents.ScoringAgentParams(
feature_keys=['applicant_features'],
group_key='group',
default_action_fn=(lambda: 1),
burnin=self.burnin,
convert_one_hot_to_integer=True,
threshold_policy=self.threshold_policy,
skip_retraining_fn=lambda action, observation: action == 0,
cost_matrix=params.CostMatrix(
fn=0, fp=-1, tp=env_params.interest_rate, tn=0))
agent = oracle_lending_agent.OracleThresholdAgent(
action_space=env.action_space,
reward_fn=rewards.BinarizedScalarDeltaReward(
'bank_cash', baseline=env.initial_params.bank_starting_cash),
observation_space=env.observation_space,
params=agent_params,
env=env)
agent.seed(100)
return env, agent
def run(self):
"""Run a lending experiment.
Returns:
A json encoding of the experiment result.
"""
env, agent = self.scenario_builder()
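    # Credit distributions are snapshotted at the first and last step; recall
    # and precision are stratified by group id.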
metrics = {
'initial_credit_distribution':
lending_metrics.CreditDistribution(env, step=0),
'final_credit_distributions':
lending_metrics.CreditDistribution(env, step=-1),
'recall':
error_metrics.RecallMetric(
env,
prediction_fn=lambda x: x.action,
ground_truth_fn=lambda x: not x.state.will_default,
stratify_fn=lambda x: str(x.state.group_id)),
'precision':
error_metrics.PrecisionMetric(
env,
prediction_fn=lambda x: x.action,
ground_truth_fn=lambda x: not x.state.will_default,
stratify_fn=lambda x: str(x.state.group_id)),
'profit rate':
value_tracking_metrics.ValueChange(env, state_var='bank_cash'),
}
if self.include_cumulative_loans:
metrics['cumulative_loans'] = lending_metrics.CumulativeLoans(env)
metrics['cumulative_recall'] = lending_metrics.CumulativeRecall(env)
metric_results = run_util.run_simulation(env, agent, metrics,
self.num_steps, self.seed)
report = {
'environment': {
'name': env.__class__.__name__,
'params': env.initial_params,
'history': env.history,
},
'agent': {
'name': agent.__class__.__name__,
'params': agent.params,
'debug_string': agent.debug_string(),
'threshold_history': agent.group_specific_threshold_history,
'tpr_targets': agent.target_recall_history,
},
'experiment_params': self,
'metric_results': metric_results,
}
if self.return_json:
return core.to_json(report, indent=4)
return report
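

# A minimal driver sketch (hypothetical; every constructor argument is optional
# and defaults to the values declared above):
#   result = Experiment(num_steps=1000, threshold_policy=EQUALIZE_OPPORTUNITY).run()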
|
476c98df14752b29cbf3bab01c8af0b604a6ea36
|
32ef8d7b826185e15fa28fe198f35fe1ebb2e0bc
|
/sendgrid/helpers/inbound/config.py
|
06ca683cb60b650599f0eb14a6afd97d52f465ae
|
[
"MIT"
] |
permissive
|
sendgrid/sendgrid-python
|
2165eaa7c0b02bb69143f049252e1303c5752a2c
|
2fe145956a1ee50355f5da8deab401e1e118c736
|
refs/heads/main
| 2023-08-30T18:42:42.884191
| 2023-04-17T08:48:43
| 2023-04-17T08:48:43
| 3,546,794
| 1,470
| 826
|
MIT
| 2023-08-22T23:45:51
| 2012-02-25T19:10:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,133
|
py
|
config.py
|
"""Set up credentials (.env) and application variables (config.yml)"""
import os
import yaml


class Config(object):
"""All configuration for this app is loaded here"""
def __init__(self, **opts):
if os.environ.get('ENV') != 'prod': # We are not in Heroku
self.init_environment()
"""Allow variables assigned in config.yml available the following variables
via properties"""
self.path = opts.get(
'path', os.path.abspath(os.path.dirname(__file__))
)
with open('{0}/config.yml'.format(self.path)) as stream:
config = yaml.load(stream, Loader=yaml.FullLoader)
self._debug_mode = config['debug_mode']
self._endpoint = config['endpoint']
self._host = config['host']
self._keys = config['keys']
self._port = config['port']
@staticmethod
def init_environment():
"""Allow variables assigned in .env available using
os.environ.get('VAR_NAME')"""
base_path = os.path.abspath(os.path.dirname(__file__))
env_path = '{0}/.env'.format(base_path)
if os.path.exists(env_path):
with open(env_path) as f:
lines = f.readlines()
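                # Only lines of the exact KEY=VALUE form are honoured; values
                # containing '=' fail the len(var) == 2 check and are skipped.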
for line in lines:
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
@property
def debug_mode(self):
"""Flask debug mode - set to False in production."""
return self._debug_mode
@property
def endpoint(self):
"""Endpoint to receive Inbound Parse POSTs."""
return self._endpoint
@property
def host(self):
"""URL that the sender will POST to."""
return self._host
@property
def keys(self):
"""Incoming Parse fields to parse. For reference, see
https://sendgrid.com/docs/Classroom/Basics/Inbound_Parse_Webhook/setting_up_the_inbound_parse_webhook.html
"""
return self._keys
@property
def port(self):
"""Port to listen on."""
return self._port
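

# A typical wiring sketch (hypothetical Flask app):
#   config = Config()
#   app.run(port=config.port, debug=config.debug_mode)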
|