| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | filename | content |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce86505457c7de064a81101b9bf8d69f3f48849a
|
9da4adae4c389e84097a0da9bfce40f9132eef96
|
/pygame_menu/controls.py
|
e18514bb98df1b0deaa8941362ae642418446092
|
[
"MIT"
] |
permissive
|
ppizarror/pygame-menu
|
f8fd2ff3acefad25b07e19499a2dfebd50507403
|
bcfaccbb11d4a6ecba588eec2851932dc46c2337
|
refs/heads/master
| 2023-07-07T10:38:09.651797
| 2023-06-28T18:00:25
| 2023-06-28T18:00:25
| 89,940,842
| 570
| 207
|
NOASSERTION
| 2023-08-19T19:17:59
| 2017-05-01T16:26:50
|
Python
|
UTF-8
|
Python
| false
| false
| 9,166
|
py
|
controls.py
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
CONTROLS
Default controls of Menu object and key definition.
"""
__all__ = [
# Joy pad
'JOY_AXIS_X',
'JOY_AXIS_Y',
'JOY_BUTTON_BACK',
'JOY_BUTTON_SELECT',
'JOY_DEADZONE',
'JOY_DELAY',
'JOY_DOWN',
'JOY_LEFT',
'JOY_REPEAT',
'JOY_RIGHT',
'JOY_UP',
# Keyboard events
'KEY_APPLY',
'KEY_BACK',
'KEY_CLOSE_MENU',
'KEY_LEFT',
'KEY_MOVE_DOWN',
'KEY_MOVE_UP',
'KEY_RIGHT',
# Controller object
'Controller'
]
# Imports
# noinspection PyUnresolvedReferences
import pygame_menu
import pygame.locals as _locals
from pygame.event import Event as EventType
from typing import Union
WidgetType = Union['pygame_menu.Menu', 'pygame_menu.widgets.Widget']
# Joy pad
JOY_AXIS_X = 0
JOY_AXIS_Y = 1
JOY_BUTTON_BACK = 1
JOY_BUTTON_SELECT = 0
JOY_DEADZONE = 0.5
JOY_DELAY = 300 # ms
JOY_DOWN = (0, -1)
JOY_LEFT = (-1, 0)
JOY_REPEAT = 100 # ms
JOY_RIGHT = (1, 0)
JOY_UP = (0, 1)
# Keyboard events
KEY_APPLY = _locals.K_RETURN
KEY_BACK = _locals.K_BACKSPACE
KEY_CLOSE_MENU = _locals.K_ESCAPE
KEY_LEFT = _locals.K_LEFT
KEY_MOVE_DOWN = _locals.K_UP
KEY_MOVE_UP = _locals.K_DOWN  # Note: up/down keys are intentionally "inverted"
KEY_RIGHT = _locals.K_RIGHT
KEY_TAB = _locals.K_TAB
# noinspection PyUnusedLocal
class Controller(object):
"""
Controller class. Accepts any object and provides functions to handle each
event.
"""
joy_delay: int
joy_repeat: int
def __init__(self) -> None:
self.joy_delay = JOY_DELAY
self.joy_repeat = JOY_REPEAT
@staticmethod
def apply(event: EventType, widget: WidgetType) -> bool:
"""
Accepts apply key. Requires ``pygame.KEYDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.key == KEY_APPLY
@staticmethod
def back(event: EventType, widget: WidgetType) -> bool:
"""
Accepts back key. Requires ``pygame.KEYDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.key == KEY_BACK
@staticmethod
def close_menu(event: EventType, widget: WidgetType) -> bool:
"""
Accepts close menu key. Requires ``pygame.KEYDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.key == KEY_CLOSE_MENU
@staticmethod
def delete(event: EventType, widget: WidgetType) -> bool:
"""
Accepts delete key. Requires ``pygame.KEYDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.key == _locals.K_DELETE
@staticmethod
def end(event: EventType, widget: WidgetType) -> bool:
"""
Accepts end key. Requires ``pygame.KEYDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.key == _locals.K_END
@staticmethod
def escape(event: EventType, widget: WidgetType) -> bool:
"""
Accepts escape key. Requires ``pygame.KEYDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.key == _locals.K_ESCAPE
@staticmethod
def home(event: EventType, widget: WidgetType) -> bool:
"""
Accepts home key. Requires ``pygame.KEYDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.key == _locals.K_HOME
@staticmethod
def joy_axis_x_left(event: EventType, widget: WidgetType) -> bool:
"""
Accepts joy movement on x-axis (left direction). Requires ``pygame.JOYAXISMOTION``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.axis == JOY_AXIS_X and event.value < -JOY_DEADZONE
@staticmethod
def joy_axis_x_right(event: EventType, widget: WidgetType) -> bool:
"""
Accepts joy movement on x-axis (right direction). Requires ``pygame.JOYAXISMOTION``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.axis == JOY_AXIS_X and event.value > JOY_DEADZONE
@staticmethod
def joy_axis_y_down(event: EventType, widget: WidgetType) -> bool:
"""
Accepts joy movement on y-axis (down direction). Requires ``pygame.JOYAXISMOTION``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.axis == JOY_AXIS_Y and event.value > JOY_DEADZONE
@staticmethod
def joy_axis_y_up(event: EventType, widget: WidgetType) -> bool:
"""
Accepts joy movement on y-axis (up direction). Requires ``pygame.JOYAXISMOTION``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.axis == JOY_AXIS_Y and event.value < -JOY_DEADZONE
@staticmethod
def joy_back(event: EventType, widget: WidgetType) -> bool:
"""
Accepts joy back button. Requires ``pygame.JOYBUTTONDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.button == JOY_BUTTON_BACK
@staticmethod
def joy_down(event: EventType, widget: WidgetType) -> bool:
"""
Accepts joy movement to down direction. Requires ``pygame.JOYHATMOTION``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.value == JOY_DOWN
@staticmethod
def joy_left(event: EventType, widget: WidgetType) -> bool:
"""
Accepts joy movement to left direction. Requires ``pygame.JOYHATMOTION``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.value == JOY_LEFT
@staticmethod
def joy_right(event: EventType, widget: WidgetType) -> bool:
"""
Accepts joy movement to right direction. Requires ``pygame.JOYHATMOTION``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.value == JOY_RIGHT
@staticmethod
def joy_select(event: EventType, widget: WidgetType) -> bool:
"""
Accepts joy select button. Also used for apply(). Requires ``pygame.JOYBUTTONDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.button == JOY_BUTTON_SELECT
@staticmethod
def joy_up(event: EventType, widget: WidgetType) -> bool:
"""
Accepts joy movement to up direction. Requires ``pygame.JOYHATMOTION``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.value == JOY_UP
@staticmethod
def left(event: EventType, widget: WidgetType) -> bool:
"""
Accepts left key. Requires ``pygame.KEYDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.key == KEY_LEFT
@staticmethod
def move_down(event: EventType, widget: WidgetType) -> bool:
"""
Accepts move down key. Requires ``pygame.KEYDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.key == KEY_MOVE_DOWN
@staticmethod
def move_up(event: EventType, widget: WidgetType) -> bool:
"""
Accepts move up key. Requires ``pygame.KEYDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.key == KEY_MOVE_UP
@staticmethod
def right(event: EventType, widget: WidgetType) -> bool:
"""
Accepts right key. Requires ``pygame.KEYDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.key == KEY_RIGHT
@staticmethod
def tab(event: EventType, widget: WidgetType) -> bool:
"""
Accepts tab key. Requires ``pygame.KEYDOWN``.
:param event: Event
:param widget: Widget that accepts the event
:return: True if event matches
"""
return event.key == KEY_TAB
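# Hedged usage sketch (not part of the upstream file): a custom Controller
# subclass that also accepts the space bar as the "apply" key, keeping the
# (event, widget) signature used above. How the controller is attached to a
# Menu or Widget is left out, as that belongs to the wider pygame-menu API.
import pygame


class SpaceApplyController(Controller):

    @staticmethod
    def apply(event: EventType, widget: WidgetType) -> bool:
        return event.key in (KEY_APPLY, pygame.K_SPACE)


# Quick self-check against a synthetic KEYDOWN event
_space_event = pygame.event.Event(pygame.KEYDOWN, key=pygame.K_SPACE)
assert SpaceApplyController.apply(_space_event, None)  # widget is unused by apply()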
|
0bc4e85e7b350872c4a1554d004ebcc70299ce3c
|
5988ce18871f856332b5be0f0e44f1217494d3f7
|
/tests/messages_data/mime_emails/raw_email11.py
|
b1111b42cf3f82c82cd909b7babe8e09817fb747
|
[
"Apache-2.0"
] |
permissive
|
ikvk/imap_tools
|
5c17efc61e6ca4ab5b978cc97b30a9c24fd6d98d
|
5c74f76301bccc5cdd529d23bceaaba43190b973
|
refs/heads/master
| 2023-08-03T18:22:22.805958
| 2023-08-01T05:51:55
| 2023-08-01T05:51:55
| 74,050,445
| 571
| 87
|
Apache-2.0
| 2023-08-01T05:48:34
| 2016-11-17T17:23:11
|
Python
|
UTF-8
|
Python
| false
| false
| 991
|
py
|
raw_email11.py
|
import datetime
from imap_tools import EmailAddress
DATA = dict(
subject='worse when you use them.',
from_='xxxxx@xxxxx',
to=('',),
cc=(),
bcc=(),
reply_to=(),
date=datetime.datetime(2005, 4, 27, 14, 15, 31, tzinfo=datetime.timezone(datetime.timedelta(-1, 61200))),
date_str='Wed, 27 Apr 2005 14:15:31 -0700',
text='\r\nXXXXX Xxxxx\r\n',
html='',
headers={'mime-version': ('1.0 (Apple Message framework v619.2)',), 'to': ('"xxxxx@xxxxx" <matmail>',), 'message-id': ('<416eaebec6d333ec6939eaf8a7d80724@xxxxx>',), 'content-type': ('multipart/alternative;\r\n boundary=Apple-Mail-5-1037861608',), 'from': ('"xxxxx@xxxxx" <xxxxx@xxxxx>',), 'subject': ('worse when you use them.',), 'date': ('Wed, 27 Apr 2005 14:15:31 -0700',)},
attachments=[],
from_values=EmailAddress(name='xxxxx@xxxxx', email='xxxxx@xxxxx'),
to_values=(EmailAddress(name='xxxxx@xxxxx', email=''),),
cc_values=(),
bcc_values=(),
reply_to_values=(),
)
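# Hedged usage sketch (not part of the upstream test data): the DATA dict above
# mirrors the attributes imap_tools exposes on a fetched message. The host,
# credentials and folder below are placeholders, not real values.
from imap_tools import MailBox

with MailBox("imap.example.com").login("user@example.com", "password", "INBOX") as mailbox:
    for msg in mailbox.fetch(limit=1):
        print(msg.subject, msg.from_, msg.date)
        print(msg.text or msg.html)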
|
41686e3858b6dcb3f4b0e658c85e1b96df8d7060
|
c531778b6b568e5924fcf438dce274067b6e1d31
|
/resources/lib/services/playback/am_section_skipping.py
|
bc649d39f4b5c39872a0ea1df9964949a6046e0f
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
CastagnaIT/plugin.video.netflix
|
a5180fbbaea244a490f750a2dd417b4e7303321a
|
ece10d24449faaccd7d65a4093c6b5679ee0b383
|
refs/heads/master
| 2023-07-01T23:32:20.442923
| 2023-06-27T06:42:18
| 2023-06-27T06:42:18
| 164,314,803
| 2,019
| 456
|
MIT
| 2023-09-13T13:34:06
| 2019-01-06T14:27:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,009
|
py
|
am_section_skipping.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Skipping of video sections (recap, intro)
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
import xbmc
import resources.lib.common as common
import resources.lib.kodi.ui as ui
from resources.lib.globals import G
from resources.lib.utils.logging import LOG
from .action_manager import ActionManager
from .markers import SKIPPABLE_SECTIONS, get_timeline_markers
class AMSectionSkipper(ActionManager):
"""
Checks if a skippable section has been reached and takes appropriate action
"""
SETTING_ID = 'SectionSkipper_enabled'
def __init__(self):
super().__init__()
self.markers = {}
self.auto_skip = False
self.pause_on_skip = False
def __str__(self):
return f'enabled={self.enabled}, markers={self.markers}, auto_skip={self.auto_skip}, pause_on_skip={self.pause_on_skip}'
def initialize(self, data):
self.markers = get_timeline_markers(data['metadata'][0])
self.auto_skip = G.ADDON.getSettingBool('auto_skip_credits')
self.pause_on_skip = G.ADDON.getSettingBool('pause_on_skip')
def on_tick(self, player_state):
for section in SKIPPABLE_SECTIONS:
self._check_section(section, player_state['elapsed_seconds'])
def _check_section(self, section, elapsed):
if self.markers.get(section) and self.markers[section]['start'] <= elapsed <= self.markers[section]['end']:
self._skip_section(section)
del self.markers[section]
def _skip_section(self, section):
LOG.debug('Entered section {}', section)
if self.auto_skip:
self._auto_skip(section)
else:
self._ask_to_skip(section)
def _auto_skip(self, section):
LOG.info('Auto-skipping {}', section)
player = xbmc.Player()
ui.show_notification(
common.get_local_string(SKIPPABLE_SECTIONS[section]))
if self.pause_on_skip:
player.pause()
xbmc.sleep(1000) # give kodi the chance to execute
player.seekTime(self.markers[section]['end'])
xbmc.sleep(1000) # give kodi the chance to execute
player.pause() # unpause playback at seek position
else:
player.seekTime(self.markers[section]['end'])
def _ask_to_skip(self, section):
LOG.debug('Asking to skip {}', section)
dialog_duration = (self.markers[section]['end'] -
self.markers[section]['start'])
ui.show_skip_dialog(dialog_duration,
seek_time=self.markers[section]['end'],
label=common.get_local_string(SKIPPABLE_SECTIONS[section]))
def on_playback_stopped(self, player_state):
# Close any dialog remaining open
xbmc.executebuiltin('Dialog.Close(all,true)')
|
1b1ffb93520911c63b9eb04f996811f3e37c1c54
|
6f36df6219f8e50374068bb4b3e1a5387c7a2f34
|
/fipy/viewers/matplotlibViewer/matplotlib1DViewer.py
|
3ac220b9bee56221a8017eb9cac34a154949e114
|
[
"NIST-PD"
] |
permissive
|
usnistgov/fipy
|
0a3db715fea452ae710eea3999d9cd42dfe76fe7
|
fdc17193bc293da7511be9021e6d4766757e1966
|
refs/heads/master
| 2023-08-31T21:59:36.611448
| 2023-06-27T16:28:58
| 2023-06-27T16:28:58
| 23,316,495
| 444
| 171
|
NOASSERTION
| 2023-09-06T19:21:19
| 2014-08-25T14:27:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,425
|
py
|
matplotlib1DViewer.py
|
from __future__ import unicode_literals
from builtins import zip
__docformat__ = 'restructuredtext'
from fipy.viewers.matplotlibViewer.matplotlibViewer import AbstractMatplotlibViewer
__all__ = ["Matplotlib1DViewer"]
from future.utils import text_to_native_str
__all__ = [text_to_native_str(n) for n in __all__]
class Matplotlib1DViewer(AbstractMatplotlibViewer):
"""
Displays a y vs. x plot of one or more 1D `CellVariable` objects using
Matplotlib_.
.. _Matplotlib: http://matplotlib.sourceforge.net/
"""
def __init__(self, vars, title=None, xlog=False, ylog=False, limits={}, legend='upper left', axes=None, **kwlimits):
"""
Parameters
----------
vars : ~fipy.variables.cellVariable.CellVariable or list
`CellVariable` objects to plot
title : str, optional
displayed at the top of the `Viewer` window
xlog : bool
log scaling of x axis if `True`
ylog : bool
log scaling of y axis if `True`
limits : dict
a (deprecated) alternative to limit keyword arguments
xmin, xmax, datamin, datamax : float, optional
displayed range of data. Any limit set to
a (default) value of `None` will autoscale.
(*ymin* and *ymax* are synonyms for *datamin* and *datamax*).
legend : str
place a legend at the specified position, if not `None`
axes : ~matplotlib.axes.Axes
if not `None`, `vars` will be plotted into this
:ref:`Matplotlib` :class:`~matplotlib.axes.Axes` object
"""
kwlimits.update(limits)
AbstractMatplotlibViewer.__init__(self, vars=vars, title=title, axes=axes, **kwlimits)
if xlog and ylog:
self._lines = [self.axes.loglog(*datum) for datum in self._data]
elif xlog:
self._lines = [self.axes.semilogx(*datum) for datum in self._data]
elif ylog:
self._lines = [self.axes.semilogy(*datum) for datum in self._data]
else:
self._lines = [self.axes.plot(*datum) for datum in self._data]
if legend is not None:
self.axes.legend([var.name for var in self.vars], loc=legend)
self.axes.set_xlim(xmin=self._getLimit('xmin'),
xmax=self._getLimit('xmax'))
ymin = self._getLimit(('datamin', 'ymin'))
ymax = self._getLimit(('datamax', 'ymax'))
self.axes.set_ylim(ymin=ymin, ymax=ymax)
if ymax is None or ymin is None:
import warnings
warnings.warn("Matplotlib1DViewer efficiency is improved by setting the 'datamax' and 'datamin' keys", UserWarning, stacklevel=2)
@property
def lines(self):
"""The collection of :ref:`Matplotlib` :class:`~matplotlib.lines.Line2D`
objects representing the plotted data."""
return self._lines
@property
def log(self):
"""Whether data has logarithmic scaling"""
return self.axes.get_yscale() == 'log'
@log.setter
def log(self, value):
ax = self.axes.get_yaxis()
if value:
ax = self.axes.set_yscale('log')
else:
ax = self.axes.set_yscale('linear')
@property
def _data(self):
from fipy.tools.numerix import array
return [[array(var.mesh.cellCenters[0]), array(var)] for var in self.vars]
def _getSuitableVars(self, vars):
vars = [var for var in AbstractMatplotlibViewer._getSuitableVars(self, vars) if var.mesh.dim == 1]
if len(vars) > 1:
vars = [var for var in vars if var.mesh is vars[0].mesh]
if len(vars) == 0:
from fipy.viewers import MeshDimensionError
raise MeshDimensionError("Can only plot 1D data")
return vars
def _plot(self):
ymin, ymax = self._autoscale(vars=self.vars,
datamin=self._getLimit(('datamin', 'ymin')),
datamax=self._getLimit(('datamax', 'ymax')))
self.axes.set_ylim(ymin=ymin, ymax=ymax)
for line, datum in zip(self.lines, self._data):
line[0].set_xdata(datum[0])
line[0].set_ydata(datum[1])
@classmethod
def _doctest_body(cls):
return cls._test1D()
if __name__ == "__main__":
import fipy.tests.doctestPlus
fipy.tests.doctestPlus.execButNoTest()
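# Hedged usage sketch (not part of the upstream module): plotting a single 1D
# CellVariable with Matplotlib1DViewer, passing the datamin/datamax limits the
# constructor docstring recommends. Mesh size and values are arbitrary.
from fipy import CellVariable, Grid1D

mesh = Grid1D(nx=100, dx=0.01)
phi = CellVariable(mesh=mesh, name="phi", value=0.5)
viewer = Matplotlib1DViewer(vars=(phi,), title="phi vs x", datamin=0., datamax=1.)
viewer.plot()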
|
52607d0225b694480190d528c2a3c99555e50a6e
|
c40cde6f5959e9af69a95befec4a238fad7f46f8
|
/src/mici/states.py
|
8f6556ae908ddc4bceaef6e7dbc0250c34a4f918
|
[
"MIT"
] |
permissive
|
matt-graham/mici
|
ab98f5313d6f6c24916aeb8df922dd6ffebf50c0
|
7f5c73e65b858e73ca813d59eaf110fb3f8d25c1
|
refs/heads/main
| 2023-08-21T08:00:25.546739
| 2023-08-16T14:13:29
| 2023-08-16T14:13:29
| 52,494,384
| 161
| 24
|
MIT
| 2023-08-16T14:13:30
| 2016-02-25T03:36:01
|
Python
|
UTF-8
|
Python
| false
| false
| 13,925
|
py
|
states.py
|
"""Objects for recording state of a Markov chain and caching computations."""
from __future__ import annotations
import copy
from collections import Counter
from functools import wraps
from typing import TYPE_CHECKING
from mici.errors import ReadOnlyStateError
if TYPE_CHECKING:
from collections.abc import Iterable
from typing import Any, Callable, Optional
from numpy.typing import ArrayLike
from mici.systems import System
def _cache_key_func(system: System, method: Callable) -> tuple[str, int]:
"""Construct cache key for a given system and method pair."""
if not isinstance(method, str):
method = method.__name__
return (f"{type(system).__name__}.{method}", id(system))
def cache_in_state(
*depends_on: str,
) -> Callable[
[Callable[[System, ChainState], ArrayLike]],
Callable[[System, ChainState], ArrayLike],
]:
"""Memoizing decorator for system methods.
Used to decorate `mici.systems.System` methods which compute a function of one or
more chain state variable(s), with the decorated method caching the value returned
by the method being wrapped in the `ChainState` object to prevent the need for
recomputation on future calls if the state variables the returned value depends on
have not been changed in between the calls.
Additionally for `ChainState` instances initialized with a `_call_counts` argument,
the memoized method will update a counter for the method in the `_call_counts`
attribute every time the method being decorated is called (i.e. when there isn't a
valid cached value available).
Args:
*depends_on: One or more strings corresponding to the names of any state
variables the value returned by the method depends on, e.g. `pos` or `mom`,
such that the cache in the state object is correctly cleared when the value
of any of these variables (attributes) of the state object changes.
"""
def cache_in_state_decorator(method):
@wraps(method)
def wrapper(self, state):
key = _cache_key_func(self, method)
if key not in state._cache:
for dep in depends_on:
state._dependencies[dep].add(key)
if key not in state._cache or state._cache[key] is None:
state._cache[key] = method(self, state)
if state._call_counts is not None:
state._call_counts[key] += 1
return state._cache[key]
return wrapper
return cache_in_state_decorator
def cache_in_state_with_aux(
depends_on: Iterable[str],
auxiliary_outputs: Iterable[str],
) -> Callable[
[Callable[[System, ChainState], ArrayLike]],
Callable[[System, ChainState], ArrayLike],
]:
"""Memoizing decorator for system methods with possible auxiliary outputs.
Used to decorate `System` methods which compute a function of one or more chain
state variable(s), with the decorated method caching the value or values returned by
the method being wrapped in the `ChainState` object to prevent the need for
recomputation on future calls if the state variables the returned value(s) depends
on have not been changed in between the calls.
Compared to the `cache_in_state` decorator, this variant allows for methods which
may optionally also return additional auxiliary outputs, such as intermediate result
computed while computing the primary output, which correspond to the output of
another system method decorated with the `cache_in_state` or
`cache_in_state_with_aux` decorators. If such auxiliary outputs are returned they
are also used to update cache entry for the corresponding decorated method,
potentially saving recomputation in subsequent calls to that method. A common
instance of this pattern is in derivative values computed using automatic
differentiation (AD), with the primal value being differentiated usually either
calculated alongside the derivative (in forward-mode AD) or calculated first in a
forward-pass before the derivatives are calculated in a reverse-pass (in
reverse-mode AD). By caching the value of the primal computed as part of the
derivative calculation, a subsequent call to a method corresponding to calculation
of the primal itself will retrieve the cached value and not recompute the primal,
providing the relevant state variables the primal (and derivative) depend on have
not been changed in between.
Additionally for `ChainState` instances initialized with a `_call_counts` argument,
the memoized method will update a counter for the method in the `_call_counts`
attribute every time the method being decorated is called (i.e. when there isn't a
valid cached value available).
Args:
depends_on: A string or tuple of strings, with each string corresponding to the
name of a state variables the value(s) returned by the method depends on,
e.g. 'pos' or 'mom', such that the cache in the state object is correctly
cleared when the value of any of these variables (attributes) of the state
object changes.
auxiliary_outputs: A string or tuple of strings, with each string defining an
auxiliary output the wrapped method may additionally return in addition to
the primary output. If auxiliary outputs are returned, the returned value
should be a tuple with first entry the 'primary' output corresponding to the
value associated with the name of the method and the subsequent entries in
the tuple corresponding to the auxiliary outputs in the order specified by
the entries in the `auxiliary_outputs` argument. If the primary output is
itself a tuple, it must be wrapped in another tuple even when no auxiliary
outputs are being returned.
"""
if isinstance(depends_on, str):
depends_on = (depends_on,)
if isinstance(auxiliary_outputs, str):
auxiliary_outputs = (auxiliary_outputs,)
def cache_in_state_with_aux_decorator(method):
@wraps(method)
def wrapper(self, state):
prim_key = _cache_key_func(self, method)
keys = [prim_key] + [_cache_key_func(self, a) for a in auxiliary_outputs]
for _i, key in enumerate(keys):
if key not in state._cache:
for dep in depends_on:
state._dependencies[dep].add(key)
if prim_key not in state._cache or state._cache[prim_key] is None:
vals = method(self, state)
if isinstance(vals, tuple):
for k, v in zip(keys, vals):
state._cache[k] = v
else:
state._cache[prim_key] = vals
if state._call_counts is not None:
state._call_counts[prim_key] += 1
return state._cache[prim_key]
return wrapper
return cache_in_state_with_aux_decorator
class ChainState:
"""Markov chain state.
As well as recording the chain state variable values, the state object is also used
to cache derived quantities to avoid recalculation if these values are subsequently
reused.
Additionally for `ChainState` instances initialized with a `_call_counts`
dictionary, any memoized system methods (i.e. those decorated with `cache_in_state`
or `cache_in_state_with_aux`) will update a counter for the method in the state
`_call_counts` dictionary attribute every time the decorated method is called (i.e.
when there isn't a valid cached value available).
"""
def __init__(
self,
*,
_call_counts: Optional[dict[str, int]] = None,
_read_only: bool = False,
_dependencies: Optional[dict[str, set[str]]] = None,
_cache: Optional[dict[str, Any]] = None,
**variables: ArrayLike,
):
"""Create a new `ChainState` instance.
Any keyword arguments passed to the constructor (with names not starting with an
underscore) will be used to set state variable attributes of state object for
example
state = ChainState(pos=pos_val, mom=mom_val, dir=dir_val)
will return a `ChainState` instance `state` with variable attributes
`state.pos`, `state.mom` and `state.dir` with initial values set to `pos_val`,
`mom_val` and `dir_val` respectively.
Keyword arguments with a leading underscore in the name are reserved for
additional arguments to the constructor not corresponding to state variables.
Additionally the name `copy` should not be used as attribute access to this name
will be blocked by the `copy` method.
Args:
**variables: Keyword arguments corresponding to state variables. All names
must not begin with an underscore and no name can be `copy`. See
description above for details.
_call_counts: If a dictionary (or `Counter`) is passed this will be used to
store counts of the number of calls of system methods decorated with
`cache_in_state` or `cache_in_state_with_aux` when called on this state
object and when no cached value for the method is available so that the
wrapped method is called. The `_call_counts` attribute persists between
all copies of a state so will count any decorated method calls on copies
of the state as well - e.g. all copies of a state in a sampled Markov
chain, allowing the `_call_counts` attribute to be used to monitor the
number of method call while sampling a chain.
_read_only: If `True` a `mici.errors.ReadOnlyStateError` exception will be
raised when attempting to set any attributes of the state object after
construction. Defaults to `False`.
_dependencies: Intended for internal use only. If not `None` this should be
a dictionary with string keys corresponding to the state variable names
and values which are sets of strings indicating any dependencies of the
relevant state variable in the cache.
_cache: Intended for internal use only. If not `None` this should be a
dictionary with keys corresponding to unique identifiers for methods
decorated with the `cache_in_state` or `cache_in_state_with_aux`
decorators and values corresponding to cached computed outputs of these
methods or `None` for when a cached output is not available.
"""
# Set attributes by directly writing to __dict__ to ensure set before
# any call to __setattr__
self.__dict__["_variables"] = variables
if _dependencies is None:
_dependencies = {name: set() for name in variables}
self.__dict__["_dependencies"] = _dependencies
if _cache is None:
_cache = {}
self.__dict__["_cache"] = _cache
self.__dict__["_call_counts"] = (
Counter(_call_counts)
if _call_counts is None or not isinstance(_call_counts, Counter)
else _call_counts
)
self.__dict__["_read_only"] = _read_only
def __getattr__(self, name: str) -> ArrayLike:
if name in self._variables:
return self._variables[name]
else:
msg = f"'{type(self).__name__}' object has no attribute '{name}'"
raise AttributeError(msg)
def __setattr__(self, name: str, value: ArrayLike):
if self._read_only:
msg = "ChainState instance is read-only."
raise ReadOnlyStateError(msg)
if name in self._variables:
self._variables[name] = value
# clear any dependent cached values
for dep in self._dependencies[name]:
self._cache[dep] = None
return None
else:
return super().__setattr__(name, value)
def __contains__(self, name: str) -> bool:
return name in self._variables
def copy(self, *, read_only: bool = False) -> ChainState:
"""Create a deep copy of the state object.
Args:
read_only: Whether the state copy should be read-only.
Returns:
A copy of the state object with variable attributes that are independent
copies of the original state object's variables.
"""
return type(self)(
_dependencies=self._dependencies,
_cache=self._cache.copy(),
_call_counts=self._call_counts,
_read_only=read_only,
**{name: copy.copy(val) for name, val in self._variables.items()},
)
def __str__(self) -> str:
return (
"(\n " + ",\n ".join([f"{k}={v}" for k, v in self._variables.items()]) + ")"
)
def __repr__(self) -> str:
return type(self).__name__ + str(self)
def __getstate__(self) -> dict[str, Any]:
return {
"variables": self._variables,
"dependencies": self._dependencies,
# Don't pickle callable cached 'variables' such as derivative
# functions
"cache": {k: v for k, v in self._cache.items() if not callable(v)},
"call_counts": self._call_counts,
"read_only": self._read_only,
}
def __setstate__(self, state: dict[str, Any]):
self.__dict__["_variables"] = state["variables"]
self.__dict__["_dependencies"] = state["dependencies"]
self.__dict__["_cache"] = state["cache"]
self.__dict__["_call_counts"] = state["call_counts"]
self.__dict__["_read_only"] = state["read_only"]
|
9174ce94fe2111e35121abc1912379c23e842705
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/govern/data-meta/OpenMetadata/ingestion/tests/unit/test_credentials.py
|
57940db244678fa5df06b2d33c4b9d9de8ac8fa9
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 3,236
|
py
|
test_credentials.py
|
# Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Credentials helper module
"""
from unittest import TestCase
from pydantic import SecretStr
from metadata.generated.schema.security.credentials.gcsCredentials import GCSValues
from metadata.utils.credentials import (
InvalidPrivateKeyException,
build_google_credentials_dict,
)
class TestCredentials(TestCase):
"""
Validate credentials handling
"""
def test_build_google_credentials_dict(self):
"""
Check how we can validate GCS values
"""
# Key mocked online
private_key = """-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDMGwM93kIt3D4r4+dWAGdoTboSaZcFLhsG1lvnZlYEpnZoFo1M
ek7laRKDUW3CkdTlSid9p4/RTs9SYKuuXvNKNSLApHUeR2zgKBIHYTGGv1t1bEWc
ohVeqr7w8HkFr9LV4qxgFEWBBd3QYncY/Y1iZgTtbmMiUxJN9vj/kuH0xQIDAQAB
AoGAPDqAY2JRrwy9v9/ZpPQrj4jYLpS//sRTL1pT9l2pZmfkquR0v6ub2nB+CQgf
VnoIE70lGBw5AS+7V/i00JiuO6GP/MWWqxKdc5McjBGYDIb+9gQ/DrryVDHsqgGX
iZrWr7rIrpGsbCB2xt2HPpKR7D9IpI8FA+EEU9fIPfETM6ECQQDv69L78zdijSNk
CYx70dVHqCiDZT5RbkJqDmQwKabIGXBqZLTM+7ZAHotq0EXGc5BvQGyIMso/qIOs
Wq3imi3dAkEA2ci4xEzj5guQcGxoVcxfGm+M/VqXLuw/eW1sYdOp52OwdDywxG+I
6tpm5ByVowhqT8PHDJVOy8GEV9QNw0Y4CQJBAJiyn/rJJlPr/j1aMnZP642KwhY2
pr4PDegQNsXMjKDISBr+82+POMSAbD1UR0RyItgbybe5k62GZB+bKxaRCGUCQEVj
l8MrwH0eeCHp2IBlwnN40VIz1/GiYkL9I0g0GXFZKPKQF74uz1AM0DWkCeVNHBpY
BYaz18xB1znonY33RIkCQQDE3wAWxFrvr582J12qJkE4enmNhRJFdcSREDX54d/5
VEhPQF0i0tUU7Fl071hcYaiQoZx4nIjN+NG6p5QKbl6k
-----END RSA PRIVATE KEY-----"""
gcs_values = GCSValues(
type="my_type",
projectId=["project_id"],
privateKeyId="private_key_id",
privateKey=private_key,
clientEmail="email@mail.com",
clientId="client_id",
clientX509CertUrl="http://localhost:1234",
)
expected_dict = {
"type": "my_type",
"project_id": ["project_id"],
"private_key_id": "private_key_id",
"private_key": private_key,
"client_email": "email@mail.com",
"client_id": "client_id",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "http://localhost:1234",
}
build_google_credentials_dict(gcs_values)
self.assertEqual(expected_dict, build_google_credentials_dict(gcs_values))
gcs_values.privateKey = SecretStr("I don't think I am a proper Private Key")
with self.assertRaises(InvalidPrivateKeyException):
build_google_credentials_dict(gcs_values)
|
fd6921c1dcf7599b010750e63dd8f362fc196057
|
6d162c19c9f1dc1d03f330cad63d0dcde1df082d
|
/util/test/tests/Vulkan/VK_SPIRV_13_Shaders.py
|
ebfc759678ebf6cca81964095180dfdfb5082f2e
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-3.0",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
baldurk/renderdoc
|
24efbb84446a9d443bb9350013f3bfab9e9c5923
|
a214ffcaf38bf5319b2b23d3d014cf3772cda3c6
|
refs/heads/v1.x
| 2023-08-16T21:20:43.886587
| 2023-07-28T22:34:10
| 2023-08-15T09:09:40
| 17,253,131
| 7,729
| 1,358
|
MIT
| 2023-09-13T09:36:53
| 2014-02-27T15:16:30
|
C++
|
UTF-8
|
Python
| false
| false
| 3,571
|
py
|
VK_SPIRV_13_Shaders.py
|
import renderdoc as rd
import rdtest
class VK_SPIRV_13_Shaders(rdtest.TestCase):
demos_test_name = 'VK_SPIRV_13_Shaders'
def check_capture(self):
action = self.find_action("Draw")
self.check(action is not None)
self.controller.SetFrameEvent(action.eventId, False)
pipe: rd.PipeState = self.controller.GetPipelineState()
refl: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Vertex)
disasm: str = self.controller.DisassembleShader(pipe.GetGraphicsPipelineObject(), refl, "")
if (refl.inputSignature[0].varName != 'pos' or refl.inputSignature[0].compCount != 3):
raise rdtest.TestFailureException("Vertex shader input 'pos' not reflected correctly")
if (refl.inputSignature[1].varName != 'col' or refl.inputSignature[1].compCount != 4):
raise rdtest.TestFailureException("Vertex shader input 'col' not reflected correctly")
if (refl.inputSignature[2].varName != 'uv' or refl.inputSignature[2].compCount != 2):
raise rdtest.TestFailureException("Vertex shader input 'uv' not reflected correctly")
if (refl.outputSignature[0].varName != 'opos' or refl.outputSignature[0].compCount != 4 or refl.outputSignature[0].systemValue != rd.ShaderBuiltin.Position):
raise rdtest.TestFailureException("Vertex shader output 'opos' not reflected correctly")
if (refl.outputSignature[1].varName != 'outcol' or refl.outputSignature[1].compCount != 4):
raise rdtest.TestFailureException("Vertex shader output 'outcol' not reflected correctly")
if 'vertmain' not in disasm:
raise rdtest.TestFailureException("Vertex shader disassembly failed, entry point not found")
refl: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Fragment)
disasm: str = self.controller.DisassembleShader(pipe.GetGraphicsPipelineObject(), refl, "")
if (refl.inputSignature[0].varName != 'incol' or refl.inputSignature[0].compCount != 4):
raise rdtest.TestFailureException("Fragment shader input 'incol' not reflected correctly")
if (refl.outputSignature[0].varName != 'ocol' or refl.outputSignature[0].compCount != 4 or refl.outputSignature[0].systemValue != rd.ShaderBuiltin.ColorOutput):
raise rdtest.TestFailureException("Fragment shader output 'ocol' not reflected correctly")
if 'fragmain' not in disasm:
raise rdtest.TestFailureException("Fragment shader disassembly failed, entry point not found")
rdtest.log.success("shader reflection and disassembly as expected")
postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut, 0, action.numIndices)
postvs_ref = {
0: {
'vtx': 0,
'idx': 0,
'opos': [-0.5, 0.5, 0.0, 1.0],
'outcol': [0.0, 1.0, 0.0, 1.0],
},
1: {
'vtx': 1,
'idx': 1,
'opos': [0.0, -0.5, 0.0, 1.0],
'outcol': [0.0, 1.0, 0.0, 1.0],
},
2: {
'vtx': 2,
'idx': 2,
'opos': [0.5, 0.5, 0.0, 1.0],
'outcol': [0.0, 1.0, 0.0, 1.0],
},
}
self.check_mesh_data(postvs_ref, postvs_data)
rdtest.log.success("vertex output is as expected")
self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, 0.5, 0.5, [0.0, 1.0, 0.0, 1.0])
rdtest.log.success("picked value is as expected")
|
960cfdb22a27013e40efee09cfcb5dfb35ed355e
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/hardware-testing/hardware_testing/examples/__init__.py
|
ce566ba03016d906d0037592a4d232449c3e4122
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 16
|
py
|
__init__.py
|
"""Examples."""
|
32c439af82b91361896b59dc6e619262b71bcee7
|
b710dc871264de7167c04962e893aea0a352882d
|
/src/aaf2/mobs.py
|
8eb33cd9255d9cd726b1fd959dffc00d28cae950
|
[
"MIT"
] |
permissive
|
markreidvfx/pyaaf2
|
4f632fadfae0eecd7d10a691f8d089ed090169e9
|
23b61437452e8f18fc445d5a8c8b605972fa2349
|
refs/heads/main
| 2023-08-31T20:21:47.906137
| 2023-05-29T20:06:11
| 2023-05-29T20:06:11
| 102,393,631
| 108
| 32
|
MIT
| 2023-08-31T18:35:05
| 2017-09-04T19:07:29
|
C
|
UTF-8
|
Python
| false
| false
| 15,828
|
py
|
mobs.py
|
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
from datetime import datetime
import io
from . import core
from . mobid import MobID
from . utils import register_class, rescale
from . misc import TaggedValueHelper
from . import essence
from . import video
from . import audio
from .rational import AAFRational
from .auid import AUID
from .components import SourceReference
@register_class
class Mob(core.AAFObject):
"""
Base Class for All Mob Objects
"""
class_id = AUID("0d010101-0101-3400-060e-2b3402060101")
__slots__ = ()
def __init__(self, name=None):
super(Mob, self).__init__()
self.name = name or "Mob"
self.mob_id = MobID.new()
now = datetime.now()
self['CreationTime'].value = now
self['LastModified'].value = now
self['Slots'].value = []
@property
def unique_key(self):
return self.mob_id
@property
def name(self):
return self['Name'].value
@name.setter
def name(self, value):
self['Name'].value = value
@property
def mob_id(self):
"""
The unique Mob ID associated with this mob. Get Returns :class:`aaf2.mobid.MobID` Object
"""
return self['MobID'].value
@mob_id.setter
def mob_id(self, value):
self['MobID'].value = value
@property
def usage(self):
return self['UsageCode'].value
@usage.setter
def usage(self, value):
self['UsageCode'].value = value
@property
def comments(self):
return TaggedValueHelper(self['UserComments'])
@property
def slots(self):
return self['Slots']
def slot_at(self, slot_id):
for slot in self.slots:
if slot.slot_id == slot_id:
return slot
raise IndexError("No SlotID: %s" % str(slot_id))
def create_timeline_slot(self, edit_rate, slot_id=None):
slots = [slot.slot_id for slot in self.slots]
slots.sort()
if slot_id is None:
start = 1
if slots and slots[0] == 0:
start = 0
for i, e in enumerate(slots + [None], start):
if i != e:
slot_id = i
elif slot_id in slots:
raise ValueError("slot id: %d already exists" % slot_id)
slot = self.root.create.TimelineMobSlot(slot_id, edit_rate=edit_rate)
self.slots.append(slot)
return slot
def create_empty_sequence_slot(self, edit_rate, slot_id=None, media_kind=None):
"""
Create an empty timeline slot and sets its segment to a new, empty
`aaf2.components.Sequence` component. Timeline slots are for continuous,
monotonically-changing media, like picture and sound.
"""
slot = self.create_timeline_slot(edit_rate, slot_id)
sequence = self.root.create.Sequence(media_kind=media_kind)
sequence['Components'].value = []
slot.segment = sequence
return slot
def create_picture_slot(self, edit_rate=25):
"""
Create an empty timeline slot, with the 'picture' media kind, and sets
its segment to a new, empty `aaf2.components.Sequence` component.
"""
return self.create_empty_sequence_slot(edit_rate, media_kind="picture")
def create_sound_slot(self, edit_rate=25):
"""
Create an empty timeline slot, with the 'sound' media kind, and sets
its segment to a new, empty `aaf2.components.Sequence` component.
"""
return self.create_empty_sequence_slot(edit_rate, media_kind="sound")
def create_source_clip(self, slot_id=None, start=None, length=None, media_kind=None):
"""
Create a SourceClip of Mobs slot with `slot_id`. If no length given the default
length will be the full length of slots segment minus `start`.
Returns :class:`aaf2.components.SourceClip` Object
"""
source_slot = self.slot_at(slot_id)
if not media_kind:
media_kind = source_slot.media_kind
clip = self.root.create.SourceClip(media_kind=media_kind)
clip.mob = self
clip.slot = source_slot
clip.start = start or 0
clip.length = length or max(source_slot.length - clip.start, 0)
return clip
def dependant_mobs(self):
"""
Yields all mobs that this mob is dependant on in depth first order.
"""
visited = set()
stack = [self]
while stack:
mob = stack[-1]
children_processed = True
for obj, _ in mob.walk_references(topdown=True):
if isinstance(obj, SourceReference):
ref_mob = obj.mob
if not ref_mob:
continue
if ref_mob.mob_id not in visited:
stack.append(ref_mob)
children_processed = False
if children_processed:
stack.pop(-1)
if mob.mob_id not in visited:
visited.add(mob.mob_id)
if mob is not self:
yield mob
def __repr__(self):
s = "%s.%s" % (self.__class__.__module__,
self.__class__.__name__)
s += ' "%s" %s' % (self.name or "", str(self.mob_id))
return '<%s at 0x%x>' % (s, id(self))
@register_class
class CompositionMob(Mob):
class_id = AUID("0d010101-0101-3500-060e-2b3402060101")
__slots__ = ()
@register_class
class MasterMob(Mob):
class_id = AUID("0d010101-0101-3600-060e-2b3402060101")
__slots__ = ()
def import_dnxhd_essence(self, path, edit_rate, tape=None, length=None, offline=False):
"""
Import video essence from raw DNxHD/DNxHR stream
"""
# create sourceMob and essencedata
source_mob = self.root.create.SourceMob("%s.PHYS" % self.name)
self.root.content.mobs.append(source_mob)
# import the essencedata
source_slot = source_mob.import_dnxhd_essence(path, edit_rate, tape, length, offline)
# create slot and clip that references source_mob slot
slot = self.create_timeline_slot(edit_rate=edit_rate)
slot.segment = source_mob.create_source_clip(source_slot.slot_id, media_kind='picture')
# set clip length
slot.segment.length = source_slot.segment.length
return slot
def import_audio_essence(self, path, edit_rate=None, tape=None, length=None, offline=False):
"""
Import audio essence from wav file
"""
# create sourceMob and essencedata
source_mob = self.root.create.SourceMob("%s.PHYS" % self.name)
self.root.content.mobs.append(source_mob)
source_slot = source_mob.import_audio_essence(path, edit_rate, tape, length, offline)
# create slot and clip that references source_mob slot
edit_rate = edit_rate or source_slot.edit_rate
slot = self.create_timeline_slot(edit_rate=edit_rate)
slot.segment = source_mob.create_source_clip(source_slot.slot_id, media_kind='sound')
# set clip length
slot.segment.length = source_slot.segment.length
return slot
@register_class
class SourceMob(Mob):
class_id = AUID("0d010101-0101-3700-060e-2b3402060101")
__slots__ = ()
@property
def descriptor(self):
return self['EssenceDescription'].value
@descriptor.setter
def descriptor(self, value):
self['EssenceDescription'].value = value
def create_essence(self, edit_rate=None, media_kind='picture', slot_id=None, offline=False):
# NOTE: appears like a SourceMob can only link to 1 essence and it must be slot 1
slot = self.create_empty_slot(edit_rate=edit_rate, media_kind=media_kind, slot_id=1)
if offline:
return None, slot
essencedata = self.root.create.EssenceData()
essencedata.mob_id = self.mob_id
self.root.content.essencedata.append(essencedata)
return essencedata, slot
def create_empty_slot(self, edit_rate=None, media_kind='picture', slot_id=None):
slot = self.create_timeline_slot(edit_rate, slot_id)
clip = self.root.create.SourceClip(media_kind=media_kind)
slot.segment = clip
return slot
def create_timecode_slot(self, edit_rate, timecode_fps, drop_frame=False, length=None):
timecode_slot = self.create_timeline_slot(edit_rate)
timecode_slot.segment = self.root.create.Timecode(timecode_fps, drop=drop_frame, length=length)
return timecode_slot
def create_tape_slots(self, tape_name, edit_rate, timecode_fps, drop_frame=False, media_kind=None, length=None):
self.name = tape_name
self.descriptor = self.root.create.TapeDescriptor()
slot = self.create_empty_slot(edit_rate, media_kind, slot_id=1)
slot.segment.length = int(float(AAFRational(edit_rate)) * 60 * 60 * 12) # 12 hours
timecode_slot = self.create_timecode_slot(edit_rate, timecode_fps, drop_frame, length)
return slot, timecode_slot
def import_rawvideo_essence(self, path, edit_rate, width, height, pixel_layout, tape=None):
essencedata, slot = self.create_essence(edit_rate, 'picture')
if tape:
slot.segment = tape
# create essence descriptor
descriptor = self.root.create.RGBADescriptor()
self.descriptor = descriptor
alignment = 8192
# set minimal properties
descriptor['SampleRate'].value = edit_rate
descriptor['VideoLineMap'].value = [42, 0] # Not exactly sure what linemap is
descriptor['ContainerFormat'].value = self.root.dictionary.lookup_containerdef("AAF")
descriptor['StoredWidth'].value = width
descriptor['StoredHeight'].value = height
descriptor['ImageAlignmentFactor'].value = alignment
descriptor['PixelLayout'].value = pixel_layout
descriptor['FrameLayout'].value = "FullFrame"
descriptor['ImageAspectRatio'].value = "%d/%d" % (width, height)
raw_frame_size_bits = 0
for layout in pixel_layout:
raw_frame_size_bits += layout.get('Size', 0) * width * height
raw_frame_size = (raw_frame_size_bits + 7) // 8
frame_size = (raw_frame_size + (alignment-1)) // alignment * alignment
descriptor['FrameSampleSize'].value = frame_size
# open essence stream
stream = essencedata.open('w')
length = 0
alignment_padding = bytearray(frame_size - raw_frame_size)
with io.open(path, 'rb', buffering=io.DEFAULT_BUFFER_SIZE) as f:
while True:
data = f.read(raw_frame_size)
if not data:
break
stream.write(data)
if alignment_padding:
stream.write(alignment_padding)
length += 1
descriptor['Length'].value = length
descriptor['ImageSize'].value = length * frame_size
slot.segment.length = length
return slot
def import_dnxhd_essence(self, path, edit_rate, tape=None, length=None, offline=False):
"""
Import video essence from raw DNxHD/DNxHR stream
"""
essencedata, slot = self.create_essence(edit_rate, 'picture', offline=offline)
if tape:
slot.segment = tape
# create essence descriptor
descriptor = self.root.create.CDCIDescriptor()
self.descriptor = descriptor
# set minimal properties
descriptor['SampleRate'].value = edit_rate
descriptor['VideoLineMap'].value = [42, 0] # Not exactly sure what linemap is
descriptor['ContainerFormat'].value = self.root.dictionary.lookup_containerdef("AAF")
dnxhd_codec_auid = AUID("8ef593f6-9521-4344-9ede-b84e8cfdc7da")
descriptor['CodecDefinition'].value = self.root.dictionary.lookup_codecdef(dnxhd_codec_auid)
stream = None
if essencedata is not None:
# open essence stream
stream = essencedata.open('w')
# open input file
with io.open(path, 'rb', buffering=io.DEFAULT_BUFFER_SIZE) as f:
cid = None
for i, packet in enumerate(video.iter_dnx_stream(f), 1):
if cid is None:
(cid, width, height, bitdepth, interlaced) = video.read_dnx_frame_header(packet)
descriptor['StoredWidth'].value = width
descriptor['StoredHeight'].value = height
descriptor['ComponentWidth'].value = bitdepth
descriptor['FrameLayout'].value = 'SeparateFields' if interlaced else 'FullFrame'
descriptor['ImageAspectRatio'].value = "%d/%d" % (width, height)
descriptor['FrameSampleSize'].value = len(packet)
descriptor['Compression'].value = video.dnx_compression_auids[cid]
descriptor['HorizontalSubsampling'].value = 2
if stream is not None:
stream.write(packet)
# set descriptor and component lengths
slot.segment.length = length or i
descriptor.length = i
return slot
def import_audio_essence(self, path, edit_rate=None, tape=None, length=None, offline=False):
"""
Import audio essence from wav file
"""
# read the wav file header
a = audio.WaveReader(path)
sample_rate = a.getframerate()
channels = a.getnchannels()
sample_width = a.getsampwidth()
block_align = a.getblockalign()
frames = a.getnframes()
edit_rate = edit_rate or sample_rate
# create essencedata
essencedata, slot = self.create_essence(edit_rate, 'sound', offline=offline)
if tape:
slot.segment = tape
# create essence descriptor
descriptor = self.root.create.PCMDescriptor()
self.descriptor = descriptor
descriptor['Channels'].value = channels
descriptor['BlockAlign'].value = block_align
descriptor['SampleRate'].value = sample_rate
descriptor['AverageBPS'].value = sample_rate * channels * sample_width
descriptor['QuantizationBits'].value = sample_width * 8
descriptor['AudioSamplingRate'].value = sample_rate
# set lengths
descriptor.length = frames
slot.segment.length = length or int(rescale(frames, sample_rate, edit_rate))
if essencedata is not None:
stream = essencedata.open('w')
while True:
data = a.readframes(sample_rate)
if not data:
break
stream.write(data)
return slot
def export_audio(self, path):
descriptor = self.descriptor
assert isinstance(descriptor, essence.PCMDescriptor)
a = audio.wave.Wave_write(path)
try:
channels = descriptor['Channels'].value
sample_rate = int(float(descriptor['SampleRate'].value))
sample_size = descriptor['QuantizationBits'].value // 8
a.setnchannels(channels)
a.setframerate(sample_rate)
a.setsampwidth(sample_size)
read_size = channels * int(float(sample_rate)) * sample_size
stream = self.essence.open('r')
while True:
data = stream.read(read_size)
if not data:
break
a.writeframesraw(data)
finally:
a.close()
@property
def essence(self):
return self.root.content.essencedata.get(self.mob_id, None)
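# Hedged usage sketch (not part of the upstream module): creating a
# CompositionMob with an empty picture slot in a fresh AAF file. The
# aaf2.open() entry point is assumed from the wider pyaaf2 API.
import aaf2

with aaf2.open("example.aaf", "w") as f:
    comp = f.create.CompositionMob("Example Composition")
    f.content.mobs.append(comp)
    picture_slot = comp.create_picture_slot(edit_rate=25)
    print(comp, picture_slot.segment)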
|
d86842288d3af47329fe2aa998b1ede56dc91f8f
|
94866c11a78c1a999a8e11e2fbe03845e6a56318
|
/src/robomaster/action.py
|
e5f9c706f12ff9341c6509f07fa8c156927ebec8
|
[
"Apache-2.0"
] |
permissive
|
dji-sdk/RoboMaster-SDK
|
f4bc4d6de7a7f2b797b68287221033c0a10794af
|
ff6646e115ab125af3207a4ed3df42cc76c795b2
|
refs/heads/master
| 2023-05-12T09:06:11.705254
| 2022-11-10T03:54:51
| 2022-11-10T03:54:51
| 232,267,793
| 281
| 142
|
Apache-2.0
| 2023-05-08T09:47:07
| 2020-01-07T07:26:10
|
C
|
UTF-8
|
Python
| false
| false
| 14,037
|
py
|
action.py
|
# -*-coding:utf-8-*-
# Copyright (c) 2020 DJI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import re
from . import protocol
from . import logger
__all__ = ['Action', 'ActionDispatcher', 'ACTION_IDLE', 'ACTION_RUNNING', 'ACTION_SUCCEEDED', 'ACTION_FAILED',
'ACTION_STARTED', 'ACTION_ABORTED', 'ACTION_EXCEPTION', 'ACTION_REJECTED']
ACTION_IDLE = 'action_idle'
ACTION_RUNNING = 'action_running'
ACTION_SUCCEEDED = 'action_succeeded'
ACTION_FAILED = 'action_failed'
ACTION_STARTED = 'action_started'
ACTION_ABORTING = 'action_aborting'
ACTION_ABORTED = 'action_aborted'
ACTION_REJECTED = 'action_rejected'
ACTION_EXCEPTION = 'action_exception'
_VALID_STATES = {ACTION_IDLE, ACTION_RUNNING, ACTION_SUCCEEDED, ACTION_FAILED, ACTION_STARTED}
#: string: Abort any action currently in progress and run this action now.
ACTION_NOW = 'action_now'
#: string: Run the action after all previous actions have completed.
ACTION_QUEUE = 'action_queue'
#: string: Run the action only if idle; raise an exception if an action is in progress.
ACTION_REQUEST = 'action_request'
_VALID_ACTION_TYPES = {ACTION_NOW, ACTION_QUEUE, ACTION_REQUEST}
RM_SDK_FIRST_ACTION_ID = 1
RM_SDK_LAST_ACTION_ID = 255
registered_actions = {}
class _AutoRegisterAction(type):
    '''Helper metaclass to automatically register Action subclasses wherever they are defined.'''
def __new__(mcs, name, bases, attrs, **kw):
return super().__new__(mcs, name, bases, attrs, **kw)
def __init__(cls, name, bases, attrs, **kw):
super().__init__(name, bases, attrs, **kw)
if name == 'Action':
return
key = name
if key in registered_actions.keys():
raise ValueError("Duplicate proto class {0}".format(name))
if attrs['_action_proto_cls'] is None or attrs['_push_proto_cls'] is None:
            raise ValueError('action must specify proto cls and push cls')
registered_actions[key] = cls
class Action(metaclass=_AutoRegisterAction):
_action_mutex = threading.Lock()
_next_action_id = RM_SDK_FIRST_ACTION_ID
_action_proto_cls = None
_push_proto_cls = None
_target = protocol.host2byte(0, 0)
def __init__(self, **kw):
super().__init__(**kw)
self._action_id = -1
self._state = ACTION_IDLE
self._failure_reason = None
self._percent = 0
# wait on event.
self._event = threading.Event()
self._obj = None
self._on_state_changed = None
def _get_next_action_id(self):
self.__class__._action_mutex.acquire()
action_id = self.__class__._next_action_id
if self.__class__._next_action_id == RM_SDK_LAST_ACTION_ID:
self.__class__._next_action_id = RM_SDK_FIRST_ACTION_ID
else:
self.__class__._next_action_id = self.__class__._next_action_id + 1
self.__class__._action_mutex.release()
return action_id
def __repr__(self):
return "<action, name:{0} id:{1:d}, state:{2}, percent:{3:d}%>".format(
self.__class__.__name__, self._action_id, self._state, self._percent)
@property
def target(self):
return self._target
@property
def is_running(self):
""" 是否正在运行中。 """
return self._state == ACTION_RUNNING or self._state == ACTION_STARTED
@property
def is_completed(self):
return (self._percent == 100 or self._state is ACTION_SUCCEEDED) or (self._state is ACTION_FAILED) or \
(self.state is ACTION_EXCEPTION) or (self.state is ACTION_REJECTED)
@property
def _is_aborting(self):
""" 是否在取消任务状态中 """
return self._state == ACTION_ABORTING
@property
def has_succeeded(self):
""" 是否已经成功完成 """
return self._state == ACTION_SUCCEEDED
@property
def has_failed(self):
""" 是否已经执行失败 """
return self._state == ACTION_FAILED
@property
def failure_reason(self):
""" 获取执行失败原因 """
return self._failure_reason
@property
def state(self):
""" 返回当前任务动作状态。 """
return self._state
def encode(self):
raise NotImplementedError()
def make_action_key(self):
return self._action_proto_cls._cmdid * 256 + self._action_id
def _update_action_state(self, proto_state):
if proto_state == 0:
self._changeto_state(ACTION_RUNNING)
elif proto_state == 1:
self._changeto_state(ACTION_SUCCEEDED)
elif proto_state == 2:
self._changeto_state(ACTION_FAILED)
elif proto_state == 3:
self._changeto_state(ACTION_STARTED)
def _changeto_state(self, state):
""" 修改action状态 """
if state != self._state:
orgin = self._state
self._state = state
logger.info("Action, name:{0} _changeto_state from {1} "
"to {2}".format(self.__class__.__name__, orgin, self._state))
if self._on_state_changed and self._obj:
self._on_state_changed(self._obj, self, orgin, self._state)
if self.is_completed:
self._event.set()
    def wait_for_completed(self, timeout=None):
        """ Block until the action completes.

        :param timeout: optional timeout in seconds; if the action has not completed
            when it expires, the call returns immediately.
        :return: bool: True if the action completed within the given time, False if it timed out.
        """
        if self._event.is_set() and self.is_completed:
            return True
        if timeout:
            self._event.wait(timeout)
        else:
            self._event.wait()
        if not self._event.is_set():
            logger.warning("Action: wait_for_completed timeout.")
            self._changeto_state(ACTION_EXCEPTION)
            return False
        return True
    def _abort(self):
        """ Abort the action and unblock any waiters. """
self._changeto_state(ACTION_ABORTED)
self._event.set()
def found_proto(self, proto):
if proto.cmdset == self._action_proto_cls._cmdset \
and proto.cmdid == self._action_proto_cls._cmdid:
return True
else:
return False
def found_action(self, proto):
if proto.cmdset == self._push_proto_cls._cmdset \
and proto.cmdid == self._push_proto_cls._cmdid:
return True
else:
return False
def _make_action_key(cmdid, action_id):
return cmdid*256 + action_id
class TextAction(Action):
""" Blocking action in plaintext protocol """
_action_proto_cls = protocol.TextProtoDrone
_push_proto_cls = protocol.TextProtoDronePush
def __init__(self, **kw):
super().__init__(**kw)
self._text_proto = None
    def __repr__(self):
        return "<action, name:{0}, state:{1}>".format(self.__class__.__name__, self._state)
def _update_action_state(self, proto_state):
logger.debug("TextAction: _update_action_state, proto_state {0}".format(proto_state))
if proto_state == 'ok':
self._changeto_state(ACTION_SUCCEEDED)
elif re.match(r'Re\d{4} ok', proto_state):
self._changeto_state(ACTION_SUCCEEDED)
elif proto_state == 'error':
self._changeto_state(ACTION_FAILED)
logger.error("TextAction: action failed ! resp: {0}".format(proto_state))
else:
logger.error("TextAction: action failed ! resp: {0}".format(proto_state))
def make_action_key(self):
return self.target
@property
def text_proto(self):
return self._text_proto
@text_proto.setter
def text_proto(self, text_cmd):
if not text_cmd:
logger.error("TextAction: input command is invalid!")
self._text_proto = text_cmd
def found_proto(self, proto):
return False
def found_action(self, proto):
if proto._action_state == 'ok' or proto._action_state == 'error' or proto._action_state == 'out of range' \
or proto._action_state == "error No valid marker" or re.match(r'Re\d{4} ok', proto._action_state):
return True
else:
return False
class ActionDispatcher(object):
def __init__(self, client=None):
self._client = client
self._in_progress_mutex = threading.Lock()
self._in_progress = {}
def initialize(self):
self._client.add_handler(self, "ActionDispatcher", self._on_recv)
@property
    def has_in_progress_actions(self):
        """ Whether any actions are currently in progress. """
return len(self._in_progress) > 0
@classmethod
def _on_recv(cls, self, msg):
logger.debug("ActionDispatcher: on_recv, in_progress:{0}".format(self._in_progress))
proto = msg.get_proto()
if proto is None:
return
action = None
found_proto = False
found_action = False
self._in_progress_mutex.acquire()
for key in self._in_progress.keys():
action = self._in_progress[key]
if action:
if action.found_proto(proto):
found_proto = True
break
if action.found_action(proto):
found_action = True
break
else:
logger.warning("ActionDispatcher: in_progress action is None")
self._in_progress_mutex.release()
if found_proto:
if proto._retcode == 0:
if proto._accept == 0:
action._changeto_state(ACTION_STARTED)
elif proto._accept == 1:
action._changeto_state(ACTION_REJECTED)
elif proto._accept == 2:
action._changeto_state(ACTION_SUCCEEDED)
else:
action._changeto_state(ACTION_FAILED)
logger.debug("ActionDispatcher, found_proto, action:{0}".format(action))
if found_action:
if isinstance(action, TextAction):
logger.debug("ActionDispatcher, found text action, and will update_from_push action:{0}".format(action))
if action.is_running:
action.update_from_push(proto)
return
if proto._action_id == action._action_id:
logger.debug("ActionDispatcher, found action, and will update_from_push action:{0}".format(action))
if action.is_running:
action.update_from_push(proto)
def get_msg_by_action(self, action):
proto = action.encode()
if isinstance(action, TextAction):
action_msg = protocol.TextMsg(proto)
else:
proto._action_id = action._action_id
action_msg = protocol.Msg(self._client.hostbyte, action.target, proto)
return action_msg
    def send_action(self, action, action_type=ACTION_NOW):
        """ Send an action command to the robot. """
action._action_id = action._get_next_action_id()
        if self.has_in_progress_actions:
            # Hold the mutex only while scanning; using a context manager guarantees it is
            # released even when the exception below is raised.
            with self._in_progress_mutex:
                for act in self._in_progress.values():
                    if action.target == act.target:
                        blocking_action = list(self._in_progress.values())[0]
                        logger.error("Robot is already performing {0} action(s) {1}".format(
                            len(self._in_progress), blocking_action))
                        raise Exception("Robot is already performing {0} action(s) {1}".format(
                            len(self._in_progress), blocking_action))
if action.is_running:
raise Exception("Action is already running")
action_msg = self.get_msg_by_action(action)
action_key = action.make_action_key()
self._in_progress[action_key] = action
self._client.add_handler(self, "ActionDispatcher", self._on_recv)
action._obj = self
action._on_state_changed = self._on_action_state_changed
self._client.send_msg(action_msg)
if isinstance(action, TextAction):
action._changeto_state(ACTION_STARTED)
logger.info("ActionDispatcher: send_action, action:{0}".format(action))
@classmethod
    def _on_action_state_changed(cls, self, action, origin, target):
if action.is_completed:
action_key = action.make_action_key()
logger.debug("ActionDispatcher, in_progress:{0}".format(self._in_progress))
self._in_progress_mutex.acquire()
if action_key in self._in_progress.keys():
logger.debug("ActionDispatcher, del action:{0}".format(action))
                del self._in_progress[action_key]
else:
logger.warning("ActionDispatcher, del failed, action: {0}".format(action))
self._in_progress_mutex.release()
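# Illustrative sketch (not part of the original module): how a caller might drive an
# Action through the dispatcher above. "client" and "SomeAction" are hypothetical
# placeholders; a real subclass must set _action_proto_cls and _push_proto_cls, as
# enforced by _AutoRegisterAction.
#
#   dispatcher = ActionDispatcher(client)
#   dispatcher.initialize()
#   action = SomeAction()
#   dispatcher.send_action(action, ACTION_NOW)   # assigns an action id and sends the msg
#   if action.wait_for_completed(timeout=10):    # blocks until a push reports completion
#       print(action.has_succeeded, action.failure_reason)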
|
10261c0391e940724f5208e847f254e25f1a7f53
|
c0eeaae1a689a349a86b1e0cf672f7b90fa32886
|
/solara/template/portal/solara_portal/components/__init__.py
|
6ddd1cf1f17620be9d95920cee361c70f4b8650a
|
[
"MIT"
] |
permissive
|
widgetti/solara
|
a624b9e6408b080cb9845c46d9a3dd56da61a1b7
|
baa36623c3eb7db50672d8eb3d3cdab9220a50a6
|
refs/heads/master
| 2023-08-31T06:59:51.637969
| 2023-07-31T19:04:29
| 2023-07-31T19:04:29
| 467,834,772
| 959
| 68
|
MIT
| 2023-09-13T19:33:42
| 2022-03-09T08:12:01
|
Python
|
UTF-8
|
Python
| false
| false
| 70
|
py
|
__init__.py
|
from .header import Header # noqa
from .layout import Layout # noqa
|
cdca5fb8b3637b543cd172ffeefc42f3e188a8b6
|
bec3ad1268b2b45941a2f4277ccd8e2a0860952e
|
/2020/KAPO/Baby_Bubmi/flag.py
|
d041a37a429f5ae57af6d45725b4896e1ff16617
|
[] |
no_license
|
pcw109550/write-up
|
7a5e19e6c52f7831c6c7709331eb84b9a67cfd2c
|
bf6ab22619a107c8eb4011861a3c1ea80ac1c9f8
|
refs/heads/master
| 2023-07-08T18:10:56.780156
| 2023-07-04T17:28:12
| 2023-07-04T17:28:12
| 186,902,436
| 168
| 34
| null | 2023-02-08T05:21:24
| 2019-05-15T21:03:53
|
Sage
|
UTF-8
|
Python
| false
| false
| 32
|
py
|
flag.py
|
flag = b'flag{r341_e1s3nst13n}'
|
e176bdbfe681e1b71715a3e2d7462224f5ff54bd
|
e7e536df0263ae2a7ac44ef30f19110f891213a9
|
/src/pretalx/event/migrations/0018_auto_20190223_1543.py
|
a5dcb4b9ce9d7817618c34575092e86709378a25
|
[
"Apache-2.0"
] |
permissive
|
pretalx/pretalx
|
b3b3808266f4810dfc8445dc1ed33ba398e7a9c2
|
269dce90a6fb1ce0064008c40ce5dd4dad61e2e3
|
refs/heads/main
| 2023-09-05T11:09:23.538325
| 2023-09-04T19:57:47
| 2023-09-04T19:57:47
| 83,081,285
| 563
| 195
|
Apache-2.0
| 2023-09-13T19:12:28
| 2017-02-24T20:46:51
|
Python
|
UTF-8
|
Python
| false
| false
| 631
|
py
|
0018_auto_20190223_1543.py
|
# Generated by Django 2.1.5 on 2019-02-23 15:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("submission", "0031_auto_20190223_0730"),
("event", "0017_auto_20180922_0511"),
]
operations = [
migrations.AddField(
model_name="team",
name="limit_tracks",
field=models.ManyToManyField(blank=True, to="submission.Track"),
),
migrations.AlterField(
model_name="event",
name="timezone",
field=models.CharField(default="UTC", max_length=30),
),
]
|
a90a987994d8ae53726658b7f5b0f558fc4127cf
|
c8d98c2101a2932c4449183c9e8bd6501c57345f
|
/copulae/marginal/__init__.py
|
e649a94086aba077dedf8fea28089a9ce030477e
|
[
"MIT"
] |
permissive
|
DanielBok/copulae
|
a9af8fa88a212a5436226a22d59799d671d78645
|
d48fbd064426605b8784684114844758e3ffc90d
|
refs/heads/master
| 2023-07-08T09:52:31.815899
| 2023-06-14T04:29:39
| 2023-06-14T05:22:31
| 165,516,660
| 131
| 30
|
MIT
| 2023-06-14T05:22:32
| 2019-01-13T14:43:39
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 37
|
py
|
__init__.py
|
from .marginal import MarginalCopula
|
1ab9cdd9c2d1c9991d165d8c4b3cb012f8f58388
|
167c6226bc77c5daaedab007dfdad4377f588ef4
|
/python/ql/src/Security/CVE-2018-1281/BindToAllInterfaces.py
|
e3de8345c6d73ff2d334070952cf4575bf461073
|
[
"MIT"
] |
permissive
|
github/codeql
|
1eebb449a34f774db9e881b52cb8f7a1b1a53612
|
d109637e2d7ab3b819812eb960c05cb31d9d2168
|
refs/heads/main
| 2023-08-20T11:32:39.162059
| 2023-08-18T14:33:32
| 2023-08-18T14:33:32
| 143,040,428
| 5,987
| 1,363
|
MIT
| 2023-09-14T19:36:50
| 2018-07-31T16:35:51
|
CodeQL
|
UTF-8
|
Python
| false
| false
| 373
|
py
|
BindToAllInterfaces.py
|
import socket
# binds to all interfaces, insecure
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('0.0.0.0', 31137))
# binds to all interfaces, insecure
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 4040))
# binds only to a dedicated interface, secure
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('84.68.10.12', 8080))
|
174f2ac89d50ec9e0b9bd52352061dac9c1dc99a
|
725ac5a0bf72829be627bf8dc82fdc51ba0f94ae
|
/Reading_comprehension/BERT_MRC/dataset/args.py
|
99645936e1492645dfa2e08783d2b2fd2e85ae9c
|
[] |
no_license
|
shawroad/NLP_pytorch_project
|
fa14b6e4a156229765e1d552901d0492d8e1def3
|
1272fed2dc8fef78a9ded0f1ae1644d613a3b57b
|
refs/heads/master
| 2023-06-25T02:37:35.503251
| 2023-06-12T10:57:11
| 2023-06-12T10:57:11
| 229,694,655
| 530
| 104
| null | 2020-12-08T09:21:47
| 2019-12-23T06:54:29
|
Python
|
UTF-8
|
Python
| false
| false
| 978
|
py
|
args.py
|
"""
@file : args.py
@author: xiaolu
@time : 2020-03-03
"""
import torch
# Paths to the extracted datasets
search_input_file = "../data/extract/train/search.train.json"
zhidao_input_file = "../data/extract/train/zhidao.train.json"
dev_zhidao_input_file = "../data/extract/dev/zhidao.dev.json"
dev_search_input_file = "../data/extract/dev/search.dev.json"
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
seed = 42
max_seq_length = 512
max_query_length = 60
batch_size = 4
num_train_epochs = 4    # number of training epochs
gradient_accumulation_steps = 8    # gradient accumulation steps
# log_step = int(test_lines / batch_size / 4)    # how many validation runs per epoch, 4 by default
# output_dir = "./model_dir"
# predict_example_files = 'predict.data'
#
# max_para_num = 5    # number of documents selected for prediction
# learning_rate = 5e-5
# num_train_optimization_steps = int(test_lines / gradient_accumulation_steps / batch_size) * num_train_epochs
|
75326e2964f1fb034823b4c525d99a4fea97d827
|
f56a1c0d5af368c38bf4d05e0f251dc525c84ba4
|
/pulser-core/pulser/devices/__init__.py
|
3502506d90433aad45a9664a7c71a9af876589a8
|
[
"Apache-2.0"
] |
permissive
|
pasqal-io/Pulser
|
11197eb386057f4fbbd4ed254bd874fb134f5cd3
|
2315989dbfbf0e90a8701e9e7b537f18388b404a
|
refs/heads/develop
| 2023-08-30T13:38:50.553841
| 2023-08-29T13:07:10
| 2023-08-29T13:07:10
| 294,159,001
| 123
| 56
|
Apache-2.0
| 2023-09-14T16:16:56
| 2020-09-09T15:49:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,114
|
py
|
__init__.py
|
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Valid devices for Pulser Sequence execution."""
from __future__ import annotations
from typing import TYPE_CHECKING
from pulser.devices._device_datacls import Device, VirtualDevice
from pulser.devices._devices import AnalogDevice, Chadoq2, IroiseMVP
from pulser.devices._mock_device import MockDevice
# Registers which devices can be used to avoid definition of custom devices
_mock_devices: tuple[VirtualDevice, ...] = (MockDevice,)
_valid_devices: tuple[Device, ...] = (
Chadoq2,
IroiseMVP,
AnalogDevice,
)
|
ba80978786f356d5caecd507759219f808fb6223
|
79cd7118917561ab5b8d25f04143e0975578b74f
|
/pytorch_widedeep/bayesian_models/tabular/bayesian_mlp/_layers.py
|
13eaa91d448d3ec439ab7f5ff271982c407e09c7
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
jrzaurin/pytorch-widedeep
|
aac80263ba8e94d36b41fb1f47181a66471d7594
|
74f1ab6feb2e231fdb8c10478638d9e8d5cf3a47
|
refs/heads/master
| 2023-09-06T06:41:41.800801
| 2023-09-04T15:32:38
| 2023-09-04T15:32:38
| 107,763,164
| 1,036
| 124
|
Apache-2.0
| 2023-09-04T15:32:39
| 2017-10-21T08:11:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,686
|
py
|
_layers.py
|
from torch import nn
from pytorch_widedeep.wdtypes import List, Tensor
from pytorch_widedeep.bayesian_models import bayesian_nn as bnn
from pytorch_widedeep.models._get_activation_fn import get_activation_fn
class BayesianMLP(nn.Module):
def __init__(
self,
d_hidden: List[int],
activation: str,
use_bias: bool = True,
prior_sigma_1: float = 1.0,
prior_sigma_2: float = 0.002,
prior_pi: float = 0.8,
posterior_mu_init: float = 0.0,
posterior_rho_init: float = -7.0,
):
super(BayesianMLP, self).__init__()
self.d_hidden = d_hidden
self.activation = activation
act_fn = get_activation_fn(activation)
self.bayesian_mlp = nn.Sequential()
for i in range(1, len(d_hidden)):
bayesian_dense_layer = nn.Sequential(
*[
bnn.BayesianLinear(
d_hidden[i - 1],
d_hidden[i],
use_bias,
prior_sigma_1,
prior_sigma_2,
prior_pi,
posterior_mu_init,
posterior_rho_init,
),
# The activation of the output neuron(s) will happen
# inside the BayesianTrainer
act_fn if i != len(d_hidden) - 1 else nn.Identity(),
]
)
self.bayesian_mlp.add_module(
"bayesian_dense_layer_{}".format(i - 1), bayesian_dense_layer
)
def forward(self, X: Tensor) -> Tensor:
return self.bayesian_mlp(X)
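# Illustrative usage sketch (not part of the original module): build a small BayesianMLP
# and run a forward pass. The input width is d_hidden[0] because the layer loop above
# starts at index 1; "relu" is assumed here to be a name accepted by get_activation_fn.
def _demo_bayesian_mlp() -> Tensor:
    import torch
    mlp = BayesianMLP(d_hidden=[16, 8, 1], activation="relu")
    x = torch.rand(4, 16)  # batch of 4 samples, 16 features each
    return mlp(x)  # expected output shape: (4, 1)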
|
afa5f11b30e58ed1cc6e7cd8c7bcba861848263f
|
d1c2d00078520cd556f60b7213c27856f8b3460d
|
/sdks/python/apache_beam/transforms/display.py
|
b52a8fd5b6dd999cf681d150ba2b9b23bfbbdc61
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-protobuf",
"Apache-2.0",
"Python-2.0"
] |
permissive
|
apache/beam
|
ed11b9e043465c720659eac20ac71b5b171bfa88
|
6d5048e05087ea54abc889ce402ae2a0abb9252b
|
refs/heads/master
| 2023-09-04T07:41:07.002653
| 2023-09-01T23:01:05
| 2023-09-01T23:01:05
| 50,904,245
| 7,061
| 4,522
|
Apache-2.0
| 2023-09-14T21:43:38
| 2016-02-02T08:00:06
|
Java
|
UTF-8
|
Python
| false
| false
| 14,458
|
py
|
display.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
:class:`DisplayData`, its classes, interfaces and methods.
The classes in this module allow users and transform developers to define
static display data to be displayed when a pipeline runs.
:class:`~apache_beam.transforms.ptransform.PTransform` s,
:class:`~apache_beam.transforms.core.DoFn` s
and other pipeline components are subclasses of the :class:`HasDisplayData`
mixin. To add static display data to a component, you can override the
:meth:`HasDisplayData.display_data()` method.
Available classes:
* :class:`HasDisplayData` - Components that inherit from this class can have
static display data shown in the UI.
* :class:`DisplayDataItem` - This class represents static display data
elements.
* :class:`DisplayData` - Internal class that is used to create display data
and communicate it to the API.
"""
# pytype: skip-file
import calendar
import inspect
import json
from datetime import datetime
from datetime import timedelta
from typing import TYPE_CHECKING
from typing import List
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
if TYPE_CHECKING:
from apache_beam.options.pipeline_options import PipelineOptions
__all__ = ['HasDisplayData', 'DisplayDataItem', 'DisplayData']
class HasDisplayData(object):
""" Basic mixin for elements that contain display data.
It implements only the display_data method and a
_get_display_data_namespace method.
"""
def display_data(self):
# type: () -> dict
""" Returns the display data associated to a pipeline component.
It should be reimplemented in pipeline components that wish to have
static display data.
Returns:
Dict[str, Any]: A dictionary containing ``key:value`` pairs.
The value might be an integer, float or string value; a
:class:`DisplayDataItem` for values that have more data
(e.g. short value, label, url); or a :class:`HasDisplayData` instance
that has more display data that should be picked up. For example::
{
'key1': 'string_value',
'key2': 1234,
'key3': 3.14159265,
'key4': DisplayDataItem('apache.org', url='http://apache.org'),
'key5': subComponent
}
"""
return {}
def _get_display_data_namespace(self):
# type: () -> str
return '{}.{}'.format(self.__module__, self.__class__.__name__)
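# Illustrative sketch (not part of the original module): a component that overrides
# display_data(), as described in the docstrings above. The component name and values
# are invented for the example; real transforms return whatever metadata they want shown.
class _ExampleDisplayableComponent(HasDisplayData):
  def __init__(self, file_pattern, compression='auto'):
    self._file_pattern = file_pattern
    self._compression = compression

  def display_data(self):
    # Plain values and DisplayDataItem instances may be mixed freely.
    return {
        'file_pattern': self._file_pattern,
        'compression': DisplayDataItem(
            self._compression, label='Compression type').drop_if_default('auto'),
    }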
class DisplayData(object):
""" Static display data associated with a pipeline component.
"""
def __init__(
self,
namespace, # type: str
display_data_dict # type: dict
):
# type: (...) -> None
self.namespace = namespace
self.items = [] # type: List[DisplayDataItem]
self._populate_items(display_data_dict)
def _populate_items(self, display_data_dict):
""" Populates the list of display data items.
"""
for key, element in display_data_dict.items():
if isinstance(element, HasDisplayData):
subcomponent_display_data = DisplayData(
element._get_display_data_namespace(), element.display_data())
self.items += subcomponent_display_data.items
continue
if isinstance(element, DisplayDataItem):
if element.should_drop():
continue
element.key = key
element.namespace = self.namespace
self.items.append(element)
continue
# If it's not a HasDisplayData element,
# nor a dictionary, then it's a simple value
self.items.append(
DisplayDataItem(element, namespace=self.namespace, key=key))
def to_proto(self):
# type: (...) -> List[beam_runner_api_pb2.DisplayData]
"""Returns a List of Beam proto representation of Display data."""
def create_payload(dd):
display_data_dict = None
try:
display_data_dict = dd.get_dict()
except ValueError:
# Skip if the display data is invalid.
return None
# We use 'label' or 'key' properties to populate the 'label' attribute of
# 'LabelledPayload'. 'label' is a better choice since it's expected to be
# more human readable but some transforms, sources, etc. may not set a
# 'label' property when configuring DisplayData.
label = (
display_data_dict['label']
if 'label' in display_data_dict else display_data_dict['key'])
value = display_data_dict['value']
if isinstance(value, str):
return beam_runner_api_pb2.LabelledPayload(
label=label,
string_value=value,
key=display_data_dict['key'],
namespace=display_data_dict.get('namespace', ''))
elif isinstance(value, bool):
return beam_runner_api_pb2.LabelledPayload(
label=label,
bool_value=value,
key=display_data_dict['key'],
namespace=display_data_dict.get('namespace', ''))
elif isinstance(value, int):
return beam_runner_api_pb2.LabelledPayload(
label=label,
int_value=value,
key=display_data_dict['key'],
namespace=display_data_dict.get('namespace', ''))
elif isinstance(value, (float, complex)):
return beam_runner_api_pb2.LabelledPayload(
label=label,
double_value=value,
key=display_data_dict['key'],
namespace=display_data_dict.get('namespace', ''))
else:
raise ValueError(
'Unsupported type %s for value of display data %s' %
(type(value), label))
dd_protos = []
for dd in self.items:
dd_proto = create_payload(dd)
if dd_proto:
dd_protos.append(
beam_runner_api_pb2.DisplayData(
urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
payload=dd_proto.SerializeToString()))
return dd_protos
@classmethod
def create_from_options(cls, pipeline_options):
""" Creates :class:`~apache_beam.transforms.display.DisplayData` from a
:class:`~apache_beam.options.pipeline_options.PipelineOptions` instance.
When creating :class:`~apache_beam.transforms.display.DisplayData`, this
method will convert the value of any item of a non-supported type to its
string representation.
The normal :meth:`.create_from()` method rejects those items.
Returns:
~apache_beam.transforms.display.DisplayData:
A :class:`~apache_beam.transforms.display.DisplayData` instance with
populated items.
Raises:
      ValueError: If the **pipeline_options** argument is
        not an instance of :class:`PipelineOptions`.
"""
from apache_beam.options.pipeline_options import PipelineOptions
if not isinstance(pipeline_options, PipelineOptions):
raise ValueError(
'Element of class {}.{} does not subclass PipelineOptions'.format(
pipeline_options.__module__, pipeline_options.__class__.__name__))
items = {
k: (v if DisplayDataItem._get_value_type(v) is not None else str(v))
for k,
v in pipeline_options.display_data().items()
}
return cls(pipeline_options._get_display_data_namespace(), items)
@classmethod
def create_from(cls, has_display_data):
""" Creates :class:`~apache_beam.transforms.display.DisplayData` from a
:class:`HasDisplayData` instance.
Returns:
~apache_beam.transforms.display.DisplayData:
A :class:`~apache_beam.transforms.display.DisplayData` instance with
populated items.
Raises:
ValueError: If the **has_display_data** argument is
not an instance of :class:`HasDisplayData`.
"""
if not isinstance(has_display_data, HasDisplayData):
raise ValueError(
'Element of class {}.{} does not subclass HasDisplayData'.format(
has_display_data.__module__, has_display_data.__class__.__name__))
return cls(
has_display_data._get_display_data_namespace(),
has_display_data.display_data())
class DisplayDataItem(object):
""" A DisplayDataItem represents a unit of static display data.
Each item is identified by a key and the namespace of the component the
display item belongs to.
"""
typeDict = {
str: 'STRING',
int: 'INTEGER',
float: 'FLOAT',
bool: 'BOOLEAN',
timedelta: 'DURATION',
datetime: 'TIMESTAMP'
}
def __init__(
self,
value,
url=None,
label=None,
namespace=None,
key=None,
shortValue=None):
self.namespace = namespace
self.key = key
self.type = self._get_value_type(value)
self.shortValue = (
shortValue if shortValue is not None else self._get_short_value(
value, self.type))
self.value = value
self.url = url
self.label = label
self._drop_if_none = False
self._drop_if_default = False
def drop_if_none(self):
# type: () -> DisplayDataItem
""" The item should be dropped if its value is None.
Returns:
Returns self.
"""
self._drop_if_none = True
return self
def drop_if_default(self, default):
# type: (...) -> DisplayDataItem
""" The item should be dropped if its value is equal to its default.
Returns:
Returns self.
"""
self._default = default
self._drop_if_default = True
return self
def should_drop(self):
# type: () -> bool
""" Return True if the item should be dropped, or False if it should not
be dropped. This depends on the drop_if_none, and drop_if_default calls.
Returns:
True or False; depending on whether the item should be dropped or kept.
"""
if self._drop_if_none and self.value is None:
return True
if self._drop_if_default and self.value == self._default:
return True
return False
def is_valid(self):
# type: () -> None
""" Checks that all the necessary fields of the :class:`DisplayDataItem`
are filled in. It checks that neither key, namespace, value or type are
:data:`None`.
Raises:
ValueError: If the item does not have a key, namespace,
value or type.
"""
if self.key is None:
raise ValueError(
'Invalid DisplayDataItem %s. Key must not be None.' % self)
if self.namespace is None:
raise ValueError(
'Invalid DisplayDataItem %s. Namespace must not be None' % self)
if self.value is None:
raise ValueError(
'Invalid DisplayDataItem %s. Value must not be None' % self)
if self.type is None:
raise ValueError(
'Invalid DisplayDataItem. Value {} is of an unsupported type.'.format(
self.value))
def _get_dict(self):
res = {
'key': self.key,
'namespace': self.namespace,
'type': self.type if self.type != 'CLASS' else 'STRING'
}
# TODO: Python Class types should not be special-cased once
# the Fn API is in.
if self.url is not None:
res['url'] = self.url
if self.shortValue is not None:
res['shortValue'] = self.shortValue
if self.label is not None:
res['label'] = self.label
res['value'] = self._format_value(self.value, self.type)
return res
def get_dict(self):
# type: () -> dict
""" Returns the internal-API dictionary representing the
:class:`DisplayDataItem`.
Returns:
Dict[str, Any]: A dictionary. The internal-API dictionary representing
the :class:`DisplayDataItem`.
Raises:
ValueError: if the item is not valid.
"""
self.is_valid()
return self._get_dict()
def __repr__(self):
return 'DisplayDataItem({})'.format(json.dumps(self._get_dict()))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._get_dict() == other._get_dict()
return False
def __hash__(self):
return hash(tuple(sorted(self._get_dict().items())))
@classmethod
def _format_value(cls, value, type_):
""" Returns the API representation of a value given its type.
Args:
value: The value of the item that needs to be shortened.
type_(string): The type of the value.
Returns:
A formatted value in the form of a float, int, or string.
"""
res = value
if type_ == 'CLASS':
res = '{}.{}'.format(value.__module__, value.__name__)
elif type_ == 'DURATION':
res = value.total_seconds() * 1000
elif type_ == 'TIMESTAMP':
res = calendar.timegm(
value.timetuple()) * 1000 + value.microsecond // 1000
return res
@classmethod
def _get_short_value(cls, value, type_):
""" Calculates the short value for an item.
Args:
value: The value of the item that needs to be shortened.
type_(string): The type of the value.
Returns:
The unqualified name of a class if type_ is 'CLASS'. None otherwise.
"""
if type_ == 'CLASS':
return value.__name__
return None
@classmethod
def _get_value_type(cls, value):
""" Infers the type of a given value.
Args:
value: The value whose type needs to be inferred. For 'DURATION' and
'TIMESTAMP', the corresponding Python type is datetime.timedelta and
datetime.datetime respectively. For Python classes, the API type is
just 'STRING' at the moment.
Returns:
One of 'STRING', 'INTEGER', 'FLOAT', 'CLASS', 'DURATION', or
'TIMESTAMP', depending on the type of the value.
"""
#TODO: Fix Args: documentation once the Python classes handling has changed
type_ = cls.typeDict.get(type(value))
if type_ is None:
type_ = 'CLASS' if inspect.isclass(value) else None
if type_ is None and value is None:
type_ = 'STRING'
return type_
|
ed2b3b3d3ff7a018a71359322ae050c4bf03c7dc
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/metricsadvisor/azure-ai-metricsadvisor/samples/sample_feedback.py
|
ea6b4b9499626bde18c21cd5ff31449f40bb083c
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 5,053
|
py
|
sample_feedback.py
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_feedback.py
DESCRIPTION:
This sample demonstrates feedback operations.
USAGE:
python sample_feedback.py
Set the environment variables with your own values before running the sample:
1) METRICS_ADVISOR_ENDPOINT - the endpoint of your Azure Metrics Advisor service
2) METRICS_ADVISOR_SUBSCRIPTION_KEY - Metrics Advisor service subscription key
3) METRICS_ADVISOR_API_KEY - Metrics Advisor service API key
4) METRICS_ADVISOR_METRIC_ID - the ID of an metric from an existing data feed
5) METRICS_ADVISOR_FEEDBACK_ID - the ID of an existing feedback
"""
import os
import datetime
def sample_add_feedback():
# [START add_feedback]
from azure.ai.metricsadvisor import MetricsAdvisorKeyCredential, MetricsAdvisorClient
from azure.ai.metricsadvisor.models import AnomalyFeedback, ChangePointFeedback, CommentFeedback, PeriodFeedback
service_endpoint = os.getenv("METRICS_ADVISOR_ENDPOINT")
subscription_key = os.getenv("METRICS_ADVISOR_SUBSCRIPTION_KEY")
api_key = os.getenv("METRICS_ADVISOR_API_KEY")
metric_id = os.getenv("METRICS_ADVISOR_METRIC_ID")
client = MetricsAdvisorClient(service_endpoint,
MetricsAdvisorKeyCredential(subscription_key, api_key))
anomaly_feedback = AnomalyFeedback(metric_id=metric_id,
dimension_key={"category": "Shoes Handbags & Sunglasses"},
start_time=datetime.datetime(2021, 8, 5),
end_time=datetime.datetime(2021, 8, 7),
value="NotAnomaly")
client.add_feedback(anomaly_feedback)
change_point_feedback = ChangePointFeedback(metric_id=metric_id,
dimension_key={"category": "Shoes Handbags & Sunglasses"},
start_time=datetime.datetime(2021, 8, 5),
end_time=datetime.datetime(2021, 8, 7),
value="NotChangePoint")
client.add_feedback(change_point_feedback)
comment_feedback = CommentFeedback(metric_id=metric_id,
dimension_key={"category": "Shoes Handbags & Sunglasses"},
start_time=datetime.datetime(2021, 8, 5),
end_time=datetime.datetime(2021, 8, 7),
value="comment")
client.add_feedback(comment_feedback)
period_feedback = PeriodFeedback(metric_id=metric_id,
dimension_key={"category": "Shoes Handbags & Sunglasses"},
start_time=datetime.datetime(2021, 8, 5),
end_time=datetime.datetime(2021, 8, 7),
period_type="AssignValue",
value=2)
client.add_feedback(period_feedback)
# [END add_feedback]
def sample_get_feedback():
# [START get_feedback]
from azure.ai.metricsadvisor import MetricsAdvisorKeyCredential, MetricsAdvisorClient
service_endpoint = os.getenv("METRICS_ADVISOR_ENDPOINT")
subscription_key = os.getenv("METRICS_ADVISOR_SUBSCRIPTION_KEY")
api_key = os.getenv("METRICS_ADVISOR_API_KEY")
feedback_id = os.getenv("METRICS_ADVISOR_FEEDBACK_ID")
client = MetricsAdvisorClient(service_endpoint,
MetricsAdvisorKeyCredential(subscription_key, api_key))
result = client.get_feedback(feedback_id=feedback_id)
print("Type: {}; Id: {}".format(result.feedback_type, result.id))
# [END get_feedback]
def sample_list_feedback():
# [START list_feedback]
from azure.ai.metricsadvisor import MetricsAdvisorKeyCredential, MetricsAdvisorClient
service_endpoint = os.getenv("METRICS_ADVISOR_ENDPOINT")
subscription_key = os.getenv("METRICS_ADVISOR_SUBSCRIPTION_KEY")
api_key = os.getenv("METRICS_ADVISOR_API_KEY")
metric_id = os.getenv("METRICS_ADVISOR_METRIC_ID")
client = MetricsAdvisorClient(service_endpoint,
MetricsAdvisorKeyCredential(subscription_key, api_key))
results = client.list_feedback(metric_id=metric_id)
for result in results:
print("Type: {}; Id: {}".format(result.feedback_type, result.id))
# [END list_feedback]
if __name__ == '__main__':
print("---Creating feedback...")
sample_add_feedback()
print("Feedback successfully created...")
print("\n---Get a feedback...")
sample_get_feedback()
print("\n---List feedbacks...")
sample_list_feedback()
|
c318026c61546cf85182e7480124c9f2f953d332
|
99833651e4a6a0bc1221d577d9fc43b8568abedd
|
/nltk_contrib/tiger/query/tsqlparser.py
|
6bb73d6825b63ce3bc5a0b8e6bbd5b6df2acac58
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
nltk/nltk_contrib
|
689e2683aa01b120c7473b9a4fc50bc49f014390
|
95d1806e2f4e89e960b76a685b1fba2eaa7d5142
|
refs/heads/master
| 2023-07-31T13:32:47.358897
| 2022-11-21T18:49:33
| 2022-11-21T18:49:33
| 2,530,774
| 145
| 127
|
NOASSERTION
| 2022-11-21T18:49:34
| 2011-10-07T05:59:13
|
Python
|
UTF-8
|
Python
| false
| false
| 16,332
|
py
|
tsqlparser.py
|
# -*- coding: utf-8 -*-
# Copyright © 2007-2008 Stockholm TreeAligner Project
# Author: Torsten Marek <shlomme@gmx.net>
# Licensed under the GNU GPLv2
"""Defines the grammar and parser class for the TigerSearch query language.
For a complete description, see the `TigerSearch Manual`_.
The grammar will create an abstract syntax tree from a TigerSearch query.
Please see the unit test and the `nltk_contrib.tiger.query.ast` module for
the nodes and the structure of the ASTs.
If applicable, all fragments contain information about their respective AST node type,
some example strings and the named results introduced by the fragment in question. This
is not the full list of named results returned by the fragment, which can be obtained
by examining which fragments are used in the expression builder in question.
The grammar does not check semantic correctness of the query, e.g. :
* valid predicate names
* correct parameter types for predicates, as long as they are a node operand or
an integer literal.
* type-correctness for variable references
* conflicts in feature constraints (e.g. ``[cat="NP"&"NN"]``)
* conflicts in relations (e.g. ``#a > #b & #b > #a``)
* nonsensical distance modifiers (e.g. ``#a >5,1 #b``)
All these checks are done by the query builder and evaluator functions.
The only normalization done in the parser is that ``cat!="NE"``
is turned into ``cat=!"NE"``.
.. _`TigerSearch Manual` : http://tinyurl.com/2jm24u
"""
import pyparsing
from nltk_contrib.tiger.graph import NodeType
from nltk_contrib.tiger.query import ast
from nltk_contrib.tiger.query.exceptions import TigerSyntaxError
__all__ = ["TsqlParser"]
# enable memoizing support in parser (speeds up parsing)
pyparsing.ParserElement.enablePackrat()
# convenience functions
NUMBER = pyparsing.Word(pyparsing.nums)
WORD = pyparsing.Word(pyparsing.alphas)
def single_value_holder(cls, conv = lambda n: n):
"""Creates a parse action that invokes `cls` with the first parse result.
`conv` can be used to convert the parse result before invoking `cls`.
"""
return lambda s, l, t: cls(conv(t[0]))
def suppressed_literal(s):
"""Creates a suppressed literal string `s`."""
return pyparsing.Literal(s).suppress()
def boolean_expr(atom):
"""Creates a boolean expression grammar out of an expression `atom`.
A boolean expression can contain the following operators, ordered by binding power:
* negation: ``!term``
* conjunction: ``term & term``
* disjunction: ``term | term``
and can have parentheses for grouping.
"""
ops = [
(suppressed_literal(u"!"), 1, pyparsing.opAssoc.RIGHT,
lambda s, l, t: ast.Negation(t[0][0])),
(suppressed_literal(u"&"), 2, pyparsing.opAssoc.LEFT,
lambda s, l, t: ast.Conjunction(t.asList()[0])),
(suppressed_literal(u"|"), 2, pyparsing.opAssoc.LEFT,
lambda s, l, t: ast.Disjunction(t.asList()[0]))]
return pyparsing.operatorPrecedence(atom, ops)
def surround(left, expr, right):
"""Circumfixes the expression `expr` with `left` and `right`.
Both `left` and `right` will be turned into suppressed literals.
*Parameters*:
`left`
the left part of the circumfix
`expr`
the grammar expression to be circumfixed
`right`
the right part of the circumfix
"""
return suppressed_literal(left) + expr + suppressed_literal(right)
def integer_literal():
"""Defines an expression for an integer literals.
:AST Node: `IntegerLiteral`
:Example: ``12345``
"""
return NUMBER.setParseAction(single_value_holder(ast.IntegerLiteral, int))
def string_literal():
"""Defines an expression for string literals.
A string literal can be enclosed in single (') or double (") quotes and can contain
escaped characters using "\\".
:AST Node: `StringLiteral`
:Example: ``"word"``
"""
string = (pyparsing.QuotedString("'", escChar = "\\") |
pyparsing.QuotedString('"', escChar = "\\"))
return string.setParseAction(single_value_holder(ast.StringLiteral))
def regex_literal():
    """Defines an expression for regular expression literals.
:AST Node: `RegexLiteral`
:Example: ``/a+b+/``
"""
regex = pyparsing.QuotedString("/")
return regex.setParseAction(single_value_holder(ast.RegexLiteral))
def variable_name(type_prefixes):
"""Defines a reusable expression for all variable names.
A variable name can only contain ASCII alphanumeric characters and the
underscore character, and must start with one of the characters listed in
in the dictionary `type_prefixes` (by default only "#"). The value in
`type_prefixes` determines the container type of the variable.
:Named Results:
- `varname`: the variable name
"""
assert all(len(pfx) == 1 for pfx in type_prefixes), "prefix list may only contain characters"
v_expr = pyparsing.Combine(pyparsing.oneOf(type_prefixes.keys()) +
pyparsing.Word(pyparsing.alphanums + "_")).setResultsName("varname")
v_expr.type_map = type_prefixes
return v_expr
def variable_reference(variable_expr, variable_type):
"""Defines an expression for variable references of type `variable_type`.
See `nltk_contrib.tiger.query.ast.VariableTypes` for the list of variable types.
:AST Node: `VariableReference`
:Example: ``#a``
"""
return variable_expr.setParseAction(
lambda s, l, t: ast.VariableReference(ast.Variable(
t.varname, variable_type, variable_expr.type_map[t.varname[0]])))
def variable_definition(variable_expr, variable_type, right_hand):
"""Defines an expression for variable definitions of type `variable_type`.
The referent expression is `right_hand`, and `variable_expr` contains the expression
for variable names.
:AST Node: `Variable`
:Example: ``#a:...``
:Named Results:
- `expr`: the right-hand side of the definition
"""
definition = (variable_expr + suppressed_literal(u":") +
right_hand.setResultsName("expr"))
return definition.setParseAction(
lambda s, l, t: ast.VariableDefinition(
ast.Variable(t.varname, variable_type, variable_expr.type_map[t.varname[0]]), t.expr))
def feature_value():
"""Defines an expression for the right hand side in feature constraints.
A feature constraint can be a boolean expression of string and regex literals.
:Example: ``"NE"``, ``"NE"|"NN"``, ``/h.*/ & !"haus"``
"""
return boolean_expr(string_literal() | regex_literal())
FEATURE_VALUE = feature_value()
VAR_PREFIXES = {"#": ast.ContainerTypes.Single}
def feature_record():
"""Defines an expression for feature records.
Valid feature records:
* ``T``: all terminals
* ``NT``: all nonterminals
:AST Node: `FeatureRecord:
:Example: ``T``, ``NT``
"""
return pyparsing.oneOf("T NT").setParseAction(
single_value_holder(ast.FeatureRecord, lambda v: NodeType.fromkey(v[0])))
def feature_constraint():
"""Defines a boolean expression for feature constraint.
A feature constraint is a feature name, a match operator and a
feature value expression.
Match operators:
    * ``=`` (equality)
* ``!=`` (inequality)
If the match operator is `!=`, the feature value expression will be wrapped inside
a `Negation` AST node.
:AST Node: `FeatureConstraint`
:Example: ``cat="NP"``, ``pos!=/N+/``, ``word="safe" & pos="NN"``
"""
op = pyparsing.oneOf(u"= !=")
v = FEATURE_VALUE
constraint = (WORD + op + v)
constraint.setParseAction(lambda s, l, t: ast.FeatureConstraint(t[0], t[2]) if t[1] == "="
else ast.FeatureConstraint(t[0], ast.Negation(t[2])))
return boolean_expr(constraint | feature_record())
FEATURE_CONSTRAINT = feature_constraint()
def node_description():
"""Defines an expression for node descriptions.
Node descriptions can either be a boolean expression of list of constraints, a
feature constraint variable definition or reference or a feature record.
:AST Node: `NodeDescription`
:Example: ``[pos="PREP" & word=("vor"|"vorm")]``, ``[T]``, ``[#a:(word = "safe")]``, ``[#b]``
"""
node_desc = surround(u"[", FEATURE_CONSTRAINT, u"]")
return node_desc.setParseAction(single_value_holder(ast.NodeDescription))
NODE_DESCRIPTION = node_description()
NODE_VAR_PREFIXES = {"#": ast.ContainerTypes.Single,
"%": ast.ContainerTypes.Set }
def node_variable_def():
"""Defines an expression for node variable definitions.
Node variables have the type `VariableTypes.NodeIdentifier`
:AST Node: `VariableReference`
:Example: ``#n1:[pos = "PREP" & word = ("vor", "vorm")]``
"""
return variable_definition(variable_name(NODE_VAR_PREFIXES),
ast.VariableTypes.NodeIdentifier, NODE_DESCRIPTION)
def node_variable_ref():
"""Defines an expression for node variable references.
Node variables have the type `VariableTypes.NodeIdentifier`
:AST Node: `VariableReference`
:Example: ``#n1``
"""
return variable_reference(variable_name(NODE_VAR_PREFIXES), ast.VariableTypes.NodeIdentifier)
def node_operand():
"""Defines an expression for node operands in predicate functions or constraints.
An operand can be a node variable definition, reference or a node description itself.
"""
return (node_variable_def() | node_variable_ref() | NODE_DESCRIPTION)
NODE_OPERAND = node_operand()
class ConstraintModifiers(object):
"""A class that contains all possible constraint modifiers.
Each modifier `MOD` is stored as a named result `res`.
Modifiers:
`NEGATION` : `negated`
the ! symbol before a constraint operator
`TRANSITIVE` : `indirect`
the * symbol after an operator
`DISTANCE` : `mindist` and `maxdist`
a single or a pair of integers after an operator
`EDGE_LABEL` : `label`
a string after a dominance operator
"""
NEGATION = pyparsing.Optional(pyparsing.Literal("!")).setResultsName("negated")
TRANSITIVE = pyparsing.Literal("*").setResultsName("indirect")
DISTANCE = (NUMBER("mindist") + pyparsing.Optional(suppressed_literal(",") + NUMBER("maxdist")))
EDGE_LABEL = WORD("label")
def operator_symbol(s, ast_node_class):
"""Defines an operator symbol `s`.
:Named Results:
- `op_class`: the AST class of the operator
"""
return pyparsing.Literal(s).setResultsName("op_class")\
.setParseAction(lambda s, l, t: ast_node_class)
def dominance_operator():
"""Defines an expression for dominance operators.
    All variants of the dominance operator can be negated.
:AST Node: `DominanceOperator`
:Example: ``>``, ``>*``, ``>L``, ``>n``, ``>n,m``
"""
return (ConstraintModifiers.NEGATION +
operator_symbol(">", ast.DominanceOperator) +
pyparsing.Optional(ConstraintModifiers.EDGE_LABEL |
ConstraintModifiers.TRANSITIVE |
ConstraintModifiers.DISTANCE))
def corner_operator():
"""Defines an expression for left- and rightmost terminal successors (corners).
All variants of the corner dominance operator can be negated.
:AST Node: `CornerDominance`
:Example: ``>@l``, ``>@r``
:Named Results:
- `corner`: the corner, either ``l`` or ``r``
"""
return (ConstraintModifiers.NEGATION +
operator_symbol(">@", ast.CornerOperator) +
pyparsing.oneOf("l r").setResultsName("corner"))
def precedence_operator():
    """Defines an expression for precedence operators.
All variants can be negated.
:AST Node: `PrecedenceOperator`
:Example: ``.``, ``.*``, ``.n``, ``.n,m``
"""
return (ConstraintModifiers.NEGATION +
operator_symbol(".", ast.PrecedenceOperator) +
pyparsing.Optional(ConstraintModifiers.TRANSITIVE |
ConstraintModifiers.DISTANCE))
def sec_edge_operator():
"""Defines an expression for secondary edge dominance operators.
All variants can be negated.
:AST Node: `SecEdgeOperator`
:Example: ``>~``, ``>~L``
"""
return (ConstraintModifiers.NEGATION +
operator_symbol(">~", ast.SecEdgeOperator) +
pyparsing.Optional(ConstraintModifiers.EDGE_LABEL))
def sibling_operator():
"""Defines an expression for sibling operators.
    The ``$.*`` operator for siblings with precedence cannot be negated.
:AST Node: `SiblingOperator`
:Example: ``$.*``, ``$``
"""
return ((operator_symbol("$", ast.SiblingOperator) +
pyparsing.Optional(pyparsing.Literal(".*").setResultsName("ordered"))) |
(ConstraintModifiers.NEGATION + operator_symbol("$", ast.SiblingOperator)))
def node_relation_constraint():
"""Defines an expression for node relation constraints.
Please see the documentation of the operator AST nodes.
:Example: ``[cat="NP"] > #a``
:Named Results:
- `leftOperand`: the node operand on the left side
- `operator` the operator symbol, without modifiers
- `rightOperand` the node operand on the right side
"""
constraint_op = pyparsing.Group(sec_edge_operator() | corner_operator() | dominance_operator() |
precedence_operator() | sibling_operator())
constraint = (NODE_OPERAND.setResultsName("leftOperand") +
constraint_op.setResultsName("operator") +
NODE_OPERAND.setResultsName("rightOperand"))
return constraint.setParseAction(lambda s, l, t: t.operator.op_class.create(t))
def node_predicate():
"""Defines an expression for node predicates.
A node predicate is a function name and a parenthesized list of node operands or
integer literals. The list of supported predicates is not part of the parser.
:AST Node: `Predicate`
    :Example: ``root(#a)``, ``arity([cat="NP"], 5)``
:Named Results:
- `pred`: the name of the predicate
- `args`: the list of arguments, either node operands or integer literals
"""
arg = (NODE_OPERAND | integer_literal()).setResultsName("args", listAllMatches = True)
identifier = WORD("pred")
return (identifier + surround(u"(", pyparsing.delimitedList(arg), u")")
).setParseAction(lambda s, l, t: ast.Predicate(t.pred, t.args.asList()))
# the complete query language
def tsql_grammar():
"""Defines the expression for the complete TigerSearch query language.
A query term is either a node operand, a node relation constraint or node predicate. An
expression can be a single term or a conjunction of terms.
    Top-level disjunctions are currently not supported, because they can always be
    represented by negations in the relations and node descriptions.
The returned expression must match the whole input string.
:AST Node: `TsqlExpression`
    :Example: ``#a:[cat="NP"] & root(#a) & #a > [word="safe"]``
"""
atom = (node_predicate() | node_relation_constraint() | NODE_OPERAND)
expr = pyparsing.Group(atom + pyparsing.OneOrMore(suppressed_literal(u"&") + atom)
).setParseAction(lambda s, l, t: ast.Conjunction(t.asList()[0])) | atom
expr.setParseAction(single_value_holder(ast.TsqlExpression))
return expr + pyparsing.StringEnd()
class TsqlParser(object):
"""A simple façade for the PyParsing TSQL grammar."""
def __init__(self):
self._g = tsql_grammar()
def parse_query(self, query_string):
"""Parses a query string and returns the AST.
If the string cannot be parsed, a `TigerSyntaxError` will be raised.
"""
try:
return self._g.parseString(query_string)[0]
        except pyparsing.ParseException as e:
            raise TigerSyntaxError(e)
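def _example_parse():
    """Illustrative usage only (not part of the original module): parse a simple query
    and return the AST root. Predicate names are not validated here; that is left to
    the query builder and evaluator, as noted in the module docstring."""
    parser = TsqlParser()
    return parser.parse_query(u'#a:[cat="NP"] & root(#a)')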
|
e51f4c33a3da22e7acc17b393f3146f8a93853be
|
14efffc101321421dea2bc696e931738c1dff8c0
|
/test/e2e/toggle-scaledown.py
|
d23acd2e0cdf41e348587e6808e1d39326b2a604
|
[
"MIT"
] |
permissive
|
zalando-incubator/kubernetes-on-aws
|
c85bb2f62ac0f754b0db034866cc68759e7a431d
|
5904517d74b78401e453b58590539824d1a0407f
|
refs/heads/dev
| 2023-09-04T23:00:29.601571
| 2023-09-04T16:48:40
| 2023-09-04T16:48:40
| 70,882,616
| 642
| 175
|
MIT
| 2023-09-14T16:24:35
| 2016-10-14T06:55:28
|
Go
|
UTF-8
|
Python
| false
| false
| 1,796
|
py
|
toggle-scaledown.py
|
#!/usr/bin/env python3
import argparse
import json
import subprocess
def toggle_scaledown(enabled):
current = json.loads(
subprocess.check_output(
["kubectl", "get", "daemonset", "-o", "json", "-n", "kube-system", "kube-cluster-autoscaler"]
).decode("utf-8")
)
for i, container in enumerate(current["spec"]["template"]["spec"]["containers"]):
if container["name"] == "cluster-autoscaler":
command = container["command"]
updated_arg = "--scale-down-enabled={}".format("true" if enabled else "false")
updated_command = [updated_arg if "scale-down-enabled" in arg else arg for arg in command]
if command != updated_command:
patch = [
{
"op": "replace",
"path": "/spec/template/spec/containers/{}/command".format(i),
"value": updated_command,
}
]
subprocess.check_call(
[
"kubectl",
"patch",
"daemonset",
"-n",
"kube-system",
"kube-cluster-autoscaler",
"--type=json",
"-p",
json.dumps(patch),
]
)
def main():
parser = argparse.ArgumentParser(description="Enable or disable scale-down.")
parser.add_argument(
"action", help="Whether scale-down should be enabled or disabled.", choices=["enable", "disable"]
)
args = parser.parse_args()
enabled = args.action == "enable"
toggle_scaledown(enabled)
if __name__ == "__main__":
main()
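# Example invocation (illustrative): disable scale-down before a disruptive e2e test
# and re-enable it afterwards. The script patches the kube-cluster-autoscaler daemonset
# in the kube-system namespace, as implemented above.
#
#   ./toggle-scaledown.py disable
#   ./toggle-scaledown.py enable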
|
b3a6c566a2b92a0a57704f6ba187fecf3fa3b9c0
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/pydev/pydev_tests_python/resources/_debugger_case_unhandled_exception_get_stack.py
|
e013693c8764ae6d8eab4d4e6d65e44a76e7903c
|
[
"Apache-2.0",
"EPL-1.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
_debugger_case_unhandled_exception_get_stack.py
|
from contextlib import contextmanager
@contextmanager
def something():
yield
with something():
raise ValueError('TEST SUCEEDED') # break line on unhandled exception
print('a')
print('b')
|
bdcca135c78843f7729a9c50090dec26808d24cc
|
1f297a6fabc562cc57bc8f67cbd3c8074839e0ad
|
/python数据结构与算法练习/singal.py
|
0e55853dd02560c5f3552f98b495cf244f3462df
|
[] |
no_license
|
imtiantian/Python_Awesome_Interview
|
22cbef021cbdb76233bcd8246f8ef583c305ecfa
|
be786f31bce583739a33535a63bce5c8d3968d6b
|
refs/heads/master
| 2023-02-03T17:37:05.528093
| 2023-02-02T06:08:08
| 2023-02-02T06:08:08
| 82,277,513
| 205
| 98
| null | 2023-02-02T06:08:10
| 2017-02-17T08:57:09
|
Python
|
UTF-8
|
Python
| false
| false
| 323
|
py
|
singal.py
|
class Node:
    def __init__(self):
        self.data = None
        self.nextNode = None
    def set_and_return_Next(self):
        self.nextNode = Node()
        return self.nextNode
    def getNext(self):
        return self.nextNode
    def getData(self):
        return self.data
    def setData(self, d):
        self.data = d
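# Minimal usage sketch (added for illustration): chain two nodes and walk the list.
def _demo_nodes():
    head = Node()
    head.setData("a")
    second = head.set_and_return_Next()
    second.setData("b")
    return head.getData(), head.getNext().getData()  # ("a", "b")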
|
42297e8406327579f0f62a116b94846a5fd4473f
|
abe704eb3b53944cdd6505f922f58558e334c589
|
/tools/cmake_utilities/scripts/relinker/configuration.py
|
d744d0a83fda3d97bc1c2edac80d1340156a2e12
|
[
"Apache-2.0"
] |
permissive
|
espressif/esp-iot-solution
|
c25079eb26a8f54ddacd23689a7288533c721916
|
2227f0ca21ab37df9cf9d74182fa84c52dffc76f
|
refs/heads/master
| 2023-09-01T15:06:17.000154
| 2023-08-31T10:26:21
| 2023-08-31T10:26:21
| 99,690,601
| 1,612
| 758
|
Apache-2.0
| 2023-09-08T08:43:59
| 2017-08-08T12:32:16
|
C
|
UTF-8
|
Python
| false
| false
| 6,406
|
py
|
configuration.py
|
#!/usr/bin/env python3
#
# SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import argparse
import csv
import os
import subprocess
import sys
import re
from io import StringIO
OPT_MIN_LEN = 7
espidf_objdump = None
espidf_missing_function_info = True
class sdkconfig_c:
def __init__(self, path):
lines = open(path).read().splitlines()
config = dict()
for l in lines:
if len(l) > OPT_MIN_LEN and l[0] != '#':
mo = re.match( r'(.*)=(.*)', l, re.M|re.I)
if mo:
config[mo.group(1)]=mo.group(2).replace('"', '')
self.config = config
def index(self, i):
return self.config[i]
def check(self, options):
options = options.replace(' ', '')
if '&&' in options:
for i in options.split('&&'):
if i[0] == '!':
i = i[1:]
if i in self.config:
return False
else:
if i not in self.config:
return False
else:
i = options
if i[0] == '!':
i = i[1:]
if i in self.config:
return False
else:
if i not in self.config:
return False
return True
class object_c:
    def read_dump_info(self, paths):
new_env = os.environ.copy()
new_env['LC_ALL'] = 'C'
dumps = list()
        print('paths:', paths)
        for path in paths:
try:
dump = StringIO(subprocess.check_output([espidf_objdump, '-t', path], env=new_env).decode())
dumps.append(dump.readlines())
except subprocess.CalledProcessError as e:
raise RuntimeError('cmd:%s result:%s'%(e.cmd, e.returncode))
return dumps
def get_func_section(self, dumps, func):
for dump in dumps:
for l in dump:
if ' %s'%(func) in l and '*UND*' not in l:
m = re.match(r'(\S*)\s*([glw])\s*([F|O])\s*(\S*)\s*(\S*)\s*(\S*)\s*', l, re.M|re.I)
if m and m[6] == func:
return m[4].replace('.text.', '')
if espidf_missing_function_info:
print('%s failed to find section'%(func))
return None
else:
raise RuntimeError('%s failed to find section'%(func))
    def __init__(self, name, paths, library):
        self.name = name
        self.library = library
        self.funcs = dict()
        self.paths = paths
        self.dumps = self.read_dump_info(paths)
def append(self, func):
section = self.get_func_section(self.dumps, func)
if section != None:
self.funcs[func] = section
def functions(self):
nlist = list()
for i in self.funcs:
nlist.append(i)
return nlist
def sections(self):
nlist = list()
for i in self.funcs:
nlist.append(self.funcs[i])
return nlist
class library_c:
def __init__(self, name, path):
self.name = name
self.path = path
self.objs = dict()
def append(self, obj, path, func):
if obj not in self.objs:
self.objs[obj] = object_c(obj, path, self.name)
self.objs[obj].append(func)
class libraries_c:
def __init__(self):
self.libs = dict()
def append(self, lib, lib_path, obj, obj_path, func):
if lib not in self.libs:
self.libs[lib] = library_c(lib, lib_path)
self.libs[lib].append(obj, obj_path, func)
def dump(self):
for libname in self.libs:
lib = self.libs[libname]
for objname in lib.objs:
obj = lib.objs[objname]
                print('%s, %s, %s, %s'%(libname, objname, obj.paths, obj.funcs))
class paths_c:
def __init__(self):
self.paths = dict()
def append(self, lib, obj, path):
if '$IDF_PATH' in path:
path = path.replace('$IDF_PATH', os.environ['IDF_PATH'])
if lib not in self.paths:
self.paths[lib] = dict()
if obj not in self.paths[lib]:
self.paths[lib][obj] = list()
self.paths[lib][obj].append(path)
def index(self, lib, obj):
if lib not in self.paths:
return None
if '*' in self.paths[lib]:
obj = '*'
return self.paths[lib][obj]
def generator(library_file, object_file, function_file, sdkconfig_file, missing_function_info, objdump='riscv32-esp-elf-objdump'):
global espidf_objdump, espidf_missing_function_info
espidf_objdump = objdump
espidf_missing_function_info = missing_function_info
sdkconfig = sdkconfig_c(sdkconfig_file)
lib_paths = paths_c()
for p in csv.DictReader(open(library_file, 'r')):
lib_paths.append(p['library'], '*', p['path'])
obj_paths = paths_c()
for p in csv.DictReader(open(object_file, 'r')):
obj_paths.append(p['library'], p['object'], p['path'])
libraries = libraries_c()
for d in csv.DictReader(open(function_file, 'r')):
if d['option'] and sdkconfig.check(d['option']) == False:
print('skip %s(%s)'%(d['function'], d['option']))
continue
lib_path = lib_paths.index(d['library'], '*')
obj_path = obj_paths.index(d['library'], d['object'])
if not obj_path:
obj_path = lib_path
libraries.append(d['library'], lib_path[0], d['object'], obj_path, d['function'])
return libraries
def main():
argparser = argparse.ArgumentParser(description='Libraries management')
argparser.add_argument(
'--library', '-l',
help='Library description file',
type=str)
argparser.add_argument(
'--object', '-b',
help='Object description file',
type=str)
argparser.add_argument(
'--function', '-f',
help='Function description file',
type=str)
argparser.add_argument(
'--sdkconfig', '-s',
help='sdkconfig file',
type=str)
args = argparser.parse_args()
libraries = generator(args.library, args.object, args.function, args.sdkconfig)
# libraries.dump()
if __name__ == '__main__':
main()
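# Illustrative input format (not part of the original script): the three CSV files
# consumed by generator() are keyed by the column names used above; paths may contain
# $IDF_PATH, which paths_c expands from the environment. The values below are made up.
#
#   library.csv:   library,path
#                  libfoo.a,$IDF_PATH/components/foo/libfoo.a
#   object.csv:    library,object,path
#                  libfoo.a,foo.o,build/esp-idf/foo/foo.o
#   function.csv:  library,object,function,option
#                  libfoo.a,foo.o,foo_do_work,CONFIG_FOO_ENABLE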
|
34120d38ac9e7454642439c80ff86ea5d5ad1d7b
|
9c8e06301f6559a106b805dfe0b372ad2e6bc4d8
|
/scripts/get_prerelease_version.py
|
431fda2a6f3c246c631bafc77368982748b82e58
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"WTFPL",
"MIT",
"MPL-2.0",
"MPL-1.1",
"CC0-1.0",
"GPL-2.0-only",
"AFL-2.1",
"CC-BY-3.0",
"Unlicense"
] |
permissive
|
streamlit/streamlit
|
beecb89300d6f219f3a43ed328f22d3656243f26
|
4f45c18a4323a796440d651ba77b5eb29409cb2b
|
refs/heads/develop
| 2023-09-06T06:22:40.853489
| 2023-09-04T13:53:20
| 2023-09-04T13:53:20
| 204,086,862
| 27,877
| 2,739
|
Apache-2.0
| 2023-09-14T19:08:39
| 2019-08-24T00:14:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,032
|
py
|
get_prerelease_version.py
|
#!/usr/bin/env python
# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculate the next pre-release semver based on the target version and store in a version file.
This has a small hack to get the pipeline on CircleCI to work properly. The command may run
multiple times, so this file is idempotent for each release build. To accomplish this, we store the
version into a file on the first run. Any subsequent calls will return the file contents if the file
exists instead of recalculating the release version.
- If a version exists in the version file, return the file contents.
- If the current version is the same as the target version, increment the pre-release only
- If the current version is less than the target version, first update the version to match, then
increment the pre-release version
A few examples:
- Target: 1.6.0
Current: 1.5.1
Output: 1.6.0-rc1
- Target: 1.6.0
Current: 1.6.0-rc1
Output: 1.6.0-rc2
"""
import fileinput
import os
import re
import sys
import semver
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
VERSION_FILE = ".prerelease-version"
def get_current_version():
"""Retrieve the current version by searching for a matching regex ('VERSION=') in setup.py"""
filename = os.path.join(BASE_DIR, "lib/setup.py")
regex = r"(?P<pre>.*VERSION = \")(.*)(?P<post>\" # PEP-440$)"
pattern = re.compile(regex)
for line in fileinput.input(filename):
match = pattern.match(line.rstrip())
if match:
return match.groups()[1]
raise Exception('Did not find regex "%s" for version in setup.py' % (regex))
def main():
if os.path.exists(VERSION_FILE):
with open(VERSION_FILE) as f:
print(f.read())
return
if len(sys.argv) != 2:
raise Exception(
'Specify target version as an argument: "%s 1.2.3"' % sys.argv[0]
)
target_version = semver.VersionInfo.parse(sys.argv[1])
# Ensure that current version is semver-compliant (it's stored as PEP440-compliant in setup.py)
current_version = semver.VersionInfo.parse(
get_current_version().replace("rc", "-rc.")
)
if current_version.finalize_version() < target_version:
current_version = target_version
new_version = str(current_version.bump_prerelease())
with open(VERSION_FILE, "w") as f:
f.write(new_version)
print(new_version)
if __name__ == "__main__":
main()
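# A minimal sketch of the bump logic above, using the `semver` package
# (interactive values are illustrative):
#
#   >>> import semver
#   >>> str(semver.VersionInfo.parse("1.6.0").bump_prerelease())
#   '1.6.0-rc.1'
#   >>> str(semver.VersionInfo.parse("1.6.0-rc.1").bump_prerelease())
#   '1.6.0-rc.2'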
|
923e074893467d1f793b05771117da7061a8c3a7
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/sensibo/conftest.py
|
b2798224b148a02e0ac779148c507f1b3b8d925f
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,306
|
py
|
conftest.py
|
"""Fixtures for the Sensibo integration."""
from __future__ import annotations
import json
from typing import Any
from unittest.mock import patch
from pysensibo import SensiboClient
from pysensibo.model import SensiboData
import pytest
from homeassistant.components.sensibo.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.core import HomeAssistant
from . import ENTRY_CONFIG
from tests.common import MockConfigEntry, load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
@pytest.fixture
async def load_int(hass: HomeAssistant, get_data: SensiboData) -> MockConfigEntry:
"""Set up the Sensibo integration in Home Assistant."""
config_entry = MockConfigEntry(
domain=DOMAIN,
source=SOURCE_USER,
data=ENTRY_CONFIG,
entry_id="1",
unique_id="username",
version=2,
)
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices",
return_value={"result": [{"id": "xyzxyz"}, {"id": "abcabc"}]},
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_me",
return_value={"result": {"username": "username"}},
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
return config_entry
@pytest.fixture(name="get_data")
async def get_data_from_library(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, load_json: dict[str, Any]
) -> SensiboData:
"""Retrieve data from upstream Sensibo library."""
client = SensiboClient("123467890", aioclient_mock.create_session(hass.loop))
with patch("pysensibo.SensiboClient.async_get_devices", return_value=load_json):
output = await client.async_get_devices_data()
await client._session.close()
return output
@pytest.fixture(name="load_json", scope="session")
def load_json_from_fixture() -> dict[str, Any]:
"""Load fixture with json data and return."""
data_fixture = load_fixture("data.json", "sensibo")
json_data: dict[str, Any] = json.loads(data_fixture)
return json_data
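# A hypothetical test using these fixtures might look like the sketch below
# (ConfigEntryState is an assumption imported for illustration, not part of
# this file):
#
#   from homeassistant.config_entries import ConfigEntryState
#
#   async def test_loaded(hass: HomeAssistant, load_int: MockConfigEntry) -> None:
#       """The mocked Sensibo entry should finish setup in the LOADED state."""
#       assert load_int.state is ConfigEntryState.LOADED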
|
0a025c663112470c633e81883f15326914e92610
|
65bb11770578babd76ffe83e6a6ff7c6d7c0f614
|
/test/mockupdb/test_mongos_command_read_mode.py
|
62bd76cf0f00868a32442b0610915c0be250bedf
|
[
"Apache-2.0"
] |
permissive
|
mongodb/mongo-python-driver
|
51e4672dd00488d571a70b196e55067daabdf6f8
|
f2867a9abf277e664b46d3d30dd380e6ae77c9d4
|
refs/heads/master
| 2023-08-31T17:55:32.242839
| 2023-08-29T20:40:15
| 2023-08-29T20:40:15
| 108,051
| 3,303
| 989
|
Apache-2.0
| 2023-09-14T21:40:03
| 2009-01-15T15:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 4,206
|
py
|
test_mongos_command_read_mode.py
|
# Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import unittest
from mockupdb import MockupDB, OpMsg, going
from operations import operations # type: ignore[import]
from pymongo import MongoClient, ReadPreference
from pymongo.read_preferences import (
_MONGOS_MODES,
make_read_preference,
read_pref_mode_from_name,
)
class TestMongosCommandReadMode(unittest.TestCase):
def test_aggregate(self):
server = MockupDB()
server.autoresponds(
"ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6
)
self.addCleanup(server.stop)
server.run()
client = MongoClient(server.uri)
self.addCleanup(client.close)
collection = client.test.collection
with going(collection.aggregate, []):
command = server.receives(aggregate="collection", pipeline=[])
self.assertFalse(command.slave_ok, "SlaveOkay set")
command.ok(result=[{}])
secondary_collection = collection.with_options(read_preference=ReadPreference.SECONDARY)
with going(secondary_collection.aggregate, []):
command = server.receives(
OpMsg(
{
"aggregate": "collection",
"pipeline": [],
"$readPreference": {"mode": "secondary"},
}
)
)
command.ok(result=[{}])
self.assertTrue(command.slave_ok, "SlaveOkay not set")
def create_mongos_read_mode_test(mode, operation):
def test(self):
server = MockupDB()
self.addCleanup(server.stop)
server.run()
server.autoresponds(
"ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6
)
pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None)
client = MongoClient(server.uri, read_preference=pref)
self.addCleanup(client.close)
with going(operation.function, client):
request = server.receive()
request.reply(operation.reply)
if operation.op_type == "always-use-secondary":
self.assertEqual(ReadPreference.SECONDARY.document, request.doc.get("$readPreference"))
slave_ok = mode != "primary"
elif operation.op_type == "must-use-primary":
slave_ok = False
elif operation.op_type == "may-use-secondary":
slave_ok = mode != "primary"
actual_pref = request.doc.get("$readPreference")
if mode == "primary":
self.assertIsNone(actual_pref)
else:
self.assertEqual(pref.document, actual_pref)
else:
self.fail("unrecognized op_type %r" % operation.op_type)
if slave_ok:
self.assertTrue(request.slave_ok, "SlaveOkay not set")
else:
self.assertFalse(request.slave_ok, "SlaveOkay set")
return test
def generate_mongos_read_mode_tests():
matrix = itertools.product(_MONGOS_MODES, operations)
for entry in matrix:
mode, operation = entry
if mode == "primary" and operation.op_type == "always-use-secondary":
# Skip something like command('foo', read_preference=SECONDARY).
continue
test = create_mongos_read_mode_test(mode, operation)
test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode)
test.__name__ = test_name
setattr(TestMongosCommandReadMode, test_name, test)
generate_mongos_read_mode_tests()
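# For instance, with mode "secondary" and an operation named "find" the loop
# above attaches a method called test_find_with_mode_secondary to
# TestMongosCommandReadMode (the operation names themselves come from the
# external `operations` module, so this example is illustrative).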
if __name__ == "__main__":
unittest.main()
|
5bcf1289ee3c86f2c57cec8ba8468961519e1637
|
23652304566b1869ca65b95b116ee43d16e134f3
|
/h/models/user.py
|
80547740a5d5f917bbf5f0511dff1f03ef7596ab
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
hypothesis/h
|
29399a26990856c336b05022e827541dd8aeedab
|
232446d776fdb906d2fb253cf0a409c6813a08d6
|
refs/heads/main
| 2023-08-30T16:21:33.754658
| 2023-08-30T09:26:50
| 2023-08-30T09:40:48
| 3,910,945
| 2,558
| 452
|
BSD-2-Clause
| 2023-09-14T11:25:06
| 2012-04-02T19:56:59
|
Python
|
UTF-8
|
Python
| false
| false
| 10,733
|
py
|
user.py
|
import datetime
import re
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from h.db import Base
from h.exceptions import InvalidUserId
from h.util.user import split_user
USERNAME_MIN_LENGTH = 3
USERNAME_MAX_LENGTH = 30
USERNAME_PATTERN = "(?i)^[A-Z0-9._]+$"
EMAIL_MAX_LENGTH = 100
DISPLAY_NAME_MAX_LENGTH = 30
def _normalise_username(username):
    # We normalise usernames by stripping dots and lowercasing, in order to
    # discourage impersonation attempts via near-identical names.
    return sa.func.lower(sa.func.replace(username, sa.text("'.'"), sa.text("''")))
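# For example, "Juan.Wood", "juan.wood" and "juanwood" all normalise to
# "juanwood", so only one of them can exist per authority.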
class UsernameComparator(Comparator): # pylint: disable=abstract-method
"""
Custom comparator for :py:attr:`~h.models.user.User.username`.
This ensures that all lookups against the username property, such as
session.query(User).filter_by(username='juanwood')
use the normalised username for the lookup and appropriately normalise the
RHS of the query. This means that a query like the one above will
correctly find a user with a username of "Juan.Wood", for example.
"""
def operate(self, op, other, **kwargs): # pylint: disable=arguments-differ
return op(
_normalise_username(self.__clause_element__()),
_normalise_username(other),
**kwargs,
)
class UserIDComparator(Comparator): # pylint: disable=abstract-method
"""
Custom comparator for :py:attr:`~h.models.user.User.userid`.
A user's userid is a compound property which depends on their username
and their authority. A naive comparator for this property would generate
SQL like the following:
... WHERE 'acct:' || username || '@' || authority = ...
This would be slow, due to the lack of an index on the LHS expression.
While we could add a functional index on this expression, we can also take
advantage of the existing index on (normalised_username, authority), which
is what this comparator does.
A query such as
session.query(User).filter_by(userid='acct:luis.silva@example.com')
will instead generate
WHERE
(lower(replace(username, '.', '')), authority ) =
(lower(replace('luis.silva', '.', '')), 'example.com')
"""
def __init__(self, username, authority):
super().__init__(sa.tuple_(_normalise_username(username), authority))
def __eq__(self, other):
"""
Compare the userid for equality with `other`.
`other` can be anything plausibly on the RHS of a comparison, which
can include other SQL clause elements or expressions, as in
User.userid == sa.tuple_(User.username, Group.authority)
or literals, as in
User.userid == 'acct:miruna@example.com'
We treat the literal case specially, and split the string into
username and authority ourselves. If the string is not a well-formed
userid, the comparison will always return False.
"""
if isinstance(other, str):
try:
val = split_user(other)
except InvalidUserId:
# The value being compared isn't a valid userid
return False
other = sa.tuple_(_normalise_username(val["username"]), val["domain"])
return self.expression == other
def in_(self, userids): # pylint: disable=arguments-renamed
others = []
for userid in userids:
try:
val = split_user(userid)
except InvalidUserId:
continue
other = sa.tuple_(_normalise_username(val["username"]), val["domain"])
others.append(other)
if not others:
return False
return self.expression.in_(others)
class User(Base):
__tablename__ = "user"
@declared_attr
def __table_args__(cls): # pylint:disable=no-self-argument
return (
# (email, authority) must be unique
sa.UniqueConstraint("email", "authority"),
# (normalised username, authority) must be unique. This index is
# also critical for making user lookups fast.
sa.Index(
"ix__user__userid",
_normalise_username(cls.username),
cls.authority,
unique=True,
),
# Optimize lookup of shadowbanned users.
sa.Index(
"ix__user__nipsa", cls.nipsa, postgresql_where=cls.nipsa.is_(True)
),
)
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
#: Username as chosen by the user on registration
_username = sa.Column("username", sa.UnicodeText(), nullable=False)
#: The "authority" for this user. This represents the "namespace" in which
#: this user lives. By default, all users are created in the namespace
#: corresponding to `request.domain`, but this can be overridden with the
#: `h.authority` setting.
authority = sa.Column("authority", sa.UnicodeText(), nullable=False)
#: The display name which will be used when rendering an annotation.
display_name = sa.Column(sa.UnicodeText())
#: A short user description/bio
description = sa.Column(sa.UnicodeText())
#: A free-form column to allow the user to say where they are
location = sa.Column(sa.UnicodeText())
#: The user's URI/link on the web
uri = sa.Column(sa.UnicodeText())
#: The user's ORCID ID
orcid = sa.Column(sa.UnicodeText())
#: Is this user an admin member?
admin = sa.Column(
sa.Boolean,
default=False,
nullable=False,
server_default=sa.sql.expression.false(),
)
#: Is this user a staff member?
staff = sa.Column(
sa.Boolean,
nullable=False,
default=False,
server_default=sa.sql.expression.false(),
)
#: Is this user flagged as "Not (Suitable) In Public Site Areas" (AKA
#: NIPSA). This flag is used to shadow-ban a user so their annotations
#: don't appear to anyone but themselves.
nipsa = sa.Column(
sa.Boolean,
nullable=False,
default=False,
server_default=sa.sql.expression.false(),
)
sidebar_tutorial_dismissed = sa.Column(
sa.Boolean, default=False, server_default=(sa.sql.expression.false())
)
#: A timestamp representing the last time the user accepted the privacy policy.
#: A NULL value in this column indicates the user has never accepted a privacy policy.
privacy_accepted = sa.Column(sa.DateTime, nullable=True)
# Has the user opted-in for news etc.
comms_opt_in = sa.Column(sa.Boolean, nullable=True)
identities = sa.orm.relationship(
"UserIdentity", backref="user", cascade="all, delete-orphan"
)
@hybrid_property
def username(self):
return self._username
@username.setter
def username(self, value):
self._username = value
@username.comparator
def username(cls): # pylint:disable=no-self-argument
return UsernameComparator(cls._username)
@hybrid_property
def userid(self):
return f"acct:{self.username}@{self.authority}"
@userid.comparator
def userid(cls): # pylint: disable=no-self-argument
return UserIDComparator(cls.username, cls.authority)
email = sa.Column(sa.UnicodeText())
last_login_date = sa.Column(sa.TIMESTAMP(timezone=False), nullable=True)
registered_date = sa.Column(
sa.TIMESTAMP(timezone=False),
default=datetime.datetime.utcnow,
server_default=sa.func.now(),
nullable=False,
)
activation_date = sa.Column(sa.TIMESTAMP(timezone=False), nullable=True)
# Activation foreign key
activation_id = sa.Column(sa.Integer, sa.ForeignKey("activation.id"))
activation = sa.orm.relationship("Activation", backref="user")
    @property
    def is_activated(self):
        return self.activation_id is None
def activate(self):
"""Activate the user by deleting any activation they have."""
session = sa.orm.object_session(self)
self.activation_date = datetime.datetime.utcnow()
session.delete(self.activation)
#: Hashed password
password = sa.Column(sa.UnicodeText(), nullable=True)
#: Last password update
password_updated = sa.Column(sa.DateTime(), nullable=True)
#: Password salt
#:
#: N.B. This field is DEPRECATED. The password context we use already
#: manages the generation of a random salt when hashing a password and we
#: don't need a separate salt column. This remains for "legacy" passwords
#: which were, sadly, double-salted. As users log in, we are slowly
#: upgrading their passwords and setting this column to None.
salt = sa.Column(sa.UnicodeText(), nullable=True)
@sa.orm.validates("email")
def validate_email(self, _key, email):
if email is None:
return email
if len(email) > EMAIL_MAX_LENGTH:
raise ValueError(
f"email must be less than {EMAIL_MAX_LENGTH} characters long"
)
return email
@sa.orm.validates("_username")
def validate_username(self, _key, username):
if not USERNAME_MIN_LENGTH <= len(username) <= USERNAME_MAX_LENGTH:
raise ValueError(
f"username must be between {USERNAME_MIN_LENGTH} and {USERNAME_MAX_LENGTH} "
"characters long"
)
if not re.match(USERNAME_PATTERN, username):
raise ValueError(
"username must have only letters, numbers, periods, and underscores."
)
return username
@classmethod
def get_by_email(cls, session, email, authority):
"""Fetch a user by email address."""
if email is None:
return None
return (
session.query(cls)
.filter(
sa.func.lower(cls.email) == email.lower(), cls.authority == authority
)
.first()
)
@classmethod
def get_by_activation(cls, session, activation):
"""Fetch a user by activation instance."""
user = session.query(cls).filter(cls.activation_id == activation.id).first()
return user
@classmethod
def get_by_username(cls, session, username, authority):
"""Fetch a user by username."""
return (
session.query(cls)
.filter(cls.username == username, cls.authority == authority)
.first()
)
def __repr__(self):
return f"<User: {self.username}>"
|
95a35bac2702cdaf6e8d73e6ac0eda394913d77a
|
4deb45c2aad0f6530c79abf35a4cb74c6c55f164
|
/examples/minimal.py
|
231586db5eaa6211762b74fcd783b13e66f076e2
|
[
"MIT"
] |
permissive
|
ERGO-Code/HiGHS
|
cd4e94a9db7ec82f8aeab756c69864b7a8190d98
|
9e519472fb3e630552178d8196b54e9b58dab27d
|
refs/heads/master
| 2023-09-04T20:30:12.901207
| 2023-07-05T21:29:49
| 2023-07-05T21:29:49
| 126,994,758
| 610
| 148
|
MIT
| 2023-09-14T09:57:08
| 2018-03-27T13:57:58
|
C++
|
UTF-8
|
Python
| false
| false
| 555
|
py
|
minimal.py
|
import highspy
import numpy as np
inf = highspy.kHighsInf
h = highspy.Highs()
h.addVars(2, np.array([-inf, -inf]), np.array([inf, inf]))
h.changeColsCost(2, np.array([0, 1]), np.array([0, 1], dtype=np.double))
num_cons = 2
lower = np.array([2, 0], dtype=np.double)
upper = np.array([inf, inf], dtype=np.double)
num_new_nz = 4
starts = np.array([0, 2])
indices = np.array([0, 1, 0, 1])
values = np.array([-1, 1, 1, 1], dtype=np.double)
h.addRows(num_cons, lower, upper, num_new_nz, starts, indices, values)
h.setOptionValue('log_to_console', True)
h.run()
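# The model above minimises x1 subject to -x0 + x1 >= 2 and x0 + x1 >= 0,
# whose optimum is x = (-1, 1). Illustrative follow-up (not part of the
# original snippet), reading the solution back via highspy:
solution = h.getSolution()
print('col_value =', list(solution.col_value))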
|
e11fbfdb7be78926e050dfaeec46152bc98c88db
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/python_modules/libraries/dagster-pagerduty/dagster_pagerduty/resources.py
|
e124b8a8cee3775f0b46c3a225252fc3a8e10410
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 7,939
|
py
|
resources.py
|
from typing import Dict, Optional, cast
import pypd
from dagster import ConfigurableResource, resource
from dagster._config.pythonic_config import infer_schema_from_config_class
from dagster._core.definitions.resource_definition import dagster_maintained_resource
from dagster._utils.warnings import suppress_dagster_warnings
from pydantic import Field as PyField
class PagerDutyService(ConfigurableResource):
    """This resource is for posting events to PagerDuty.

    Integrates with PagerDuty via the pypd library. See:

        https://v2.developer.pagerduty.com/docs/events-api-v2
        https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2
        https://support.pagerduty.com/docs/services-and-integrations#section-events-api-v2
        https://github.com/PagerDuty/pagerduty-api-python-client

    for documentation and more information.
    """
routing_key: str = PyField(
...,
        description=(
            "The routing key provisions access to your PagerDuty service. You"
            " will need to include the integration key for your new integration,"
            " as a routing_key in the event payload."
        ),
)
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
def EventV2_create(
self,
summary: str,
source: str,
severity: str,
event_action: str = "trigger",
dedup_key: Optional[str] = None,
timestamp: Optional[str] = None,
component: Optional[str] = None,
group: Optional[str] = None,
event_class: Optional[str] = None,
custom_details: Optional[object] = None,
) -> object:
"""Events API v2 enables you to add PagerDuty's advanced event and incident management
functionality to any system that can make an outbound HTTP connection.
Args:
summary (str):
A high-level, text summary message of the event. Will be used to construct an
alert's description. Example:
"PING OK - Packet loss = 0%, RTA = 1.41 ms" "Host
'acme-andromeda-sv1-c40 :: 179.21.24.50' is DOWN"
source (str):
Specific human-readable unique identifier, such as a hostname, for the system having
the problem. Examples:
"prod05.theseus.acme-widgets.com"
"171.26.23.22"
"aws:elasticache:us-east-1:852511987:cluster/api-stats-prod-003"
"9c09acd49a25"
severity (str):
How impacted the affected system is. Displayed to users in lists and influences the
priority of any created incidents. Must be one of {info, warning, error, critical}
Keyword Args:
event_action (str):
There are three types of events that PagerDuty recognizes, and are used to represent
different types of activity in your monitored systems. (default: 'trigger')
* trigger: When PagerDuty receives a trigger event, it will either open a new alert,
or add a new trigger log entry to an existing alert, depending on the
provided dedup_key. Your monitoring tools should send PagerDuty a trigger
when a new problem has been detected. You may send additional triggers
when a previously detected problem has occurred again.
* acknowledge: acknowledge events cause the referenced incident to enter the
acknowledged state. While an incident is acknowledged, it won't
generate any additional notifications, even if it receives new
trigger events. Your monitoring tools should send PagerDuty an
acknowledge event when they know someone is presently working on the
problem.
* resolve: resolve events cause the referenced incident to enter the resolved state.
Once an incident is resolved, it won't generate any additional
notifications. New trigger events with the same dedup_key as a resolved
incident won't re-open the incident. Instead, a new incident will be
created. Your monitoring tools should send PagerDuty a resolve event when
the problem that caused the initial trigger event has been fixed.
dedup_key (str):
Deduplication key for correlating triggers and resolves. The maximum permitted
length of this property is 255 characters.
timestamp (str):
Timestamp (ISO 8601). When the upstream system detected / created the event. This is
useful if a system batches or holds events before sending them to PagerDuty. This
will be auto-generated by PagerDuty if not provided. Example:
2015-07-17T08:42:58.315+0000
component (str):
The part or component of the affected system that is broken. Examples:
"keepalive"
"webping"
"mysql"
"wqueue"
group (str):
A cluster or grouping of sources. For example, sources "prod-datapipe-02" and
"prod-datapipe-03" might both be part of "prod-datapipe". Examples:
"prod-datapipe"
"www"
"web_stack"
event_class (str):
The class/type of the event. Examples:
"High CPU"
"Latency"
"500 Error"
custom_details (Dict[str, str]):
Additional details about the event and affected system. Example:
{"ping time": "1500ms", "load avg": 0.75 }
"""
data = {
"routing_key": self.routing_key,
"event_action": event_action,
"payload": {"summary": summary, "source": source, "severity": severity},
}
if dedup_key is not None:
data["dedup_key"] = dedup_key
payload: Dict[str, object] = cast(Dict[str, object], data["payload"])
if timestamp is not None:
payload["timestamp"] = timestamp
if component is not None:
payload["component"] = component
if group is not None:
payload["group"] = group
if event_class is not None:
payload["class"] = event_class
if custom_details is not None:
payload["custom_details"] = custom_details
return pypd.EventV2.create(data=data)
@dagster_maintained_resource
@resource(
config_schema=infer_schema_from_config_class(PagerDutyService),
description="""This resource is for posting events to PagerDuty.""",
)
@suppress_dagster_warnings
def pagerduty_resource(context) -> PagerDutyService:
"""A resource for posting events (alerts) to PagerDuty.
Example:
.. code-block:: python
@op
def pagerduty_op(pagerduty: PagerDutyService):
pagerduty.EventV2_create(
summary='alert from dagster',
source='localhost',
severity='error',
event_action='trigger',
)
@job(resource_defs={ 'pagerduty': pagerduty_resource })
def pagerduty_test():
pagerduty_op()
pagerduty_test.execute_in_process(
run_config={
"resources": {
'pagerduty': {'config': {'routing_key': '0123456789abcdef0123456789abcdef'}}
}
}
)
"""
return PagerDutyService(**context.resource_config)
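# With the newer Pythonic resource API the same service can be bound directly;
# a hedged sketch (assuming dagster's Definitions API, and reusing the
# pagerduty_test job from the docstring example above):
#
#   from dagster import Definitions
#
#   defs = Definitions(
#       jobs=[pagerduty_test],
#       resources={"pagerduty": PagerDutyService(routing_key="<integration key>")},
#   )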
|
3b955755d11ee5025eea77cfa2fc4c91e1fc7ed1
|
92141e59ff8f6d923769b1cbfeb8413c069deb5f
|
/pyupgrade/_token_helpers.py
|
3d28bc7e0a2a013bb8905b8c4aabe57c10e48341
|
[
"MIT"
] |
permissive
|
asottile/pyupgrade
|
de70614c2f8be3693a864a185ef255754fc77587
|
83350b641a97064541c9a02792986eba4edd4ae2
|
refs/heads/main
| 2023-08-25T00:12:16.172350
| 2023-08-22T13:08:14
| 2023-08-22T13:08:14
| 83,462,592
| 2,802
| 211
|
MIT
| 2023-09-12T20:06:22
| 2017-02-28T17:50:31
|
Python
|
UTF-8
|
Python
| false
| false
| 14,118
|
py
|
_token_helpers.py
|
from __future__ import annotations
import ast
import keyword
from typing import NamedTuple
from typing import Sequence
from tokenize_rt import NON_CODING_TOKENS
from tokenize_rt import Token
from tokenize_rt import tokens_to_src
from tokenize_rt import UNIMPORTANT_WS
_OPENING = frozenset('([{')
_CLOSING = frozenset(')]}')
KEYWORDS = frozenset(keyword.kwlist)
def immediately_paren(func: str, tokens: list[Token], i: int) -> bool:
return tokens[i].src == func and tokens[i + 1].src == '('
class Victims(NamedTuple):
starts: list[int]
ends: list[int]
first_comma_index: int | None
arg_index: int
def is_open(token: Token) -> bool:
return token.name == 'OP' and token.src in _OPENING
def is_close(token: Token) -> bool:
return token.name == 'OP' and token.src in _CLOSING
def _find_token(tokens: list[Token], i: int, name: str, src: str) -> int:
while not tokens[i].matches(name=name, src=src):
i += 1
return i
def find_name(tokens: list[Token], i: int, src: str) -> int:
return _find_token(tokens, i, 'NAME', src)
def find_op(tokens: list[Token], i: int, src: str) -> int:
return _find_token(tokens, i, 'OP', src)
def find_end(tokens: list[Token], i: int) -> int:
while tokens[i].name != 'NEWLINE':
i += 1
return i + 1
def _arg_token_index(tokens: list[Token], i: int, arg: ast.expr) -> int:
offset = (arg.lineno, arg.col_offset)
while tokens[i].offset != offset:
i += 1
i += 1
while tokens[i].name in NON_CODING_TOKENS:
i += 1
return i
def victims(
tokens: list[Token],
start: int,
arg: ast.expr,
gen: bool,
) -> Victims:
starts = [start]
start_depths = [1]
ends: list[int] = []
first_comma_index = None
arg_depth = None
arg_index = _arg_token_index(tokens, start, arg)
depth = 1
i = start + 1
while depth:
is_start_brace = is_open(tokens[i])
is_end_brace = is_close(tokens[i])
if i == arg_index:
arg_depth = depth
if is_start_brace:
depth += 1
# Remove all braces before the first element of the inner
# comprehension's target.
if is_start_brace and arg_depth is None:
start_depths.append(depth)
starts.append(i)
if (
tokens[i].matches(name='OP', src=',') and
depth == arg_depth and
first_comma_index is None
):
first_comma_index = i
if is_end_brace and depth in start_depths:
if tokens[i - 2].src == ',' and tokens[i - 1].src == ' ':
ends.extend((i - 2, i - 1, i))
elif tokens[i - 1].src == ',':
ends.extend((i - 1, i))
else:
ends.append(i)
if depth > 1 and tokens[i + 1].src == ',':
ends.append(i + 1)
if is_end_brace:
depth -= 1
i += 1
# May need to remove a trailing comma for a comprehension
if gen:
i -= 2
while tokens[i].name in NON_CODING_TOKENS:
i -= 1
if tokens[i].src == ',':
ends.append(i)
return Victims(starts, sorted(set(ends)), first_comma_index, arg_index)
def find_closing_bracket(tokens: list[Token], i: int) -> int:
assert tokens[i].src in _OPENING
depth = 1
i += 1
while depth:
if is_open(tokens[i]):
depth += 1
elif is_close(tokens[i]):
depth -= 1
i += 1
return i - 1
def find_block_start(tokens: list[Token], i: int) -> int:
depth = 0
while depth or not tokens[i].matches(name='OP', src=':'):
if is_open(tokens[i]):
depth += 1
elif is_close(tokens[i]):
depth -= 1
i += 1
return i
class Block(NamedTuple):
start: int
colon: int
block: int
end: int
line: bool
def _initial_indent(self, tokens: list[Token]) -> int:
if tokens[self.start].src.isspace():
return len(tokens[self.start].src)
else:
return 0
def _minimum_indent(self, tokens: list[Token]) -> int:
block_indent = None
for i in range(self.block, self.end):
if (
tokens[i - 1].name in ('NL', 'NEWLINE') and
tokens[i].name in ('INDENT', UNIMPORTANT_WS) and
# comments can have arbitrary indentation so ignore them
tokens[i + 1].name != 'COMMENT'
):
token_indent = len(tokens[i].src)
if block_indent is None:
block_indent = token_indent
else:
block_indent = min(block_indent, token_indent)
assert block_indent is not None
return block_indent
def dedent(self, tokens: list[Token]) -> None:
if self.line:
return
initial_indent = self._initial_indent(tokens)
diff = self._minimum_indent(tokens) - initial_indent
for i in range(self.block, self.end):
if (
tokens[i - 1].name in ('DEDENT', 'NL', 'NEWLINE') and
tokens[i].name in ('INDENT', UNIMPORTANT_WS)
):
# make sure we preserve *at least* the initial indent
s = tokens[i].src
s = s[:initial_indent] + s[initial_indent + diff:]
tokens[i] = tokens[i]._replace(src=s)
def replace_condition(self, tokens: list[Token], new: list[Token]) -> None:
start = self.start
while tokens[start].name == 'UNIMPORTANT_WS':
start += 1
tokens[start:self.colon] = new
def _trim_end(self, tokens: list[Token]) -> Block:
"""the tokenizer reports the end of the block at the beginning of
the next block
"""
i = last_token = self.end - 1
while tokens[i].name in NON_CODING_TOKENS | {'DEDENT', 'NEWLINE'}:
# if we find an indented comment inside our block, keep it
if (
tokens[i].name in {'NL', 'NEWLINE'} and
tokens[i + 1].name == UNIMPORTANT_WS and
len(tokens[i + 1].src) > self._initial_indent(tokens)
):
break
# otherwise we've found another line to remove
elif tokens[i].name in {'NL', 'NEWLINE'}:
last_token = i
i -= 1
return self._replace(end=last_token + 1)
@classmethod
def find(
cls,
tokens: list[Token],
i: int,
trim_end: bool = False,
) -> Block:
if i > 0 and tokens[i - 1].name in {'INDENT', UNIMPORTANT_WS}:
i -= 1
start = i
colon = find_block_start(tokens, i)
j = colon + 1
while (
tokens[j].name != 'NEWLINE' and
tokens[j].name in NON_CODING_TOKENS
):
j += 1
if tokens[j].name == 'NEWLINE': # multi line block
block = j + 1
while tokens[j].name != 'INDENT':
j += 1
level = 1
j += 1
while level:
level += {'INDENT': 1, 'DEDENT': -1}.get(tokens[j].name, 0)
j += 1
ret = cls(start, colon, block, j, line=False)
if trim_end:
return ret._trim_end(tokens)
else:
return ret
else: # single line block
block = j
j = find_end(tokens, j)
return cls(start, colon, block, j, line=True)
def _is_on_a_line_by_self(tokens: list[Token], i: int) -> bool:
return (
tokens[i - 2].name == 'NL' and
tokens[i - 1].name == UNIMPORTANT_WS and
tokens[i + 1].name == 'NL'
)
def remove_brace(tokens: list[Token], i: int) -> None:
if _is_on_a_line_by_self(tokens, i):
del tokens[i - 1:i + 2]
else:
del tokens[i]
def remove_base_class(i: int, tokens: list[Token]) -> None:
# look forward and backward to find commas / parens
brace_stack = []
j = i
while tokens[j].src not in {',', ':'}:
if tokens[j].src == ')':
brace_stack.append(j)
j += 1
right = j
if tokens[right].src == ':':
brace_stack.pop()
else:
# if there's a close-paren after a trailing comma
j = right + 1
while tokens[j].name in NON_CODING_TOKENS:
j += 1
if tokens[j].src == ')':
while tokens[j].src != ':':
j += 1
right = j
if brace_stack:
last_part = brace_stack[-1]
else:
last_part = i
j = i
while brace_stack:
if tokens[j].src == '(':
brace_stack.pop()
j -= 1
while tokens[j].src not in {',', '('}:
j -= 1
left = j
# single base, remove the entire bases
if tokens[left].src == '(' and tokens[right].src == ':':
del tokens[left:right]
# multiple bases, base is first
elif tokens[left].src == '(' and tokens[right].src != ':':
# if there's space / comment afterwards remove that too
while tokens[right + 1].name in {UNIMPORTANT_WS, 'COMMENT'}:
right += 1
del tokens[left + 1:right + 1]
# multiple bases, base is not first
else:
del tokens[left:last_part + 1]
def remove_decorator(i: int, tokens: list[Token]) -> None:
while tokens[i - 1].src != '@':
i -= 1
if i > 1 and tokens[i - 2].name not in {'NEWLINE', 'NL'}:
i -= 1
end = i + 1
while tokens[end].name != 'NEWLINE':
end += 1
del tokens[i - 1:end + 1]
def parse_call_args(
tokens: list[Token],
i: int,
) -> tuple[list[tuple[int, int]], int]:
args = []
depth = 1
i += 1
arg_start = i
while depth:
if depth == 1 and tokens[i].src == ',':
args.append((arg_start, i))
arg_start = i + 1
elif is_open(tokens[i]):
depth += 1
elif is_close(tokens[i]):
depth -= 1
# if we're at the end, append that argument
if not depth and tokens_to_src(tokens[arg_start:i]).strip():
args.append((arg_start, i))
i += 1
return args, i
def arg_str(tokens: list[Token], start: int, end: int) -> str:
return tokens_to_src(tokens[start:end]).strip()
def _arg_contains_newline(tokens: list[Token], start: int, end: int) -> bool:
while tokens[start].name in {'NL', 'NEWLINE', UNIMPORTANT_WS}:
start += 1
for i in range(start, end):
if tokens[i].name in {'NL', 'NEWLINE'}:
return True
else:
return False
def replace_call(
tokens: list[Token],
start: int,
end: int,
args: list[tuple[int, int]],
tmpl: str,
*,
parens: Sequence[int] = (),
) -> None:
arg_strs = [arg_str(tokens, *arg) for arg in args]
for paren in parens:
arg_strs[paren] = f'({arg_strs[paren]})'
# there are a few edge cases which cause syntax errors when the first
# argument contains newlines (especially when moved outside of a natural
# continuation context)
if _arg_contains_newline(tokens, *args[0]) and 0 not in parens:
# this attempts to preserve more of the whitespace by using the
# original non-stripped argument string
arg_strs[0] = f'({tokens_to_src(tokens[slice(*args[0])])})'
start_rest = args[0][1] + 1
while (
start_rest < end and
tokens[start_rest].name in {'COMMENT', UNIMPORTANT_WS}
):
start_rest += 1
# Remove trailing comma
end_rest = end - 1
if tokens[end_rest - 1].matches(name='OP', src=','):
end_rest -= 1
rest = tokens_to_src(tokens[start_rest:end_rest])
src = tmpl.format(args=arg_strs, rest=rest)
tokens[start:end] = [Token('CODE', src)]
def find_and_replace_call(
i: int,
tokens: list[Token],
*,
template: str,
parens: tuple[int, ...] = (),
) -> None:
j = find_op(tokens, i, '(')
func_args, end = parse_call_args(tokens, j)
replace_call(tokens, i, end, func_args, template, parens=parens)
def replace_name(i: int, tokens: list[Token], *, name: str, new: str) -> None:
# preserve token offset in case we need to match it later
new_token = tokens[i]._replace(name='CODE', src=new)
j = i
while not tokens[j].matches(name='NAME', src=name):
# timid: if we see a parenthesis here, skip it
if tokens[j].src == ')':
return
j += 1
tokens[i:j + 1] = [new_token]
def delete_argument(
i: int, tokens: list[Token],
func_args: Sequence[tuple[int, int]],
) -> None:
if i == 0:
# delete leading whitespace before next token
end_idx, _ = func_args[i + 1]
while tokens[end_idx].name == 'UNIMPORTANT_WS':
end_idx += 1
del tokens[func_args[i][0]:end_idx]
else:
del tokens[func_args[i - 1][1]:func_args[i][1]]
def replace_argument(
i: int,
tokens: list[Token],
func_args: Sequence[tuple[int, int]],
*,
new: str,
) -> None:
start_idx, end_idx = func_args[i]
# don't replace leading whitespace / newlines
while tokens[start_idx].name in {'UNIMPORTANT_WS', 'NL'}:
start_idx += 1
tokens[start_idx:end_idx] = [Token('SRC', new)]
def has_space_before(i: int, tokens: list[Token]) -> bool:
return i >= 1 and tokens[i - 1].name in {UNIMPORTANT_WS, 'INDENT'}
def indented_amount(i: int, tokens: list[Token]) -> str:
if i == 0:
return ''
elif has_space_before(i, tokens):
if i >= 2 and tokens[i - 2].name in {'NL', 'NEWLINE', 'DEDENT'}:
return tokens[i - 1].src
else: # inline import
raise ValueError('not at beginning of line')
elif tokens[i - 1].name not in {'NL', 'NEWLINE', 'DEDENT'}:
raise ValueError('not at beginning of line')
else:
return ''
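# A small sketch of how these helpers compose, assuming tokenize_rt's
# src_to_tokens (output values are illustrative but checked by hand):
#
#   from tokenize_rt import src_to_tokens
#
#   tokens = src_to_tokens('f(1, (2, 3))\n')
#   i = find_op(tokens, 0, '(')             # opening paren of the call
#   args, end = parse_call_args(tokens, i)  # two top-level arguments
#   [arg_str(tokens, *a) for a in args]     # -> ['1', '(2, 3)']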
|
23cf086d41958d4ad6a3c5533ab0f36f50c8e8ff
|
e45c6f36a065b6a44e873a773428105de4d3758e
|
/bases/br_cgu_servidores_executivo_federal/code_/settings.py
|
b74aa26236f984c9d069f99b49ede83a5f62ecaf
|
[
"MIT"
] |
permissive
|
basedosdados/mais
|
080cef1de14376699ef65ba71297e40784410f12
|
2836c8cfad11c27191f7a8aca5ca26b94808c1da
|
refs/heads/master
| 2023-09-05T20:55:27.351309
| 2023-09-02T03:21:02
| 2023-09-02T03:21:02
| 294,702,369
| 376
| 98
|
MIT
| 2023-08-30T21:17:28
| 2020-09-11T13:26:45
|
SQL
|
UTF-8
|
Python
| false
| false
| 1,732
|
py
|
settings.py
|
"""
Settings for the project.
"""
# pylint: disable=invalid-name
from pathlib import Path
URL_CGU_DOWNLOADS = "http://www.portaltransparencia.gov.br/download-de-dados/servidores"
CGU_FILES = {
"militar": {"ativo": ["Militares"], "reservista": ["Reserva_Reforma_Militares"]},
"civil": {
"ativo": ["Servidores_BACEN", "Servidores_SIAPE"],
"aposentado": ["Aposentados_BACEN", "Aposentados_SIAPE"],
},
}
TMP_DIR = Path.cwd().resolve().parent / "tmp"
IN_DIR = Path.cwd().resolve().parent / "input"
OUT_DIR = Path.cwd().resolve().parent / "output"
REMUNERACAO_COLUMNS = [
"ano",
"mes",
"id_servidor_portal",
"cpf",
"nome",
"remuneracao_bruta_brl",
"remuneracao_bruta_usd",
"abate_teto_brl",
"abate_teto_usd",
"gratificao_natalina_brl",
"gratificao_natalina_usd",
"abate_teto_gratificacao_natalina_brl",
"abate_teto_gratificacao_natalina_usd",
"ferias_brl",
"ferias_usd",
"outras_remuneracoes_brl",
"outras_remuneracoes_usd",
"irrf_brl",
"irrf_usd",
"pss_rgps_brl",
"pss_rgps_usd",
"demais_deducoes_brl",
"demais_deducoes_usd",
"pensao_militar_brl",
"pensao_militar_usd",
"fundo_saude_brl",
"fundo_saude_usd",
"taxa_ocupacao_imovel_funcional_brl",
"taxa_ocupacao_imovel_funcional_usd",
"remuneracao_liquida_militar_brl",
"remuneracao_liquida_militar_usd",
"verba_indenizatoria_civil_brl",
"verba_indenizatoria_civil_usd",
"verba_indenizatoria_militar_brl",
"verba_indenizatoria_militar_usd",
"verba_indenizatoria_deslig_voluntario_brl",
"verba_indenizatoria_deslig_voluntario_usd",
"total_verba_indenizatoria_brl",
"total_verba_indenizatoria_usd",
]
|
4e7606862974bf9b22993c85e51623c537584cb4
|
376e1818d427b5e4d32fa6dd6c7b71e9fd88afdb
|
/lang/python27/patches/patch-Lib_test_test__urlparse.py
|
971fab41e97691be5eee0abd64660ebd06299844
|
[] |
no_license
|
NetBSD/pkgsrc
|
a0732c023519650ef821ab89c23ab6ab59e25bdb
|
d042034ec4896cc5b47ed6f2e5b8802d9bc5c556
|
refs/heads/trunk
| 2023-09-01T07:40:12.138283
| 2023-09-01T05:25:19
| 2023-09-01T05:25:19
| 88,439,572
| 321
| 138
| null | 2023-07-12T22:34:14
| 2017-04-16T20:04:15
| null |
UTF-8
|
Python
| false
| false
| 18,200
|
py
|
patch-Lib_test_test__urlparse.py
|
$NetBSD: patch-Lib_test_test__urlparse.py,v 1.3 2023/05/29 23:33:48 gutteridge Exp $
Fix CVE-2021-23336: Add `separator` argument to parse_qs; warn with default
Via Fedora:
https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00359-CVE-2021-23336.patch
Fix CVE-2022-0391: urlparse does not sanitize URLs containing ASCII newline and tabs
Via Fedora:
https://src.fedoraproject.org/rpms/python2.7/raw/40dd05e5d77dbfa81777c9f84b704bc2239bf710/f/00377-CVE-2022-0391.patch
Fix CVE-2023-24329: Add more sanitizing to respect the "Remove any leading C0 control or space from input" rule
Via Fedora:
https://src.fedoraproject.org/rpms/python2.7/c/3f00cdccd59ef2955a7f4b4c42bb59c631cce4c1.patch
--- Lib/test/test_urlparse.py.orig 2020-04-19 21:13:39.000000000 +0000
+++ Lib/test/test_urlparse.py
@@ -3,6 +3,12 @@ import sys
import unicodedata
import unittest
import urlparse
+from test.support import EnvironmentVarGuard
+from warnings import catch_warnings, filterwarnings
+import tempfile
+import contextlib
+import os.path
+import shutil
RFC1808_BASE = "http://a/b/c/d;p?q#f"
RFC2396_BASE = "http://a/b/c/d;p?q"
@@ -24,16 +30,29 @@ parse_qsl_test_cases = [
("&a=b", [('a', 'b')]),
("a=a+b&b=b+c", [('a', 'a b'), ('b', 'b c')]),
("a=1&a=2", [('a', '1'), ('a', '2')]),
+]
+
+parse_qsl_test_cases_semicolon = [
(";", []),
(";;", []),
(";a=b", [('a', 'b')]),
("a=a+b;b=b+c", [('a', 'a b'), ('b', 'b c')]),
("a=1;a=2", [('a', '1'), ('a', '2')]),
- (b";", []),
- (b";;", []),
- (b";a=b", [(b'a', b'b')]),
- (b"a=a+b;b=b+c", [(b'a', b'a b'), (b'b', b'b c')]),
- (b"a=1;a=2", [(b'a', b'1'), (b'a', b'2')]),
+]
+
+parse_qsl_test_cases_legacy = [
+ ("a=1;a=2&a=3", [('a', '1'), ('a', '2'), ('a', '3')]),
+ ("a=1;b=2&c=3", [('a', '1'), ('b', '2'), ('c', '3')]),
+ ("a=1&b=2&c=3;", [('a', '1'), ('b', '2'), ('c', '3')]),
+]
+
+parse_qsl_test_cases_warn = [
+ (";a=b", [(';a', 'b')]),
+ ("a=a+b;b=b+c", [('a', 'a b;b=b c')]),
+ (b";a=b", [(b';a', b'b')]),
+ (b"a=a+b;b=b+c", [(b'a', b'a b;b=b c')]),
+ ("a=1;a=2&a=3", [('a', '1;a=2'), ('a', '3')]),
+ (b"a=1;a=2&a=3", [(b'a', b'1;a=2'), (b'a', b'3')]),
]
parse_qs_test_cases = [
@@ -57,6 +76,9 @@ parse_qs_test_cases = [
(b"&a=b", {b'a': [b'b']}),
(b"a=a+b&b=b+c", {b'a': [b'a b'], b'b': [b'b c']}),
(b"a=1&a=2", {b'a': [b'1', b'2']}),
+]
+
+parse_qs_test_cases_semicolon = [
(";", {}),
(";;", {}),
(";a=b", {'a': ['b']}),
@@ -69,6 +91,24 @@ parse_qs_test_cases = [
(b"a=1;a=2", {b'a': [b'1', b'2']}),
]
+parse_qs_test_cases_legacy = [
+ ("a=1;a=2&a=3", {'a': ['1', '2', '3']}),
+ ("a=1;b=2&c=3", {'a': ['1'], 'b': ['2'], 'c': ['3']}),
+ ("a=1&b=2&c=3;", {'a': ['1'], 'b': ['2'], 'c': ['3']}),
+ (b"a=1;a=2&a=3", {b'a': [b'1', b'2', b'3']}),
+ (b"a=1;b=2&c=3", {b'a': [b'1'], b'b': [b'2'], b'c': [b'3']}),
+ (b"a=1&b=2&c=3;", {b'a': [b'1'], b'b': [b'2'], b'c': [b'3']}),
+]
+
+parse_qs_test_cases_warn = [
+ (";a=b", {';a': ['b']}),
+ ("a=a+b;b=b+c", {'a': ['a b;b=b c']}),
+ (b";a=b", {b';a': [b'b']}),
+ (b"a=a+b;b=b+c", {b'a':[ b'a b;b=b c']}),
+ ("a=1;a=2&a=3", {'a': ['1;a=2', '3']}),
+ (b"a=1;a=2&a=3", {b'a': [b'1;a=2', b'3']}),
+]
+
class UrlParseTestCase(unittest.TestCase):
def checkRoundtrips(self, url, parsed, split):
@@ -141,6 +181,40 @@ class UrlParseTestCase(unittest.TestCase
self.assertEqual(result, expect_without_blanks,
"Error parsing %r" % orig)
+ def test_qs_default_warn(self):
+ for orig, expect in parse_qs_test_cases_warn:
+ with catch_warnings(record=True) as w:
+ filterwarnings(action='always',
+ category=urlparse._QueryStringSeparatorWarning)
+ result = urlparse.parse_qs(orig, keep_blank_values=True)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 1)
+ self.assertEqual(w[0].category, urlparse._QueryStringSeparatorWarning)
+
+ def test_qsl_default_warn(self):
+ for orig, expect in parse_qsl_test_cases_warn:
+ with catch_warnings(record=True) as w:
+ filterwarnings(action='always',
+ category=urlparse._QueryStringSeparatorWarning)
+ result = urlparse.parse_qsl(orig, keep_blank_values=True)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 1)
+ self.assertEqual(w[0].category, urlparse._QueryStringSeparatorWarning)
+
+ def test_default_qs_no_warnings(self):
+ for orig, expect in parse_qs_test_cases:
+ with catch_warnings(record=True) as w:
+ result = urlparse.parse_qs(orig, keep_blank_values=True)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+
+ def test_default_qsl_no_warnings(self):
+ for orig, expect in parse_qsl_test_cases:
+ with catch_warnings(record=True) as w:
+ result = urlparse.parse_qsl(orig, keep_blank_values=True)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+
def test_roundtrips(self):
testcases = [
('file:///tmp/junk.txt',
@@ -544,6 +618,112 @@ class UrlParseTestCase(unittest.TestCase
self.assertEqual(p1.path, '863-1234')
self.assertEqual(p1.params, 'phone-context=+1-914-555')
+ def test_urlsplit_remove_unsafe_bytes(self):
+ # Remove ASCII tabs and newlines from input, for http common case scenario.
+ url = "h\nttp://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.scheme, "http")
+ self.assertEqual(p.netloc, "www.python.org")
+ self.assertEqual(p.path, "/javascript:alert('msg')/")
+ self.assertEqual(p.query, "query=something")
+ self.assertEqual(p.fragment, "fragment")
+ self.assertEqual(p.username, None)
+ self.assertEqual(p.password, None)
+ self.assertEqual(p.hostname, "www.python.org")
+ self.assertEqual(p.port, None)
+ self.assertEqual(p.geturl(), "http://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+ # Remove ASCII tabs and newlines from input as bytes, for http common case scenario.
+ url = b"h\nttp://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.scheme, b"http")
+ self.assertEqual(p.netloc, b"www.python.org")
+ self.assertEqual(p.path, b"/javascript:alert('msg')/")
+ self.assertEqual(p.query, b"query=something")
+ self.assertEqual(p.fragment, b"fragment")
+ self.assertEqual(p.username, None)
+ self.assertEqual(p.password, None)
+ self.assertEqual(p.hostname, b"www.python.org")
+ self.assertEqual(p.port, None)
+ self.assertEqual(p.geturl(), b"http://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+ # any scheme
+ url = "x-new-scheme\t://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.geturl(), "x-new-scheme://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+ # Remove ASCII tabs and newlines from input as bytes, any scheme.
+ url = b"x-new-scheme\t://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.geturl(), b"x-new-scheme://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+ # Unsafe bytes is not returned from urlparse cache.
+ # scheme is stored after parsing, sending an scheme with unsafe bytes *will not* return an unsafe scheme
+ url = "https://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+ scheme = "htt\nps"
+ for _ in range(2):
+ p = urlparse.urlsplit(url, scheme=scheme)
+ self.assertEqual(p.scheme, "https")
+ self.assertEqual(p.geturl(), "https://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+ def test_urlsplit_strip_url(self):
+ noise = "".join([chr(i) for i in range(0, 0x20 + 1)])
+ base_url = "http://User:Pass@www.python.org:080/doc/?query=yes#frag"
+
+ url = noise.decode("utf-8") + base_url
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.scheme, "http")
+ self.assertEqual(p.netloc, "User:Pass@www.python.org:080")
+ self.assertEqual(p.path, "/doc/")
+ self.assertEqual(p.query, "query=yes")
+ self.assertEqual(p.fragment, "frag")
+ self.assertEqual(p.username, "User")
+ self.assertEqual(p.password, "Pass")
+ self.assertEqual(p.hostname, "www.python.org")
+ self.assertEqual(p.port, 80)
+ self.assertEqual(p.geturl(), base_url)
+
+ url = noise + base_url.encode("utf-8")
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.scheme, b"http")
+ self.assertEqual(p.netloc, b"User:Pass@www.python.org:080")
+ self.assertEqual(p.path, b"/doc/")
+ self.assertEqual(p.query, b"query=yes")
+ self.assertEqual(p.fragment, b"frag")
+ self.assertEqual(p.username, b"User")
+ self.assertEqual(p.password, b"Pass")
+ self.assertEqual(p.hostname, b"www.python.org")
+ self.assertEqual(p.port, 80)
+ self.assertEqual(p.geturl(), base_url.encode("utf-8"))
+
+ # Test that trailing space is preserved as some applications rely on
+ # this within query strings.
+ query_spaces_url = "https://www.python.org:88/doc/?query= "
+ p = urlparse.urlsplit(noise.decode("utf-8") + query_spaces_url)
+ self.assertEqual(p.scheme, "https")
+ self.assertEqual(p.netloc, "www.python.org:88")
+ self.assertEqual(p.path, "/doc/")
+ self.assertEqual(p.query, "query= ")
+ self.assertEqual(p.port, 88)
+ self.assertEqual(p.geturl(), query_spaces_url)
+
+ p = urlparse.urlsplit("www.pypi.org ")
+ # That "hostname" gets considered a "path" due to the
+ # trailing space and our existing logic... YUCK...
+ # and re-assembles via geturl aka unurlsplit into the original.
+ # django.core.validators.URLValidator (at least through v3.2) relies on
+ # this, for better or worse, to catch it in a ValidationError via its
+ # regular expressions.
+ # Here we test the basic round trip concept of such a trailing space.
+ self.assertEqual(urlparse.urlunsplit(p), "www.pypi.org ")
+
+ # with scheme as cache-key
+ url = "//www.python.org/"
+ scheme = noise.decode("utf-8") + "https" + noise.decode("utf-8")
+ for _ in range(2):
+ p = urlparse.urlsplit(url, scheme=scheme)
+ self.assertEqual(p.scheme, "https")
+ self.assertEqual(p.geturl(), "https://www.python.org/")
def test_attributes_bad_port(self):
"""Check handling of non-integer ports."""
@@ -626,6 +806,132 @@ class UrlParseTestCase(unittest.TestCase
self.assertEqual(urlparse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
+ def test_parse_qs_separator_bytes(self):
+ expected = {b'a': [b'1'], b'b': [b'2']}
+
+ result = urlparse.parse_qs(b'a=1;b=2', separator=b';')
+ self.assertEqual(result, expected)
+ result = urlparse.parse_qs(b'a=1;b=2', separator=';')
+ self.assertEqual(result, expected)
+ result = urlparse.parse_qs('a=1;b=2', separator=';')
+ self.assertEqual(result, {'a': ['1'], 'b': ['2']})
+
+ @contextlib.contextmanager
+ def _qsl_sep_config(self, sep):
+ """Context for the given parse_qsl default separator configured in config file"""
+ old_filename = urlparse._QS_SEPARATOR_CONFIG_FILENAME
+ urlparse._default_qs_separator = None
+ try:
+ tmpdirname = tempfile.mkdtemp()
+ filename = os.path.join(tmpdirname, 'conf.cfg')
+ with open(filename, 'w') as file:
+ file.write('[parse_qs]\n')
+ file.write('PYTHON_URLLIB_QS_SEPARATOR = {}'.format(sep))
+ urlparse._QS_SEPARATOR_CONFIG_FILENAME = filename
+ yield
+ finally:
+ urlparse._QS_SEPARATOR_CONFIG_FILENAME = old_filename
+ urlparse._default_qs_separator = None
+ shutil.rmtree(tmpdirname)
+
+ def test_parse_qs_separator_semicolon(self):
+ for orig, expect in parse_qs_test_cases_semicolon:
+ result = urlparse.parse_qs(orig, separator=';')
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = ';'
+ result = urlparse.parse_qs(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+ with self._qsl_sep_config(';'), catch_warnings(record=True) as w:
+ result = urlparse.parse_qs(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+
+ def test_parse_qsl_separator_semicolon(self):
+ for orig, expect in parse_qsl_test_cases_semicolon:
+ result = urlparse.parse_qsl(orig, separator=';')
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = ';'
+ result = urlparse.parse_qsl(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+ with self._qsl_sep_config(';'), catch_warnings(record=True) as w:
+ result = urlparse.parse_qsl(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+
+ def test_parse_qs_separator_legacy(self):
+ for orig, expect in parse_qs_test_cases_legacy:
+ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = 'legacy'
+ result = urlparse.parse_qs(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+ with self._qsl_sep_config('legacy'), catch_warnings(record=True) as w:
+ result = urlparse.parse_qs(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+
+ def test_parse_qsl_separator_legacy(self):
+ for orig, expect in parse_qsl_test_cases_legacy:
+ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = 'legacy'
+ result = urlparse.parse_qsl(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+ with self._qsl_sep_config('legacy'), catch_warnings(record=True) as w:
+ result = urlparse.parse_qsl(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+
+ def test_parse_qs_separator_bad_value_env_or_config(self):
+ for bad_sep in '', 'abc', 'safe', '&;', 'SEP':
+ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = bad_sep
+ with self.assertRaises(ValueError):
+ urlparse.parse_qsl('a=1;b=2')
+ with self._qsl_sep_config('bad_sep'), catch_warnings(record=True) as w:
+ with self.assertRaises(ValueError):
+ urlparse.parse_qsl('a=1;b=2')
+
+ def test_parse_qs_separator_bad_value_arg(self):
+ for bad_sep in True, {}, '':
+ with self.assertRaises(ValueError):
+ urlparse.parse_qsl('a=1;b=2', separator=bad_sep)
+
+ def test_parse_qs_separator_num_fields(self):
+ for qs, sep in (
+ ('a&b&c', '&'),
+ ('a;b;c', ';'),
+ ('a&b;c', 'legacy'),
+ ):
+ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
+ if sep != 'legacy':
+ with self.assertRaises(ValueError):
+ urlparse.parse_qsl(qs, separator=sep, max_num_fields=2)
+ if sep:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = sep
+ with self.assertRaises(ValueError):
+ urlparse.parse_qsl(qs, max_num_fields=2)
+
+ def test_parse_qs_separator_priority(self):
+ # env variable trumps config file
+ with self._qsl_sep_config('~'), EnvironmentVarGuard() as environ:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = '!'
+ result = urlparse.parse_qs('a=1!b=2~c=3')
+ self.assertEqual(result, {'a': ['1'], 'b': ['2~c=3']})
+ # argument trumps config file
+ with self._qsl_sep_config('~'):
+ result = urlparse.parse_qs('a=1$b=2~c=3', separator='$')
+ self.assertEqual(result, {'a': ['1'], 'b': ['2~c=3']})
+ # argument trumps env variable
+ with EnvironmentVarGuard() as environ:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = '~'
+ result = urlparse.parse_qs('a=1$b=2~c=3', separator='$')
+ self.assertEqual(result, {'a': ['1'], 'b': ['2~c=3']})
+
def test_urlsplit_normalization(self):
# Certain characters should never occur in the netloc,
# including under normalization.
|
9b1022dd2dd8110093708185848c8b64d1198124
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/quickFixes/PyAddImportQuickFixTest/beforeStatementBelowFileCommentBlock/main_after.py
|
1f1375ed8d892d8c204bb814bb20f21066c01a89
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 52
|
py
|
main_after.py
|
# some comment #
################
import a
print(a)
|
182e39a2fbb654c6425d394695fe775067ccfcff
|
3b1cdc517234d1e2baa77ee6190589c9e6d8ed30
|
/graphlearn/examples/tf/seal_v2/seal_link_predict.py
|
56ce7200ed93dac66b79de0a446951d8ec45a1c2
|
[
"Apache-2.0"
] |
permissive
|
alibaba/graph-learn
|
e7ef7ee75f0d40a4d57f51691338b7826cf947b2
|
9682c9b399a57ea318325cdb0c9f4f2f79644e82
|
refs/heads/master
| 2023-09-01T05:37:28.606948
| 2023-08-31T04:52:47
| 2023-08-31T04:52:47
| 250,229,724
| 1,302
| 289
|
Apache-2.0
| 2023-08-31T04:52:48
| 2020-03-26T10:39:32
|
C++
|
UTF-8
|
Python
| false
| false
| 6,948
|
py
|
seal_link_predict.py
|
# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import print_function
import datetime
import numpy as np
import graphlearn as gl
try:
# https://www.tensorflow.org/guide/migrate
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except ImportError:
import tensorflow as tf
import graphlearn.python.nn.tf as tfg
from node_label_processor import LabelProcessor
def load_graph(config):
data_dir = config['dataset_folder']
g = gl.Graph() \
.node(data_dir+'ogbl_collab_node', node_type='i',
decoder=gl.Decoder(attr_types=['float'] * config['features_num'],
attr_dims=[0]*config['features_num'])) \
.edge(data_dir+'ogbl_collab_train_edge', edge_type=('i', 'i', 'train'),
decoder=gl.Decoder(weighted=True), directed=False) \
.edge(data_dir+'ogbl_collab_train_neg_edge', edge_type=('i', 'i', 'train_neg'),
decoder=gl.Decoder(weighted=True), directed=True) \
.edge(data_dir+'ogbl_collab_val_edge', edge_type=('i', 'i', 'val'),
decoder=gl.Decoder(weighted=True), directed=True) \
.edge(data_dir+'ogbl_collab_val_edge_neg', edge_type=('i', 'i', 'val_neg'),
decoder=gl.Decoder(weighted=True), directed=True) \
.edge(data_dir+'ogbl_collab_test_edge', edge_type=('i', 'i', 'test'),
decoder=gl.Decoder(weighted=True), directed=True) \
.edge(data_dir+'ogbl_collab_test_edge_neg', edge_type=('i', 'i', 'test_neg'),
decoder=gl.Decoder(weighted=True), directed=True)
return g
def eval_hits(y_pred_pos, y_pred_neg, k):
  '''
  Compute Hits@K.
  Every positive target node is ranked against the same array of negative
  target nodes: y_pred_neg holds the negative scores, and y_pred_pos[i] is
  ranked against y_pred_neg for each i.
  '''
if len(y_pred_neg) < k or len(y_pred_pos) == 0:
return {'hits@{}'.format(k): 1.}
kth_score_in_negative_edges = np.sort(y_pred_neg)[-k]
hitsK = float(np.sum(y_pred_pos > kth_score_in_negative_edges)) / len(y_pred_pos)
return {'hits@{}'.format(k): hitsK}
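# Worked example of eval_hits: with k=2, y_pred_pos=[0.9, 0.1] and
# y_pred_neg=[0.5, 0.2, 0.8], the 2nd-highest negative score is 0.5, so only
# 0.9 ranks above it and hits@2 = 1/2 = 0.5, i.e.
# eval_hits(np.array([0.9, 0.1]), np.array([0.5, 0.2, 0.8]), 2) == {'hits@2': 0.5}.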
def train(g, model, predictor, config):
tfg.conf.training = True
query = g.E('train').batch(1).shuffle(traverse=True).alias('train')\
.SubGraph('train', config['nbrs_num'], need_dist=True).alias('sub')
pos_dataset = tfg.Dataset(query.values(),
processor=LabelProcessor(config['strut_label_spec']),
batch_size=config['batch_size'])
pos_graph, _ = pos_dataset.get_batchgraph()
neg_query = g.E('train_neg').batch(1).shuffle(traverse=False).alias('train_neg')\
.SubGraph('train', config['nbrs_num'], need_dist=True).alias('sub')
neg_dataset = tfg.Dataset(neg_query.values(),
processor=LabelProcessor(config['strut_label_spec']),
batch_size=config['batch_size'])
neg_graph, _ = neg_dataset.get_batchgraph()
pos_src, pos_dst = model.forward(batchgraph=pos_graph)
neg_src, neg_dst = model.forward(batchgraph=neg_graph)
pos_h = predictor(pos_src * pos_dst)
neg_h = predictor(neg_src * neg_dst)
# train loss
loss = tfg.sigmoid_cross_entropy_loss(pos_h, neg_h)
return pos_dataset.iterator, neg_dataset.iterator, loss
def test(g, model, predictor, config, edge_type='test'):
tfg.conf.training = False
query = g.E(edge_type).batch(1).alias(edge_type).SubGraph('train',
config['nbrs_num'], need_dist=True).alias('sub')
dataset = tfg.Dataset(query.values(),
processor=LabelProcessor(config['strut_label_spec']),
batch_size=config['batch_size'])
pos_graph, _ = dataset.get_batchgraph()
pos_src, pos_dst = model.forward(batchgraph=pos_graph)
logits = predictor(pos_src * pos_dst)
return dataset.iterator, logits
def run(config):
gl.set_default_full_nbr_num(100)
# graph input data
g = load_graph(config=config)
g.init()
# model
model = tfg.SEAL(config['batch_size'],
input_dim=config['features_num'],
hidden_dim=config['hidden_dim'],
output_dim=config['output_dim'],
depth=config['depth'],
drop_rate=config['drop_out'],
agg_type=config['agg_type'])
predictor = tfg.LinkPredictor(name="link_pred",
input_dim=config['output_dim'], num_layers=config['predictor_layers'])
def eval(sess, logits, iterator):
"""evaluate accuracy"""
sess.run(iterator.initializer)
outs = np.array([])
while True:
try:
outs = np.append(sess.run(logits), outs)
except tf.errors.OutOfRangeError:
print('End of an epoch.')
break
return outs
# trainer
pos_iterator, neg_iterator, loss = train(g, model, predictor, config)
optimizer = tf.train.AdamOptimizer(learning_rate=config['learning_rate'])
train_op = optimizer.minimize(loss)
train_ops = [loss, train_op]
test_iter, test_logits = test(g, model, predictor, config, edge_type='test')
test_neg_iter, test_neg_logits = test(g, model, predictor, config, edge_type='test_neg')
with tf.Session() as sess:
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
sess.run((pos_iterator.initializer, neg_iterator.initializer))
epoch = 0
for step in range(config['steps']):
try:
ret = sess.run(train_ops)
if step and step % 100 == 0:
print(datetime.datetime.now(),
'Epoch {}, Iter {}, Loss {:.5f}'.format(epoch, step, ret[0]))
if step and step % 1000 == 0: # test
pos_logits = eval(sess, test_logits, test_iter)
neg_logits = eval(sess, test_neg_logits, test_neg_iter)
print('Test hits@50:', eval_hits(pos_logits, neg_logits, 50))
except tf.errors.OutOfRangeError:
sess.run(pos_iterator.initializer) # reinitialize dataset.
epoch += 1
g.close()
if __name__ == "__main__":
config = {'dataset_folder': '../../data/ogbl_collab/',
'batch_size': 128,
'hidden_dim': 32,
'output_dim': 32,
'features_num': 128,
'nbrs_num': [100],
'depth': 3,
'neg_num': 1,
'learning_rate': 0.0001,
'agg_type': 'mean',
'drop_out': 0.0,
'predictor_layers': 3,
'steps': 100000,
'strut_label_spec': {'struct_label': [tf.int32, tf.TensorShape([None])]}
}
run(config)
|
3c806efe03de9632397de28fc6cf56ce1ab2c4de
|
057a475216e9beed41983481aafcaf109bbf58da
|
/tests/queries/0_stateless/00386_long_in_pk.python
|
c7b04102dc504070cf3e0d5738e1f9cdb9b35d03
|
[
"Apache-2.0",
"BSL-1.0"
] |
permissive
|
ClickHouse/ClickHouse
|
fece5204263a5b4d693854b6039699265f1bb27f
|
6649328db809d51a694c358571539bc5820464be
|
refs/heads/master
| 2023-08-31T18:48:36.615225
| 2023-08-31T17:51:24
| 2023-08-31T17:51:24
| 60,246,359
| 23,878
| 5,449
|
Apache-2.0
| 2023-09-14T20:10:52
| 2016-06-02T08:28:18
|
C++
|
UTF-8
|
Python
| false
| false
| 3,060
|
python
|
00386_long_in_pk.python
|
#!/usr/bin/env python3
def gen_queries():
create_template = "create table tab_00386 (a Int8, b String, c Tuple(Int8), d Tuple(Tuple(Int8)), e Tuple(Int8, String), f Tuple(Tuple(Int8, String))) engine = MergeTree order by ({}) partition by {}"
drop_query = "drop table if exists tab_00386"
values = ("1", "'a'", "tuple(1)", "tuple(tuple(1))", "(1, 'a')", "tuple((1, 'a'))")
insert_query = "insert into tab_00386 values (1, 'a', tuple(1), tuple(tuple(1)), (1, 'a'), tuple((1, 'a')))"
columns = tuple("a b c d".split())
order_by_columns = tuple("a b c".split())
partition_by_columns = tuple(" tuple() a".split())
for partition in partition_by_columns:
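        # enumerate every non-empty subset of order_by_columns (bitmask over
        # column indices) and use it as the MergeTree sorting key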
for key_mask in range(1, 1 << len(order_by_columns)):
key = ",".join(
order_by_columns[i]
for i in range(len(order_by_columns))
if (1 << i) & key_mask != 0
)
create_query = create_template.format(key, partition)
for q in (drop_query, create_query, insert_query):
yield q
for column, value in zip(columns, values):
yield "select {} in {} from tab_00386".format(column, value)
yield "select {} in tuple({}) from tab_00386".format(column, value)
yield "select {} in (select {} from tab_00386) from tab_00386".format(
column, column
)
for i in range(len(columns)):
for j in range(i, len(columns)):
yield "select ({}, {}) in tuple({}, {}) from tab_00386".format(
columns[i], columns[j], values[i], values[j]
)
yield "select ({}, {}) in (select {}, {} from tab_00386) from tab_00386".format(
columns[i], columns[j], columns[i], columns[j]
)
yield "select ({}, {}) in (select ({}, {}) from tab_00386) from tab_00386".format(
columns[i], columns[j], columns[i], columns[j]
)
yield "select e in (1, 'a') from tab_00386"
yield "select f in tuple((1, 'a')) from tab_00386"
yield "select f in tuple(tuple((1, 'a'))) from tab_00386"
yield "select e in (select a, b from tab_00386) from tab_00386"
yield "select e in (select (a, b) from tab_00386) from tab_00386"
yield "select f in (select tuple((a, b)) from tab_00386) from tab_00386"
yield "select tuple(f) in (select tuple(tuple((a, b))) from tab_00386) from tab_00386"
import requests
import os
def main():
url = os.environ["CLICKHOUSE_URL"]
for q in gen_queries():
resp = requests.post(url, data=q)
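        # every generated SELECT is expected to return "1"; DDL and INSERT
        # statements return an empty body, hence the ("1", "") check below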
if resp.status_code != 200 or resp.text.strip() not in ("1", ""):
print("Query:", q)
print("Code:", resp.status_code)
print(resp.text)
break
requests.post(url, data="drop table tab_00386")
if __name__ == "__main__":
main()
|
87fb93cbfaed53809f6c10a33222be70ed918a6f
|
e83005ad68df95d4976099620bc60dc9f539355c
|
/tests/test_sqlalchemy_data_layer.py
|
2ae1b64af165b85b33009226d70951f789547801
|
[
"MIT"
] |
permissive
|
fossasia/flask-rest-jsonapi
|
6c921dcfbd912929bf5d86f68b4d824925377c2d
|
a03408bffd5ef96bf3b8abe3a30d147db46fbe47
|
refs/heads/fossasia
| 2021-08-07T08:42:05.441633
| 2021-04-14T12:58:15
| 2021-04-14T12:58:15
| 124,872,416
| 1,827
| 40
|
MIT
| 2021-04-14T12:52:22
| 2018-03-12T10:29:51
|
Python
|
UTF-8
|
Python
| false
| false
| 54,424
|
py
|
test_sqlalchemy_data_layer.py
|
# -*- coding: utf-8 -*-
from six.moves.urllib.parse import urlencode
import pytest
import json
from sqlalchemy import create_engine, Column, Integer, DateTime, String, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from flask import Blueprint, make_response
from marshmallow_jsonapi.flask import Schema, Relationship
from marshmallow_jsonapi import fields
from marshmallow import ValidationError
from flask_rest_jsonapi import Api, ResourceList, ResourceDetail, ResourceRelationship, JsonApiException
from flask_rest_jsonapi.pagination import add_pagination_links
from flask_rest_jsonapi.exceptions import RelationNotFound, InvalidSort, InvalidFilters, InvalidInclude, BadRequest
from flask_rest_jsonapi.querystring import QueryStringManager as QSManager
from flask_rest_jsonapi.data_layers.alchemy import SqlalchemyDataLayer
from flask_rest_jsonapi.data_layers.base import BaseDataLayer
from flask_rest_jsonapi.data_layers.filtering.alchemy import Node
import flask_rest_jsonapi.decorators
import flask_rest_jsonapi.resource
import flask_rest_jsonapi.schema
@pytest.fixture(autouse=True)
def flask_app(monkeypatch):
app = type('app', (object,), dict(config=dict(DEBUG=True)))
monkeypatch.setattr(flask_rest_jsonapi.data_layers.alchemy, 'current_app', app)
monkeypatch.setattr(flask_rest_jsonapi.data_layers.filtering.alchemy, 'current_app', app) # test Node
monkeypatch.setattr(flask_rest_jsonapi.querystring, 'current_app', app) # test querystring
@pytest.fixture(scope="module")
def base():
yield declarative_base()
@pytest.fixture(scope="module")
def person_model(base):
class Person(base):
__tablename__ = 'person'
person_id = Column(Integer, primary_key=True)
name = Column(String, nullable=False)
birth_date = Column(DateTime)
computers = relationship("Computer", backref="person")
yield Person
@pytest.fixture(scope="module")
def computer_model(base):
class Computer(base):
__tablename__ = 'computer'
id = Column(Integer, primary_key=True)
serial = Column(String, nullable=False)
person_id = Column(Integer, ForeignKey('person.person_id'))
yield Computer
@pytest.fixture(scope="module")
def engine(person_model, computer_model):
engine = create_engine("sqlite:///:memory:")
person_model.metadata.create_all(engine)
computer_model.metadata.create_all(engine)
return engine
@pytest.fixture(scope="module")
def session(engine):
Session = sessionmaker(bind=engine)
return Session()
@pytest.fixture()
def person(session, person_model):
person_ = person_model(name='test')
session_ = session
session_.add(person_)
session_.commit()
yield person_
session_.delete(person_)
session_.commit()
@pytest.fixture()
def person_2(session, person_model):
person_ = person_model(name='test2')
session_ = session
session_.add(person_)
session_.commit()
yield person_
session_.delete(person_)
session_.commit()
@pytest.fixture()
def computer(session, computer_model):
computer_ = computer_model(serial='1')
session_ = session
session_.add(computer_)
session_.commit()
yield computer_
session_.delete(computer_)
session_.commit()
@pytest.fixture(scope="module")
def dummy_decorator():
def deco(f):
def wrapper_f(*args, **kwargs):
return f(*args, **kwargs)
return wrapper_f
yield deco
@pytest.fixture(scope="module")
def person_schema():
class PersonSchema(Schema):
class Meta:
type_ = 'person'
self_view = 'api.person_detail'
self_view_kwargs = {'person_id': '<id>'}
id = fields.Str(dump_only=True, attribute='person_id')
name = fields.Str(required=True)
birth_date = fields.DateTime()
computers = Relationship(related_view='api.computer_list',
related_view_kwargs={'person_id': '<person_id>'},
schema='ComputerSchema',
type_='computer',
many=True)
yield PersonSchema
@pytest.fixture(scope="module")
def computer_schema():
class ComputerSchema(Schema):
class Meta:
type_ = 'computer'
self_view = 'api.computer_detail'
self_view_kwargs = {'id': '<id>'}
id = fields.Str(dump_only=True)
serial = fields.Str(required=True)
owner = Relationship(attribute='person',
related_view='api.person_detail',
related_view_kwargs={'person_id': '<person.person_id>'},
schema='PersonSchema',
id_field='person_id',
type_='person')
yield ComputerSchema
@pytest.fixture(scope="module")
def before_create_object():
def before_create_object_(self, data, view_kwargs):
pass
yield before_create_object_
@pytest.fixture(scope="module")
def before_update_object():
def before_update_object_(self, obj, data, view_kwargs):
pass
yield before_update_object_
@pytest.fixture(scope="module")
def before_delete_object():
def before_delete_object_(self, obj, view_kwargs):
pass
yield before_delete_object_
@pytest.fixture(scope="module")
def person_list(session, person_model, dummy_decorator, person_schema, before_create_object):
class PersonList(ResourceList):
schema = person_schema
data_layer = {'model': person_model,
'session': session,
                      'methods': {'before_create_object': before_create_object}}
get_decorators = [dummy_decorator]
post_decorators = [dummy_decorator]
get_schema_kwargs = dict()
post_schema_kwargs = dict()
yield PersonList
@pytest.fixture(scope="module")
def person_detail(session, person_model, dummy_decorator, person_schema, before_update_object, before_delete_object):
class PersonDetail(ResourceDetail):
schema = person_schema
data_layer = {'model': person_model,
'session': session,
'url_field': 'person_id',
'methods': {'before_update_object': before_update_object,
'before_delete_object': before_delete_object}}
get_decorators = [dummy_decorator]
patch_decorators = [dummy_decorator]
delete_decorators = [dummy_decorator]
get_schema_kwargs = dict()
patch_schema_kwargs = dict()
delete_schema_kwargs = dict()
yield PersonDetail
@pytest.fixture(scope="module")
def person_computers(session, person_model, dummy_decorator, person_schema):
class PersonComputersRelationship(ResourceRelationship):
schema = person_schema
data_layer = {'session': session,
'model': person_model,
'url_field': 'person_id'}
get_decorators = [dummy_decorator]
post_decorators = [dummy_decorator]
patch_decorators = [dummy_decorator]
delete_decorators = [dummy_decorator]
yield PersonComputersRelationship
@pytest.fixture(scope="module")
def person_list_raise_jsonapiexception():
class PersonList(ResourceList):
def get(self):
raise JsonApiException('', '')
yield PersonList
@pytest.fixture(scope="module")
def person_list_raise_exception():
class PersonList(ResourceList):
def get(self):
raise Exception()
yield PersonList
@pytest.fixture(scope="module")
def person_list_response():
class PersonList(ResourceList):
def get(self):
return make_response('')
yield PersonList
@pytest.fixture(scope="module")
def person_list_without_schema(session, person_model):
class PersonList(ResourceList):
data_layer = {'model': person_model,
'session': session}
def get(self):
return make_response('')
yield PersonList
@pytest.fixture(scope="module")
def query():
def query_(self, view_kwargs):
if view_kwargs.get('person_id') is not None:
return self.session.query(computer_model).join(person_model).filter_by(person_id=view_kwargs['person_id'])
return self.session.query(computer_model)
yield query_
@pytest.fixture(scope="module")
def computer_list(session, computer_model, computer_schema, query):
class ComputerList(ResourceList):
schema = computer_schema
data_layer = {'model': computer_model,
'session': session,
'methods': {'query': query}}
yield ComputerList
@pytest.fixture(scope="module")
def computer_detail(session, computer_model, dummy_decorator, computer_schema):
class ComputerDetail(ResourceDetail):
schema = computer_schema
data_layer = {'model': computer_model,
'session': session}
methods = ['GET', 'PATCH']
yield ComputerDetail
@pytest.fixture(scope="module")
def computer_owner(session, computer_model, dummy_decorator, computer_schema):
class ComputerOwnerRelationship(ResourceRelationship):
schema = computer_schema
data_layer = {'session': session,
'model': computer_model}
yield ComputerOwnerRelationship
@pytest.fixture(scope="module")
def api_blueprint(client):
bp = Blueprint('api', __name__)
yield bp
@pytest.fixture(scope="module")
def register_routes(client, app, api_blueprint, person_list, person_detail, person_computers,
person_list_raise_jsonapiexception, person_list_raise_exception, person_list_response,
person_list_without_schema, computer_list, computer_detail, computer_owner):
api = Api(blueprint=api_blueprint)
api.route(person_list, 'person_list', '/persons')
api.route(person_detail, 'person_detail', '/persons/<int:person_id>')
api.route(person_computers, 'person_computers', '/persons/<int:person_id>/relationships/computers')
api.route(person_computers, 'person_computers_error', '/persons/<int:person_id>/relationships/computer')
api.route(person_list_raise_jsonapiexception, 'person_list_jsonapiexception', '/persons_jsonapiexception')
api.route(person_list_raise_exception, 'person_list_exception', '/persons_exception')
api.route(person_list_response, 'person_list_response', '/persons_response')
api.route(person_list_without_schema, 'person_list_without_schema', '/persons_without_schema')
api.route(computer_list, 'computer_list', '/computers', '/persons/<int:person_id>/computers')
    api.route(computer_detail, 'computer_detail', '/computers/<int:id>')
api.route(computer_owner, 'computer_owner', '/computers/<int:id>/relationships/owner')
api.init_app(app)
@pytest.fixture(scope="module")
def get_object_mock():
class get_object(object):
foo = type('foo', (object,), {
'property': type('prop', (object,), {
'mapper': type('map', (object,), {
'class_': 'test'
})()
})()
})()
def __init__(self, kwargs):
pass
return get_object
def test_add_pagination_links():
qs = {'page[number]': '15', 'page[size]': '10'}
qsm = QSManager(qs, None)
add_pagination_links(dict(), 1000, qsm, str())
def test_Node(person_model, person_schema, monkeypatch):
from copy import deepcopy
filt = {
'val': '0000',
'field': True,
'not': dict(),
'name': 'name',
'op': 'eq',
'strip': lambda: 's'
}
filt['not'] = deepcopy(filt)
del filt['not']['not']
n = Node(person_model,
filt,
None,
person_schema)
with pytest.raises(TypeError):
# print(n.val is None and n.field is None)
# # n.column
n.resolve()
with pytest.raises(AttributeError):
n.model = None
n.column
with pytest.raises(InvalidFilters):
n.model = person_model
n.filter_['op'] = ''
n.operator
with pytest.raises(InvalidFilters):
n.related_model
with pytest.raises(InvalidFilters):
n.related_schema
def test_check_method_requirements(monkeypatch):
self = type('self', (object,), dict())
request = type('request', (object,), dict(method='GET'))
monkeypatch.setattr(flask_rest_jsonapi.decorators, 'request', request)
with pytest.raises(Exception):
flask_rest_jsonapi.\
decorators.check_method_requirements(lambda: 1)(self())
def test_json_api_exception():
JsonApiException(None, None, title='test', status='test')
def test_query_string_manager(person_schema):
query_string = {'page[slumber]': '3'}
qsm = QSManager(query_string, person_schema)
with pytest.raises(BadRequest):
qsm.pagination
qsm.qs['sort'] = 'computers'
with pytest.raises(InvalidSort):
qsm.sorting
def test_resource(person_model, person_schema, session, monkeypatch):
def schema_load_mock(*args):
raise ValidationError(dict(errors=[dict(status=None, title=None)]))
query_string = {'page[slumber]': '3'}
app = type('app', (object,), dict(config=dict(DEBUG=True)))
headers = {'Content-Type': 'application/vnd.api+json'}
request = type('request', (object,), dict(method='POST',
headers=headers,
get_json=dict,
args=query_string))
dl = SqlalchemyDataLayer(dict(session=session, model=person_model))
rl = ResourceList()
rd = ResourceDetail()
rl._data_layer = dl
rl.schema = person_schema
rd._data_layer = dl
rd.schema = person_schema
monkeypatch.setattr(flask_rest_jsonapi.resource, 'request', request)
monkeypatch.setattr(flask_rest_jsonapi.resource, 'current_app', app)
monkeypatch.setattr(flask_rest_jsonapi.decorators, 'request', request)
monkeypatch.setattr(rl.schema, 'load', schema_load_mock)
r = super(flask_rest_jsonapi.resource.Resource, ResourceList)\
.__new__(ResourceList)
with pytest.raises(Exception):
r.dispatch_request()
rl.post()
rd.patch()
def test_compute_schema(person_schema):
query_string = {'page[number]': '3', 'fields[person]': list()}
qsm = QSManager(query_string, person_schema)
with pytest.raises(InvalidInclude):
flask_rest_jsonapi.schema.compute_schema(person_schema, dict(), qsm, ['id'])
flask_rest_jsonapi.schema.compute_schema(person_schema, dict(only=list()), qsm, list())
# test good cases
def test_get_list(client, register_routes, person, person_2):
with client:
querystring = urlencode({'page[number]': 1,
'page[size]': 1,
'fields[person]': 'name,birth_date',
'sort': '-name',
'include': 'computers.owner',
'filter': json.dumps(
[
{
'and': [
{
'name': 'computers',
'op': 'any',
'val': {
'name': 'serial',
'op': 'eq',
'val': '0000'
}
},
{
'or': [
{
'name': 'name',
'op': 'like',
'val': '%test%'
},
{
'name': 'name',
'op': 'like',
'val': '%test2%'
}
]
}
]
}
])})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 200
def test_get_list_disable_pagination(client, register_routes):
with client:
querystring = urlencode({'page[size]': 0})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 200
def test_head_list(client, register_routes):
with client:
response = client.head('/persons', content_type='application/vnd.api+json')
assert response.status_code == 200
def test_post_list(client, register_routes, computer):
payload = {
'data': {
'type': 'person',
'attributes': {
'name': 'test'
},
'relationships': {
'computers': {
'data': [
{
'type': 'computer',
'id': str(computer.id)
}
]
}
}
}
}
with client:
response = client.post('/persons', data=json.dumps(payload), content_type='application/vnd.api+json')
assert response.status_code == 201
def test_post_list_single(client, register_routes, person):
payload = {
'data': {
'type': 'computer',
'attributes': {
'serial': '1'
},
'relationships': {
'owner': {
'data': {
'type': 'person',
'id': str(person.person_id)
}
}
}
}
}
with client:
response = client.post('/computers', data=json.dumps(payload), content_type='application/vnd.api+json')
assert response.status_code == 201
def test_get_detail(client, register_routes, person):
with client:
response = client.get('/persons/' + str(person.person_id), content_type='application/vnd.api+json')
assert response.status_code == 200
def test_patch_detail(client, register_routes, computer, person):
payload = {
'data': {
'id': str(person.person_id),
'type': 'person',
'attributes': {
'name': 'test2'
},
'relationships': {
'computers': {
'data': [
{
'type': 'computer',
'id': str(computer.id)
}
]
}
}
}
}
with client:
response = client.patch('/persons/' + str(person.person_id),
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 200
def test_delete_detail(client, register_routes, person):
with client:
response = client.delete('/persons/' + str(person.person_id), content_type='application/vnd.api+json')
assert response.status_code == 200
def test_get_relationship(session, client, register_routes, computer, person):
session_ = session
person.computers = [computer]
session_.commit()
with client:
response = client.get('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
content_type='application/vnd.api+json')
assert response.status_code == 200
def test_get_relationship_empty(client, register_routes, person):
with client:
response = client.get('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
content_type='application/vnd.api+json')
assert response.status_code == 200
def test_get_relationship_single(session, client, register_routes, computer, person):
session_ = session
computer.person = person
session_.commit()
with client:
response = client.get('/computers/' + str(computer.id) + '/relationships/owner',
content_type='application/vnd.api+json')
assert response.status_code == 200
def test_get_relationship_single_empty(session, client, register_routes, computer):
with client:
response = client.get('/computers/' + str(computer.id) + '/relationships/owner',
content_type='application/vnd.api+json')
assert response.status_code == 200
def test_post_relationship(client, register_routes, computer, person):
payload = {
'data': [
{
'type': 'computer',
'id': str(computer.id)
}
]
}
with client:
response = client.post('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 200
def test_post_relationship_not_list(client, register_routes, computer, person):
payload = {
'data': {
'type': 'person',
'id': str(person.person_id)
}
}
with client:
response = client.post('/computers/' + str(computer.id) + '/relationships/owner',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 200
def test_patch_relationship(client, register_routes, computer, person):
payload = {
'data': [
{
'type': 'computer',
'id': str(computer.id)
}
]
}
with client:
response = client.patch('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 200
def test_patch_relationship_single(client, register_routes, computer, person):
payload = {
'data': {
'type': 'person',
'id': str(person.person_id)
}
}
with client:
response = client.patch('/computers/' + str(computer.id) + '/relationships/owner',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 200
def test_delete_relationship(session, client, register_routes, computer, person):
session_ = session
person.computers = [computer]
session_.commit()
payload = {
'data': [
{
'type': 'computer',
'id': str(computer.id)
}
]
}
with client:
response = client.delete('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 200
def test_delete_relationship_single(session, client, register_routes, computer, person):
session_ = session
computer.owner = person
session_.commit()
payload = {
'data': {
'type': 'person',
'id': str(person.person_id)
}
}
with client:
response = client.delete('/computers/' + str(computer.id) + '/relationships/owner',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 200
def test_get_list_response(client, register_routes):
with client:
response = client.get('/persons_response', content_type='application/vnd.api+json')
assert response.status_code == 200
# test various Accept headers
def test_single_accept_header(client, register_routes):
with client:
response = client.get('/persons', content_type='application/vnd.api+json', headers={'Accept': 'application/vnd.api+json'})
assert response.status_code == 200
def test_multiple_accept_header(client, register_routes):
with client:
response = client.get('/persons', content_type='application/vnd.api+json', headers={'Accept': '*/*, application/vnd.api+json'})
assert response.status_code == 200
def test_wrong_accept_header(client, register_routes):
with client:
response = client.get('/persons', content_type='application/vnd.api+json', headers={'Accept': 'error'})
assert response.status_code == 406
# test Content-Type error
def test_wrong_content_type(client, register_routes):
with client:
response = client.post('/persons')
assert response.status_code == 415
@pytest.fixture(scope="module")
def wrong_data_layer():
class WrongDataLayer(object):
pass
yield WrongDataLayer
def test_wrong_data_layer_inheritence(wrong_data_layer):
with pytest.raises(Exception):
class PersonDetail(ResourceDetail):
data_layer = {'class': wrong_data_layer}
PersonDetail()
def test_wrong_data_layer_kwargs_type():
with pytest.raises(Exception):
class PersonDetail(ResourceDetail):
data_layer = list()
PersonDetail()
def test_get_list_jsonapiexception(client, register_routes):
with client:
response = client.get('/persons_jsonapiexception', content_type='application/vnd.api+json')
assert response.status_code == 500
def test_get_list_exception(client, register_routes):
with client:
response = client.get('/persons_exception', content_type='application/vnd.api+json')
assert response.status_code == 500
def test_get_list_without_schema(client, register_routes):
with client:
response = client.post('/persons_without_schema', content_type='application/vnd.api+json')
assert response.status_code == 500
def test_get_list_bad_request(client, register_routes):
with client:
querystring = urlencode({'page[number': 3})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 400
def test_get_list_invalid_fields(client, register_routes):
with client:
querystring = urlencode({'fields[person]': 'error'})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 400
def test_get_list_invalid_include(client, register_routes):
with client:
querystring = urlencode({'include': 'error'})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 400
def test_get_list_invalid_filters_parsing(client, register_routes):
with client:
querystring = urlencode({'filter': 'error'})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 400
def test_get_list_invalid_page(client, register_routes):
with client:
querystring = urlencode({'page[number]': 'error'})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 400
def test_get_list_invalid_sort(client, register_routes):
with client:
querystring = urlencode({'sort': 'error'})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 400
def test_get_detail_object_not_found(client, register_routes):
with client:
response = client.get('/persons/3', content_type='application/vnd.api+json')
assert response.status_code == 404
def test_post_relationship_related_object_not_found(client, register_routes, person):
payload = {
'data': [
{
'type': 'computer',
'id': '2'
}
]
}
with client:
response = client.post('/persons/' + str(person.person_id) + '/relationships/computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 404
def test_get_relationship_relationship_field_not_found(client, register_routes, person):
with client:
response = client.get('/persons/' + str(person.person_id) + '/relationships/computer',
content_type='application/vnd.api+json')
assert response.status_code == 500
def test_get_list_invalid_filters_val(client, register_routes):
with client:
querystring = urlencode({'filter': json.dumps([{'name': 'computers', 'op': 'any'}])})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 400
def test_get_list_name(client, register_routes):
with client:
querystring = urlencode({'filter': json.dumps([{'name': 'computers__serial', 'op': 'any', 'val': '1'}])})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 200
def test_get_list_no_name(client, register_routes):
with client:
querystring = urlencode({'filter': json.dumps([{'op': 'any', 'val': '1'}])})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 400
def test_get_list_no_op(client, register_routes):
with client:
querystring = urlencode({'filter': json.dumps([{'name': 'computers__serial', 'val': '1'}])})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 400
def test_get_list_attr_error(client, register_routes):
with client:
querystring = urlencode({'filter': json.dumps([{'name': 'error', 'op': 'eq', 'val': '1'}])})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 400
def test_get_list_field_error(client, register_routes):
with client:
querystring = urlencode({'filter': json.dumps([{'name': 'name', 'op': 'eq', 'field': 'error'}])})
response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
assert response.status_code == 400
def test_sqlalchemy_data_layer_without_session(person_model, person_list):
with pytest.raises(Exception):
SqlalchemyDataLayer(dict(model=person_model, resource=person_list))
def test_sqlalchemy_data_layer_without_model(session, person_list):
with pytest.raises(Exception):
SqlalchemyDataLayer(dict(session=session, resource=person_list))
def test_sqlalchemy_data_layer_create_object_error(session, person_model, person_list, monkeypatch):
with pytest.raises(JsonApiException):
dl = SqlalchemyDataLayer(dict(session=session, model=person_model, resource=person_list))
dl.create_object(dict(), dict())
def test_sqlalchemy_data_layer_get_object_error(session, person_model):
with pytest.raises(Exception):
dl = SqlalchemyDataLayer(dict(session=session, model=person_model, id_field='error'))
dl.get_object(dict())
def test_sqlalchemy_data_layer_update_object_error(session, person_model, person_list, monkeypatch):
def commit_mock():
raise JsonApiException()
with pytest.raises(JsonApiException):
dl = SqlalchemyDataLayer(dict(session=session, model=person_model, resource=person_list))
monkeypatch.setattr(dl.session, 'commit', commit_mock)
dl.update_object(dict(), dict(), dict())
def test_sqlalchemy_data_layer_delete_object_error(session, person_model, person_list, monkeypatch):
def commit_mock():
raise JsonApiException()
def delete_mock(obj):
pass
with pytest.raises(JsonApiException):
dl = SqlalchemyDataLayer(dict(session=session, model=person_model, resource=person_list))
monkeypatch.setattr(dl.session, 'commit', commit_mock)
monkeypatch.setattr(dl.session, 'delete', delete_mock)
dl.delete_object(dict(), dict())
def test_sqlalchemy_data_layer_create_relationship_field_not_found(session, person_model):
with pytest.raises(Exception):
dl = SqlalchemyDataLayer(dict(session=session, model=person_model))
dl.create_relationship(dict(), 'error', '', dict(id=1))
def test_sqlalchemy_data_layer_create_relationship_error(session, person_model, get_object_mock, monkeypatch):
def commit_mock():
raise JsonApiException()
with pytest.raises(JsonApiException):
dl = SqlalchemyDataLayer(dict(session=session, model=person_model))
monkeypatch.setattr(dl.session, 'commit', commit_mock)
monkeypatch.setattr(dl, 'get_object', get_object_mock)
dl.create_relationship(dict(data=None), 'foo', '', dict(id=1))
def test_sqlalchemy_data_layer_get_relationship_field_not_found(session, person_model):
with pytest.raises(RelationNotFound):
dl = SqlalchemyDataLayer(dict(session=session, model=person_model))
dl.get_relationship('error', '', '', dict(id=1))
def test_sqlalchemy_data_layer_update_relationship_field_not_found(session, person_model):
with pytest.raises(Exception):
dl = SqlalchemyDataLayer(dict(session=session, model=person_model))
dl.update_relationship(dict(), 'error', '', dict(id=1))
def test_sqlalchemy_data_layer_update_relationship_error(session, person_model, get_object_mock, monkeypatch):
def commit_mock():
raise JsonApiException()
with pytest.raises(JsonApiException):
dl = SqlalchemyDataLayer(dict(session=session, model=person_model))
monkeypatch.setattr(dl.session, 'commit', commit_mock)
monkeypatch.setattr(dl, 'get_object', get_object_mock)
dl.update_relationship(dict(data=None), 'foo', '', dict(id=1))
def test_sqlalchemy_data_layer_delete_relationship_field_not_found(session, person_model):
with pytest.raises(Exception):
dl = SqlalchemyDataLayer(dict(session=session, model=person_model))
dl.delete_relationship(dict(), 'error', '', dict(id=1))
def test_sqlalchemy_data_layer_delete_relationship_error(session, person_model, get_object_mock, monkeypatch):
def commit_mock():
raise JsonApiException()
with pytest.raises(JsonApiException):
dl = SqlalchemyDataLayer(dict(session=session, model=person_model))
monkeypatch.setattr(dl.session, 'commit', commit_mock)
monkeypatch.setattr(dl, 'get_object', get_object_mock)
dl.delete_relationship(dict(data=None), 'foo', '', dict(id=1))
def test_sqlalchemy_data_layer_sort_query_error(session, person_model, monkeypatch):
with pytest.raises(InvalidSort):
dl = SqlalchemyDataLayer(dict(session=session, model=person_model))
dl.sort_query(None, [dict(field='test')])
def test_post_list_incorrect_type(client, register_routes, computer):
payload = {
'data': {
'type': 'error',
'attributes': {
'name': 'test'
},
'relationships': {
'computers': {
'data': [
{
'type': 'computer',
'id': str(computer.id)
}
]
}
}
}
}
with client:
response = client.post('/persons', data=json.dumps(payload), content_type='application/vnd.api+json')
assert response.status_code == 409
def test_post_list_validation_error(client, register_routes, computer):
payload = {
'data': {
'type': 'person',
'attributes': {},
'relationships': {
'computers': {
'data': [
{
'type': 'computer',
'id': str(computer.id)
}
]
}
}
}
}
with client:
response = client.post('/persons', data=json.dumps(payload), content_type='application/vnd.api+json')
assert response.status_code == 422
def test_patch_detail_incorrect_type(client, register_routes, computer, person):
payload = {
'data': {
'id': str(person.person_id),
'type': 'error',
'attributes': {
'name': 'test2'
},
'relationships': {
'computers': {
'data': [
{
'type': 'computer',
'id': str(computer.id)
}
]
}
}
}
}
with client:
response = client.patch('/persons/' + str(person.person_id),
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 409
def test_patch_detail_validation_error(client, register_routes, computer, person):
payload = {
'data': {
'id': str(person.person_id),
'type': 'person',
'attributes': {
'name': {'test2': 'error'}
},
'relationships': {
'computers': {
'data': [
{
'type': 'computer',
'id': str(computer.id)
}
]
}
}
}
}
with client:
response = client.patch('/persons/' + str(person.person_id),
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 422
def test_patch_detail_missing_id(client, register_routes, computer, person):
payload = {
'data': {
'type': 'person',
'attributes': {
'name': 'test2'
},
'relationships': {
'computers': {
'data': [
{
'type': 'computer',
'id': str(computer.id)
}
]
}
}
}
}
with client:
response = client.patch('/persons/' + str(person.person_id),
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_patch_detail_wrong_id(client, register_routes, computer, person):
payload = {
'data': {
'id': 'error',
'type': 'person',
'attributes': {
'name': 'test2'
},
'relationships': {
'computers': {
'data': [
{
'type': 'computer',
'id': str(computer.id)
}
]
}
}
}
}
with client:
response = client.patch('/persons/' + str(person.person_id),
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_post_relationship_no_data(client, register_routes, computer, person):
with client:
response = client.post('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(dict()),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_post_relationship_not_list_missing_type(client, register_routes, computer, person):
payload = {
'data': {
'id': str(person.person_id)
}
}
with client:
response = client.post('/computers/' + str(computer.id) + '/relationships/owner',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_post_relationship_not_list_missing_id(client, register_routes, computer, person):
payload = {
'data': {
'type': 'person'
}
}
with client:
response = client.post('/computers/' + str(computer.id) + '/relationships/owner',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_post_relationship_not_list_wrong_type(client, register_routes, computer, person):
payload = {
'data': {
'type': 'error',
'id': str(person.person_id)
}
}
with client:
response = client.post('/computers/' + str(computer.id) + '/relationships/owner',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 409
def test_post_relationship_missing_type(client, register_routes, computer, person):
payload = {
'data': [
{
'id': str(computer.id)
}
]
}
with client:
response = client.post('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_post_relationship_missing_id(client, register_routes, computer, person):
payload = {
'data': [
{
'type': 'computer',
}
]
}
with client:
response = client.post('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_post_relationship_wrong_type(client, register_routes, computer, person):
payload = {
'data': [
{
'type': 'error',
'id': str(computer.id)
}
]
}
with client:
response = client.post('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 409
def test_patch_relationship_no_data(client, register_routes, computer, person):
with client:
response = client.patch('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(dict()),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_patch_relationship_not_list_missing_type(client, register_routes, computer, person):
payload = {
'data': {
'id': str(person.person_id)
}
}
with client:
response = client.patch('/computers/' + str(computer.id) + '/relationships/owner',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_patch_relationship_not_list_missing_id(client, register_routes, computer, person):
payload = {
'data': {
'type': 'person'
}
}
with client:
response = client.patch('/computers/' + str(computer.id) + '/relationships/owner',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_patch_relationship_not_list_wrong_type(client, register_routes, computer, person):
payload = {
'data': {
'type': 'error',
'id': str(person.person_id)
}
}
with client:
response = client.patch('/computers/' + str(computer.id) + '/relationships/owner',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 409
def test_patch_relationship_missing_type(client, register_routes, computer, person):
payload = {
'data': [
{
'id': str(computer.id)
}
]
}
with client:
response = client.patch('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_patch_relationship_missing_id(client, register_routes, computer, person):
payload = {
'data': [
{
'type': 'computer',
}
]
}
with client:
response = client.patch('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_patch_relationship_wrong_type(client, register_routes, computer, person):
payload = {
'data': [
{
'type': 'error',
'id': str(computer.id)
}
]
}
with client:
response = client.patch('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 409
def test_delete_relationship_no_data(client, register_routes, computer, person):
with client:
response = client.delete('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(dict()),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_delete_relationship_not_list_missing_type(client, register_routes, computer, person):
payload = {
'data': {
'id': str(person.person_id)
}
}
with client:
response = client.delete('/computers/' + str(computer.id) + '/relationships/owner',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_delete_relationship_not_list_missing_id(client, register_routes, computer, person):
payload = {
'data': {
'type': 'person'
}
}
with client:
response = client.delete('/computers/' + str(computer.id) + '/relationships/owner',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_delete_relationship_not_list_wrong_type(client, register_routes, computer, person):
payload = {
'data': {
'type': 'error',
'id': str(person.person_id)
}
}
with client:
response = client.delete('/computers/' + str(computer.id) + '/relationships/owner',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 409
def test_delete_relationship_missing_type(client, register_routes, computer, person):
payload = {
'data': [
{
'id': str(computer.id)
}
]
}
with client:
response = client.delete('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_delete_relationship_missing_id(client, register_routes, computer, person):
payload = {
'data': [
{
'type': 'computer',
}
]
}
with client:
response = client.delete('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 400
def test_delete_relationship_wrong_type(client, register_routes, computer, person):
payload = {
'data': [
{
'type': 'error',
'id': str(computer.id)
}
]
}
with client:
response = client.delete('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
data=json.dumps(payload),
content_type='application/vnd.api+json')
assert response.status_code == 409
def test_base_data_layer():
base_dl = BaseDataLayer(dict())
with pytest.raises(NotImplementedError):
base_dl.create_object(None, dict())
with pytest.raises(NotImplementedError):
base_dl.get_object(dict())
with pytest.raises(NotImplementedError):
base_dl.get_collection(None, dict())
with pytest.raises(NotImplementedError):
base_dl.update_object(None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.delete_object(None, dict())
with pytest.raises(NotImplementedError):
base_dl.create_relationship(None, None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.get_relationship(None, None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.update_relationship(None, None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.delete_relationship(None, None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.query(dict())
with pytest.raises(NotImplementedError):
base_dl.before_create_object(None, dict())
with pytest.raises(NotImplementedError):
base_dl.after_create_object(None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.before_get_object(dict())
with pytest.raises(NotImplementedError):
base_dl.after_get_object(None, dict())
with pytest.raises(NotImplementedError):
base_dl.before_get_collection(None, dict())
with pytest.raises(NotImplementedError):
base_dl.after_get_collection(None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.before_update_object(None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.after_update_object(None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.before_delete_object(None, dict())
with pytest.raises(NotImplementedError):
base_dl.after_delete_object(None, dict())
with pytest.raises(NotImplementedError):
base_dl.before_create_relationship(None, None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.after_create_relationship(None, None, None, None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.before_get_relationship(None, None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.after_get_relationship(None, None, None, None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.before_update_relationship(None, None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.after_update_relationship(None, None, None, None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.before_delete_relationship(None, None, None, dict())
with pytest.raises(NotImplementedError):
base_dl.after_delete_relationship(None, None, None, None, None, dict())
def test_qs_manager():
with pytest.raises(ValueError):
QSManager([], None)
def test_api(app, person_list):
api = Api(app)
api.route(person_list, 'person_list', '/persons', '/person_list')
api.init_app()
def test_api_resources(app, person_list):
api = Api()
api.route(person_list, 'person_list2', '/persons', '/person_list')
api.init_app(app)
|
041081c608ce0d661cef51309b16406d3d45e14d
|
6d088ec295b33db11e378212d42d40d5a190c54c
|
/contrib/brl/bseg/bstm/pyscripts/bstm_scene_adaptor.py
|
f0438b997ad80a4677b34349b6a8e5210b39a8a1
|
[] |
no_license
|
vxl/vxl
|
29dffd5011f21a67e14c1bcbd5388fdbbc101b29
|
594ebed3d5fb6d0930d5758630113e044fee00bc
|
refs/heads/master
| 2023-08-31T03:56:24.286486
| 2023-08-29T17:53:12
| 2023-08-29T17:53:12
| 9,819,799
| 224
| 126
| null | 2023-09-14T15:52:32
| 2013-05-02T18:32:27
|
C++
|
UTF-8
|
Python
| false
| false
| 20,572
|
py
|
bstm_scene_adaptor.py
|
"""
Wrapper around bstm_adaptor functions. Creates "Scene" class to manage scene, caches, etc.
"""
import brl_init
import bstm_batch as batch
dbvalue = brl_init.register_batch(batch)
import sys
from os.path import basename, splitext
import bstm_adaptor
import vil_adaptor_bstm_batch as vil
import vpgl_adaptor_bstm_batch as vpgl
class bstm_scene_adaptor(object):
# scene adaptor init
def __init__(self, scene_str, device_string="gpu",
opencl_multi_scene_cache=False):
# init (list) self vars
self.scene = None
self.active_cache = None
self.device_string = None
self.cpu_cache = None
self.device = None
self.opencl_cache = None
self.str_cache = None
self.model_dir = None
self.bbox = None
self.lvcs = None
# if device_string is gpu, load up opencl
self.device_string = device_string
if device_string[0:3] == "gpu" or device_string[0:3] == "cpu":
self.scene, self.cpu_cache, self.device, self.opencl_cache = bstm_adaptor.load_opencl(
scene_str, device_string)
self.active_cache = self.opencl_cache
elif device_string[0:3] == "cpp":
self.scene, self.cpu_cache = bstm_adaptor.load_cpp(scene_str)
self.active_cache = self.cpu_cache
else:
print "UNKNOWN device type: ", device_string
print "exiting."
sys.exit(-1)
# store model directory for later use
self.bbox = bstm_adaptor.scene_bbox(self.scene)
self.description = bstm_adaptor.describe_scene(self.scene)
self.model_dir = self.description['dataPath']
# stores whether appearance model contains RGB - also includes view_dep
self.rgb = self.description['appType'].startswith("bstm_gauss_rgb")
self.view = ("view" in self.description['appType'])
self.lvcs = bstm_adaptor.scene_lvcs(self.scene)
def __del__(self):
if self.scene is not None:
batch.remove_data(self.scene.id)
if self.cpu_cache is not None:
batch.remove_data(self.cpu_cache.id)
if self.device is not None:
batch.remove_data(self.device.id)
if self.opencl_cache is not None:
batch.remove_data(self.opencl_cache.id)
if self.lvcs is not None:
batch.remove_data(self.lvcs.id)
# describe scene (returns data path)
def describe(self):
return self.description
# returns scene bounding box
def bounding_box(self):
return self.bbox
def lvcs(self):
return self.lvcs
def transform_to_scene(self, to_scene, trans, rot, scale):
if self.opencl_cache.type == "bstm_opencl_cache_sptr":
print("transforming scene")
batch.init_process("bstmVecfOclTransformSceneProcess")
batch.set_input_from_db(0, self.scene)
batch.set_input_from_db(1, to_scene)
batch.set_input_from_db(2, self.opencl_cache)
batch.set_input_double(3, trans[0])
batch.set_input_double(4, trans[1])
batch.set_input_double(5, trans[2])
batch.set_input_double(6, rot[0][0])
batch.set_input_double(7, rot[0][1])
batch.set_input_double(8, rot[0][2])
batch.set_input_double(9, rot[1][0])
batch.set_input_double(10, rot[1][1])
batch.set_input_double(11, rot[1][2])
batch.set_input_double(12, rot[2][0])
batch.set_input_double(13, rot[2][1])
batch.set_input_double(14, rot[2][2])
batch.set_input_double(15, scale[0])
batch.set_input_double(16, scale[1])
batch.set_input_double(17, scale[2])
return batch.run_process()
else:
print "ERROR: Cache type not recognized: ", self.opencl_cache.type
return False
# update wrapper, can pass in a Null device to use
def update(self, cam, img, time=0, update_alpha=True, update_changes_only=False,
mask=None, device_string="", var=-1.0):
cache = self.active_cache
dev = self.device
# check if force gpu or cpu
if device_string == "gpu":
cache = self.opencl_cache
elif device_string == "cpp":
cache = self.cpu_cache
dev = None
# run update grey or RGB
if self.rgb:
return bstm_adaptor.update_color(
self.scene, dev, cache, cam, img, time, var, mask, update_alpha, update_changes_only)
else:
return bstm_adaptor.update(self.scene, dev, cache, cam, img,
time, var, mask, update_alpha, update_changes_only)
# update wrapper, can pass in a Null device to use
def update_app(self, cam, img, time, var, mask,
device_string="", force_grey=False):
cache = self.active_cache
dev = self.device
# check if force gpu or cpu
if device_string == "gpu":
cache = self.opencl_cache
elif device_string == "cpp":
print " Not implemented in C++ yet "
return
if self.rgb and not force_grey:
bstm_adaptor.update_color(
self.scene,
dev,
cache,
cam,
img,
time,
var,
mask,
update_alpha=False)
else:
bstm_adaptor.update(
self.scene,
dev,
cache,
cam,
img,
time,
var,
mask,
update_alpha=False)
def render(self, cam, time=0, ni=1280, nj=720, device_string="",
ident_string="", tnear=1000000.0, tfar=1000000.0, render_label=False):
cache = self.active_cache
dev = self.device
# check if force gpu or cpu
if device_string == "gpu":
cache = self.opencl_cache
elif device_string == "cpp":
cache = self.cpu_cache
dev = None
exp_img, vis_img = bstm_adaptor.render(
self.scene, dev, cache, cam, time, ni, nj, render_label)
return exp_img, vis_img
# detect change wrapper,
def change_detect(self, cam, img, exp_img, n=1, raybelief="",
max_mode=False, rgb=False, device_string="", ident=""):
cache = self.active_cache
dev = self.device
if device_string == "gpu":
cache = self.opencl_cache
elif device_string == "cpp":
cache = self.cpu_cache
dev = None
cd_img = bstm_adaptor.change_detect(self.scene, cache, cam, img,
exp_img, dev, rgb, n, raybelief, max_mode, ident)
return cd_img
def refine(self, thresh=0.3, device_string=""):
if device_string == "":
nCells = bstm_adaptor.refine(
self.scene, self.active_cache, thresh, self.device)
elif device_string == "gpu":
nCells = bstm_adaptor.refine(
self.scene, self.opencl_cache, thresh, self.device)
elif device_string == "cpp":
nCells = bstm_adaptor.refine(
self.scene, self.cpu_cache, thresh, None)
return nCells
def merge(self, thresh=0.3, device_string=""):
if device_string == "":
bstm_adaptor.merge(
self.scene,
self.active_cache,
thresh,
self.device)
elif device_string == "gpu":
bstm_adaptor.merge(
self.scene,
self.opencl_cache,
thresh,
self.device)
elif device_string == "cpp":
bstm_adaptor.merge(self.scene, self.cpu_cache, thresh, None)
# only write the cpu_cache to disk
def write_cache(self, do_clear=0):
bstm_adaptor.write_cache(self.cpu_cache, do_clear)
# clear cache (both caches if OPENCL scene)
def clear_cache(self):
bstm_adaptor.clear_cache(self.cpu_cache)
if self.opencl_cache:
bstm_adaptor.clear_cache(self.opencl_cache)
################################
# get info functions
# def get_info_along_ray(self, cam, u, v, prefix, identifier=""):
# return get_info_along_ray(
# self.scene, self.cpu_cache, cam, u, v, prefix, identifier)
# def query_cell_brdf(self, point, model_type):
# return query_cell_brdf(self.scene, self.cpu_cache, point, model_type)
#####################################################################
######### BATCH UPDATE METHODS ######################################
#####################################################################
def create_stream_cache(self, imgs, interval=1, types="", max_gb=6.0):
# write image identifiers to file
# imgRange = range(0, len(imgs), interval);
# num_imgs = len(imgRange);
image_id_fname = self.model_dir + "/image_list.txt"
fd = open(image_id_fname, "w")
print >> fd, len(imgs)
# for i in imgRange:
# print >>fd, "img_%05d"%i
for img in imgs:
fname, fextension = splitext(img)
bname = basename(fname)
print >> fd, bname
fd.close()
# write type identifiers into file
type_id_fname = self.model_dir + "/type_names_list.txt"
fd2 = open(type_id_fname, "w")
print >>fd2, 4
print >>fd2, "aux0"
print >>fd2, "aux1"
print >>fd2, "aux2"
print >>fd2, "aux3"
fd2.close()
# open the stream cache, this is a read-only cache
batch.init_process("bstmCreateStreamCacheProcess")
batch.set_input_from_db(0, self.scene)
batch.set_input_string(1, type_id_fname)
batch.set_input_string(2, image_id_fname)
batch.set_input_float(3, max_gb)
batch.run_process()
(cache_id, cache_type) = batch.commit_output(0)
self.str_cache = batch.dbvalue(cache_id, cache_type)
# remove stream cache object from database
def destroy_stream_cache(self):
if self.str_cache:
batch.remove_data(self.str_cache.id)
self.str_cache = None
# writes aux data for each image in imgs array
def write_aux_data(self, imgs, cams):
for idx in range(len(imgs)):
print '--------------------------'
print "processing image " + imgs[idx]
# load cam/img
img, ni, nj = vil.load_image(imgs[idx])
pcam = vpgl.load_perspective_camera(cams[idx])
gcam = vpgl.persp2gen(pcam, ni, nj)
# update aux per view call
fname, fextension = splitext(imgs[idx])
imageID = basename(fname)
self.update_aux(img, gcam, imageID)
# takes already created aux buffers (for each image) and fits a Mixture of 3
# Gaussians to each cell, saves the appearance
def batch_paint(self, imgs, cams, device_string=""):
# verify stream cache
if (self.str_cache is None):
self.create_stream_cache(imgs)
# sigma norm table?
under_estimation_probability = 0.2
batch.init_process("bstaSigmaNormTableProcess")
batch.set_input_float(0, under_estimation_probability)
batch.run_process()
(id, type) = batch.commit_output(0)
n_table = batch.dvalue(id, type)
# call batch paint process
if device_string == "":
batch.init_process("bstmOclPaintBatchProcess")
batch.set_input_from_db(0, self.device)
batch.set_input_from_db(1, self.scene)
batch.set_input_from_db(2, self.opencl_cache)
batch.set_input_from_db(3, self.str_cache)
batch.set_input_from_db(4, n_table)
batch.run_process()
elif device_string == "cpu":
print "Can't use CPU for Paint Batch Process."
# close the files so that they can be reloaded after the next iteration
batch.init_process("bstmStreamCacheCloseFilesProcess")
batch.set_input_from_db(0, self.str_cache)
batch.run_process()
# write out afterwards
self.write_cache()
def cpu_batch_paint(self, imgs, cams):
if (self.str_cache is None):
self.create_stream_cache(imgs)
# sigma norm table?
under_estimation_probability = 0.2
batch.init_process("bstaSigmaNormTableProcess")
batch.set_input_float(0, under_estimation_probability)
batch.run_process()
(id, type) = batch.commit_output(0)
n_table = batch.dvalue(id, type)
# loop over images creating aux data
for idx in range(0, len(imgs)):
# load cam/img
img, ni, nj = vil.load_image(imgs[idx])
pcam = vpgl.load_perspective_camera(cams[idx])
gcam = vpgl.persp2gen(pcam, ni, nj)
# create norm intensity (num rays...)
batch.init_process("bstmCppCreateNormIntensitiesProcess")
batch.set_input_from_db(0, self.scene)
batch.set_input_from_db(1, self.cpu_cache)
batch.set_input_from_db(2, gcam)
batch.set_input_from_db(3, img)
batch.set_input_string(4, "img_" + "%05d" % idx)
batch.run_process()
# create aux
batch.init_process("bstmCppCreateAuxDataOPT2Process")
batch.set_input_from_db(0, self.scene)
batch.set_input_from_db(1, self.cpu_cache)
batch.set_input_from_db(2, gcam)
batch.set_input_from_db(3, img)
batch.set_input_string(4, "img_" + "%05d" % idx)
batch.run_process()
self.write_cache(True)
batch.init_process("bstmCppBatchUpdateOPT2Process")
batch.set_input_from_db(0, self.scene)
batch.set_input_from_db(1, self.cpu_cache)
batch.set_input_from_db(2, self.str_cache)
batch.set_input_from_db(3, n_table)
batch.run_process()
# close the files so that they can be reloaded after the next iteration
batch.init_process("bstmStreamCacheCloseFilesProcess")
batch.set_input_from_db(0, self.str_cache)
batch.run_process()
self.write_cache()
def cpu_batch_compute_normal_albedo(
self, metadata_filename_list, atmospheric_params_filename_list):
batch.init_process("bstmCppBatchComputeNormalAlbedoProcess")
batch.set_input_from_db(0, self.scene)
batch.set_input_from_db(1, self.cpu_cache)
batch.set_input_from_db(2, self.str_cache)
batch.set_input_string(3, metadata_filename_list)
batch.set_input_string(4, atmospheric_params_filename_list)
batch.run_process()
# close the files so that they can be reloaded after the next iteration
batch.init_process("bstmStreamCacheCloseFilesProcess")
batch.set_input_from_db(0, self.str_cache)
batch.run_process()
def ocl_batch_compute_normal_albedo(
self, img_id_list, metadata_filename_list, atmospheric_params_filename_list):
batch.init_process(
"bstmOclBatchComputeNormalAlbedoArrayProcess")
batch.set_input_from_db(0, self.device)
batch.set_input_from_db(1, self.scene)
batch.set_input_from_db(2, self.opencl_cache)
batch.set_input_string(3, img_id_list)
batch.set_input_string(4, metadata_filename_list)
batch.set_input_string(5, atmospheric_params_filename_list)
batch.run_process()
def render_expected_image_naa(
self, camera, ni, nj, metadata, atmospheric_params):
batch.init_process("bstmOclRenderExpectedImageNAAProcess")
batch.set_input_from_db(0, self.device)
batch.set_input_from_db(1, self.scene)
batch.set_input_from_db(2, self.opencl_cache)
batch.set_input_from_db(3, camera)
batch.set_input_unsigned(4, ni)
batch.set_input_unsigned(5, nj)
batch.set_input_from_db(6, metadata)
batch.set_input_from_db(7, atmospheric_params)
batch.run_process()
(id, type) = batch.commit_output(0)
exp_image = batch.dvalue(id, type)
(id, type) = batch.commit_output(1)
mask_image = batch.dvalue(id, type)
return(exp_image, mask_image)
def update_alpha_naa(self, image, camera, metadata,
atmospheric_params, alt_prior, alt_density):
batch.init_process("bstmOclUpdateAlphaNAAProcess")
batch.set_input_from_db(0, self.device)
batch.set_input_from_db(1, self.scene)
batch.set_input_from_db(2, self.opencl_cache)
batch.set_input_from_db(3, camera)
batch.set_input_from_db(4, image)
batch.set_input_from_db(5, metadata)
batch.set_input_from_db(6, atmospheric_params)
batch.set_input_from_db(7, alt_prior)
batch.set_input_from_db(8, alt_density)
if not (batch.run_process()):
print("ERROR: run_process() returned False")
return
def render_expected_albedo_normal(self, camera, ni, nj):
batch.init_process("bstmOclRenderExpectedAlbedoNormalProcess")
batch.set_input_from_db(0, self.device)
batch.set_input_from_db(1, self.scene)
batch.set_input_from_db(2, self.opencl_cache)
batch.set_input_from_db(3, camera)
batch.set_input_unsigned(4, ni)
batch.set_input_unsigned(5, nj)
batch.run_process()
(id, type) = batch.commit_output(0)
exp_albedo = batch.dvalue(id, type)
(id, type) = batch.commit_output(1)
exp_normal = batch.dvalue(id, type)
(id, type) = batch.commit_output(2)
mask_image = batch.dvalue(id, type)
return(exp_albedo, exp_normal, mask_image)
def transform(self, tx, ty, tz, rx, ry, rz, scale):
batch.init_process("bstmTransformModelProcess")
batch.set_input_from_db(0, self.scene)
batch.set_input_float(1, tx)
batch.set_input_float(2, ty)
batch.set_input_float(3, tz)
batch.set_input_float(4, rx)
batch.set_input_float(5, ry)
batch.set_input_float(6, rz)
batch.set_input_float(7, scale)
batch.run_process()
return
def cache_neighbor_info(self):
batch.init_process("bstmVecfOclCacheNeighborInfoProcess")
batch.set_input_from_db(0, self.scene)
batch.set_input_from_db(1, self.opencl_cache)
return batch.run_process()
def refine_scene_around_geometry(
self, filter_v, n_times, p_thresh, use_gpu):
if self.opencl_cache.type == "bstm_opencl_cache_sptr":
print("Refining around surface geometry")
batch.init_process(
"bstm_ocl_refine_scene_around_geometry_process")
batch.set_input_from_db(0, self.scene)
batch.set_input_from_db(1, self.opencl_cache)
batch.set_input_from_db(2, self.device)
batch.set_input_from_db(3, filter_v)
batch.set_input_int(4, n_times)
# use negative value to refine all
batch.set_input_float(5, p_thresh)
batch.set_input_bool(6, use_gpu)
return batch.run_process()
else:
print "ERROR: Cache type not recognized: ", cache.type
return False
def scene_statistics(self):
return bstm_adaptor.scene_statistics(self.scene, self.cpu_cache)
def ingest_boxm2_scene(self, boxm2_scene, boxm2_cpu_cache, time,
p_threshold=0.1, app_threshold=0.1):
""" Ingests a BOXM2 scene for a certain time step.
@param boxm2_scene - a boxm2_scene_adaptor object
@param time - the timestamp at which to insert the BOXM2 scene
"""
bstm_adaptor.ingest_boxm2_scene(
self.scene,
self.cpu_cache,
boxm2_scene,
boxm2_cpu_cache,
time,
p_threshold,
app_threshold)
def analyze_coherency(self, center, lengths, initial_time,
end_time, p_threshold, output_filename):
bstm_adaptor.analyze_coherency(
self.scene,
self.cpu_cache,
center,
lengths,
initial_time,
end_time,
p_threshold,
output_filename)
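# --- Hedged usage sketch (not part of the original adaptor code) -------------
# The class name and constructor signature below are assumptions inferred from
# the load_opencl/load_cpp calls above; camera and image loading follows the
# pattern already used in write_aux_data().
#
# model = bstm_scene_adaptor("model_dir/scene.xml", device_string="gpu")
# img, ni, nj = vil.load_image("frame_00000.png")
# pcam = vpgl.load_perspective_camera("cam_00000.txt")
# gcam = vpgl.persp2gen(pcam, ni, nj)
# model.update(gcam, img, time=0)                       # fold the frame into the 4-d model
# exp_img, vis_img = model.render(gcam, time=0, ni=ni, nj=nj)
# model.write_cache()                                   # flush the CPU cache to disk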
|
6579a88ce8de2d56f15e8fb6d2d830deb096bb0c
|
083e6e9cb64b15df45de504890581616225041ff
|
/mean.py
|
222f32ac68cc510daf34d36ebf3b46bd56ff398f
|
[
"MIT"
] |
permissive
|
okankop/Efficient-3DCNNs
|
50cc32da0e9a621bf19c0f7332349bf1e28a7b48
|
9db09f629d8acad06b316e40c351fa96d01c2268
|
refs/heads/master
| 2023-06-04T07:05:45.268148
| 2021-10-18T09:24:26
| 2021-10-18T09:24:26
| 179,437,464
| 723
| 160
|
MIT
| 2022-12-21T17:46:28
| 2019-04-04T06:42:05
|
Python
|
UTF-8
|
Python
| false
| false
| 635
|
py
|
mean.py
|
def get_mean(norm_value=255, dataset='activitynet'):
assert dataset in ['activitynet', 'kinetics']
if dataset == 'activitynet':
return [
114.7748 / norm_value, 107.7354 / norm_value, 99.4750 / norm_value
]
elif dataset == 'kinetics':
# Kinetics (10 videos for each class)
return [
110.63666788 / norm_value, 103.16065604 / norm_value,
96.29023126 / norm_value
]
def get_std(norm_value=255):
# Kinetics (10 videos for each class)
return [
38.7568578 / norm_value, 37.88248729 / norm_value,
40.02898126 / norm_value
]
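# Hedged usage sketch (not part of the original file): these per-channel values
# are intended to serve as the mean/std of an input-normalization step. The
# numbers below follow directly from the constants above.
#
# mean = get_mean(norm_value=255, dataset='kinetics')   # ~[0.434, 0.405, 0.378]
# std = get_std(norm_value=255)                         # ~[0.152, 0.149, 0.157]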
|
6c6023f54fae562376a0e0998b33517eaf228382
|
119646d6e1f13582c577fd7b87c9654839a0b806
|
/hubspot/discovery/communication_preferences/discovery.py
|
3e6287106900786bbb03f2ccc8399c102a999905
|
[] |
permissive
|
HubSpot/hubspot-api-python
|
446daaceeb3a6ce27edcd0414603c6d4bc07e327
|
d51a64c413461c0b82d8a41743e752d878747ca1
|
refs/heads/master
| 2023-08-31T09:52:56.583803
| 2023-08-07T11:00:27
| 2023-08-07T11:00:27
| 248,865,684
| 227
| 98
|
Apache-2.0
| 2023-09-14T15:25:19
| 2020-03-20T22:41:24
|
Python
|
UTF-8
|
Python
| false
| false
| 407
|
py
|
discovery.py
|
import hubspot.communication_preferences as api_client
from ..discovery_base import DiscoveryBase
class Discovery(DiscoveryBase):
@property
def definition_api(self) -> api_client.DefinitionApi:
return self._configure_api_client(api_client, "DefinitionApi")
@property
def status_api(self) -> api_client.StatusApi:
return self._configure_api_client(api_client, "StatusApi")
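# Hedged usage sketch (not part of the original file). How the SDK exposes this
# Discovery class is an assumption here: the top-level client is expected to
# surface it as a `communication_preferences` attribute, so the two wrapped APIs
# are reached as plain properties.
#
# from hubspot import HubSpot                     # assumed entry point of hubspot-api-python
# client = HubSpot(access_token="YOUR_ACCESS_TOKEN")
# definitions = client.communication_preferences.definition_api
# statuses = client.communication_preferences.status_api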
|
41413cfdd5863a7c41a8c6539156a4a3da684e2c
|
6b265b404d74b09e1b1e3710e8ea872cd50f4263
|
/Python/Interfacing_C_C++_Fortran/F2py/comp_pi_f2py.py
|
6f3d6d699afb7966b9d1c11324477310b224dc24
|
[
"CC-BY-4.0"
] |
permissive
|
gjbex/training-material
|
cdc189469ae2c7d43784ecdcb4bcca10ecbc21ae
|
e748466a2af9f3388a8b0ed091aa061dbfc752d6
|
refs/heads/master
| 2023-08-17T11:02:27.322865
| 2023-04-27T14:42:55
| 2023-04-27T14:42:55
| 18,587,808
| 130
| 60
|
CC-BY-4.0
| 2023-08-03T07:07:25
| 2014-04-09T06:35:58
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 502
|
py
|
comp_pi_f2py.py
|
#!/usr/bin/env python
from argparse import ArgumentParser
import sys
from comp_pi import compute_pi
def main():
arg_parser = ArgumentParser(description='compute pi using Fortran '
'function')
arg_parser.add_argument('n', type=int, default=1000, nargs='?',
help='number of random points')
options = arg_parser.parse_args()
print(compute_pi(options.n))
return 0
if __name__ == '__main__':
status = main()
sys.exit(status)
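# Hedged build note (not part of the original file): compute_pi is provided by a
# Fortran extension module compiled with NumPy's f2py. The Fortran source file
# name below is an assumption; only the invocation pattern itself is standard.
#
#     python -m numpy.f2py -c comp_pi.f90 -m comp_pi
#
# After building, `from comp_pi import compute_pi` resolves as used above.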
|
d592e40af2e101afddcca11226c2e2e55150e3cb
|
6181fcd4a266d963a0ee85971768c97922ca77cd
|
/benchmarks/src/garage_benchmarks/parameters.py
|
10ec5cccbbac21b7da0fd6c4b8fee9be280d0ed5
|
[
"MIT"
] |
permissive
|
rlworkgroup/garage
|
5d215bbecb3a4e74b504988d6684a7b04df69a80
|
2d594803636e341660cab0e81343abbe9a325353
|
refs/heads/master
| 2023-08-21T22:58:49.338034
| 2023-01-04T06:06:27
| 2023-01-04T06:06:27
| 136,846,372
| 1,832
| 363
|
MIT
| 2023-09-11T11:36:40
| 2018-06-10T21:31:23
|
Python
|
UTF-8
|
Python
| false
| false
| 651
|
py
|
parameters.py
|
"""Global parameters for benchmarking."""
from garage_benchmarks import benchmarks
Fetch1M_ENV_SET = [
task['env_id'] for task in benchmarks.get_benchmark('Fetch1M')['tasks']
]
MuJoCo1M_ENV_SET = [
task['env_id'] for task in benchmarks.get_benchmark('Mujoco1M')['tasks']
]
Atari10M_ENV_SET = [
task['env_id'] for task in benchmarks.get_benchmark('Atari10M')['tasks']
]
PIXEL_ENV_SET = ['CubeCrash-v0', 'MemorizeDigits-v0']
STATE_ENV_SET = [
'LunarLander-v2',
'Assault-ramDeterministic-v4',
'Breakout-ramDeterministic-v4',
'ChopperCommand-ramDeterministic-v4',
'Tutankham-ramDeterministic-v4',
'CartPole-v1',
]
|
9faf0bf9fbf25aa25e5fb4928025e6a8c92c1694
|
7f6c49d23b0b1d90a84f40dcba674f2543a39592
|
/wan2good/백준/19주차_새로운게임.py
|
0242a5cc65c60c4f46b832a58a2f813988dd6656
|
[] |
no_license
|
CodeTest-StudyGroup/Code-Test-Study
|
76c130007b94508d72c4ac4dd2ccd7c250707e02
|
ef90f26c47d92ed46a512d46a043550d32369658
|
refs/heads/master
| 2023-08-03T15:52:40.487588
| 2022-07-11T13:20:15
| 2022-07-11T13:20:15
| 253,215,176
| 1,205
| 241
| null | 2023-07-26T09:06:20
| 2020-04-05T11:06:52
|
C++
|
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
19주차_새로운게임.py
|
import sys
input=sys.stdin.readline
MIS=lambda:map(int,input().rstrip().split())
dx=[0,0,-1,1];dy=[1,-1,0,0]
n,k=MIS();board=[]
for _ in range(n):
board.append(list(MIS()))
chess_board=[[[] for _ in range(n)] for _ in range(n)]
chess=[0 for _ in range(k)]
for i in range(k):
x,y,z=MIS()
chess_board[x-1][y-1].append(i)
chess[i]=[x-1,y-1,z-1]
def solve(idx):
x,y,z=chess[idx]
if idx!=chess_board[x][y][0]:
return False
nx,ny=x+dx[z],y+dy[z]
if not 0<=nx<n or not 0<=ny<n or board[nx][ny]==2:
if z==0: nz=1
elif z==1: nz=0
elif z==2: nz=3
elif z==3: nz=2
chess[idx][2]=nz
nx=x+dx[nz]
ny=y+dy[nz]
if not 0<=nx<n or not 0<=ny<n or board[nx][ny]==2:
return False
chess_set=[]
chess_set.extend(chess_board[x][y])
chess_board[x][y]=[]
if board[nx][ny]==1:
chess_set=chess_set[::-1]
for i in chess_set:
chess_board[nx][ny].append(i)
chess[i][0]=nx
chess[i][1]=ny
if len(chess_board[nx][ny])>=4: return True
return False
turn=1
while turn<1001:
for i in range(k):
result=solve(i)
if result:
print(turn)
sys.exit()
turn+=1
print(-1)
|
5dbcf6936bc4215aa262b683dbcce76858cf0a12
|
e7aad0b1c5d8907dbb52000c482c396d1b801751
|
/test/functional/test-framework/test_tools/fio/fio_param.py
|
8db911b47bd9d3ef345e5b267d2ff895b21679d0
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
Open-CAS/open-cas-linux
|
c57d60f860702d7bc380c5d85cf502c0bf5e1bae
|
93334b4675afee8815f8ea12bb7297e0fd2a4195
|
refs/heads/master
| 2023-07-12T10:32:26.921455
| 2023-07-03T12:24:47
| 2023-07-03T12:24:47
| 178,356,155
| 202
| 84
|
BSD-3-Clause
| 2023-07-03T12:24:49
| 2019-03-29T07:37:15
|
Python
|
UTF-8
|
Python
| false
| false
| 13,426
|
py
|
fio_param.py
|
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import datetime
import json
import secrets
from enum import Enum
from types import SimpleNamespace as Namespace
from connection.base_executor import BaseExecutor
from core.test_run import TestRun
from storage_devices.device import Device
from test_tools.fio.fio_result import FioResult
from test_utils.linux_command import LinuxCommand
from test_utils.size import Size
class CpusAllowedPolicy(Enum):
shared = 0,
split = 1
class ErrorFilter(Enum):
none = 0,
read = 1,
write = 2,
io = 3,
verify = 4,
all = 5
class FioOutput(Enum):
normal = 'normal'
terse = 'terse'
json = 'json'
jsonplus = 'json+'
class IoEngine(Enum):
# Basic read or write I/O. fseek is used to position the I/O location.
sync = 0,
# Linux native asynchronous I/O.
libaio = 1,
# Basic pread or pwrite I/O.
psync = 2,
# Basic readv or writev I/O.
# Will emulate queuing by coalescing adjacent IOs into a single submission.
vsync = 3,
# Basic preadv or pwritev I/O.
pvsync = 4,
# POSIX asynchronous I/O using aio_read and aio_write.
posixaio = 5,
# File is memory mapped with mmap and data copied using memcpy.
mmap = 6,
# RADOS Block Device
rbd = 7,
# SPDK Block Device
spdk_bdev = 8
class ReadWrite(Enum):
randread = 0,
randrw = 1,
randwrite = 2,
read = 3,
readwrite = 4,
write = 5,
trim = 6,
randtrim = 7,
trimwrite = 8
class VerifyMethod(Enum):
# Use an md5 sum of the data area and store it in the header of each block.
md5 = 0,
# Use an experimental crc64 sum of the data area and store it in the header of each block.
crc64 = 1,
# Use optimized sha1 as the checksum function.
sha1 = 2,
# Verify a strict pattern.
# Normally fio includes a header with some basic information and a checksum, but if this
# option is set, only the specific pattern set with verify_pattern is verified.
pattern = 3,
# Write extra information about each I/O (timestamp, block number, etc.).
# The block number is verified.
meta = 4
class RandomGenerator(Enum):
tausworthe = 0,
lfsr = 1,
tausworthe64 = 2
class FioParam(LinuxCommand):
def __init__(self, fio, command_executor: BaseExecutor, command_name):
LinuxCommand.__init__(self, command_executor, command_name)
self.verification_pattern = ''
self.fio = fio
def get_verification_pattern(self):
if not self.verification_pattern:
self.verification_pattern = f'0x{secrets.token_hex(32)}'
return self.verification_pattern
def allow_mounted_write(self, value: bool = True):
return self.set_param('allow_mounted_write', int(value))
# example: "bs=8k,32k" => 8k for reads, 32k for writes and trims
def block_size(self, *sizes: Size):
return self.set_param('blocksize', *[int(size) for size in sizes])
def blocksize_range(self, ranges):
value = []
for bs_range in ranges:
str_range = str(int(bs_range[0])) + '-' + str(int(bs_range[1]))
value.append(str_range)
return self.set_param('blocksize_range', ",".join(value))
def bs_split(self, value):
return self.set_param('bssplit', value)
def buffer_pattern(self, pattern):
return self.set_param('buffer_pattern', pattern)
def continue_on_error(self, value: ErrorFilter):
return self.set_param('continue_on_error', value.name)
def cpus_allowed(self, value):
return self.set_param('cpus_allowed', ",".join(value))
def cpus_allowed_policy(self, value: CpusAllowedPolicy):
return self.set_param('cpus_allowed_policy', value.name)
def direct(self, value: bool = True):
if 'buffered' in self.command_param:
self.remove_param('buffered')
return self.set_param('direct', int(value))
def directory(self, directory):
return self.set_param('directory', directory)
def do_verify(self, value: bool = True):
return self.set_param('do_verify', int(value))
def exit_all_on_error(self, value: bool = True):
return self.set_flags('exitall_on_error') if value \
else self.remove_flag('exitall_on_error')
def group_reporting(self, value: bool = True):
return self.set_flags('group_reporting') if value else self.remove_flag('group_reporting')
def file_name(self, path):
return self.set_param('filename', path)
def file_size(self, size: Size):
return self.set_param('filesize', int(size))
def file_size_range(self, ranges):
value = []
for bs_range in ranges:
str_range = str(int(bs_range[0])) + '-' + str(int(bs_range[1]))
value.append(str_range)
return self.set_param('filesize', ",".join(value))
def fsync(self, value: int):
return self.set_param('fsync', value)
def ignore_errors(self, read_errors, write_errors, verify_errors):
separator = ':'
return self.set_param(
'ignore_error',
separator.join(str(err) for err in read_errors),
separator.join(str(err) for err in write_errors),
separator.join(str(err) for err in verify_errors))
def io_depth(self, value: int):
if value != 1:
if 'ioengine' in self.command_param and \
self.command_param['ioengine'] == 'sync':
TestRun.LOGGER.warning("Setting iodepth will have no effect with "
"'ioengine=sync' setting")
return self.set_param('iodepth', value)
def io_engine(self, value: IoEngine):
if value == IoEngine.sync:
if 'iodepth' in self.command_param and self.command_param['iodepth'] != 1:
TestRun.LOGGER.warning("Setting 'ioengine=sync' will cause iodepth setting "
"to be ignored")
return self.set_param('ioengine', value.name)
def io_size(self, value: Size):
return self.set_param('io_size', int(value.get_value()))
def loops(self, value: int):
return self.set_param('loops', value)
def no_random_map(self, value: bool = True):
if 'verify' in self.command_param:
raise ValueError("'NoRandomMap' parameter is mutually exclusive with verify")
if value:
return self.set_flags('norandommap')
else:
return self.remove_flag('norandommap')
def nr_files(self, value: int):
return self.set_param('nrfiles', value)
def num_ios(self, value: int):
return self.set_param('number_ios', value)
def num_jobs(self, value: int):
return self.set_param('numjobs', value)
def offset(self, value: Size):
return self.set_param('offset', int(value.get_value()))
def offset_increment(self, value: Size):
return self.set_param('offset_increment', f"{value.value}{value.unit.get_short_name()}")
def percentage_random(self, value: int):
if value <= 100:
return self.set_param('percentage_random', value)
raise ValueError("Argument out of range. Should be 0-100.")
def pool(self, value):
return self.set_param('pool', value)
def ramp_time(self, value: datetime.timedelta):
return self.set_param('ramp_time', int(value.total_seconds()))
def random_distribution(self, value):
return self.set_param('random_distribution', value)
def rand_repeat(self, value: int):
return self.set_param('randrepeat', value)
def rand_seed(self, value: int):
return self.set_param('randseed', value)
def read_write(self, rw: ReadWrite):
return self.set_param('readwrite', rw.name)
def run_time(self, value: datetime.timedelta):
if value.total_seconds() == 0:
raise ValueError("Runtime parameter must not be set to 0.")
return self.set_param('runtime', int(value.total_seconds()))
def serialize_overlap(self, value: bool = True):
return self.set_param('serialize_overlap', int(value))
def size(self, value: Size):
return self.set_param('size', int(value.get_value()))
def stonewall(self, value: bool = True):
return self.set_flags('stonewall') if value else self.remove_param('stonewall')
def sync(self, value: bool = True):
return self.set_param('sync', int(value))
def time_based(self, value: bool = True):
return self.set_flags('time_based') if value else self.remove_flag('time_based')
def thread(self, value: bool = True):
return self.set_flags('thread') if value else self.remove_param('thread')
def lat_percentiles(self, value: bool):
return self.set_param('lat_percentiles', int(value))
def scramble_buffers(self, value: bool):
return self.set_param('scramble_buffers', int(value))
def slat_percentiles(self, value: bool):
return self.set_param('slat_percentiles', int(value))
def spdk_core_mask(self, value: str):
return self.set_param('spdk_core_mask', value)
def spdk_json_conf(self, path):
return self.set_param('spdk_json_conf', path)
def clat_percentiles(self, value: bool):
return self.set_param('clat_percentiles', int(value))
def percentile_list(self, value: []):
val = ':'.join(value) if len(value) > 0 else '100'
return self.set_param('percentile_list', val)
def verification_with_pattern(self, pattern=None):
if pattern is not None and pattern != '':
self.verification_pattern = pattern
return self.verify(VerifyMethod.pattern) \
.set_param('verify_pattern', self.get_verification_pattern()) \
.do_verify()
def verify(self, value: VerifyMethod):
return self.set_param('verify', value.name)
def create_only(self, value: bool = False):
return self.set_param('create_only', int(value))
def verify_pattern(self, pattern=None):
return self.set_param('verify_pattern', pattern or self.get_verification_pattern())
def verify_backlog(self, value: int):
return self.set_param('verify_backlog', value)
def verify_dump(self, value: bool = True):
return self.set_param('verify_dump', int(value))
def verify_fatal(self, value: bool = True):
return self.set_param('verify_fatal', int(value))
def verify_only(self, value: bool = True):
return self.set_flags('verify_only') if value else self.remove_param('verify_only')
def write_hint(self, value: str):
return self.set_param('write_hint', value)
def write_percentage(self, value: int):
if value <= 100:
return self.set_param('rwmixwrite', value)
raise ValueError("Argument out of range. Should be 0-100.")
def random_generator(self, value: RandomGenerator):
return self.set_param('random_generator', value.name)
def target(self, target):
if isinstance(target, Device):
return self.file_name(target.path)
return self.file_name(target)
def add_job(self, job_name=None):
if not job_name:
job_name = f'job{len(self.fio.jobs)}'
new_job = FioParamConfig(self.fio, self.command_executor, f'[{job_name}]')
self.fio.jobs.append(new_job)
return new_job
def clear_jobs(self):
self.fio.jobs = []
return self
def edit_global(self):
return self.fio.global_cmd_parameters
def run(self, fio_timeout: datetime.timedelta = None):
if "per_job_logs" in self.fio.global_cmd_parameters.command_param:
self.fio.global_cmd_parameters.set_param("per_job_logs", '0')
fio_output = self.fio.run(fio_timeout)
if fio_output.exit_code != 0:
raise Exception(f"Exception occurred while trying to execute fio, exit_code:"
f"{fio_output.exit_code}.\n"
f"stdout: {fio_output.stdout}\nstderr: {fio_output.stderr}")
TestRun.executor.run(f"sed -i '/^[[:alnum:]]/d' {self.fio.fio_file}") # Remove warnings
out = self.command_executor.run_expect_success(f"cat {self.fio.fio_file}").stdout
return self.get_results(out)
def run_in_background(self):
if "per_job_logs" in self.fio.global_cmd_parameters.command_param:
self.fio.global_cmd_parameters.set_param("per_job_logs", '0')
return self.fio.run_in_background()
@staticmethod
def get_results(result):
data = json.loads(result, object_hook=lambda d: Namespace(**d))
jobs_list = []
if hasattr(data, 'jobs'):
jobs = data.jobs
for job in jobs:
job_result = FioResult(data, job)
jobs_list.append(job_result)
return jobs_list
class FioParamCmd(FioParam):
def __init__(self, fio, command_executor: BaseExecutor, command_name='fio'):
FioParam.__init__(self, fio, command_executor, command_name)
self.param_name_prefix = "--"
class FioParamConfig(FioParam):
def __init__(self, fio, command_executor: BaseExecutor, command_name='[global]'):
FioParam.__init__(self, fio, command_executor, command_name)
self.param_name_prefix = "\n"
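# Hedged usage sketch (not part of the original file). Every setter above returns
# the FioParam object via set_param/set_flags, so workloads are normally built as
# a fluent chain. The Fio factory and the target device below are assumptions
# about the surrounding test framework; only the chaining pattern comes from
# this file.
#
# fio_run = (Fio().create_command()
#            .io_engine(IoEngine.libaio)
#            .read_write(ReadWrite.randwrite)
#            .block_size(Size(4, Unit.KibiByte))
#            .direct()
#            .target(test_device))
# output = fio_run.run()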
|
c95ccab01c1bb1e27143bb02fb3d6f6c5a21ddef
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-iotanalytics/huaweicloudsdkiotanalytics/v1/model/run.py
|
f97a8d98630da7f0c2a2a198ae3b12ef943125ea
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 10,279
|
py
|
run.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class Run:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'run_id': 'str',
'job_id': 'str',
'job_name': 'str',
'job_type': 'str',
'start_time': 'str',
'duration': 'int',
'status': 'str',
'is_schedule_job': 'bool',
'computing_resource_name': 'str',
'sql_job': 'SqlJobRun'
}
attribute_map = {
'run_id': 'run_id',
'job_id': 'job_id',
'job_name': 'job_name',
'job_type': 'job_type',
'start_time': 'start_time',
'duration': 'duration',
'status': 'status',
'is_schedule_job': 'is_schedule_job',
'computing_resource_name': 'computing_resource_name',
'sql_job': 'sql_job'
}
def __init__(self, run_id=None, job_id=None, job_name=None, job_type=None, start_time=None, duration=None, status=None, is_schedule_job=None, computing_resource_name=None, sql_job=None):
"""Run
The model defined in huaweicloud sdk
:param run_id: The job run ID.
:type run_id: str
:param job_id: The job ID.
:type job_id: str
:param job_name: The job name.
:type job_name: str
:param job_type: The job type.
:type job_type: str
:param start_time: The job start time, in the ISO date-time format yyyy-MM-dd'T'HH:mm:ss'Z'.
:type start_time: str
:param duration: The job run duration, in milliseconds.
:type duration: int
:param status: The current status of this job: LAUNCHING, RUNNING, FINISHED, FAILED or CANCELLED.
:type status: str
:param is_schedule_job: Whether this is a scheduled job.
:type is_schedule_job: bool
:param computing_resource_name: The name of the computing resource.
:type computing_resource_name: str
:param sql_job:
:type sql_job: :class:`huaweicloudsdkiotanalytics.v1.SqlJobRun`
"""
self._run_id = None
self._job_id = None
self._job_name = None
self._job_type = None
self._start_time = None
self._duration = None
self._status = None
self._is_schedule_job = None
self._computing_resource_name = None
self._sql_job = None
self.discriminator = None
self.run_id = run_id
if job_id is not None:
self.job_id = job_id
self.job_name = job_name
self.job_type = job_type
self.start_time = start_time
self.duration = duration
self.status = status
if is_schedule_job is not None:
self.is_schedule_job = is_schedule_job
if computing_resource_name is not None:
self.computing_resource_name = computing_resource_name
if sql_job is not None:
self.sql_job = sql_job
@property
def run_id(self):
"""Gets the run_id of this Run.
The job run ID.
:return: The run_id of this Run.
:rtype: str
"""
return self._run_id
@run_id.setter
def run_id(self, run_id):
"""Sets the run_id of this Run.
The job run ID.
:param run_id: The run_id of this Run.
:type run_id: str
"""
self._run_id = run_id
@property
def job_id(self):
"""Gets the job_id of this Run.
The job ID.
:return: The job_id of this Run.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this Run.
The job ID.
:param job_id: The job_id of this Run.
:type job_id: str
"""
self._job_id = job_id
@property
def job_name(self):
"""Gets the job_name of this Run.
The job name.
:return: The job_name of this Run.
:rtype: str
"""
return self._job_name
@job_name.setter
def job_name(self, job_name):
"""Sets the job_name of this Run.
The job name.
:param job_name: The job_name of this Run.
:type job_name: str
"""
self._job_name = job_name
@property
def job_type(self):
"""Gets the job_type of this Run.
The job type.
:return: The job_type of this Run.
:rtype: str
"""
return self._job_type
@job_type.setter
def job_type(self, job_type):
"""Sets the job_type of this Run.
The job type.
:param job_type: The job_type of this Run.
:type job_type: str
"""
self._job_type = job_type
@property
def start_time(self):
"""Gets the start_time of this Run.
The job start time, in the ISO date-time format yyyy-MM-dd'T'HH:mm:ss'Z'.
:return: The start_time of this Run.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this Run.
The job start time, in the ISO date-time format yyyy-MM-dd'T'HH:mm:ss'Z'.
:param start_time: The start_time of this Run.
:type start_time: str
"""
self._start_time = start_time
@property
def duration(self):
"""Gets the duration of this Run.
The job run duration, in milliseconds.
:return: The duration of this Run.
:rtype: int
"""
return self._duration
@duration.setter
def duration(self, duration):
"""Sets the duration of this Run.
The job run duration, in milliseconds.
:param duration: The duration of this Run.
:type duration: int
"""
self._duration = duration
@property
def status(self):
"""Gets the status of this Run.
The current status of this job: LAUNCHING, RUNNING, FINISHED, FAILED or CANCELLED.
:return: The status of this Run.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Run.
The current status of this job: LAUNCHING, RUNNING, FINISHED, FAILED or CANCELLED.
:param status: The status of this Run.
:type status: str
"""
self._status = status
@property
def is_schedule_job(self):
"""Gets the is_schedule_job of this Run.
Whether this is a scheduled job.
:return: The is_schedule_job of this Run.
:rtype: bool
"""
return self._is_schedule_job
@is_schedule_job.setter
def is_schedule_job(self, is_schedule_job):
"""Sets the is_schedule_job of this Run.
Whether this is a scheduled job.
:param is_schedule_job: The is_schedule_job of this Run.
:type is_schedule_job: bool
"""
self._is_schedule_job = is_schedule_job
@property
def computing_resource_name(self):
"""Gets the computing_resource_name of this Run.
The name of the computing resource.
:return: The computing_resource_name of this Run.
:rtype: str
"""
return self._computing_resource_name
@computing_resource_name.setter
def computing_resource_name(self, computing_resource_name):
"""Sets the computing_resource_name of this Run.
The name of the computing resource.
:param computing_resource_name: The computing_resource_name of this Run.
:type computing_resource_name: str
"""
self._computing_resource_name = computing_resource_name
@property
def sql_job(self):
"""Gets the sql_job of this Run.
:return: The sql_job of this Run.
:rtype: :class:`huaweicloudsdkiotanalytics.v1.SqlJobRun`
"""
return self._sql_job
@sql_job.setter
def sql_job(self, sql_job):
"""Sets the sql_job of this Run.
:param sql_job: The sql_job of this Run.
:type sql_job: :class:`huaweicloudsdkiotanalytics.v1.SqlJobRun`
"""
self._sql_job = sql_job
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Run):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
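# Hedged usage sketch (not part of the generated SDK file): constructing a Run
# and serializing it with the helpers above. All field values are illustrative
# placeholders, not real job data.
#
# example = Run(run_id="r-001", job_id="j-001", job_name="demo", job_type="SQL",
#               start_time="2021-01-01T00:00:00Z", duration=1200, status="FINISHED")
# print(example.to_dict())   # plain dict keyed by the attribute names above
# print(example)             # __repr__ falls back to the JSON string from to_str()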
|
db76daab0aca284201133a88c0dc298a4dc429cb
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/0092. Reverse Linked List II/0092.py
|
bd46373390664b6cd2ebed2895063cb75418f951
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 516
|
py
|
0092.py
|
class Solution:
def reverseBetween(self, head: Optional[ListNode], left: int, right: int) -> Optional[ListNode]:
if left == 1:
return self.reverseN(head, right)
head.next = self.reverseBetween(head.next, left - 1, right - 1)
return head
def reverseN(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:
if n == 1:
return head
newHead = self.reverseN(head.next, n - 1)
headNext = head.next
head.next = headNext.next
headNext.next = head
return newHead
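# Hedged worked example (not part of the original solution): reversing positions
# [2, 4] of 1->2->3->4->5 yields 1->4->3->2->5. The tiny ListNode shim below is
# an assumption; LeetCode normally injects its own definition.
#
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val, self.next = val, next
#
# head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
# node = Solution().reverseBetween(head, 2, 4)
# while node:                      # prints 1 4 3 2 5
#     print(node.val, end=' ')
#     node = node.next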
|
70fd200e72e8454a2b5c6959561ca864c057cfb2
|
1b364500b756c5096d94358d3ad745e248c20dc4
|
/wradlib/io/hdf.py
|
dda7af964f1e2124706bea27ab4d1026a2ffd9a8
|
[
"MIT"
] |
permissive
|
wradlib/wradlib
|
fdf3b3670aa8b2ea6ddf4bb6083321992eb361a9
|
17f876c2c6257171888d6e04f5cbb86f0ac46f90
|
refs/heads/main
| 2023-08-26T09:07:45.866267
| 2023-05-31T06:12:50
| 2023-05-31T06:12:50
| 52,089,638
| 228
| 89
|
MIT
| 2023-09-11T23:29:48
| 2016-02-19T13:32:22
|
Python
|
UTF-8
|
Python
| false
| false
| 25,910
|
py
|
hdf.py
|
#!/usr/bin/env python
# Copyright (c) 2011-2023, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
HDF Data I/O
^^^^^^^^^^^^
Formerly available xarray-based code has been ported to the xradar package `[1]`_.
.. _[1]: https://xradar.rtfd.io
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
__all__ = [
"open_gpm_dataset",
"read_generic_hdf5",
"read_opera_hdf5",
"read_gamic_hdf5",
"to_hdf5",
"from_hdf5",
"read_gpm",
"read_trmm",
]
__doc__ = __doc__.format("\n ".join(__all__))
import datetime as dt
import numpy as np
import xarray as xr
from packaging.version import Version
from wradlib.util import import_optional
h5py = import_optional("h5py")
h5netcdf = import_optional("h5netcdf")
nc = import_optional("netCDF4")
def read_generic_hdf5(fname):
"""Reads hdf5 files according to their structure
In contrast to other file readers under :meth:`wradlib.io`, this function
will *not* return a two item tuple with (data, metadata). Instead, this
function returns ONE dictionary that contains all the file contents - both
data and metadata. The keys of the output dictionary conform to the
Group/Subgroup directory branches of the original file.
Parameters
----------
fname : str or file-like
a hdf5 file path or file-like object
Returns
-------
output : dict
a dictionary that contains both data and metadata according to the
original hdf5 file structure
Examples
--------
See :ref:`/notebooks/fileio/wradlib_radar_formats.ipynb#Generic-HDF5`.
"""
fcontent = {}
def filldict(x, y):
# create a new container
tmp = {}
# add attributes if present
if len(y.attrs) > 0:
tmp["attrs"] = dict(y.attrs)
# add data if it is a dataset
if isinstance(y, h5py.Dataset):
tmp["data"] = np.array(y)
# only add to the dictionary, if we have something meaningful to add
if tmp != {}:
fcontent[x] = tmp
with h5py.File(fname, "r") as f:
f.visititems(filldict)
return fcontent
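# Hedged usage sketch (not part of the original module); the file name below is a
# placeholder. Keys of the returned dict are hdf5 group/dataset paths; datasets
# carry a "data" entry and attributes live under "attrs", as built in filldict().
#
# content = read_generic_hdf5("radar_volume.h5")
# for path, item in content.items():
#     shape = item["data"].shape if "data" in item else "-"
#     print(path, sorted(item.get("attrs", {})), shape)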
def read_opera_hdf5(fname):
"""Reads hdf5 files according to OPERA conventions
Please refer to the OPERA data model documentation :cite:`OPERA-data-model`
in order to understand how an hdf5 file is organized that conforms to the
OPERA ODIM_H5 conventions.
In contrast to other file readers under :meth:`wradlib.io`, this function
will *not* return a two item tuple with (data, metadata). Instead, this
function returns ONE dictionary that contains all the file contents - both
data and metadata. The keys of the output dictionary conform to the
Group/Subgroup directory branches of the original file.
If the end member of a branch (or path) is "data", then the corresponding
item of output dictionary is a numpy array with actual data.
Any other end member (*how*, *where*,
or *what*) will contain the meta information applying to the corresponding
level of the file hierarchy.
Parameters
----------
fname : str or file-like
a hdf5 file path or file-like object
Returns
-------
output : dict
a dictionary that contains both data and metadata according to the
original hdf5 file structure
"""
# now we browse through all Groups and Datasets and store the info in one
# dictionary
fcontent = {}
def filldict(x, y):
if isinstance(y, h5py.Group):
if len(y.attrs) > 0:
fcontent[x] = dict(y.attrs)
elif isinstance(y, h5py.Dataset):
fcontent[x] = np.array(y)
with h5py.File(fname, "r") as f:
f.visititems(filldict)
return fcontent
def read_gamic_scan_attributes(scan, scan_type):
"""Read attributes from one particular scan from a GAMIC hdf5 file
Parameters
----------
scan : object
scan object from hdf5 file
scan_type : str
"PVOL" (plan position indicator) or "RHI" (range height indicator)
Returns
-------
sattrs : dict
dictionary of scan attributes
"""
# global zero_index, el, az
# placeholder for attributes
sattrs = {}
# link to scans 'how' hdf5 group
sg1 = scan["how"]
# get scan attributes
for attrname in list(sg1.attrs):
sattrs[attrname] = sg1.attrs.get(attrname)
sattrs["bin_range"] = sattrs["range_step"] * sattrs["range_samples"]
# get scan header
ray_header = scan["ray_header"]
# az, el, zero_index for PPI scans
if scan_type == "PVOL":
azi_start = ray_header["azimuth_start"]
azi_stop = ray_header["azimuth_stop"]
# Azimuth corresponding to 1st ray
zero_index = np.where(azi_stop < azi_start)
azi_stop[zero_index[0]] += 360
zero_index = zero_index[0] + 1
az = (azi_start + azi_stop) / 2
az = np.roll(az, -zero_index, axis=0)
az = np.round(az, 1)
el = sg1.attrs.get("elevation")
# az, el, zero_index for RHI scans
if scan_type == "RHI":
ele_start = np.round(ray_header["elevation_start"], 1)
ele_stop = np.round(ray_header["elevation_stop"], 1)
angle_step = np.round(sattrs["angle_step"], 1)
angle_step = int(np.round(sattrs["ele_stop"], 1) / angle_step)
# Elevation corresponding to 1st ray
if ele_start[0] < 0:
ele_start = ele_start[1:]
ele_stop = ele_stop[1:]
zero_index = np.where(ele_stop > ele_start)
zero_index = zero_index[0] # - 1
el = (ele_start + ele_stop) / 2
el = np.round(el, 1)
el = el[-angle_step:]
az = sg1.attrs.get("azimuth")
# save zero_index (first ray) to scan attributes
sattrs["zero_index"] = zero_index[0]
# create range array
r = np.arange(
sattrs["bin_range"],
sattrs["bin_range"] * sattrs["bin_count"] + sattrs["bin_range"],
sattrs["bin_range"],
)
# save variables to scan attributes
sattrs["az"] = az
sattrs["el"] = el
sattrs["r"] = r
sattrs["Time"] = sattrs.pop("timestamp")
sattrs["max_range"] = r[-1]
return sattrs
def read_gamic_scan(scan, scan_type, wanted_moments):
"""Read data from one particular scan from GAMIC hdf5 file
Parameters
----------
scan : object
scan object from hdf5 file
scan_type : str
"PVOL" (plan position indicator) or "RHI" (range height indicator)
wanted_moments : sequence
sequence of strings containing upper case names of moment(s) to
be returned
Returns
-------
data : dict
dictionary of moment data (numpy arrays)
sattrs : dict
dictionary of scan attributes
"""
# placeholder for data and attrs
data = {}
sattrs = {}
# try to read wanted moments
for mom in list(scan):
if "moment" in mom:
data1 = {}
sg2 = scan[mom]
actual_moment = sg2.attrs.get("moment")
if Version(h5py.__version__) < Version("3.0.0"):
actual_moment = actual_moment.decode()
actual_moment = actual_moment.upper()
if (actual_moment in wanted_moments) or (wanted_moments == "all"):
# read attributes only once
if not sattrs:
sattrs = read_gamic_scan_attributes(scan, scan_type)
mdata = sg2[...]
dyn_range_max = sg2.attrs.get("dyn_range_max")
dyn_range_min = sg2.attrs.get("dyn_range_min")
bin_format = sg2.attrs.get("format")
if Version(h5py.__version__) < Version("3.0.0"):
bin_format = bin_format.decode()
if bin_format == "UV8":
div = 254
else:
div = 65534
mdata = (
dyn_range_min + (mdata - 1) * (dyn_range_max - dyn_range_min) / div
)
if scan_type == "PVOL":
# rotate accordingly
mdata = np.roll(mdata, -1 * sattrs["zero_index"], axis=0)
if scan_type == "RHI":
# remove first zero angles
sdiff = mdata.shape[0] - sattrs["el"].shape[0]
mdata = mdata[sdiff:, :]
data1["data"] = mdata
data1["dyn_range_max"] = dyn_range_max
data1["dyn_range_min"] = dyn_range_min
data[actual_moment] = data1
return data, sattrs
def read_gamic_hdf5(filename, wanted_elevations=None, wanted_moments=None):
"""Data reader for hdf5 files produced by the commercial \
GAMIC Enigma V3 MURAN software
See GAMIC homepage for further info (https://www.gamic.com).
Parameters
----------
filename : str or file-like
path of the gamic hdf5 file or file-like object
wanted_elevations : sequence
sequence of strings of elevation_angle(s) of scan (only needed for PPI)
wanted_moments : sequence
sequence of strings of moment name(s)
Returns
-------
data : dict
dictionary of scan and moment data (numpy arrays)
attrs : dict
dictionary of attributes
Examples
--------
See :ref:`/notebooks/fileio/wradlib_radar_formats.ipynb#GAMIC-HDF5`.
"""
# check elevations
if wanted_elevations is None:
wanted_elevations = "all"
# check wanted_moments
if wanted_moments is None:
wanted_moments = "all"
# read the data from file
with h5py.File(filename, "r") as f:
# placeholder for attributes and data
attrs = {}
vattrs = {}
data = {}
# check if GAMIC file and
try:
f["how"].attrs.get("software")
except KeyError:
print("WRADLIB: File is no GAMIC hdf5!")
raise
# get scan_type (PVOL or RHI)
scan_type = f["what"].attrs.get("object")
if Version(h5py.__version__) < Version("3.0.0"):
scan_type = scan_type.decode()
# single or volume scan
if scan_type == "PVOL":
# loop over 'main' hdf5 groups (how, scanX, what, where)
for n in list(f):
if "scan" in n:
g = f[n]
sg1 = g["how"]
# get scan elevation
el = sg1.attrs.get("elevation")
el = str(round(el, 2))
# try to read scan data and attrs
# if wanted_elevations are found
if (el in wanted_elevations) or (wanted_elevations == "all"):
sdata, sattrs = read_gamic_scan(
scan=g, scan_type=scan_type, wanted_moments=wanted_moments
) # noqa
if sdata:
data[n.upper()] = sdata
if sattrs:
attrs[n.upper()] = sattrs
# single rhi scan
elif scan_type == "RHI":
# loop over 'main' hdf5 groups (how, scanX, what, where)
for n in list(f):
if "scan" in n:
g = f[n]
# try to read scan data and attrs
sdata, sattrs = read_gamic_scan(
scan=g, scan_type=scan_type, wanted_moments=wanted_moments
)
if sdata:
data[n.upper()] = sdata
if sattrs:
attrs[n.upper()] = sattrs
# collect volume attributes if wanted data is available
if data:
vattrs["Latitude"] = f["where"].attrs.get("lat")
vattrs["Longitude"] = f["where"].attrs.get("lon")
vattrs["Height"] = f["where"].attrs.get("height")
# check whether it's useful to implement that feature
# vattrs['sitecoords'] = (vattrs['Longitude'], vattrs['Latitude'],
# vattrs['Height'])
attrs["VOL"] = vattrs
return data, attrs
def to_hdf5(fpath, data, mode="w", metadata=None, dataset="data", compression="gzip"):
"""Quick storage of one <data> array and a <metadata> dict in an hdf5 file
This is more efficient than pickle, cPickle or numpy.save. The data is
stored in a subgroup named ``data`` (i.e. hdf5file["data"]).
See :func:`~wradlib.io.hdf.from_hdf5` for retrieving stored data.
Parameters
----------
fpath : str or file-like
path to the hdf5 file or file-like object
data : :py:class:`numpy:numpy.ndarray`
mode : str
file open mode, defaults to "w" (create, truncate if exists)
metadata : dict
dictionary of data's attributes
dataset : str
describing dataset
compression : str
h5py compression type {"gzip"|"szip"|"lzf"}, see h5py documentation
for details
"""
with h5py.File(fpath, mode=mode) as f:
dset = f.create_dataset(dataset, data=data, compression=compression)
# store metadata
if metadata:
for key in metadata.keys():
dset.attrs[key] = metadata[key]
def from_hdf5(fpath, dataset="data"):
"""Loading data from hdf5 files that was stored by \
:func:`~wradlib.io.hdf.to_hdf5`
Parameters
----------
fpath : str or file-like
path to the hdf5 file or file-like object
dataset : str
name of the Dataset in which the data is stored
"""
with h5py.File(fpath, mode="r") as f:
# Check whether Dataset exists
if dataset not in f.keys():
raise KeyError(
f"WRADLIB: Cannot read Dataset <{dataset}> from hdf5 file <{f}>"
)
data = np.array(f[dataset][:])
# get metadata
metadata = {}
for key in f[dataset].attrs.keys():
metadata[key] = f[dataset].attrs[key]
return data, metadata
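# Hedged round-trip example (not part of the original module): store an array with
# to_hdf5 and read it back with from_hdf5. Kept as a comment so the module has no
# import-time side effects; the temporary path is a placeholder.
#
# arr = np.arange(12.0).reshape(3, 4)
# to_hdf5("/tmp/quickstore.h5", arr, metadata={"units": "dBZ"})
# data, meta = from_hdf5("/tmp/quickstore.h5")
# assert np.allclose(data, arr) and meta["units"] == "dBZ"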
def read_gpm(filename, bbox=None):
"""Reads GPM files for matching with GR
Parameters
----------
filename : str
path of the GPM file
bbox : dict
dictionary with bounding box coordinates (lon, lat),
defaults to None
Returns
-------
gpm_data : dict
dictionary of gpm data
Examples
--------
See :ref:`/notebooks/match3d/wradlib_match_workflow.ipynb`.
"""
pr_data = nc.Dataset(filename, mode="r")
lon = pr_data["NS"].variables["Longitude"]
lat = pr_data["NS"].variables["Latitude"]
if bbox is not None:
poly = [
[bbox["left"], bbox["bottom"]],
[bbox["left"], bbox["top"]],
[bbox["right"], bbox["top"]],
[bbox["right"], bbox["bottom"]],
[bbox["left"], bbox["bottom"]],
]
from wradlib.zonalstats import get_clip_mask
mask = get_clip_mask(np.dstack((lon[:], lat[:])), poly)
else:
mask = np.ones_like(lon, dtype=bool, subok=False)
mask = np.nonzero(np.count_nonzero(mask, axis=1))
lon = lon[mask]
lat = lat[mask]
year = pr_data["NS"]["ScanTime"].variables["Year"][mask]
month = pr_data["NS"]["ScanTime"].variables["Month"][mask]
dayofmonth = pr_data["NS"]["ScanTime"].variables["DayOfMonth"][mask]
# dayofyear = pr_data['NS']['ScanTime'].variables['DayOfYear'][mask]
hour = pr_data["NS"]["ScanTime"].variables["Hour"][mask]
minute = pr_data["NS"]["ScanTime"].variables["Minute"][mask]
second = pr_data["NS"]["ScanTime"].variables["Second"][mask]
# secondofday = pr_data['NS']['ScanTime'].variables['SecondOfDay'][mask]
millisecond = pr_data["NS"]["ScanTime"].variables["MilliSecond"][mask]
date_array = zip(
year,
month,
dayofmonth,
hour,
minute,
second,
millisecond.astype(np.int32) * 1000,
)
pr_time = np.array(
[dt.datetime(d[0], d[1], d[2], d[3], d[4], d[5], d[6]) for d in date_array]
)
sfc = pr_data["NS"]["PRE"].variables["landSurfaceType"][mask]
pflag = pr_data["NS"]["PRE"].variables["flagPrecip"][mask]
# bbflag = pr_data['NS']['CSF'].variables['flagBB'][mask]
zbb = pr_data["NS"]["CSF"].variables["heightBB"][mask]
# print(zbb.dtype)
bbwidth = pr_data["NS"]["CSF"].variables["widthBB"][mask]
qbb = pr_data["NS"]["CSF"].variables["qualityBB"][mask]
qtype = pr_data["NS"]["CSF"].variables["qualityTypePrecip"][mask]
ptype = pr_data["NS"]["CSF"].variables["typePrecip"][mask]
quality = pr_data["NS"]["scanStatus"].variables["dataQuality"][mask]
refl = pr_data["NS"]["SLV"].variables["zFactorCorrected"][mask]
# print(pr_data['NS']['SLV'].variables['zFactorCorrected'])
zenith = pr_data["NS"]["PRE"].variables["localZenithAngle"][mask]
pr_data.close()
# Check for bad data
if max(quality) != 0:
raise ValueError("GPM contains Bad Data")
pflag = pflag.astype(np.int8)
# Determine the dimensions
ndim = refl.ndim
if ndim != 3:
raise ValueError(f"GPM Dimensions do not match! Needed 3, given {ndim}")
tmp = refl.shape
nscan = tmp[0]
nray = tmp[1]
nbin = tmp[2]
# Reverse direction along the beam
refl = np.flip(refl, axis=-1)
# Change pflag=1 to pflag=2 to be consistent with 'Rain certain' in TRMM
pflag[pflag == 1] = 2
# Simplify the precipitation types
ptype = (ptype / 1e7).astype(np.int16)
# Simplify the surface types
imiss = sfc == -9999
sfc = (sfc / 1e2).astype(np.int16) + 1
sfc[imiss] = 0
# Set a quality indicator for the BB and precip type data
# TODO: Why is the `quality` variable overwritten?
quality = np.zeros((nscan, nray), dtype=np.uint8)
i1 = ((qbb == 0) | (qbb == 1)) & (qtype == 1)
quality[i1] = 1
i2 = (qbb > 1) | (qtype > 2)
quality[i2] = 2
gpm_data = {}
gpm_data.update(
{
"nscan": nscan,
"nray": nray,
"nbin": nbin,
"date": pr_time,
"lon": lon,
"lat": lat,
"pflag": pflag,
"ptype": ptype,
"zbb": zbb,
"bbwidth": bbwidth,
"sfc": sfc,
"quality": quality,
"refl": refl,
"zenith": zenith,
}
)
return gpm_data
def _get_gpm_group(filename, group, variables=None):
"""Return group as xarrax.Dataset from GPM file."""
ds = xr.open_dataset(
filename,
group=group,
decode_cf=False,
engine="h5netcdf",
backend_kwargs=dict(phony_dims="sort"),
)
for n, v in ds.items():
dimnames = v.attrs.get("DimensionNames", False)
if dimnames:
vdims = v.attrs["DimensionNames"].split(",")
dims = {dim: vdims[i] for i, dim in enumerate(v.dims)}
ds[n] = v.swap_dims(dims)
if variables is not None:
keep = set(variables)
ds = ds.drop_vars(set(ds.variables) ^ keep)
if isinstance(variables, dict):
ds = ds.rename(variables)
return ds
def _get_gpm_time_group(filename, group):
"""Return time subgroup as xarrax.Dataset from GPM file."""
variables = [
"Year",
"Month",
"DayOfMonth",
"Hour",
"Minute",
"Second",
"MilliSecond",
]
ds = _get_gpm_group(filename, group=group, variables=variables)
date_array = zip(
ds.Year.values,
ds.Month.values,
ds.DayOfMonth.values,
ds.Hour.values,
ds.Minute.values,
ds.Second.values,
ds.MilliSecond.values,
)
pr_time = np.array(
[dt.datetime(d[0], d[1], d[2], d[3], d[4], d[5], d[6]) for d in date_array]
)
ds = ds.assign_coords({"date": (["nscan"], pr_time)})
ds = ds.drop_vars(set(ds.variables) ^ set(["date"]))
return ds
def open_gpm_dataset(filename, group):
"""Reads GPM files version `V07A`.
Parameters
----------
filename : str
path of the GPM file
group : str
name of group
Returns
-------
ds : xarray.Dataset
xarray.Dataset representation of GPM file with requested `group`.
"""
parents = group.split("/")[:-1]
with h5netcdf.File(filename, mode="r", decode_vlen_strings=True) as f:
grps = list(f[group].groups)
root = _get_gpm_group(filename, group="/")
root = root.rename_dims({list(root.dims)[0]: "nswath"})
groups = {"root": root}
if parents:
for p in parents:
pds = _get_gpm_group(filename, group=f"/{p}")
groups[p] = pds
subroot = _get_gpm_group(filename, group=f"/{group}")
groups["subroot"] = subroot
for grp in grps:
gname = "/".join(["/" + group, grp])
if grp == "ScanTime":
groups[grp] = _get_gpm_time_group(filename, group=gname)
else:
groups[grp] = _get_gpm_group(filename, group=gname)
ds = xr.merge(groups.values())
return ds
def read_trmm(filename1, filename2, bbox=None):
"""Reads TRMM files for matching with GR
Parameters
----------
filename1 : str
path of the TRMM 2A23 file
filename2 : str
path of the TRMM 2A25 file
bbox : dict
dictionary with bounding box coordinates (lon, lat),
defaults to None
Returns
-------
trmm_data : dict
dictionary of trmm data
Examples
--------
See :ref:`/notebooks/match3d/wradlib_match_workflow.ipynb`.
"""
# trmm 2A23 and 2A25 data is hdf4
pr_data1 = nc.Dataset(filename1, mode="r")
pr_data2 = nc.Dataset(filename2, mode="r")
lon = pr_data1.variables["Longitude"]
lat = pr_data1.variables["Latitude"]
if bbox is not None:
poly = [
[bbox["left"], bbox["bottom"]],
[bbox["left"], bbox["top"]],
[bbox["right"], bbox["top"]],
[bbox["right"], bbox["bottom"]],
[bbox["left"], bbox["bottom"]],
]
from wradlib.zonalstats import get_clip_mask
mask = get_clip_mask(np.dstack((lon[:], lat[:])), poly)
else:
mask = np.ones_like(lon, dtype=bool)
mask = np.nonzero(np.count_nonzero(mask, axis=1))
lon = pr_data1.variables["Longitude"][mask]
lat = pr_data1.variables["Latitude"][mask]
year = pr_data1.variables["Year"][mask]
month = pr_data1.variables["Month"][mask]
dayofmonth = pr_data1.variables["DayOfMonth"][mask]
# dayofyear = pr_data1.variables['DayOfYear'][mask]
hour = pr_data1.variables["Hour"][mask]
minute = pr_data1.variables["Minute"][mask]
second = pr_data1.variables["Second"][mask]
# secondofday = pr_data1.variables['scanTime_sec'][mask]
millisecond = pr_data1.variables["MilliSecond"][mask]
date_array = zip(
year,
month,
dayofmonth,
hour,
minute,
second,
millisecond.astype(np.int32) * 1000,
)
pr_time = np.array(
[dt.datetime(d[0], d[1], d[2], d[3], d[4], d[5], d[6]) for d in date_array]
)
pflag = pr_data1.variables["rainFlag"][mask]
ptype = pr_data1.variables["rainType"][mask]
status = pr_data1.variables["status"][mask]
zbb = pr_data1.variables["HBB"][mask].astype(np.float32)
bbwidth = pr_data1.variables["BBwidth"][mask].astype(np.float32)
quality = pr_data2.variables["dataQuality"][mask]
refl = pr_data2.variables["correctZFactor"][mask] / 100.0
zenith = pr_data2.variables["scLocalZenith"][mask]
pr_data1.close()
pr_data2.close()
# mask array
refl = np.ma.array(refl)
# Ground clutter
refl[refl == -8888.0] = np.ma.masked
    # Missing data
refl[refl == -9999.0] = np.ma.masked
# Scaling
refl /= 100.0
# Check for bad data
if max(quality) != 0:
raise ValueError("TRMM contains Bad Data")
# Determine the dimensions
ndim = refl.ndim
if ndim != 3:
raise ValueError(f"TRMM Dimensions do not match! Needed 3, given {ndim}")
tmp = refl.shape
nscan = tmp[0]
nray = tmp[1]
nbin = tmp[2]
# Reverse direction along the beam
refl = np.flip(refl, axis=-1)
# Simplify the precipitation flag
ipos = (pflag >= 10) & (pflag <= 20)
icer = pflag >= 20
pflag[ipos] = 1
pflag[icer] = 2
# Simplify the precipitation types
istr = (ptype >= 100) & (ptype <= 200)
icon = (ptype >= 200) & (ptype <= 300)
ioth = ptype >= 300
inone = ptype == -88
imiss = ptype == -99
ptype[istr] = 1
ptype[icon] = 2
ptype[ioth] = 3
ptype[inone] = 0
ptype[imiss] = -1
# Extract the surface type
sfc = np.zeros((nscan, nray), dtype=np.uint8)
i0 = status == 168
sfc[i0] = 0
i1 = status % 10 == 0
sfc[i1] = 1
i2 = (status - 1) % 10 == 0
sfc[i2] = 2
i3 = (status - 3) % 10 == 0
sfc[i3] = 3
i4 = (status - 4) % 10 == 0
sfc[i4] = 4
i5 = (status - 5) % 10 == 0
sfc[i5] = 5
i9 = (status - 9) % 10 == 0
sfc[i9] = 9
# Extract 2A23 quality
# TODO: Why is the `quality` variable overwritten?
quality = np.zeros((nscan, nray), dtype=np.uint8)
i0 = status == 168
quality[i0] = 0
i1 = status < 50
quality[i1] = 1
i2 = (status >= 50) & (status < 109)
quality[i2] = 2
trmm_data = {}
trmm_data.update(
{
"nscan": nscan,
"nray": nray,
"nbin": nbin,
"date": pr_time,
"lon": lon,
"lat": lat,
"pflag": pflag,
"ptype": ptype,
"zbb": zbb,
"bbwidth": bbwidth,
"sfc": sfc,
"quality": quality,
"refl": refl,
"zenith": zenith,
}
)
return trmm_data
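# Hedged usage sketch (not part of the library); the 2A23/2A25 file names are
# hypothetical placeholders and `bbox` is an optional lon/lat window.
#
#     trmm = read_trmm("2A23.20100206.HDF", "2A25.20100206.HDF",
#                      bbox={"left": 150.0, "right": 153.0,
#                            "bottom": -29.0, "top": -26.0})
#     print(trmm["nscan"], trmm["nray"], trmm["nbin"])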
|
c6ad04c0add09b5b20c5ef906253c157868c21df
|
35271f6bd874799df9a93dbe5bcc50272b619dc1
|
/ML/TensorFlow/Basics/tutorial7-indepth-functional.py
|
74034e13153dc8eb751872fcc246d7753e1cf047
|
[
"MIT"
] |
permissive
|
aladdinpersson/Machine-Learning-Collection
|
c724186b64ae52efa6f9d4e97f37477900901d35
|
558557c7989f0b10fee6e8d8f953d7269ae43d4f
|
refs/heads/master
| 2023-08-31T20:52:06.493437
| 2023-03-21T11:44:08
| 2023-03-21T11:44:08
| 250,184,708
| 5,653
| 2,543
|
MIT
| 2023-09-02T03:51:36
| 2020-03-26T07:02:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,943
|
py
|
tutorial7-indepth-functional.py
|
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, regularizers
from tensorflow.keras.datasets import mnist
# Use Pandas to load dataset from csv file
import pandas as pd
# HYPERPARAMETERS
BATCH_SIZE = 64
WEIGHT_DECAY = 0.001
LEARNING_RATE = 0.001
# Make sure we don't get any GPU errors (only configure memory growth if a GPU is present)
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
train_df = pd.read_csv("train.csv")
test_df = pd.read_csv("test.csv")
train_images = os.getcwd() + "/train_images/" + train_df.iloc[:, 0].values
test_images = os.getcwd() + "/test_images/" + test_df.iloc[:, 0].values
train_labels = train_df.iloc[:, 1:].values
test_labels = test_df.iloc[:, 1:].values
def read_image(image_path, label):
image = tf.io.read_file(image_path)
image = tf.image.decode_image(image, channels=1, dtype=tf.float32)
# In older versions you need to set shape in order to avoid error
# on newer (2.3.0+) the following 3 lines can safely be removed
image.set_shape((64, 64, 1))
label[0].set_shape([])
label[1].set_shape([])
labels = {"first_num": label[0], "second_num": label[1]}
return image, labels
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
train_dataset = (
train_dataset.shuffle(buffer_size=len(train_labels))
.map(read_image)
.batch(batch_size=BATCH_SIZE)
.prefetch(buffer_size=AUTOTUNE)
)
test_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
test_dataset = (
test_dataset.map(read_image)
.batch(batch_size=BATCH_SIZE)
.prefetch(buffer_size=AUTOTUNE)
)
inputs = keras.Input(shape=(64, 64, 1))
x = layers.Conv2D(
filters=32,
kernel_size=3,
padding="same",
kernel_regularizer=regularizers.l2(WEIGHT_DECAY),
)(inputs)
x = layers.BatchNormalization()(x)
x = keras.activations.relu(x)
x = layers.Conv2D(64, 3, kernel_regularizer=regularizers.l2(WEIGHT_DECAY),)(x)
x = layers.BatchNormalization()(x)
x = keras.activations.relu(x)
x = layers.MaxPooling2D()(x)
x = layers.Conv2D(
64, 3, activation="relu", kernel_regularizer=regularizers.l2(WEIGHT_DECAY),
)(x)
x = layers.Conv2D(128, 3, activation="relu")(x)
x = layers.MaxPooling2D()(x)
x = layers.Flatten()(x)
x = layers.Dense(128, activation="relu")(x)
x = layers.Dropout(0.5)(x)
x = layers.Dense(64, activation="relu")(x)
output1 = layers.Dense(10, activation="softmax", name="first_num")(x)
output2 = layers.Dense(10, activation="softmax", name="second_num")(x)
model = keras.Model(inputs=inputs, outputs=[output1, output2])
model.compile(
optimizer=keras.optimizers.Adam(LEARNING_RATE),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=["accuracy"],
)
model.fit(train_dataset, epochs=5, verbose=2)
model.evaluate(test_dataset, verbose=2)
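# Hypothetical follow-up (not part of the original tutorial): with two named
# outputs, predict() returns one probability array per head, which can be
# turned into digit predictions with argmax.
#
#     first_probs, second_probs = model.predict(test_dataset)
#     print(first_probs.argmax(axis=-1)[:5], second_probs.argmax(axis=-1)[:5])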
|
4614d67e290e75f366badd8c026d1ac97c224601
|
3a24f63c8742560993b5465b26339e7c0ed05a27
|
/crates/ruff/resources/test/fixtures/flake8_quotes/singles_implicit.py
|
146fda7fa8b19ce58359139176ef996f902928b8
|
[
"BSD-3-Clause",
"0BSD",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] |
permissive
|
astral-sh/ruff
|
8f1de11263474c6293454b02c728df2f113801db
|
82410524d9612f11387c2675a03869d489bb97ef
|
refs/heads/main
| 2023-08-02T23:20:34.351174
| 2023-08-02T21:32:43
| 2023-08-02T21:32:43
| 523,043,277
| 2,264
| 122
|
MIT
| 2023-09-14T20:08:59
| 2022-08-09T17:17:44
|
Rust
|
UTF-8
|
Python
| false
| false
| 263
|
py
|
singles_implicit.py
|
x = (
'This'
'is'
'not'
)
x = (
'This' \
'is' \
'not'
)
x = (
'This'
'is "actually"'
'fine'
)
x = (
'This' \
'is "actually"' \
'fine'
)
if True:
'This can use "single" quotes'
'But this needs to be changed'
|
8168859e1104bafa78fff57da01f1c1776e7e94e
|
ec7b8378698ed9dfc5e62b94c20524bf3aefc3c3
|
/tiatoolbox/models/architecture/micronet.py
|
2e552ab3795299dff4f43632c155d4a0f23d0e0d
|
[
"BSD-3-Clause"
] |
permissive
|
TissueImageAnalytics/tiatoolbox
|
52fe15704b396a055d9b4fccc678787ef489aed8
|
f26387f46f675a7b9a8a48c95dad26e819229f2f
|
refs/heads/develop
| 2023-08-16T15:47:19.282604
| 2023-08-14T16:50:45
| 2023-08-14T16:50:45
| 267,705,904
| 222
| 44
|
NOASSERTION
| 2023-09-14T16:57:15
| 2020-05-28T22:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 17,873
|
py
|
micronet.py
|
"""Define MicroNet architecture.
Raza, SEA et al., “Micro-Net: A unified model for segmentation of
various objects in microscopy images,” Medical Image Analysis,
Dec. 2018, vol. 52, p. 160-173.
"""
from __future__ import annotations
from collections import OrderedDict
import numpy as np
import torch
from scipy import ndimage
from skimage import morphology
from torch import nn
from torch.nn import functional
from tiatoolbox.models.architecture.hovernet import HoVerNet
from tiatoolbox.models.models_abc import ModelABC
from tiatoolbox.utils import misc
def group1_forward_branch(
layer: nn.Module,
in_tensor: torch.Tensor,
resized_feat: torch.Tensor,
) -> torch.Tensor:
"""Define group 1 connections.
Args:
layer (torch.nn.Module):
Network layer.
in_tensor (torch.Tensor):
Input tensor.
resized_feat (torch.Tensor):
Resized input.
Returns:
torch.Tensor:
Output of group 1 layer.
"""
a = layer["conv1"](in_tensor)
a = layer["conv2"](a)
a = layer["pool"](a)
b = layer["conv3"](resized_feat)
b = layer["conv4"](b)
return torch.cat(tensors=(a, b), dim=1)
def group2_forward_branch(layer: nn.Module, in_tensor: torch.Tensor) -> torch.Tensor:
"""Define group 1 connections.
Args:
layer (torch.nn.Module):
Network layer.
in_tensor (torch.Tensor):
Input tensor.
Returns:
torch.Tensor:
            Output of group 2 layer.
"""
a = layer["conv1"](in_tensor)
return layer["conv2"](a)
def group3_forward_branch(
layer: nn.Module,
main_feat: torch.Tensor,
skip: torch.Tensor,
) -> torch.Tensor:
"""Define group 1 connections.
Args:
layer (torch.nn.Module):
Network layer.
main_feat (torch.Tensor):
Input tensor.
skip (torch.Tensor):
Skip connection.
Returns:
        torch.Tensor: Output of group 3 layer.
"""
a = layer["up1"](main_feat)
a = layer["conv1"](a)
a = layer["conv2"](a)
b1 = layer["up2"](a)
b2 = layer["up3"](skip)
b = torch.cat(tensors=(b1, b2), dim=1)
return layer["conv3"](b)
def group4_forward_branch(layer: nn.Module, in_tensor: torch.Tensor) -> torch.Tensor:
"""Define group 1 connections.
Args:
layer (torch.nn.Module):
Network layer.
in_tensor (torch.Tensor):
Input tensor.
Returns:
        torch.Tensor: Output of group 4 layer.
"""
a = layer["up1"](in_tensor)
return layer["conv1"](a)
def group1_arch_branch(in_ch: int, resized_in_ch: int, out_ch: int):
"""Group1 branch for MicroNet.
Args:
in_ch (int):
Number of input channels.
resized_in_ch (int):
Number of input channels from resized input.
out_ch (int):
Number of output channels.
Returns:
:class:`torch.nn.ModuleDict`:
An output of type :class:`torch.nn.ModuleDict`
"""
module_dict = OrderedDict()
module_dict["conv1"] = nn.Sequential(
nn.Conv2d(
in_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
nn.BatchNorm2d(out_ch),
)
module_dict["conv2"] = nn.Sequential(
nn.Conv2d(
out_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
module_dict["pool"] = nn.MaxPool2d(2, padding=0) # check padding
module_dict["conv3"] = nn.Sequential(
nn.Conv2d(
resized_in_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
nn.BatchNorm2d(out_ch),
)
module_dict["conv4"] = nn.Sequential(
nn.Conv2d(
out_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
return nn.ModuleDict(module_dict)
def group2_arch_branch(in_ch: int, out_ch: int):
"""Group2 branch for MicroNet.
Args:
in_ch (int):
Number of input channels.
out_ch (int):
Number of output channels.
Returns:
torch.nn.ModuleDict:
An output of type :class:`torch.nn.ModuleDict`
"""
module_dict = OrderedDict()
module_dict["conv1"] = nn.Sequential(
nn.Conv2d(
in_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
module_dict["conv2"] = nn.Sequential(
nn.Conv2d(
out_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
return nn.ModuleDict(module_dict)
def group3_arch_branch(in_ch: int, skip: int, out_ch: int):
"""Group3 branch for MicroNet.
Args:
in_ch (int):
Number of input channels.
skip (int):
Number of channels for the skip connection.
out_ch (int):
Number of output channels.
Returns:
torch.nn.ModuleDict:
An output of type :class:`torch.nn.ModuleDict`
"""
module_dict = OrderedDict()
module_dict["up1"] = nn.ConvTranspose2d(
in_ch,
out_ch,
kernel_size=(2, 2),
stride=(2, 2),
)
module_dict["conv1"] = nn.Sequential(
nn.Conv2d(
out_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
module_dict["conv2"] = nn.Sequential(
nn.Conv2d(
out_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
module_dict["up2"] = nn.ConvTranspose2d(
out_ch,
out_ch,
kernel_size=(5, 5),
stride=(1, 1),
)
module_dict["up3"] = nn.ConvTranspose2d(
skip,
out_ch,
kernel_size=(5, 5),
stride=(1, 1),
)
module_dict["conv3"] = nn.Sequential(
nn.Conv2d(
2 * out_ch,
out_ch,
kernel_size=(1, 1),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
return nn.ModuleDict(module_dict)
def group4_arch_branch(
in_ch: int,
out_ch: int,
up_kernel: tuple[int, int] = (2, 2),
up_strides: tuple[int, int] = (2, 2),
activation: str = "tanh",
) -> nn.ModuleDict:
"""Group4 branch for MicroNet.
This branch defines architecture for decoder and
provides input for the auxiliary and main output branch.
Args:
in_ch (int):
Number of input channels.
out_ch (int):
Number of output channels.
up_kernel (tuple of int):
Kernel size for
:class:`torch.nn.ConvTranspose2d`.
up_strides (tuple of int):
Stride size for
:class:`torch.nn.ConvTranspose2d`.
activation (str):
Activation function, default="tanh".
Returns:
torch.nn.ModuleDict:
An output of type :class:`torch.nn.ModuleDict`
"""
activation = nn.ReLU() if activation == "relu" else nn.Tanh()
module_dict = OrderedDict()
module_dict["up1"] = nn.ConvTranspose2d(
in_ch,
out_ch,
kernel_size=up_kernel,
stride=up_strides,
)
module_dict["conv1"] = nn.Sequential(
nn.Conv2d(
out_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
activation,
)
return nn.ModuleDict(module_dict)
def out_arch_branch(
in_ch: int,
num_output_channels: int = 2,
activation: str = "softmax",
):
"""Group5 branch for MicroNet.
This branch defines architecture for auxiliary and the main output.
Args:
in_ch (int):
Number of input channels.
num_output_channels (int):
Number of output channels. default=2.
activation (str):
Activation function, default="softmax".
Returns:
torch.nn.Sequential:
An output of type :class:`torch.nn.Sequential`
"""
activation = nn.ReLU() if activation == "relu" else nn.Softmax()
return nn.Sequential(
nn.Dropout2d(p=0.5),
nn.Conv2d(
in_ch,
num_output_channels,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
activation,
)
class MicroNet(ModelABC):
"""Initialize MicroNet [1].
The following models have been included in tiatoolbox:
1. `micronet-consep`:
This is trained on `CoNSeP dataset
<https://warwick.ac.uk/fac/cross_fac/tia/data/hovernet/>`_ The
model is retrained in torch as the original model with results
on CoNSeP [2] was trained in TensorFlow.
The tiatoolbox model should produce the following results on the CoNSeP dataset:
.. list-table:: MicroNet performance
:widths: 15 15 15 15 15 15 15
:header-rows: 1
* - Model name
- Data set
- DICE
- AJI
- DQ
- SQ
- PQ
* - micronet-consep
- CoNSeP
- 0.80
- 0.49
- 0.62
- 0.75
- 0.47
Args:
num_input_channels (int):
Number of channels in input. default=3.
num_output_channels (int):
Number of output channels. default=2.
out_activation (str):
Activation to use at the output. MapDe inherits MicroNet
but uses ReLU activation.
References:
[1] Raza, Shan E Ahmed, et al. "Micro-Net: A unified model for
segmentation of various objects in microscopy images."
Medical image analysis 52 (2019): 160-173.
[2] Graham, Simon, et al. "Hover-net: Simultaneous segmentation
and classification of nuclei in multi-tissue histology images."
Medical Image Analysis 58 (2019): 101563.
"""
def __init__(
self,
num_input_channels=3,
num_output_channels=2,
out_activation="softmax",
) -> None:
"""Initialize :class:`MicroNet`."""
super().__init__()
if num_output_channels < 2: # noqa: PLR2004
msg = "Number of classes should be >=2."
raise ValueError(msg)
self.__num_output_channels = num_output_channels
self.in_ch = num_input_channels
module_dict = OrderedDict()
module_dict["b1"] = group1_arch_branch(
num_input_channels,
num_input_channels,
64,
)
module_dict["b2"] = group1_arch_branch(128, num_input_channels, 128)
module_dict["b3"] = group1_arch_branch(256, num_input_channels, 256)
module_dict["b4"] = group1_arch_branch(512, num_input_channels, 512)
module_dict["b5"] = group2_arch_branch(1024, 2048)
module_dict["b6"] = group3_arch_branch(2048, 1024, 1024)
module_dict["b7"] = group3_arch_branch(1024, 512, 512)
module_dict["b8"] = group3_arch_branch(512, 256, 256)
module_dict["b9"] = group3_arch_branch(256, 128, 128)
module_dict["fm1"] = group4_arch_branch(
128,
64,
(2, 2),
(2, 2),
activation=out_activation,
)
module_dict["fm2"] = group4_arch_branch(
256,
128,
(4, 4),
(4, 4),
activation=out_activation,
)
module_dict["fm3"] = group4_arch_branch(
512,
256,
(8, 8),
(8, 8),
activation=out_activation,
)
module_dict["aux_out1"] = out_arch_branch(
64,
num_output_channels=self.__num_output_channels,
)
module_dict["aux_out2"] = out_arch_branch(
128,
num_output_channels=self.__num_output_channels,
)
module_dict["aux_out3"] = out_arch_branch(
256,
num_output_channels=self.__num_output_channels,
)
module_dict["out"] = out_arch_branch(
64 + 128 + 256,
num_output_channels=self.__num_output_channels,
activation=out_activation,
)
self.layer = nn.ModuleDict(module_dict)
def forward(self, input_tensor: torch.Tensor): # skipcq: PYL-W0221
"""Logic for using layers defined in init.
This method defines how layers are used in forward operation.
Args:
input_tensor (torch.Tensor):
Input images, the tensor is in the shape of NCHW.
Returns:
list:
A list of main and auxiliary outputs. The expected
format is `[main_output, aux1, aux2, aux3]`.
"""
b1 = group1_forward_branch(
self.layer["b1"],
input_tensor,
functional.interpolate(input_tensor, size=(128, 128), mode="bicubic"),
)
b2 = group1_forward_branch(
self.layer["b2"],
b1,
functional.interpolate(input_tensor, size=(64, 64), mode="bicubic"),
)
b3 = group1_forward_branch(
self.layer["b3"],
b2,
functional.interpolate(input_tensor, size=(32, 32), mode="bicubic"),
)
b4 = group1_forward_branch(
self.layer["b4"],
b3,
functional.interpolate(input_tensor, size=(16, 16), mode="bicubic"),
)
b5 = group2_forward_branch(self.layer["b5"], b4)
b6 = group3_forward_branch(self.layer["b6"], b5, b4)
b7 = group3_forward_branch(self.layer["b7"], b6, b3)
b8 = group3_forward_branch(self.layer["b8"], b7, b2)
b9 = group3_forward_branch(self.layer["b9"], b8, b1)
fm1 = group4_forward_branch(self.layer["fm1"], b9)
fm2 = group4_forward_branch(self.layer["fm2"], b8)
fm3 = group4_forward_branch(self.layer["fm3"], b7)
aux1 = self.layer["aux_out1"](fm1)
aux2 = self.layer["aux_out2"](fm2)
aux3 = self.layer["aux_out3"](fm3)
out = torch.cat(tensors=(fm1, fm2, fm3), dim=1)
out = self.layer["out"](out)
return [out, aux1, aux2, aux3]
@staticmethod
def postproc(image: np.ndarray):
"""Post-processing script for MicroNet.
Args:
image (ndarray):
Input image of type numpy array.
Returns:
:class:`numpy.ndarray`:
Pixel-wise nuclear instance segmentation
prediction.
"""
pred_bin = np.argmax(image[0], axis=2)
pred_inst = ndimage.label(pred_bin)[0]
pred_inst = morphology.remove_small_objects(pred_inst, min_size=50)
canvas = np.zeros(pred_inst.shape[:2], dtype=np.int32)
for inst_id in range(1, np.max(pred_inst) + 1):
inst_map = np.array(pred_inst == inst_id, dtype=np.uint8)
inst_map = ndimage.binary_fill_holes(inst_map)
canvas[inst_map > 0] = inst_id
nuc_inst_info_dict = HoVerNet.get_instance_info(canvas)
return canvas, nuc_inst_info_dict
@staticmethod
def preproc(image: np.ndarray):
"""Preprocessing function for MicroNet.
Performs per image standardization.
Args:
image (:class:`numpy.ndarray`):
Input image of type numpy array.
Returns:
:class:`numpy.ndarray`:
Pre-processed numpy array.
"""
image = np.transpose(image, axes=(2, 0, 1))
image = image / 255.0
image = torch.from_numpy(image)
image_mean = torch.mean(image, dim=(-1, -2, -3))
stddev = torch.std(image, dim=(-1, -2, -3))
num_pixels = torch.tensor(torch.numel(image), dtype=torch.float32)
min_stddev = torch.rsqrt(num_pixels)
adjusted_stddev = torch.max(stddev, min_stddev)
image -= image_mean
image = torch.div(image, adjusted_stddev)
return np.transpose(image.numpy(), axes=(1, 2, 0))
@staticmethod
def infer_batch(
model: torch.nn.Module,
batch_data: np.ndarray,
*,
on_gpu: bool,
) -> list[np.ndarray]:
"""Run inference on an input batch.
This contains logic for forward operation as well as batch I/O
aggregation.
Args:
model (nn.Module):
PyTorch defined model.
batch_data (:class:`numpy.ndarray`):
A batch of data generated by
`torch.utils.data.DataLoader`.
on_gpu (bool):
Whether to run inference on a GPU.
Returns:
list(np.ndarray):
Probability map as a numpy array.
"""
patch_imgs = batch_data
device = misc.select_device(on_gpu=on_gpu)
patch_imgs_gpu = patch_imgs.to(device).type(torch.float32) # to NCHW
patch_imgs_gpu = patch_imgs_gpu.permute(0, 3, 1, 2).contiguous()
model.eval() # infer mode
with torch.inference_mode():
pred, _, _, _ = model(patch_imgs_gpu)
pred = pred.permute(0, 2, 3, 1).contiguous()
pred = pred.cpu().numpy()
return [
pred,
]
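# Hedged usage sketch (not part of tiatoolbox): instantiating the network and
# running a single forward pass. The 252x252 patch size is an assumption taken
# from typical MicroNet usage, not something this file enforces.
#
#     net = MicroNet(num_input_channels=3, num_output_channels=2)
#     dummy = torch.rand(1, 3, 252, 252)
#     main_out, aux1, aux2, aux3 = net(dummy)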
|
43a4cc5c4f5bc15d3a2700b15986a43a674293ca
|
96b7ee94763d477ddbe1534a866fb7e3bd269a7b
|
/tests/unit/test_exception.py
|
393d6a2cd8747f4059161976d4cf721dff6601e5
|
[
"MIT"
] |
permissive
|
ej2/python-quickbooks
|
a83d088983f2aa6b001d2cb7445f42ea0ab4725a
|
5d29d1fa832496d00af927e35deb9ba78817550d
|
refs/heads/master
| 2023-09-03T23:49:59.551648
| 2023-08-29T16:10:29
| 2023-08-29T16:10:29
| 39,512,055
| 139
| 81
|
MIT
| 2023-08-29T16:15:24
| 2015-07-22T14:50:48
|
Python
|
UTF-8
|
Python
| false
| false
| 638
|
py
|
test_exception.py
|
import unittest
from quickbooks.exceptions import QuickbooksException, AuthorizationException
class QuickbooksExceptionTests(unittest.TestCase):
def test_init(self):
exception = QuickbooksException("message", 100, "detail")
self.assertEqual(exception.message, "message")
self.assertEqual(exception.error_code, 100)
self.assertEqual(exception.detail, "detail")
class AuthorizationExceptionTests(unittest.TestCase):
def test_unicode(self):
exception = AuthorizationException("message", detail="detail")
self.assertEqual(str(exception), "QB Auth Exception 0: message\ndetail")
|
39fa3d967e6c6878eb63c01a098ad899817aa7f7
|
e5beafe9aac8a90eb360ca46628d547d5c50677c
|
/tests/unit/core/profiler/util/test_util.py
|
d8058d49ebe4431a54018d5474f46b5f66f07c19
|
[
"MIT"
] |
permissive
|
flask-dashboard/Flask-MonitoringDashboard
|
84c3a479d6eb2bc16201984d3988e52091b23208
|
bd41a2396d0770ad14e5b739db42c69ecd85cd49
|
refs/heads/master
| 2023-09-01T03:09:12.583145
| 2023-08-22T21:16:55
| 2023-08-22T21:16:55
| 87,939,708
| 747
| 168
|
MIT
| 2023-08-22T21:07:23
| 2017-04-11T13:50:14
|
Python
|
UTF-8
|
Python
| false
| false
| 338
|
py
|
test_util.py
|
from flask_monitoringdashboard.core.profiler.util import order_histogram
def test_order_histogram():
histogram = {('0:42->1:12', 'c'): 610, ('0:42', 'a'): 1234, ('0:42->1:13', 'b'): 614}
assert order_histogram(histogram.items()) == (
[(('0:42', 'a'), 1234), (('0:42->1:13', 'b'), 614), (('0:42->1:12', 'c'), 610)]
)
|
129320cdabb766c187a16ca5000bc905782da01f
|
bece8b97cdb15988562c8c8dc27a5b58cd3acb90
|
/sandworm/Resources/browser-creds/Windows/lazagne/config/lib/memorpy/SunProcess.py
|
831c7f63d519dbd1c01fa8dc257a28ffde3308cf
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
center-for-threat-informed-defense/adversary_emulation_library
|
4382e78f56faa635d5a6cc89bce5b36f3b74476c
|
4b1d1327ebfbd550ba7e5f1b5562c1f3db61311e
|
refs/heads/master
| 2023-08-12T20:08:57.078612
| 2023-07-17T16:54:16
| 2023-07-17T16:54:16
| 259,753,257
| 1,220
| 243
|
Apache-2.0
| 2023-09-06T16:23:00
| 2020-04-28T21:16:54
|
C
|
UTF-8
|
Python
| false
| false
| 5,069
|
py
|
SunProcess.py
|
# This file is part of memorpy.
#
# memorpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# memorpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with memorpy. If not, see <http://www.gnu.org/licenses/>.
from .BaseProcess import BaseProcess, ProcessException
import struct
import os
MA_READ = 0x04
MA_WRITE = 0x02
MA_EXEC = 0x01
MA_SHARED = 0x08
MA_ANON = 0x40
MA_ISM = 0x80
MA_NORESERVE = 0x100
MA_SHM = 0x200
MA_RESERVED1 = 0x400
MA_OSM = 0x800
PSINFO_T = struct.Struct(
'iiiIIIIIIIILLLLHHLLLLLL16s80siiLLciILLcccchi8sLLIIIIII'
)
MAP_T = struct.Struct(
'LL64sQiiii'
)
class SunProcess(BaseProcess):
def __init__(self, pid=None, name=None, debug=True, ptrace=None):
''' Create and Open a process object from its pid or from its name '''
super(SunProcess, self).__init__()
self.pid = int(pid)
self.pas = None
self.writable = False
if name and not self.pid:
self.pid = SunProcess.pid_from_name(name)
if not name and not self.pid:
            raise ValueError('You need to instantiate process with at least a name or a pid')
try:
self._open()
except:
pass
def close(self):
if self.pas:
self.pas.close()
def __del__(self):
self.close()
def _open(self):
try:
self.pas = open('/proc/%d/as'%(self.pid), 'w+')
self.writable = True
except IOError:
self.pas = open('/proc/%d/as'%(self.pid))
self.isProcessOpen = True
@staticmethod
def _name_args(pid):
with open('/proc/%d/psinfo'%(int(pid))) as psinfo:
items = PSINFO_T.unpack_from(psinfo.read())
return items[23].rstrip('\x00'), items[24].rstrip('\x00')
@staticmethod
def list():
processes=[]
for pid in os.listdir('/proc'):
try:
pid = int(pid)
name, _ = SunProcess._name_args(pid)
processes.append({
'pid': pid,
'name': name
})
except:
pass
return processes
@staticmethod
def pid_from_name(name):
processes=[]
for pid in os.listdir('/proc'):
try:
pid = int(pid)
pname, cmdline = SunProcess._name_args(pid)
if name in pname:
return pid
if name in cmdline.split(' ', 1)[0]:
return pid
except:
pass
raise ProcessException('No process with such name: %s'%name)
def iter_region(self, start_offset=None, end_offset=None, protec=None, optimizations=None):
"""
optimizations :
i for inode==0 (no file mapping)
s to avoid scanning shared regions
x to avoid scanning x regions
r don't scan ronly regions
"""
if not self.isProcessOpen:
return
with open('/proc/%d/map'%(self.pid)) as maps_file:
while True:
mapping = maps_file.read(MAP_T.size)
if not mapping:
break
start, size, name, offset, flags, pagesize, shmid, filler = MAP_T.unpack(mapping)
if start_offset is not None:
if start < start_offset:
continue
if end_offset is not None:
if start > end_offset:
continue
if not flags & MA_READ:
continue
if optimizations:
if 'i' in optimizations and not flags & MA_ANON:
continue
if 's' in optimizations and flags & MA_SHM:
continue
# in sunos it's quite common when this flag is set, so let's use other letter
if 'X' in optimizations and flags & MA_EXEC:
continue
if 'r' in optimizations and not flags & MA_WRITE:
continue
yield start, size
def write_bytes(self, address, data):
if not self.pas or not self.writable:
return False
self.pas.seek(address)
self.pas.write(data)
return True
def read_bytes(self, address, bytes = 4):
if not self.pas:
return
self.pas.seek(address)
return self.pas.read(bytes)
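# Hedged usage sketch (not part of memorpy); only meaningful on a SunOS/Solaris
# host with a readable /proc, and the pid below is a placeholder.
#
#     proc = SunProcess(pid=1234)
#     for start, size in proc.iter_region(optimizations='is'):
#         chunk = proc.read_bytes(start, min(size, 4096))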
|
33afc6bd098d5a8fc68c5af1b41a5789349df3b7
|
b756605d6552079c3f43f08102433ce96b3d21e6
|
/src/pipelinex/extras/ops/numpy_ops.py
|
5d770a0f4a33d3052371799db2ec85f85562937d
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
Minyus/pipelinex
|
c35a7d01c0e1d4add84728c9c3663209bb5f1f7a
|
88ada1393962205e5532094bc0ef2bb4a5b3519b
|
refs/heads/master
| 2022-11-25T19:17:13.957991
| 2022-11-11T13:32:49
| 2022-11-11T13:32:49
| 221,912,857
| 223
| 16
|
NOASSERTION
| 2021-05-02T12:46:32
| 2019-11-15T11:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 981
|
py
|
numpy_ops.py
|
import numpy as np
_to_channel_last_dict = {3: (-2, -1, -3), 4: (0, -2, -1, -3)}
def to_channel_last_arr(a):
if a.ndim in {3, 4}:
return np.transpose(a, axes=_to_channel_last_dict.get(a.ndim))
else:
return a
_to_channel_first_dict = {3: (-1, -3, -2), 4: (0, -1, -3, -2)}
def to_channel_first_arr(a):
if a.ndim in {3, 4}:
return np.transpose(a, axes=_to_channel_first_dict.get(a.ndim))
else:
return a
def reverse_channel(a, channel_first=False):
if a.ndim == 3:
if channel_first:
return a[::-1, :, :]
else:
return a[:, :, ::-1]
if a.ndim == 4:
if channel_first:
return a[:, ::-1, :, :]
else:
return a[:, :, :, ::-1]
return a
class ReverseChannel:
def __init__(self, channel_first=False):
self.channel_first = channel_first
def __call__(self, a):
return reverse_channel(a, channel_first=self.channel_first)
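if __name__ == "__main__":
    # Small self-check (not part of the library): a 3-channel HWC image of
    # shape (4, 5, 3) becomes CHW (3, 4, 5) and back again.
    img = np.zeros((4, 5, 3))
    chw = to_channel_first_arr(img)
    assert chw.shape == (3, 4, 5)
    assert to_channel_last_arr(chw).shape == (4, 5, 3)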
|
bca320d54e2bd4fe5289519f4691f3e43ecc8e9f
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/braintree/braintree/util/datetime_parser.pyi
|
eb12ffa4057cfcf0aad97c0705388fcf6aa09427
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 35
|
pyi
|
datetime_parser.pyi
|
def parse_datetime(timestamp): ...
|
6690272ef9b56a4e87760e67cbce149156c403eb
|
afa75947ca301d55865342e09ad58ab128c5b1e8
|
/praw/models/reddit/draft.py
|
2872c803128c0510c4e8b8dd61942fc380e20168
|
[
"GPL-3.0-only",
"BSD-2-Clause"
] |
permissive
|
praw-dev/praw
|
ab6a6f188d22922618406360143b12c3de3e90df
|
f1d5506b7a3df240f748e1b7749fd5636aa67b32
|
refs/heads/master
| 2023-09-01T06:30:03.060189
| 2023-09-01T01:54:01
| 2023-09-01T01:54:01
| 847,957
| 2,825
| 646
|
BSD-2-Clause
| 2023-09-11T17:52:22
| 2010-08-19T04:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 11,734
|
py
|
draft.py
|
"""Provide the draft class."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from ...const import API_PATH
from ...exceptions import ClientException
from .base import RedditBase
from .subreddit import Subreddit
from .user_subreddit import UserSubreddit
if TYPE_CHECKING: # pragma: no cover
import praw
class Draft(RedditBase):
"""A class that represents a Reddit submission draft.
.. include:: ../../typical_attributes.rst
========================== ======================================================
Attribute Description
========================== ======================================================
``link_flair_template_id`` The link flair's ID.
``link_flair_text`` The link flair's text content, or ``None`` if not
flaired.
``modified`` Time the submission draft was modified, represented in
`Unix Time`_.
``original_content`` Whether the submission draft will be set as original
content.
``selftext`` The submission draft's selftext. ``None`` if a link
submission draft.
``spoiler`` Whether the submission will be marked as a spoiler.
``subreddit`` Provides an instance of :class:`.Subreddit` or
:class:`.UserSubreddit` (if set).
``title`` The title of the submission draft.
``url`` The URL the submission draft links to.
========================== ======================================================
.. _unix time: https://en.wikipedia.org/wiki/Unix_time
"""
STR_FIELD = "id"
@classmethod
def _prepare_data(
cls,
*,
flair_id: str | None = None,
flair_text: str | None = None,
is_public_link: bool | None = None,
nsfw: bool | None = None,
original_content: bool | None = None,
selftext: str | None = None,
send_replies: bool | None = None,
spoiler: bool | None = None,
subreddit: praw.models.Subreddit | praw.models.UserSubreddit | None = None,
title: str | None = None,
url: str | None = None,
**draft_kwargs: Any,
) -> dict[str, Any]:
data = {
"body": selftext or url,
"flair_id": flair_id,
"flair_text": flair_text,
"is_public_link": is_public_link,
"kind": "markdown" if selftext is not None else "link",
"nsfw": nsfw,
"original_content": original_content,
"send_replies": send_replies,
"spoiler": spoiler,
"title": title,
}
if subreddit:
data.update(
{
"subreddit": subreddit.fullname,
"target": "profile"
if subreddit.display_name.startswith("u_")
else "subreddit",
}
)
data.update(draft_kwargs)
return data
def __init__(
self,
reddit: praw.Reddit,
id: str | None = None, # pylint: disable=redefined-builtin
_data: dict[str, Any] = None,
):
"""Initialize a :class:`.Draft` instance."""
if (id, _data).count(None) != 1:
msg = "Exactly one of 'id' or '_data' must be provided."
raise TypeError(msg)
fetched = False
if id:
self.id = id
elif len(_data) > 1:
if _data["kind"] == "markdown":
_data["selftext"] = _data.pop("body")
elif _data["kind"] == "link":
_data["url"] = _data.pop("body")
fetched = True
super().__init__(reddit, _data=_data, _fetched=fetched)
def __repr__(self) -> str:
"""Return an object initialization representation of the instance."""
if self._fetched:
subreddit = (
f" subreddit={self.subreddit.display_name!r}" if self.subreddit else ""
)
title = f" title={self.title!r}" if self.title else ""
return f"{self.__class__.__name__}(id={self.id!r}{subreddit}{title})"
return f"{self.__class__.__name__}(id={self.id!r})"
def _fetch(self):
for draft in self._reddit.drafts():
if draft.id == self.id:
self.__dict__.update(draft.__dict__)
self._fetched = True
return
msg = (
f"The currently authenticated user not have a draft with an ID of {self.id}"
)
raise ClientException(msg)
def delete(self):
"""Delete the :class:`.Draft`.
Example usage:
.. code-block:: python
draft = reddit.drafts("124862bc-e1e9-11eb-aa4f-e68667a77cbb")
draft.delete()
"""
self._reddit.delete(API_PATH["draft"], params={"draft_id": self.id})
def submit(
self,
*,
flair_id: str | None = None,
flair_text: str | None = None,
nsfw: bool | None = None,
selftext: str | None = None,
spoiler: bool | None = None,
subreddit: str
| praw.models.Subreddit
| praw.models.UserSubreddit
| None = None,
title: str | None = None,
url: str | None = None,
**submit_kwargs: Any,
) -> praw.models.Submission:
"""Submit a draft.
:param flair_id: The flair template to select (default: ``None``).
:param flair_text: If the template's ``flair_text_editable`` value is ``True``,
this value will set a custom text (default: ``None``). ``flair_id`` is
required when ``flair_text`` is provided.
:param nsfw: Whether or not the submission should be marked NSFW (default:
``None``).
:param selftext: The Markdown formatted content for a ``text`` submission. Use
an empty string, ``""``, to make a title-only submission (default:
``None``).
:param spoiler: Whether or not the submission should be marked as a spoiler
(default: ``None``).
:param subreddit: The subreddit to submit the draft to. This accepts a subreddit
display name, :class:`.Subreddit` object, or :class:`.UserSubreddit` object.
:param title: The title of the submission (default: ``None``).
:param url: The URL for a ``link`` submission (default: ``None``).
:returns: A :class:`.Submission` object for the newly created submission.
.. note::
Parameters set here will override their respective :class:`.Draft`
attributes.
Additional keyword arguments are passed to the :meth:`.Subreddit.submit` method.
For example, to submit a draft as is:
.. code-block:: python
draft = reddit.drafts("5f87d55c-e4fb-11eb-8965-6aeb41b0880e")
submission = draft.submit()
For example, to submit a draft but use a different title than what is set:
.. code-block:: python
draft = reddit.drafts("5f87d55c-e4fb-11eb-8965-6aeb41b0880e")
submission = draft.submit(title="New Title")
.. seealso::
- :meth:`~.Subreddit.submit` to submit url posts and selftexts
- :meth:`~.Subreddit.submit_gallery`. to submit more than one image in the
same post
- :meth:`~.Subreddit.submit_image` to submit images
- :meth:`~.Subreddit.submit_poll` to submit polls
- :meth:`~.Subreddit.submit_video` to submit videos and videogifs
"""
submit_kwargs["draft_id"] = self.id
if not (self.subreddit or subreddit):
msg = "'subreddit' must be set on the Draft instance or passed as a keyword argument."
raise ValueError(msg)
for key, attribute in [
("flair_id", flair_id),
("flair_text", flair_text),
("nsfw", nsfw),
("selftext", selftext),
("spoiler", spoiler),
("title", title),
("url", url),
]:
value = attribute or getattr(self, key, None)
if value is not None:
submit_kwargs[key] = value
if isinstance(subreddit, str):
_subreddit = self._reddit.subreddit(subreddit)
elif isinstance(subreddit, (Subreddit, UserSubreddit)):
_subreddit = subreddit
else:
_subreddit = self.subreddit
return _subreddit.submit(**submit_kwargs)
def update(
self,
*,
flair_id: str | None = None,
flair_text: str | None = None,
is_public_link: bool | None = None,
nsfw: bool | None = None,
original_content: bool | None = None,
selftext: str | None = None,
send_replies: bool | None = None,
spoiler: bool | None = None,
subreddit: str
| praw.models.Subreddit
| praw.models.UserSubreddit
| None = None,
title: str | None = None,
url: str | None = None,
**draft_kwargs: Any,
):
"""Update the :class:`.Draft`.
.. note::
Only provided values will be updated.
:param flair_id: The flair template to select.
:param flair_text: If the template's ``flair_text_editable`` value is ``True``,
this value will set a custom text. ``flair_id`` is required when
``flair_text`` is provided.
:param is_public_link: Whether to enable public viewing of the draft before it
is submitted.
:param nsfw: Whether the draft should be marked NSFW.
:param original_content: Whether the submission should be marked as original
content.
:param selftext: The Markdown formatted content for a text submission draft. Use
``None`` to make a title-only submission draft. ``selftext`` can not be
provided if ``url`` is provided.
:param send_replies: When ``True``, messages will be sent to the submission
author when comments are made to the submission.
:param spoiler: Whether the submission should be marked as a spoiler.
:param subreddit: The subreddit to create the draft for. This accepts a
subreddit display name, :class:`.Subreddit` object, or
:class:`.UserSubreddit` object.
:param title: The title of the draft.
:param url: The URL for a ``link`` submission draft. ``url`` can not be provided
if ``selftext`` is provided.
Additional keyword arguments can be provided to handle new parameters as Reddit
introduces them.
For example, to update the title of a draft do:
.. code-block:: python
draft = reddit.drafts("5f87d55c-e4fb-11eb-8965-6aeb41b0880e")
draft.update(title="New title")
"""
if isinstance(subreddit, str):
subreddit = self._reddit.subreddit(subreddit)
data = self._prepare_data(
flair_id=flair_id,
flair_text=flair_text,
is_public_link=is_public_link,
nsfw=nsfw,
original_content=original_content,
selftext=selftext,
send_replies=send_replies,
spoiler=spoiler,
subreddit=subreddit,
title=title,
url=url,
**draft_kwargs,
)
data["id"] = self.id
_new_draft = self._reddit.put(API_PATH["draft"], data=data)
_new_draft._fetch()
self.__dict__.update(_new_draft.__dict__)
|
c787b9f5c92ae52c21283233fe4beb38d6a86100
|
64a000d42240493e054c39528efca48aca19998a
|
/distributed/versions.py
|
05325dae2176c80ac495784ad64a27d549ce5cfb
|
[
"BSD-3-Clause"
] |
permissive
|
dask/distributed
|
1ee853c7d1273a421601247954d242d21330bd3d
|
ec40daa12dc690e4e33dbe12dc40488633e0f67b
|
refs/heads/main
| 2023-08-30T12:17:49.467926
| 2023-08-29T16:51:52
| 2023-08-29T16:51:52
| 42,408,083
| 1,534
| 982
|
BSD-3-Clause
| 2023-09-14T17:21:47
| 2015-09-13T18:42:29
|
Python
|
UTF-8
|
Python
| false
| false
| 5,165
|
py
|
versions.py
|
""" utilities for package version introspection """
from __future__ import annotations
import importlib
import os
import platform
import struct
import sys
from collections.abc import Callable, Iterable
from itertools import chain
from types import ModuleType
from typing import Any
from packaging.requirements import Requirement
BOKEH_REQUIREMENT = Requirement("bokeh>=2.4.2,!=3.0.*")
required_packages = [
("dask", lambda p: p.__version__),
("distributed", lambda p: p.__version__),
("msgpack", lambda p: ".".join([str(v) for v in p.version])),
("cloudpickle", lambda p: p.__version__),
("tornado", lambda p: p.version),
("toolz", lambda p: p.__version__),
]
optional_packages = [
("numpy", lambda p: p.__version__),
("pandas", lambda p: p.__version__),
("lz4", lambda p: p.__version__),
]
# only these scheduler packages will be checked for version mismatch
scheduler_relevant_packages = {pkg for pkg, _ in required_packages} | {
"lz4",
"python",
} - {"msgpack"}
# notes to be displayed for mismatch packages
notes_mismatch_package: dict[str, str] = {}
def get_versions(
packages: Iterable[str | tuple[str, Callable[[ModuleType], str | None]]]
| None = None
) -> dict[str, dict[str, Any]]:
"""Return basic information on our software installation, and our installed versions
of packages
"""
return {
"host": get_system_info(),
"packages": get_package_info(
chain(required_packages, optional_packages, packages or [])
),
}
def get_system_info() -> dict[str, Any]:
uname = platform.uname()
return {
"python": "%d.%d.%d.%s.%s" % sys.version_info,
"python-bits": struct.calcsize("P") * 8,
"OS": uname.system,
"OS-release": uname.release,
"machine": uname.machine,
"processor": uname.processor,
"byteorder": sys.byteorder,
"LC_ALL": os.environ.get("LC_ALL", "None"),
"LANG": os.environ.get("LANG", "None"),
}
def version_of_package(pkg: ModuleType) -> str | None:
"""Try a variety of common ways to get the version of a package"""
from contextlib import suppress
with suppress(AttributeError):
return pkg.__version__
with suppress(AttributeError):
return str(pkg.version)
with suppress(AttributeError):
return ".".join(map(str, pkg.version_info))
return None
def get_package_info(
pkgs: Iterable[str | tuple[str, Callable[[ModuleType], str | None] | None]]
) -> dict[str, str | None]:
"""get package versions for the passed required & optional packages"""
pversions: dict[str, str | None] = {"python": ".".join(map(str, sys.version_info))}
for pkg in pkgs:
if isinstance(pkg, (tuple, list)):
modname, ver_f = pkg
if ver_f is None:
ver_f = version_of_package
else:
modname = pkg
ver_f = version_of_package
try:
mod = importlib.import_module(modname)
pversions[modname] = ver_f(mod)
except Exception:
pversions[modname] = None
return pversions
def error_message(scheduler, workers, source, source_name="Client"):
from distributed.utils import asciitable
source = source.get("packages") if source else "UNKNOWN"
scheduler = scheduler.get("packages") if scheduler else "UNKNOWN"
workers = {k: v.get("packages") if v else "UNKNOWN" for k, v in workers.items()}
packages = set()
packages.update(source)
packages.update(scheduler)
for worker in workers:
packages.update(workers.get(worker))
errs = []
notes = []
for pkg in sorted(packages):
versions = set()
scheduler_version = (
scheduler.get(pkg, "MISSING") if isinstance(scheduler, dict) else scheduler
)
if pkg in scheduler_relevant_packages:
versions.add(scheduler_version)
source_version = (
source.get(pkg, "MISSING") if isinstance(source, dict) else source
)
versions.add(source_version)
worker_versions = {
workers[w].get(pkg, "MISSING")
if isinstance(workers[w], dict)
else workers[w]
for w in workers
}
versions |= worker_versions
if len(versions) <= 1:
continue
if len(worker_versions) == 1:
worker_versions = list(worker_versions)[0]
elif len(worker_versions) == 0:
worker_versions = None
errs.append((pkg, source_version, scheduler_version, worker_versions))
if pkg in notes_mismatch_package.keys():
notes.append(f"- {pkg}: {notes_mismatch_package[pkg]}")
out = {"warning": "", "error": ""}
if errs:
err_table = asciitable(["Package", source_name, "Scheduler", "Workers"], errs)
err_msg = f"Mismatched versions found\n\n{err_table}"
if notes:
err_msg += "\nNotes: \n{}".format("\n".join(notes))
out["warning"] += err_msg
return out
class VersionMismatchWarning(Warning):
"""Indicates version mismatch between nodes"""
|
a859fa8b98e420ee3a7ee28848aaae686564c4ed
|
80f94bea418d7956df1ba19d4d6a1d7715a94ade
|
/test/unit/data/model/migrations/testing_utils/versions/db1/2e8a580bc79a_drop_sqlachemymigrate_table.py
|
d80b518976b8edf338ab65b3d5a62fed2411caf6
|
[
"CC-BY-2.5",
"MIT",
"CC-BY-3.0",
"AFL-3.0"
] |
permissive
|
galaxyproject/galaxy
|
5748409eb6693b1611f289d164f85e20c3237495
|
b9ae7a16ba0465995e880ae9701b7e87226b9bab
|
refs/heads/dev
| 2023-08-28T22:35:51.248138
| 2023-08-26T08:02:33
| 2023-08-26T08:02:33
| 31,211,061
| 1,277
| 1,137
|
NOASSERTION
| 2023-09-14T19:39:01
| 2015-02-23T14:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 916
|
py
|
2e8a580bc79a_drop_sqlachemymigrate_table.py
|
"""drop sqlachemymigrate table
Revision ID: 2e8a580bc79a
Revises: 62695fac6cc0
Create Date: 2021-11-05 16:29:19.123118
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "2e8a580bc79a"
down_revision = "62695fac6cc0"
branch_labels = None
depends_on = None
def upgrade():
# This table exists in both schemas: gxy and tsi. With a combined database,
# this migration will be applied twice to the same database, so we ignore
# the error that happens on the second run when the table has been dropped.
try:
op.drop_table("migrate_version", must_exist=True)
except sa.exc.InvalidRequestError:
pass
def downgrade():
op.create_table(
"migrate_version",
sa.Column("repository_id", sa.String(250), primary_key=True),
sa.Column("repository_path", sa.Text),
sa.Column("version", sa.Integer),
)
|
80aa73d4de5081ed3b7e483c69b58cd6d93f54b9
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-Intents/PyObjCTest/test_inpersonhandle.py
|
ff4b7da73716abd2b02175c01604be9868ad54e1
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 456
|
py
|
test_inpersonhandle.py
|
from PyObjCTools.TestSupport import TestCase, min_os_level
import Intents
class TestINPersonHandle(TestCase):
def test_enum_types(self):
self.assertIsEnumType(Intents.INPersonHandleType)
@min_os_level("10.12")
def testConstants(self):
self.assertEqual(Intents.INPersonHandleTypeUnknown, 0)
self.assertEqual(Intents.INPersonHandleTypeEmailAddress, 1)
self.assertEqual(Intents.INPersonHandleTypePhoneNumber, 2)
|
b30a4d6a1d51bf88305c130eea39a1375884c1c2
|
9241974e50a37303163303660f66f6b77cc5632a
|
/pre_commit/staged_files_only.py
|
8812356560385b4b706b8ac196c884f74f43791b
|
[
"MIT"
] |
permissive
|
pre-commit/pre-commit
|
c138e1bd540c7515c10f0d61f3d7a4ef66d5bf0a
|
a1f1d1915646865be2fe84d04633ba964feb0ba0
|
refs/heads/main
| 2023-08-19T08:07:21.342724
| 2023-08-15T14:19:29
| 2023-08-15T14:19:29
| 17,689,377
| 10,804
| 912
|
MIT
| 2023-09-12T00:06:21
| 2014-03-13T00:39:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,922
|
py
|
staged_files_only.py
|
from __future__ import annotations
import contextlib
import logging
import os.path
import time
from typing import Generator
from pre_commit import git
from pre_commit.errors import FatalError
from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
from pre_commit.util import cmd_output_b
from pre_commit.xargs import xargs
logger = logging.getLogger('pre_commit')
# without forcing submodule.recurse=0, changes in nested submodules will be
# discarded if `submodule.recurse=1` is configured
# we choose this instead of `--no-recurse-submodules` because it works on
# versions of git before that option was added to `git checkout`
_CHECKOUT_CMD = ('git', '-c', 'submodule.recurse=0', 'checkout', '--', '.')
def _git_apply(patch: str) -> None:
args = ('apply', '--whitespace=nowarn', patch)
try:
cmd_output_b('git', *args)
except CalledProcessError:
# Retry with autocrlf=false -- see #570
cmd_output_b('git', '-c', 'core.autocrlf=false', *args)
@contextlib.contextmanager
def _intent_to_add_cleared() -> Generator[None, None, None]:
intent_to_add = git.intent_to_add_files()
if intent_to_add:
logger.warning('Unstaged intent-to-add files detected.')
xargs(('git', 'rm', '--cached', '--'), intent_to_add)
try:
yield
finally:
xargs(('git', 'add', '--intent-to-add', '--'), intent_to_add)
else:
yield
@contextlib.contextmanager
def _unstaged_changes_cleared(patch_dir: str) -> Generator[None, None, None]:
tree = cmd_output('git', 'write-tree')[1].strip()
diff_cmd = (
'git', 'diff-index', '--ignore-submodules', '--binary',
'--exit-code', '--no-color', '--no-ext-diff', tree, '--',
)
retcode, diff_stdout, diff_stderr = cmd_output_b(*diff_cmd, check=False)
if retcode == 0:
# There weren't any staged files so we don't need to do anything
# special
yield
elif retcode == 1 and diff_stdout.strip():
patch_filename = f'patch{int(time.time())}-{os.getpid()}'
patch_filename = os.path.join(patch_dir, patch_filename)
logger.warning('Unstaged files detected.')
logger.info(f'Stashing unstaged files to {patch_filename}.')
# Save the current unstaged changes as a patch
os.makedirs(patch_dir, exist_ok=True)
with open(patch_filename, 'wb') as patch_file:
patch_file.write(diff_stdout)
# prevent recursive post-checkout hooks (#1418)
no_checkout_env = dict(os.environ, _PRE_COMMIT_SKIP_POST_CHECKOUT='1')
try:
cmd_output_b(*_CHECKOUT_CMD, env=no_checkout_env)
yield
finally:
# Try to apply the patch we saved
try:
_git_apply(patch_filename)
except CalledProcessError:
logger.warning(
'Stashed changes conflicted with hook auto-fixes... '
'Rolling back fixes...',
)
# We failed to apply the patch, presumably due to fixes made
# by hooks.
# Roll back the changes made by hooks.
cmd_output_b(*_CHECKOUT_CMD, env=no_checkout_env)
_git_apply(patch_filename)
logger.info(f'Restored changes from {patch_filename}.')
else: # pragma: win32 no cover
# some error occurred while requesting the diff
e = CalledProcessError(retcode, diff_cmd, b'', diff_stderr)
raise FatalError(
f'pre-commit failed to diff -- perhaps due to permissions?\n\n{e}',
)
@contextlib.contextmanager
def staged_files_only(patch_dir: str) -> Generator[None, None, None]:
"""Clear any unstaged changes from the git working directory inside this
context.
"""
with _intent_to_add_cleared(), _unstaged_changes_cleared(patch_dir):
yield
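# Hedged usage sketch (not part of pre-commit): inside the context manager the
# working tree only contains staged changes; unstaged edits are stashed to a
# patch under `patch_dir` and re-applied on exit. The directory and the
# hook-running callable below are hypothetical placeholders.
#
#     with staged_files_only("/tmp/pre-commit-patches"):
#         run_hooks_against_staged_files()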
|
4d80b13ca5339af09d834a6301252dffcaefc0e1
|
e9869359c839c8c175ae7877bc35dcfdfe4058f8
|
/kornia/feature/disk/__init__.py
|
396caa4755adb5c41f952d76e0c2f5eb23978d41
|
[
"Apache-2.0"
] |
permissive
|
kornia/kornia
|
80f93eae6a70b8bc0c9784f92a842ab9a6ab54ae
|
1e0f8baa7318c05b17ea6dbb48605691bca8972f
|
refs/heads/master
| 2023-08-31T06:32:45.960859
| 2023-08-30T21:59:41
| 2023-08-30T21:59:41
| 145,693,916
| 7,351
| 833
|
Apache-2.0
| 2023-09-12T21:59:29
| 2018-08-22T10:31:37
|
Python
|
UTF-8
|
Python
| false
| false
| 57
|
py
|
__init__.py
|
from .disk import DISK
from .structs import DISKFeatures
|
676b7997c153a6d763eda671713a7c8f47c414e2
|
eb7afa613940f5a3f202352a94dd996edcb6bed5
|
/boto3_type_annotations/boto3_type_annotations/xray/client.py
|
cb6e6e74dce0bd41f9245add32734023373fdec6
|
[
"MIT"
] |
permissive
|
alliefitter/boto3_type_annotations
|
e4da614e27a1d2ad3c9c653c50b8e30108180da5
|
2a88aa562b1aee6e8a6cc30402980884b3707fbb
|
refs/heads/master
| 2020-04-05T22:05:12.689913
| 2019-11-28T03:32:13
| 2019-11-28T03:32:13
| 157,244,330
| 131
| 11
|
MIT
| 2023-04-21T17:17:03
| 2018-11-12T16:38:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,861
|
py
|
client.py
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from datetime import datetime
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def batch_get_traces(self, TraceIds: List, NextToken: str = None) -> Dict:
pass
def can_paginate(self, operation_name: str = None):
pass
def create_group(self, GroupName: str, FilterExpression: str = None) -> Dict:
pass
def create_sampling_rule(self, SamplingRule: Dict) -> Dict:
pass
def delete_group(self, GroupName: str = None, GroupARN: str = None) -> Dict:
pass
def delete_sampling_rule(self, RuleName: str = None, RuleARN: str = None) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_encryption_config(self) -> Dict:
pass
def get_group(self, GroupName: str = None, GroupARN: str = None) -> Dict:
pass
def get_groups(self, NextToken: str = None) -> Dict:
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_sampling_rules(self, NextToken: str = None) -> Dict:
pass
def get_sampling_statistic_summaries(self, NextToken: str = None) -> Dict:
pass
def get_sampling_targets(self, SamplingStatisticsDocuments: List) -> Dict:
pass
def get_service_graph(self, StartTime: datetime, EndTime: datetime, GroupName: str = None, GroupARN: str = None, NextToken: str = None) -> Dict:
pass
def get_time_series_service_statistics(self, StartTime: datetime, EndTime: datetime, GroupName: str = None, GroupARN: str = None, EntitySelectorExpression: str = None, Period: int = None, NextToken: str = None) -> Dict:
pass
def get_trace_graph(self, TraceIds: List, NextToken: str = None) -> Dict:
pass
def get_trace_summaries(self, StartTime: datetime, EndTime: datetime, TimeRangeType: str = None, Sampling: bool = None, SamplingStrategy: Dict = None, FilterExpression: str = None, NextToken: str = None) -> Dict:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def put_encryption_config(self, Type: str, KeyId: str = None) -> Dict:
pass
def put_telemetry_records(self, TelemetryRecords: List, EC2InstanceId: str = None, Hostname: str = None, ResourceARN: str = None) -> Dict:
pass
def put_trace_segments(self, TraceSegmentDocuments: List) -> Dict:
pass
def update_group(self, GroupName: str = None, GroupARN: str = None, FilterExpression: str = None) -> Dict:
pass
def update_sampling_rule(self, SamplingRuleUpdate: Dict) -> Dict:
pass
|
d307f39a6f3b3b50b33b052f5afa3a5e0689db07
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/SE-Net/modelarts/start_train.py
|
55d61040bc72ce18118d388fbcc9569568c2eda9
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 10,825
|
py
|
start_train.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
This is the boot file for ModelArts platform.
Firstly, the train datasets are copied from obs to ModelArts.
Then, the training command string is concatenated and executed using 'os.system()'.
"""
import os
import ast
import argparse
import glob
import datetime
import numpy as np
import moxing as mox
from mindspore import Tensor, export, context
from mindspore.nn.optim.momentum import Momentum
from mindspore.train.model import Model
from mindspore.context import ParallelMode
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore.common import set_seed
from mindspore.parallel import set_algo_parameters
import mindspore.nn as nn
import mindspore.common.initializer as weight_init
from src.lr_generator import get_lr
from src.CrossEntropySmooth import CrossEntropySmooth
print(os.system('env'))
def obs_data2modelarts(FLAGS):
"""
Copy train data from obs to modelarts by using moxing api.
"""
start = datetime.datetime.now()
print("===>>>Copy files from obs:{} to modelarts dir:{}".format(FLAGS.data_url, FLAGS.modelarts_data_dir))
mox.file.copy_parallel(src_url=FLAGS.data_url, dst_url=FLAGS.modelarts_data_dir)
end = datetime.datetime.now()
print("===>>>Copy from obs to modelarts, time use:{}(s)".format((end - start).seconds))
files = os.listdir(FLAGS.modelarts_data_dir)
print("===>>>Files:", files)
def modelarts_result2obs(FLAGS):
"""
Copy debug data from modelarts to obs.
According to the switch flags, the debug data may contains auto tune repository,
dump data for precision comparison, even the computation graph and profiling data.
"""
mox.file.copy_parallel(src_url=FLAGS.modelarts_result_dir, dst_url=FLAGS.train_url)
print("===>>>Copy Event or Checkpoint from modelarts dir:{} to obs:{}".format(FLAGS.modelarts_result_dir,
FLAGS.train_url))
files = os.listdir()
print("===>>>current Files:", files)
mox.file.copy(src_url='SE-net.air', dst_url=FLAGS.train_url+'/SE-net.air')
def export_AIR(args_opt):
"""start modelarts export"""
ckpt_list = glob.glob(args_opt.modelarts_result_dir + "/resnet*.ckpt")
    if not ckpt_list:
        print("ckpt file not generated.")
        return
ckpt_list.sort(key=os.path.getmtime)
ckpt_model = ckpt_list[-1]
print("checkpoint path", ckpt_model)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
if args_opt.device_target == "Ascend":
context.set_context(device_id=args_opt.device_id)
net_ = resnet(args_opt.class_num)
param_dict_ = load_checkpoint(ckpt_model)
load_param_into_net(net_, param_dict_)
input_arr = Tensor(np.zeros([1, 3, 224, 224], np.float32))
    export(net_, input_arr, file_name='SE-net', file_format='AIR')
if __name__ == '__main__':
## Note: the code dir is not the same as work dir on ModelArts Platform!!!
code_dir = os.path.dirname(__file__)
work_dir = os.getcwd()
print("===>>>code_dir:{}, work_dir:{}".format(code_dir, work_dir))
parser = argparse.ArgumentParser()
parser.add_argument("--train_url", type=str, default="./output")
parser.add_argument("--data_url", type=str, default="./dataset")
parser.add_argument("--modelarts_data_dir", type=str, default="/cache/dataset")
parser.add_argument("--modelarts_result_dir", type=str, default="/cache/result")#modelarts train result: /cache/result
parser.add_argument('--net', type=str, default=None, help='Resnet Model, either resnet50 or resnet101')
parser.add_argument('--dataset', type=str, default="cifar10", help='Dataset, either cifar10 or imagenet2012')
parser.add_argument('--epoch_size', type=int, default=1, help='epoch_size')
parser.add_argument('--pretrain_epoch_size', type=int, default=0, help='pretrain_epoch_size, use with pre_trained')
parser.add_argument('--batch_size', type=int, default=256, help='batch_size')
parser.add_argument('--class_num', type=int, default=1001, help='class_num')
parser.add_argument('--run_distribute', type=ast.literal_eval, default=False, help='Run distribute')
parser.add_argument('--device_num', type=int, default=1, help='Device num.')
parser.add_argument("--device_id", type=int, default=0, help="Device id")
parser.add_argument('--device_target', type=str, default='Ascend', choices=("Ascend", "GPU", "CPU"),
help="Device target, support Ascend, GPU and CPU.")
parser.add_argument('--pre_trained', type=str, default=None, help='Pretrained checkpoint path')
parser.add_argument('--parameter_server', type=ast.literal_eval, default=False, help='Run parameter server train')
args = parser.parse_args()
set_seed(1)
if args.net == "se-resnet50":
from src.resnet import se_resnet50 as resnet
from src.config import config2 as config
from src.dataset import create_dataset2 as create_dataset
## copy dataset from obs to modelarts
obs_data2modelarts(args)
## start train
target = args.device_target
ckpt_save_dir = args.modelarts_result_dir
# init context
context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)
if args.parameter_server:
context.set_ps_context(enable_ps=True)
if args.run_distribute:
if target == "Ascend":
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(device_id=device_id)
context.set_auto_parallel_context(device_num=args.device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
set_algo_parameters(elementwise_op_strategy_follow=True)
init()
elif target == "GPU":
init('nccl')
context.reset_auto_parallel_context()
rank = get_rank()
device_num = get_group_size()
context.set_auto_parallel_context(device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
if args.net == "se-resnet50":
context.set_auto_parallel_context(all_reduce_fusion_config=[85, 160])
else:
context.set_auto_parallel_context(all_reduce_fusion_config=[180, 313])
# create dataset
dataset = create_dataset(dataset_path=args.modelarts_data_dir, do_train=True, repeat_num=1,
batch_size=args.batch_size, target=target, distribute=args.run_distribute)
step_size = dataset.get_dataset_size()
# define net
net = resnet(class_num=args.class_num)
if args.parameter_server:
net.set_param_ps()
# init weight
if args.pre_trained:
param_dict = load_checkpoint(args.modelarts_data_dir+'/'+args.pre_trained)
load_param_into_net(net, param_dict)
else:
for _, cell in net.cells_and_names():
if isinstance(cell, nn.Conv2d):
cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),
cell.weight.shape,
cell.weight.dtype))
if isinstance(cell, nn.Dense):
cell.weight.set_data(weight_init.initializer(weight_init.TruncatedNormal(),
cell.weight.shape,
cell.weight.dtype))
# init lr
if args.net == "se-resnet50":
lr = get_lr(lr_init=config.lr_init, lr_end=config.lr_end, lr_max=config.lr_max,
warmup_epochs=config.warmup_epochs, total_epochs=args.epoch_size, steps_per_epoch=step_size,
lr_decay_mode=config.lr_decay_mode)
lr = Tensor(lr)
# define opt
decayed_params = []
no_decayed_params = []
for param in net.trainable_params():
if 'beta' not in param.name and 'gamma' not in param.name and 'bias' not in param.name:
decayed_params.append(param)
else:
no_decayed_params.append(param)
group_params = [{'params': decayed_params, 'weight_decay': config.weight_decay},
{'params': no_decayed_params},
{'order_params': net.trainable_params()}]
opt = Momentum(group_params, lr, config.momentum, loss_scale=config.loss_scale)
# define loss, model
if target in ["Ascend", "GPU"]:
if args.dataset == "imagenet2012":
if not config.use_label_smooth:
config.label_smooth_factor = 0.0
loss = CrossEntropySmooth(sparse=True, reduction="mean",
smooth_factor=config.label_smooth_factor, num_classes=args.class_num)
loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'},
amp_level="O2", keep_batchnorm_fp32=False)
# define callbacks
time_cb = TimeMonitor(data_size=step_size)
loss_cb = LossMonitor()
cb = [time_cb, loss_cb]
if config.save_checkpoint:
config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
keep_checkpoint_max=config.keep_checkpoint_max)
ckpt_cb = ModelCheckpoint(prefix="resnet", directory=ckpt_save_dir, config=config_ck)
cb += [ckpt_cb]
# train model
model.train(args.epoch_size - args.pretrain_epoch_size, dataset, callbacks=cb,
sink_size=dataset.get_dataset_size(), dataset_sink_mode=True)
## start export air
export_AIR(args)
## copy result from modelarts to obs
modelarts_result2obs(args)
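# Hedged usage sketch (the OBS paths below are placeholders, not real buckets): on ModelArts this
# boot file would typically be launched along the lines of
# python start_train.py --data_url=obs://my-bucket/imagenet --train_url=obs://my-bucket/output \
# --net=se-resnet50 --dataset=imagenet2012 --epoch_size=90 --run_distribute=True --device_num=8
# which copies the dataset to /cache/dataset, trains, exports SE-net.air and copies the results back to OBS.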
|
270471499d14f52ae58989aaa374c02576db763e
|
1aa2efeffa14d4e409a2749ebdb09fce460a38f5
|
/poker/scraper/table.py
|
75762b7f844021f571e1ca4aa19af51faec3551e
|
[] |
no_license
|
dickreuter/Poker
|
a87d03a515c88da68d1c125755d14f77bd524f68
|
f472ca29e3e7b2384dd1b9cd13b390ddd869bbbb
|
refs/heads/master
| 2023-08-28T07:21:57.239803
| 2023-08-21T15:33:49
| 2023-08-21T15:33:49
| 55,018,634
| 1,781
| 583
| null | 2023-08-14T23:30:32
| 2016-03-29T23:58:12
|
Python
|
UTF-8
|
Python
| false
| false
| 9,691
|
py
|
table.py
|
import logging
import sys
import time
import numpy as np
from poker.decisionmaker.genetic_algorithm import GeneticAlgorithm
from poker.scraper.table_scraper import TableScraper
from poker.tools.helper import get_config
from poker.tools.vbox_manager import VirtualBoxController
# pylint: disable=no-member,unused-variable,no-self-use
class Table(TableScraper):
# General tools that are used to operate the pokerbot and are valid for all tables
def __init__(self, p, table_dict, gui_signals, game_logger, version, nn_model=None):
self.version = version
self.ip = ''
self.logger = logging.getLogger('table')
self.logger.setLevel(logging.DEBUG)
self.gui_signals = gui_signals
self.game_logger = game_logger
self.nn_model = nn_model
super().__init__(table_dict)
def take_screenshot(self, initial, p):
if initial:
self.gui_signals.signal_status.emit("")
self.gui_signals.signal_progressbar_reset.emit()
if self.gui_signals.exit_thread == True: sys.exit()
if self.gui_signals.pause_thread == True:
while self.gui_signals.pause_thread:
time.sleep(.2)
if self.gui_signals.exit_thread == True: sys.exit()
time.sleep(0.1)
config = get_config()
control = config.config.get('main', 'control')
if control == 'Direct mouse control':
self.take_screenshot2()
self.entireScreenPIL = self.screenshot
else:
try:
vb = VirtualBoxController()
self.entireScreenPIL = vb.get_screenshot_vbox()
self.logger.debug("Screenshot taken from virtual machine")
            except Exception:
                self.logger.warning("No virtual machine found. Press SETUP to re-initialize the VM controller")
# gui_signals.signal_open_setup.emit(p,L)
self.take_screenshot2()
self.entireScreenPIL = self.screenshot
self.gui_signals.signal_status.emit(str(p.current_strategy))
self.gui_signals.signal_progressbar_increase.emit(5)
self.logger.info("Screenshot taken")
return True
def call_genetic_algorithm(self, p):
self.gui_signals.signal_progressbar_increase.emit(5)
self.gui_signals.signal_status.emit("Updating charts and work in background")
n = self.game_logger.get_game_count(p.current_strategy)
lg = int(p.selected_strategy['considerLastGames']) # only consider lg last games to see if there was a loss
f = self.game_logger.get_strategy_return(p.current_strategy, lg)
self.gui_signals.signal_label_number_update.emit('gamenumber', str(int(n)))
total_winnings = self.game_logger.get_strategy_return(p.current_strategy, 9999999)
winnings_per_bb_100 = total_winnings / p.selected_strategy['bigBlind'] / n * 100 if n > 0 else 0
self.logger.info("Total Strategy winnings: %s", total_winnings)
self.logger.info("Winnings in BB per 100 hands: %s", np.round(winnings_per_bb_100, 2))
self.gui_signals.signal_label_number_update.emit('winnings', str(np.round(winnings_per_bb_100, 2)))
self.logger.info("Game #" + str(n) + " - Last " + str(lg) + ": $" + str(f))
if n % int(p.selected_strategy['strategyIterationGames']) == 0 and f < float(
p.selected_strategy['minimumLossForIteration']):
self.gui_signals.signal_status.emit("***Improving current strategy***")
self.logger.info("***Improving current strategy***")
# winsound.Beep(500, 100)
GeneticAlgorithm(True, self.game_logger)
p.read_strategy()
else:
pass
# self.logger.debug("Criteria not met for running genetic algorithm. Recommendation would be as follows:")
# if n % 50 == 0: GeneticAlgorithm(False, logger, L)
def crop_image(self, original, left, top, right, bottom):
# original.show()
width, height = original.size # Get dimensions
cropped_example = original.crop((left, top, right, bottom))
# cropped_example.show()
return cropped_example
def get_utg_from_abs_pos(self, abs_pos, dealer_pos):
utg_pos = (abs_pos - dealer_pos + 4) % self.total_players
return utg_pos
def get_abs_from_utg_pos(self, utg_pos, dealer_pos):
abs_pos = (utg_pos + dealer_pos - 4) % self.total_players
return abs_pos
def get_raisers_and_callers(self, p, reference_pot):
first_raiser = np.nan
second_raiser = np.nan
first_caller = np.nan
for n in range(5): # n is absolute position of other player, 0 is player after bot
i = (
self.dealer_position + n + 3 - 2) % 5 # less myself as 0 is now first other player to my left and no longer myself
self.logger.debug("Go through pots to find raiser abs: {0} {1}".format(i, self.other_players[i]['pot']))
if self.other_players[i]['pot'] != '': # check if not empty (otherwise can't convert string)
if self.other_players[i]['pot'] > reference_pot:
# reference pot is bb for first round and bot for second round
if np.isnan(first_raiser):
first_raiser = int(i)
first_raiser_pot = self.other_players[i]['pot']
else:
if self.other_players[i]['pot'] > first_raiser_pot:
second_raiser = int(i)
first_raiser_utg = self.get_utg_from_abs_pos(first_raiser, self.dealer_position)
highest_raiser = np.nanmax([first_raiser, second_raiser])
second_raiser_utg = self.get_utg_from_abs_pos(second_raiser, self.dealer_position)
first_possible_caller = int(self.big_blind_position_abs_op + 1) if np.isnan(highest_raiser) else int(
highest_raiser + 1)
self.logger.debug("First possible potential caller is: " + str(first_possible_caller))
# get first caller after raise in preflop
for n in range(first_possible_caller, 5): # n is absolute position of other player, 0 is player after bot
self.logger.debug(
"Go through pots to find caller abs: " + str(n) + ": " + str(self.other_players[n]['pot']))
if self.other_players[n]['pot'] != '': # check if not empty (otherwise can't convert string)
if (self.other_players[n]['pot'] == float(
p.selected_strategy['bigBlind']) and not n == self.big_blind_position_abs_op) or \
self.other_players[n]['pot'] > float(p.selected_strategy['bigBlind']):
first_caller = int(n)
break
first_caller_utg = self.get_utg_from_abs_pos(first_caller, self.dealer_position)
# check for callers between bot and first raiser. If so, first raiser becomes second raiser and caller becomes first raiser
first_possible_caller = 0
if self.position_utg_plus == 3: first_possible_caller = 1
if self.position_utg_plus == 4: first_possible_caller = 2
if not np.isnan(first_raiser):
for n in range(first_possible_caller, first_raiser):
if self.other_players[n]['status'] == 1 and \
not (self.other_players[n]['utg_position'] == 5 and p.selected_strategy['bigBlind']) and \
not (self.other_players[n]['utg_position'] == 4 and p.selected_strategy['smallBlind']) and \
not (self.other_players[n]['pot'] == ''):
second_raiser = first_raiser
first_raiser = n
first_raiser_utg = self.get_utg_from_abs_pos(first_raiser, self.dealer_position)
second_raiser_utg = self.get_utg_from_abs_pos(second_raiser, self.dealer_position)
break
self.logger.debug("First raiser abs: " + str(first_raiser))
self.logger.info("First raiser utg+" + str(first_raiser_utg))
self.logger.debug("Second raiser abs: " + str(second_raiser))
self.logger.info("Highest raiser abs: " + str(highest_raiser))
self.logger.debug("First caller abs: " + str(first_caller))
self.logger.info("First caller utg+" + str(first_caller_utg))
return first_raiser, second_raiser, first_caller, first_raiser_utg, second_raiser_utg, first_caller_utg
def derive_preflop_sheet_name(self, t, h, first_raiser_utg, first_caller_utg, second_raiser_utg):
first_raiser_string = 'R' if not np.isnan(first_raiser_utg) else ''
first_raiser_number = str(first_raiser_utg + 1) if first_raiser_string != '' else ''
second_raiser_string = 'R' if not np.isnan(second_raiser_utg) else ''
second_raiser_number = str(second_raiser_utg + 1) if second_raiser_string != '' else ''
first_caller_string = 'C' if not np.isnan(first_caller_utg) else ''
first_caller_number = str(first_caller_utg + 1) if first_caller_string != '' else ''
round_string = '2' if h.round_number == 1 else ''
sheet_name = str(t.position_utg_plus + 1) + \
round_string + \
str(first_raiser_string) + str(first_raiser_number) + \
str(second_raiser_string) + str(second_raiser_number) + \
str(first_caller_string) + str(first_caller_number)
if h.round_number == 2:
sheet_name = 'R1R2R1A2'
self.preflop_sheet_name = sheet_name
return self.preflop_sheet_name
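# Hedged worked example of the seat-index arithmetic in get_utg_from_abs_pos / get_abs_from_utg_pos
# above; the table size of 6 and dealer position of 2 are illustrative assumptions only.
def _utg_abs_roundtrip_example(total_players=6, dealer_pos=2):
    for abs_pos in range(total_players):
        utg_pos = (abs_pos - dealer_pos + 4) % total_players # absolute seat -> UTG-relative seat
        back = (utg_pos + dealer_pos - 4) % total_players # UTG-relative seat -> absolute seat
        assert back == abs_pos # the two mappings are inverses of each other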
|
f112d6d2cee84c76654108eeba56ee93f20ee3e5
|
2bd7a9bd2aa6ea6ef41745af4388d607ade5800e
|
/pySOT/controller/controller.py
|
9aeb2837649c24fdf9d1d2198b87435a5aca226a
|
[
"BSD-3-Clause"
] |
permissive
|
dme65/pySOT
|
c5071eca7bc35d90d590b517bbabca13cb3e27fd
|
c8f04fd4ed30d49bb61adb008134741319b512a4
|
refs/heads/master
| 2021-11-06T02:57:12.418452
| 2021-09-07T15:33:47
| 2021-10-27T17:48:18
| 36,836,292
| 208
| 52
|
NOASSERTION
| 2021-09-07T17:49:25
| 2015-06-03T23:27:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,857
|
py
|
controller.py
|
"""
.. module:: controller
:synopsis: pySOT controllers
.. moduleauthor:: David Eriksson <dme65@cornell.edu>,
:Module: controller
:Author: David Eriksson <dme65@cornell.edu>,
"""
import copy
import os.path
import dill
class CheckpointController(object):
"""Checkpoint controller
Controller that uses dill to take snapshots of the strategy each time
an evaluation is completed, killed, or the run is terminated. We assume
that the strategy can be pickled, or this won't work. We currently do not
respect potential termination callbacks and failed evaluation callbacks.
The strategy needs to implement a resume method that is called when a run
is resumed. The strategy object can assume that all pending evaluations
have been killed and that their respective callbacks won't be executed
:param controller: POAP controller
:type controller: Controller
:param fname: Filename for checkpoint file (file cannot exist for new run)
:type fname: string
:ivar controller: POAP controller
:ivar fname: Filename for snapshot
"""
def __init__(self, controller, fname="checkpoint.pysot"):
controller.add_feval_callback(self._add_on_update)
controller.add_feval_callback(self.on_new_feval)
controller.add_term_callback(self.on_terminate)
self.controller = controller
self.fname = fname
def _add_on_update(self, record):
"""Internal handler -- add on_update callback to all new fevals.
:param record: Evaluation record
:type record: EvalRecord
"""
record.add_callback(self.on_update)
def on_new_feval(self, record):
"""Handle new function evaluation request.
:param record: Evaluation record
:type record: EvalRecord
"""
pass
def _save(self):
"""Save the strategy by calling the save method."""
self.controller.strategy.save(self.fname)
def resume(self):
"""Resume an optimization run.
:return: The record corresponding to the best solution
:rtype: EvalRecord
"""
if not os.path.isfile(self.fname):
raise IOError("Checkpoint file does not exist")
        with open(self.fname, "rb") as inp:
            self.controller.strategy = dill.load(inp)
fevals = copy.copy(self.controller.strategy.fevals)
self.controller.fevals = fevals
self.controller.strategy.resume()
return self.controller.run()
def on_update(self, record):
"""Handle feval update.
:param record: Evaluation record
:type record: EvalRecord
"""
if record.is_completed:
self.on_complete(record)
elif record.is_killed:
self.on_kill(record)
elif record.is_cancelled:
self.on_cancel(record)
def on_complete(self, record):
"""Handle feval completion.
:param record: Evaluation record
:type record: EvalRecord
"""
self._save()
def on_kill(self, record):
""""Handle record killed.
:param record: Evaluation record
:type record: EvalRecord
"""
self._save()
def on_cancel(self, record):
""""Handle record cancelled.
:param record: Evaluation record
:type record: EvalRecord
"""
self._save()
def on_terminate(self):
""""Handle termination."""
self._save()
def run(self):
"""Start the optimization run.
Make sure we do not overwrite any existing checkpointing files
:return: The record corresponding to the best solution
:rtype: EvalRecord
"""
if os.path.isfile(self.fname):
raise IOError("Checkpoint file already exists, aborting...")
return self.controller.run()
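# Hedged usage sketch: `_DummyController` is a hypothetical stand-in for a POAP controller
# (e.g. poap.controller.SerialController); only the callback-registration hooks used by
# CheckpointController.__init__ are stubbed out here.
if __name__ == "__main__":
    class _DummyController(object):
        def __init__(self):
            self.strategy = None # a real run would assign a picklable strategy with save()/resume()
            self.fevals = []
        def add_feval_callback(self, callback):
            pass
        def add_term_callback(self, callback):
            pass
        def run(self):
            return None
    checkpoint = CheckpointController(_DummyController(), fname="checkpoint.pysot")
    # checkpoint.run() starts a fresh run (the checkpoint file must not exist yet);
    # checkpoint.resume() restarts from an existing checkpoint file instead.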
|
d15f1428ae1a2410f1d69a7993c55bf9d3101288
|
c2d48caa5db7e746a38beca625406fcf47379d3c
|
/src/olympia/addons/migrations/0012_remove_addon_public_stats.py
|
0e953e04fc2652de95470d90acfe3632c456a939
|
[] |
permissive
|
mozilla/addons-server
|
1f6269ec0a4aa5a0142a5f81978ef674daf213a7
|
e0f043bca8a64478e2ba62f877c9dc28620be22f
|
refs/heads/master
| 2023-09-01T09:34:41.867534
| 2023-09-01T07:21:22
| 2023-09-01T07:21:22
| 16,416,867
| 920
| 590
|
BSD-3-Clause
| 2023-09-14T16:15:01
| 2014-01-31T18:44:15
|
Python
|
UTF-8
|
Python
| false
| false
| 333
|
py
|
0012_remove_addon_public_stats.py
|
# Generated by Django 2.2.12 on 2020-06-10 08:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('addons', '0011_auto_20200610_0553'),
]
operations = [
migrations.RemoveField(
model_name='addon',
name='public_stats',
),
]
|
ad3834cb8faf27cec32c7a9cf786825847abfaf2
|
90502c84304eac37315b77351ce877ad13edad7f
|
/comicweb/comic/__init__.py
|
da35ecdbc1c8e85b0f063149d297abf77c752044
|
[] |
no_license
|
LoyalWilliams/comic
|
38fc8812120a0bb771d68dde4b533e91dc5afdb9
|
00fadfcfb6f4ca8d5e714e31416aebb8e12f2aaf
|
refs/heads/master
| 2021-11-26T20:22:26.995310
| 2021-08-29T14:10:00
| 2021-08-29T14:10:00
| 162,074,370
| 107
| 35
| null | 2021-06-10T21:05:35
| 2018-12-17T04:26:39
|
CSS
|
UTF-8
|
Python
| false
| false
| 5,833
|
py
|
__init__.py
|
img_urls=[[u'https://manhua.qpic.cn/manhua_detail/0/13_14_14_bab8fc39a723b36151072d336c983001_5174.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_11_55bf2b04ac0a8d7feee4d4289cf0eb78_5128.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_11_24280a7c97c3a06644a7ec5c36a5674f_5129.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_11_d5ca31783489df16a375594a440c1b02_5130.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_11_97300a659b188486dd9b66dd81d70dbb_5131.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_11_c4d6916d80b084b3232ef1ac518255db_5132.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_11_66b32c6824fa5043770dbcb0800e75cb_5133.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_ce30953ef26a9f3108c87dd1606fff89_5134.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_6d4b8ef2a9bb9c970e13b1ff7f9514ec_5135.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_86e43948742c6dc5e5519981f75aea59_5136.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_d8cefdebe28e5f71feebac636bc751fc_5137.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_4d6da23c60b2a6d6851458f79235198c_5138.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_74b1222d1c1b1240935fdc17c6cda011_5139.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_69f15e67e88ae00933e84f3cffb714dc_5140.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_2641f5b0a920231536a7c27a76c5a5c6_5141.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_de9789b0f4b41826e48040bd93550f86_5142.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_568f1a3343c8d9660367bfeb28b0b7e5_5143.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_ca9e91f5c4d8e773a1ad7936cbd51f2a_5144.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_bf79eee86743756dedefdaeda1765da1_5145.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_3d937660d800be2ddb1b230884045cff_5146.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_1b0901f0df9c5c6de14fe676d8bb11a4_5147.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_cf82877509a256dc9e2891f81dba871d_5148.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_866c29314aa3ed0415ea2404dfc83a7a_5149.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_8af2eec79b1ff59654d3631289e064ba_5150.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_9b8cb916886e138b4fc275acd4189dcb_5151.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_94ecb05c2637c55c42f90cb8a139a600_5152.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_e3ad902cda3db672a4860a2f019dc0d2_5153.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_12_7ed354416084d08be2d53a5d4c3a7493_5154.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_b64bf43bd10c048939176aaa31857e8a_5155.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_fbd077c73633813c0fbd0c26c85ad9f2_5156.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_8de9eef77024a1fd93b58be56f1875c7_5157.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_20d2dd492e1386e9ad3d827bb04c7ba3_5158.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_2576d82a7d6a54dce54c4bbc210fc280_5159.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_be74c65393cdd7f7eb0ea12f80d8c24b_5160.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_72cfb967c4f65bb12b67174f8e69afe2_5161.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_d2c2f59f339954b1bcf2c31a985ea220_5162.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_8570b8d18e53be38d6f84e5bd771e86d_5163.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_a5b6182eee8db9eaa8add10349463b12_5164.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_cee3c3e082ff0f32597136c505a3542d_5165.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_38d657686f0b24af193209b3f857140f_5166.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_bfb4889ab501e87e5cd441ef9c57f5d8_5167.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_e906423a730e061fb5903b1adbd98e2c_5168.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_67914b15c8f339066a5cd70eb796dede_5169.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_9294cf6e413a60e277b9d64b09eca4fd_5170.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_7037db00557bdb62a111145aeae7af3f_5171.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_aaacbc5a2da4d13297ae2bac4fe51293_5172.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_13_760bbf9518190f61f8ba7409695ada70_5173.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_15_b64730c01b69270e038b6eb205910349_5175.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_15_9ccbedbc08adc1412da8f8d62d8b9c8d_5176.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_15_b8504256b978ba3e1cdbcc4819bb47d3_5177.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_15_14b7c500bc1c88df071b803711001ec8_5178.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_15_ef0995a8a267b326f8f79146bc5733e4_5179.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_15_916669d9b1e57baf4eaf5248e94cdad0_5180.jpg/0'],
[u'https://manhua.qpic.cn/manhua_detail/0/13_14_15_279718b04cab3e4009116579381256f8_5181.jpg/0']],
|
32f3c14603e0f0221e939f8ddc27f311212ad86d
|
15eb68a30bd1bcd8c153ce3c8774e09ef3f4135d
|
/NightlyTests/tensorflow/non_eager/test_bn_reestimation.py
|
94ea12e33c1fa64e5e45146d2572701b2ab578d9
|
[
"BSD-3-Clause"
] |
permissive
|
quic/aimet
|
77a984af68fc3c46d98c707d18a14c95a3efdacf
|
5a406e657082b6a4f6e4bf48f0e46e085cb1e351
|
refs/heads/develop
| 2023-08-21T12:51:10.500286
| 2023-08-18T18:35:39
| 2023-08-18T18:35:39
| 257,688,216
| 1,676
| 339
|
NOASSERTION
| 2023-09-08T06:59:39
| 2020-04-21T18:57:10
|
Python
|
UTF-8
|
Python
| false
| false
| 18,391
|
py
|
test_bn_reestimation.py
|
# /usr/bin/env python3.6
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import pytest
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import logging
import json
import numpy as np
import tensorflow as tf
from aimet_common.utils import AimetLogger
from aimet_common.defs import QuantScheme
from aimet_tensorflow.quantsim import QuantizationSimModel
from aimet_tensorflow.bn_reestimation import reestimate_bn_stats, _get_all_tf_bn_vars_list
from aimet_tensorflow.batch_norm_fold import fold_all_batch_norms_to_scale
from aimet_tensorflow.common.graph_eval import initialize_uninitialized_vars
from aimet_tensorflow.utils.op.bn_mutable import modify_sess_bn_mutable
from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils
from aimet_tensorflow.common.connectedgraph import ConnectedGraph
from aimet_tensorflow.utils.op.bn_mutable import get_active_bn_ops, set_mutable_bn_is_training_var
#from Examples.tensorflow.utils.add_computational_nodes_in_graph import add_image_net_computational_nodes_in_graph
# currently multiple notebook examples are using the above utility function, so a copy is made here,
# TODO refactor to common TF util
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.WARN)
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Test)
AimetLogger.set_level_for_all_areas(logging.DEBUG)
tf.compat.v1.disable_eager_execution()
np.random.seed(0)
tf.compat.v1.set_random_seed(0)
def add_image_net_computational_nodes_in_graph(session: tf.compat.v1.Session, logits_name: str, num_classes: int):
"""
:param session: Tensorflow session to operate on
:param logits_name: Output tensor name of session graph
:param num_classes: No of classes in model data
"""
with session.graph.as_default():
# predicted value of the model
y_hat = session.graph.get_tensor_by_name(logits_name)
y_hat_argmax = tf.compat.v1.argmax(y_hat, axis=1)
# placeholder for the labels
y = tf.compat.v1.placeholder(tf.compat.v1.int64, shape=[None, num_classes], name='labels')
y_argmax = tf.compat.v1.argmax(y, axis=1)
# prediction Op
correct_prediction = tf.compat.v1.equal(y_hat_argmax, y_argmax)
# pylint: disable-msg=unused-variable
# accuracy Op: top1
top1_acc = tf.compat.v1.reduce_mean(tf.compat.v1.cast(correct_prediction, tf.compat.v1.float32), name='top1-acc')
# accuracy Op: top5
top5_acc = tf.compat.v1.reduce_mean(tf.compat.v1.cast(tf.compat.v1.nn.in_top_k(predictions=y_hat,
targets=tf.compat.v1.cast(y_argmax, tf.compat.v1.int32),
k=5),
tf.compat.v1.float32),
name='top5-acc')
# loss Op: loss
loss = tf.compat.v1.reduce_mean(tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=y, logits=y_hat))
def get_all_status(sess,
bn_mean_tf_var_list,
bn_variance_tf_var_list,
bn_momentum_tf_var_list,
bn_training_tf_var_list):
"""
get current all stats (momentum,training, mean,var) for debug and unit test
."""
with sess.graph.as_default():
bn_mean_dict = dict(zip(bn_mean_tf_var_list, sess.run([v for v in bn_mean_tf_var_list])))
bn_variance_dict = dict(zip(bn_variance_tf_var_list, sess.run([v for v in bn_variance_tf_var_list])))
bn_momentum_dict = dict(zip(bn_momentum_tf_var_list, sess.run([v for v in bn_momentum_tf_var_list])))
bn_training_dict = dict(zip(bn_training_tf_var_list, sess.run([v for v in bn_training_tf_var_list])))
return bn_mean_dict, bn_variance_dict, bn_momentum_dict, bn_training_dict
def is_dict_close_numpy_array_zeros(dict1):
for k in dict1.keys():
np_zeros = np.zeros(dict1[k].shape)
if not (np.allclose(dict1[k], np_zeros)):
return False
return True
def is_two_dict_close_numpy_array(dict1, dict2):
for k in dict1.keys():
if not (np.allclose(dict1[k], dict2[k])):
return False
return True
def is_two_dict_close_bool(dict1, dict2):
for k in dict1.keys():
if not (dict1[k] == dict2[k]):
return False
return True
def is_two_dict_close_float(dict1, dict2):
for k in dict1.keys():
if not (dict1[k] == pytest.approx(dict2[k])):
return False
return True
@pytest.fixture(scope="session")
def bn_num_batches():
return 4
@pytest.fixture(scope="session")
def batch_size():
return 2
@pytest.fixture
def bn_re_estimation_dataset(bn_num_batches, batch_size):
graph = tf.Graph()
with graph.as_default():
dummy_inputs = tf.random.normal((bn_num_batches * batch_size, 32, 32, 3))
dataset = tf.compat.v1.data.Dataset.from_tensor_slices(dummy_inputs)
dataset = dataset.batch(batch_size)
return dataset
class TestBNReEstimation:
def test_model_rewriter_ptq_reestimation_fold(self, bn_re_estimation_dataset, bn_num_batches):
tf.compat.v1.reset_default_graph()
model = tf.keras.applications.mobilenet_v2.MobileNetV2(weights=None, input_shape=(32, 32, 3))
graph = model.inputs[0].graph
sess = tf.compat.v1.Session(graph=graph)
initialize_uninitialized_vars(sess)
# model rewriter
start_op_names = ["input_1"]
end_op_names = ["predictions/Softmax"]
sess = modify_sess_bn_mutable(sess, start_op_names, end_op_names, training_tf_placeholder=False)
# PTQ
default_config_per_channel = {
"defaults":
{
"ops":
{
"is_output_quantized": "True"
},
"params":
{
"is_quantized": "True",
"is_symmetric": "True"
},
"strict_symmetric": "False",
"unsigned_symmetric": "True",
"per_channel_quantization": "True"
},
"params":
{
"bias":
{
"is_quantized": "False"
}
},
"op_type":
{
"Squeeze":
{
"is_output_quantized": "False"
},
"Pad":
{
"is_output_quantized": "False"
},
"Mean":
{
"is_output_quantized": "False"
}
},
"supergroups":
[
{
"op_list": ["Conv", "Relu"]
},
{
"op_list": ["Conv", "Clip"]
},
{
"op_list": ["Conv", "BatchNormalization", "Relu"]
},
{
"op_list": ["Add", "Relu"]
},
{
"op_list": ["Gemm", "Relu"]
}
],
"model_input":
{
"is_input_quantized": "True"
},
"model_output":
{}
}
config_file_path = "/tmp/default_config_per_channel.json"
with open(config_file_path, "w") as f:
json.dump(default_config_per_channel, f)
sim = QuantizationSimModel(sess, start_op_names, end_op_names, use_cuda=True,
quant_scheme=QuantScheme.training_range_learning_with_tf_init,
config_file=config_file_path)
def dummy_forward_pass(sess, args):
model_input = sess.graph.get_tensor_by_name("input_1:0")
model_output = sess.graph.get_tensor_by_name('predictions/Softmax:0')
dummy_val = np.random.randn(1, *model_input.shape[1:])
sess.run(model_output, feed_dict={model_input: dummy_val})
sim.compute_encodings(dummy_forward_pass, None)
# check bn_re_estimation
self._reestimate_and_compare_results(sim, bn_re_estimation_dataset, bn_num_batches, start_op_names, end_op_names)
# check bn_fold
model_input = sim.session.graph.get_tensor_by_name("input_1:0")
model_output = sim.session.graph.get_tensor_by_name('predictions/Softmax:0')
dummy_val = np.random.randn(128, *model_input.shape[1:])
output_baseline = sim.session.run(model_output, feed_dict={model_input: dummy_val})
fold_all_batch_norms_to_scale(sim, start_op_names, end_op_names)
model_input_after_fold = sim.session.graph.get_tensor_by_name("input_1:0")
model_output_after_fold = sim.session.graph.get_tensor_by_name('predictions/Softmax:0')
output_fold_after_fold = sim.session.run(model_output_after_fold, feed_dict={model_input_after_fold: dummy_val})
assert np.allclose(output_baseline, output_fold_after_fold, atol=1e-2)
sim.session.close()
def test_remove_bn_update_ops_with_training_ops(self):
""" verify that the BNs UPDATE_OPS are removed correctly after training ops are added (QAT) """
tf.compat.v1.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
tf.keras.applications.mobilenet_v2.MobileNetV2(weights=None, input_shape=(224, 224, 3))
sess = tf.compat.v1.Session(graph=graph)
initialize_uninitialized_vars(sess)
start_op_names = ["input_1"]
output_op_names = ["predictions/Softmax"]
validation_inputs = ["labels"]
add_image_net_computational_nodes_in_graph(sess, logits_name=output_op_names[0] + ':0', num_classes=1000)
# Update BNs with mutable BNs.
updated_sess = modify_sess_bn_mutable(sess, start_op_names, output_op_names)
with updated_sess.graph.as_default():
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
assert len(update_ops) == 104
# set all mutable BNs in training mode.
set_mutable_bn_is_training_var(updated_sess, True)
self._training_loop(updated_sess, update_ops, start_op_names, validation_inputs)
# Find BNs UPDATE_OPS programmatically.
update_ops_programmatically = []
conn_graph = ConnectedGraph(updated_sess.graph, start_op_names, output_op_names)
bn_conn_graph_ops = tuple(get_active_bn_ops(conn_graph))
for bn_conn_graph_op in bn_conn_graph_ops:
bn_tf_op = bn_conn_graph_op.get_tf_op_with_io_tensor().op
assign_moving_avg_op = BNUtils.get_assign_moving_avg_op(bn_tf_op)
assign_moving_avg_op_1 = BNUtils.get_assign_moving_avg_1_op(bn_tf_op)
update_ops_programmatically.append(assign_moving_avg_op)
update_ops_programmatically.append(assign_moving_avg_op_1)
with updated_sess.graph.as_default():
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
assert len(update_ops_programmatically) == len(update_ops)
# Remove BNs UPDATE_OPS
for bn_conn_graph_op in bn_conn_graph_ops:
bn_tf_op = bn_conn_graph_op.get_tf_op_with_io_tensor().op
BNUtils.remove_bn_op_from_update_ops(updated_sess, bn_tf_op)
with updated_sess.graph.as_default():
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
# check that UPDATE_OPS list is empty
assert not update_ops
sess.close()
updated_sess.close()
@staticmethod
def _training_loop(session, update_ops, data_inputs, validation_inputs):
""" utility to add training ops """
dummy_input = np.random.randn(1, 224, 224, 3)
dummy_labels = np.random.randn(1, 1000)
with session.graph.as_default():
loss_op = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.LOSSES)[0]
global_step_op = tf.compat.v1.train.create_global_step()
# Define an optimizer
optimizer_op = tf.compat.v1.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
with tf.control_dependencies(update_ops):
train_op = optimizer_op.minimize(loss_op, global_step=global_step_op)
initialize_uninitialized_vars(session)
input_label_tensors = [session.graph.get_tensor_by_name(input_label + ':0')
for input_label in tuple(data_inputs) + tuple(validation_inputs)]
input_label_tensors_dict = {input_label_tensors[0]: dummy_input,
input_label_tensors[1]: dummy_labels}
feed_dict = {**input_label_tensors_dict}
for i in range(2):
batch_loss_val, _ = session.run([loss_op, train_op], feed_dict=feed_dict)
@staticmethod
def _reestimate_and_compare_results(sim, dataset, num_batches, start_op_names, output_op_names):
mean_tf_vars, variance_tf_vars, momentum_tf_vars, is_training_tf_vars = _get_all_tf_bn_vars_list(sim)
model_input = sim.session.graph.get_tensor_by_name(start_op_names[0] + ':0')
model_output = sim.session.graph.get_tensor_by_name(output_op_names[0] + ':0')
dummy_val = np.random.randn(1, *model_input.shape[1:])
feed_dict_data = {model_input: dummy_val}
for bn_training in is_training_tf_vars:
feed_dict_data[bn_training] = True
sim.session.run(model_output, feed_dict=feed_dict_data)
mean_ori, variance_ori, momentum_ori, is_training_ori = get_all_status(sim.session,
mean_tf_vars,
variance_tf_vars,
momentum_tf_vars,
is_training_tf_vars)
with reestimate_bn_stats(sim=sim, start_op_names=start_op_names, output_op_names=output_op_names,
dataset=dataset, num_batches=num_batches):
mean_est, variance_est, momentum_est, is_training_est = get_all_status(sim.session,
mean_tf_vars,
variance_tf_vars,
momentum_tf_vars,
is_training_tf_vars)
        # Sanity check (apply_bn_re_estimation): re-estimation updates the running mean & var, training is
        # set to False for eval(), momentum does not change
assert not is_two_dict_close_numpy_array(mean_ori, mean_est)
assert not is_two_dict_close_numpy_array(variance_ori, variance_est)
assert not is_dict_close_numpy_array_zeros(variance_est)
assert is_two_dict_close_float(momentum_ori, momentum_est)
assert is_two_dict_close_bool(is_training_ori, is_training_est)
mean_restored, variance_restored, momentum_restored, is_training_restored = get_all_status(sim.session,
mean_tf_vars,
variance_tf_vars,
momentum_tf_vars,
is_training_tf_vars)
        # Sanity check (train_mode): mean & var are restored, training is set to True for train(), momentum does not change
assert is_two_dict_close_numpy_array(mean_ori, mean_restored)
assert is_two_dict_close_numpy_array(variance_ori, variance_restored)
assert is_two_dict_close_float(momentum_ori, momentum_restored)
assert is_two_dict_close_bool(is_training_ori, is_training_restored)
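# Hedged sketch of the re-estimation + fold workflow exercised above; every API call is the same
# one used in test_model_rewriter_ptq_reestimation_fold, only this wrapper function is hypothetical.
def _example_reestimate_then_fold(sim, dataset, num_batches, start_op_names, output_op_names):
    # Re-estimate the BN running statistics over `num_batches` batches of `dataset`; the
    # re-estimated statistics are active inside the context and restored on exit.
    with reestimate_bn_stats(sim=sim, start_op_names=start_op_names, output_op_names=output_op_names,
                             dataset=dataset, num_batches=num_batches):
        pass
    # Fold the mutable batch norms into the preceding scale parameters, as done in the test above.
    fold_all_batch_norms_to_scale(sim, start_op_names, output_op_names)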
|
1dab38d3d25b8d0bf410ffc6c6a74a6fe0c21946
|
302ce5ab1045ee93845608c96580c63d54d730af
|
/src/spikeinterface/sortingcomponents/tools.py
|
45b9079ea9fefcec98087cf320ab3d74d005c9d0
|
[
"MIT"
] |
permissive
|
SpikeInterface/spikeinterface
|
f900b62720860b2881d2e6b5fa4441e0e560f625
|
ee2237b3f5ce2347b2ec9df90e97b0ee6c738dcf
|
refs/heads/main
| 2023-09-02T11:27:54.687021
| 2023-09-01T13:48:29
| 2023-09-01T13:48:29
| 196,581,117
| 295
| 133
|
MIT
| 2023-09-14T19:12:16
| 2019-07-12T13:07:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
tools.py
|
import numpy as np
from spikeinterface.core.node_pipeline import run_node_pipeline, ExtractSparseWaveforms, PeakRetriever
def make_multi_method_doc(methods, ident=" "):
doc = ""
doc += "method: " + ", ".join(f"'{method.name}'" for method in methods) + "\n"
doc += ident + " Method to use.\n"
for method in methods:
doc += "\n"
doc += ident + f"arguments for method='{method.name}'"
for line in method.params_doc.splitlines():
doc += ident + line + "\n"
return doc
def get_prototype_spike(recording, peaks, job_kwargs, nb_peaks=1000, ms_before=0.5, ms_after=0.5):
nb_peaks = min(len(peaks), nb_peaks)
idx = np.sort(np.random.choice(len(peaks), nb_peaks, replace=False))
peak_retriever = PeakRetriever(recording, peaks[idx])
sparse_waveforms = ExtractSparseWaveforms(
recording,
parents=[peak_retriever],
ms_before=ms_before,
ms_after=ms_after,
return_output=True,
radius_um=5,
)
nbefore = sparse_waveforms.nbefore
waveforms = run_node_pipeline(recording, [peak_retriever, sparse_waveforms], job_kwargs=job_kwargs)
prototype = np.median(waveforms[:, :, 0] / (waveforms[:, nbefore, 0][:, np.newaxis]), axis=0)
return prototype
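# Hedged usage sketch for make_multi_method_doc(): the two stand-in method classes are hypothetical;
# only their `name` and `params_doc` attributes matter to the helper above.
if __name__ == "__main__":
    class _MethodA:
        name = "center_of_mass"
        params_doc = "radius_um: float\n    Radius used to select channels."
    class _MethodB:
        name = "monopolar_triangulation"
        params_doc = "max_distance_um: float\n    Maximum distance allowed for the fit."
    print(make_multi_method_doc([_MethodA, _MethodB]))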
|
48cbfb507ac1b61978f84d14be4add56b85ccb98
|
e3db4c51862ae9d2504db4ca9693e758d2add747
|
/train_twec.py
|
14d80fbec09b5d6fe4a6ebeb32a3a691b31c217e
|
[
"MIT"
] |
permissive
|
rajaswa/DRIFT
|
59f2398ca2d42958c8dd25dea159259b9b13d23f
|
21977752ac2468057b7661e460d6a5bd5e5fe73c
|
refs/heads/main
| 2023-05-23T19:34:52.967311
| 2023-01-26T12:41:11
| 2023-01-26T12:41:11
| 347,655,676
| 115
| 11
|
MIT
| 2021-07-02T08:43:20
| 2021-03-14T14:14:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,252
|
py
|
train_twec.py
|
import os
import time
import numpy as np
import streamlit as st
from twec.twec import TWEC
def train(
data_dir="./data/",
embedding_size=300,
skipgram=False,
siter=10,
diter=10,
negative_samples=10,
window_size=5,
output_path="./model",
overwrite_compass=True,
streamlit=False,
component=None,
):
if streamlit and component is None:
raise ValueError("`component` cannot be `None` when `streamlit` is `True`.")
aligner = TWEC(
size=embedding_size,
sg=int(skipgram),
siter=siter,
diter=diter,
workers=4,
ns=negative_samples,
window=window_size,
opath=output_path,
)
if streamlit:
component.write("Training")
progress = 0.0
progress_bar = component.progress(progress)
output = component.beta_expander("Output")
all_files = sorted(os.listdir(data_dir))
num_files = len(all_files)
start = time.time()
# train the compass: the text should be the concatenation of the text from the slices
aligner.train_compass(
os.path.join(data_dir, "compass.txt"), overwrite=overwrite_compass
)
# keep an eye on the overwrite behaviour
end = time.time()
    compass_out = f"Time Taken for TWEC Pre-Training: {(end - start)} s"
if not streamlit:
print(compass_out)
else:
progress += 1 / num_files
progress_bar.progress(np.round(progress, decimals=1))
with output:
st.write(compass_out)
slices = {}
for file in all_files:
if file != "compass.txt":
start = time.time()
slices[file.split(".")[0]] = aligner.train_slice(
os.path.join(data_dir, file), save=True
)
end = time.time()
            year_out = f"Time Taken for TWEC Fine-tuning for {file.split('.')[0]}: {(end - start)} s"
if not streamlit:
print(year_out)
else:
progress += 1 / num_files
if progress > 1.0:
progress = 1.0
progress_bar.progress(progress)
with output:
st.write(year_out)
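# Hedged sketch of the data_dir layout assumed by train() above: one "compass.txt" holding the
# concatenated text of all the time slices plus one "<slice>.txt" file per slice. This helper is
# hypothetical and simply (re)builds compass.txt from the slice files.
def build_compass_file(data_dir="./data/"):
    slice_files = sorted(f for f in os.listdir(data_dir) if f.endswith(".txt") and f != "compass.txt")
    with open(os.path.join(data_dir, "compass.txt"), "w") as compass:
        for name in slice_files:
            with open(os.path.join(data_dir, name)) as slice_file:
                compass.write(slice_file.read())
                compass.write("\n")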
if __name__ == "__main__":
train()
|
91d3021cdb64dd3ab2f3154e99c1e524b8dbea73
|
a838c711a218bbdb661132eaf252fa417ca8f273
|
/influxdb_client/domain/template_summary_summary.py
|
5ca10021a9cd7fff478ba5da131eadcf5ff21422
|
[
"MIT"
] |
permissive
|
influxdata/influxdb-client-python
|
9ae84038c1145466dd40c3a9096a74983f29bedb
|
1ec64b7e1039c891ac3a667ee6697731c61ddbaf
|
refs/heads/master
| 2023-08-23T09:14:38.727662
| 2023-08-09T03:59:54
| 2023-08-09T03:59:54
| 192,689,401
| 623
| 215
|
MIT
| 2023-09-11T05:46:26
| 2019-06-19T08:17:20
|
Python
|
UTF-8
|
Python
| false
| false
| 12,436
|
py
|
template_summary_summary.py
|
# coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
class TemplateSummarySummary(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'buckets': 'list[TemplateSummarySummaryBuckets]',
'checks': 'list[CheckDiscriminator]',
'dashboards': 'list[TemplateSummarySummaryDashboards]',
'labels': 'list[TemplateSummaryLabel]',
'label_mappings': 'list[TemplateSummarySummaryLabelMappings]',
'missing_env_refs': 'list[str]',
'missing_secrets': 'list[str]',
'notification_endpoints': 'list[NotificationEndpointDiscriminator]',
'notification_rules': 'list[TemplateSummarySummaryNotificationRules]',
'tasks': 'list[TemplateSummarySummaryTasks]',
'telegraf_configs': 'list[TelegrafRequest]',
'variables': 'list[TemplateSummarySummaryVariables]'
}
attribute_map = {
'buckets': 'buckets',
'checks': 'checks',
'dashboards': 'dashboards',
'labels': 'labels',
'label_mappings': 'labelMappings',
'missing_env_refs': 'missingEnvRefs',
'missing_secrets': 'missingSecrets',
'notification_endpoints': 'notificationEndpoints',
'notification_rules': 'notificationRules',
'tasks': 'tasks',
'telegraf_configs': 'telegrafConfigs',
'variables': 'variables'
}
def __init__(self, buckets=None, checks=None, dashboards=None, labels=None, label_mappings=None, missing_env_refs=None, missing_secrets=None, notification_endpoints=None, notification_rules=None, tasks=None, telegraf_configs=None, variables=None): # noqa: E501,D401,D403
"""TemplateSummarySummary - a model defined in OpenAPI.""" # noqa: E501
self._buckets = None
self._checks = None
self._dashboards = None
self._labels = None
self._label_mappings = None
self._missing_env_refs = None
self._missing_secrets = None
self._notification_endpoints = None
self._notification_rules = None
self._tasks = None
self._telegraf_configs = None
self._variables = None
self.discriminator = None
if buckets is not None:
self.buckets = buckets
if checks is not None:
self.checks = checks
if dashboards is not None:
self.dashboards = dashboards
if labels is not None:
self.labels = labels
if label_mappings is not None:
self.label_mappings = label_mappings
if missing_env_refs is not None:
self.missing_env_refs = missing_env_refs
if missing_secrets is not None:
self.missing_secrets = missing_secrets
if notification_endpoints is not None:
self.notification_endpoints = notification_endpoints
if notification_rules is not None:
self.notification_rules = notification_rules
if tasks is not None:
self.tasks = tasks
if telegraf_configs is not None:
self.telegraf_configs = telegraf_configs
if variables is not None:
self.variables = variables
@property
def buckets(self):
"""Get the buckets of this TemplateSummarySummary.
:return: The buckets of this TemplateSummarySummary.
:rtype: list[TemplateSummarySummaryBuckets]
""" # noqa: E501
return self._buckets
@buckets.setter
def buckets(self, buckets):
"""Set the buckets of this TemplateSummarySummary.
:param buckets: The buckets of this TemplateSummarySummary.
:type: list[TemplateSummarySummaryBuckets]
""" # noqa: E501
self._buckets = buckets
@property
def checks(self):
"""Get the checks of this TemplateSummarySummary.
:return: The checks of this TemplateSummarySummary.
:rtype: list[CheckDiscriminator]
""" # noqa: E501
return self._checks
@checks.setter
def checks(self, checks):
"""Set the checks of this TemplateSummarySummary.
:param checks: The checks of this TemplateSummarySummary.
:type: list[CheckDiscriminator]
""" # noqa: E501
self._checks = checks
@property
def dashboards(self):
"""Get the dashboards of this TemplateSummarySummary.
:return: The dashboards of this TemplateSummarySummary.
:rtype: list[TemplateSummarySummaryDashboards]
""" # noqa: E501
return self._dashboards
@dashboards.setter
def dashboards(self, dashboards):
"""Set the dashboards of this TemplateSummarySummary.
:param dashboards: The dashboards of this TemplateSummarySummary.
:type: list[TemplateSummarySummaryDashboards]
""" # noqa: E501
self._dashboards = dashboards
@property
def labels(self):
"""Get the labels of this TemplateSummarySummary.
:return: The labels of this TemplateSummarySummary.
:rtype: list[TemplateSummaryLabel]
""" # noqa: E501
return self._labels
@labels.setter
def labels(self, labels):
"""Set the labels of this TemplateSummarySummary.
:param labels: The labels of this TemplateSummarySummary.
:type: list[TemplateSummaryLabel]
""" # noqa: E501
self._labels = labels
@property
def label_mappings(self):
"""Get the label_mappings of this TemplateSummarySummary.
:return: The label_mappings of this TemplateSummarySummary.
:rtype: list[TemplateSummarySummaryLabelMappings]
""" # noqa: E501
return self._label_mappings
@label_mappings.setter
def label_mappings(self, label_mappings):
"""Set the label_mappings of this TemplateSummarySummary.
:param label_mappings: The label_mappings of this TemplateSummarySummary.
:type: list[TemplateSummarySummaryLabelMappings]
""" # noqa: E501
self._label_mappings = label_mappings
@property
def missing_env_refs(self):
"""Get the missing_env_refs of this TemplateSummarySummary.
:return: The missing_env_refs of this TemplateSummarySummary.
:rtype: list[str]
""" # noqa: E501
return self._missing_env_refs
@missing_env_refs.setter
def missing_env_refs(self, missing_env_refs):
"""Set the missing_env_refs of this TemplateSummarySummary.
:param missing_env_refs: The missing_env_refs of this TemplateSummarySummary.
:type: list[str]
""" # noqa: E501
self._missing_env_refs = missing_env_refs
@property
def missing_secrets(self):
"""Get the missing_secrets of this TemplateSummarySummary.
:return: The missing_secrets of this TemplateSummarySummary.
:rtype: list[str]
""" # noqa: E501
return self._missing_secrets
@missing_secrets.setter
def missing_secrets(self, missing_secrets):
"""Set the missing_secrets of this TemplateSummarySummary.
:param missing_secrets: The missing_secrets of this TemplateSummarySummary.
:type: list[str]
""" # noqa: E501
self._missing_secrets = missing_secrets
@property
def notification_endpoints(self):
"""Get the notification_endpoints of this TemplateSummarySummary.
:return: The notification_endpoints of this TemplateSummarySummary.
:rtype: list[NotificationEndpointDiscriminator]
""" # noqa: E501
return self._notification_endpoints
@notification_endpoints.setter
def notification_endpoints(self, notification_endpoints):
"""Set the notification_endpoints of this TemplateSummarySummary.
:param notification_endpoints: The notification_endpoints of this TemplateSummarySummary.
:type: list[NotificationEndpointDiscriminator]
""" # noqa: E501
self._notification_endpoints = notification_endpoints
@property
def notification_rules(self):
"""Get the notification_rules of this TemplateSummarySummary.
:return: The notification_rules of this TemplateSummarySummary.
:rtype: list[TemplateSummarySummaryNotificationRules]
""" # noqa: E501
return self._notification_rules
@notification_rules.setter
def notification_rules(self, notification_rules):
"""Set the notification_rules of this TemplateSummarySummary.
:param notification_rules: The notification_rules of this TemplateSummarySummary.
:type: list[TemplateSummarySummaryNotificationRules]
""" # noqa: E501
self._notification_rules = notification_rules
@property
def tasks(self):
"""Get the tasks of this TemplateSummarySummary.
:return: The tasks of this TemplateSummarySummary.
:rtype: list[TemplateSummarySummaryTasks]
""" # noqa: E501
return self._tasks
@tasks.setter
def tasks(self, tasks):
"""Set the tasks of this TemplateSummarySummary.
:param tasks: The tasks of this TemplateSummarySummary.
:type: list[TemplateSummarySummaryTasks]
""" # noqa: E501
self._tasks = tasks
@property
def telegraf_configs(self):
"""Get the telegraf_configs of this TemplateSummarySummary.
:return: The telegraf_configs of this TemplateSummarySummary.
:rtype: list[TelegrafRequest]
""" # noqa: E501
return self._telegraf_configs
@telegraf_configs.setter
def telegraf_configs(self, telegraf_configs):
"""Set the telegraf_configs of this TemplateSummarySummary.
:param telegraf_configs: The telegraf_configs of this TemplateSummarySummary.
:type: list[TelegrafRequest]
""" # noqa: E501
self._telegraf_configs = telegraf_configs
@property
def variables(self):
"""Get the variables of this TemplateSummarySummary.
:return: The variables of this TemplateSummarySummary.
:rtype: list[TemplateSummarySummaryVariables]
""" # noqa: E501
return self._variables
@variables.setter
def variables(self, variables):
"""Set the variables of this TemplateSummarySummary.
:param variables: The variables of this TemplateSummarySummary.
:type: list[TemplateSummarySummaryVariables]
""" # noqa: E501
self._variables = variables
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in self.openapi_types.items():
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, TemplateSummarySummary):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
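# Hedged usage sketch: the values below are illustrative only and simply exercise the generated
# model class defined above.
if __name__ == "__main__":
    summary = TemplateSummarySummary(missing_env_refs=["BUCKET_ENV_REF"],
                                     missing_secrets=["influx-token"])
    print(summary.to_dict()) # plain dict keyed by the python attribute names
    print(summary) # __repr__ delegates to to_str()/pprint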
|
ce9397dd709771c688eed8724cdc9bf08c30bd91
|
e3cfab409afb5ff9a0b3812bf848be6ca9239cee
|
/pygeodesy/triaxials.py
|
a4e2cc6816a1e0466e3f2a5fea3607abf5454322
|
[
"MIT"
] |
permissive
|
mrJean1/PyGeodesy
|
565266a4f7f6cda5abe98e915bbd868f6cbe1760
|
eba35704b248a7a0388b30f3cea19793921e99b7
|
refs/heads/master
| 2023-08-23T13:58:20.069917
| 2023-08-20T18:50:45
| 2023-08-20T18:50:45
| 68,028,481
| 283
| 66
| null | 2022-04-09T00:40:52
| 2016-09-12T16:49:10
|
Python
|
UTF-8
|
Python
| false
| false
| 60,718
|
py
|
triaxials.py
|
# -*- coding: utf-8 -*-
u'''Triaxal ellipsoid classes I{ordered} L{Triaxial} and I{unordered} L{Triaxial_} and Jacobi
conformal projections L{JacobiConformal} and L{JacobiConformalSpherical}, transcoded from
I{Charles Karney}'s C++ class U{JacobiConformal<https://GeographicLib.SourceForge.io/C++/doc/
classGeographicLib_1_1JacobiConformal.html#details>} to pure Python and miscellaneous classes
L{BetaOmega2Tuple}, L{BetaOmega3Tuple}, L{Jacobi2Tuple} and L{TriaxialError}.
Copyright (C) U{Charles Karney<mailto:Karney@Alum.MIT.edu>} (2008-2023). For more information,
see the U{GeographicLib<https://GeographicLib.SourceForge.io>} documentation.
@see: U{Geodesics on a triaxial ellipsoid<https://WikiPedia.org/wiki/Geodesics_on_an_ellipsoid#
Geodesics_on_a_triaxial_ellipsoid>} and U{Triaxial coordinate systems and their geometrical
interpretation<https://www.Topo.Auth.GR/wp-content/uploads/sites/111/2021/12/09_Panou.pdf>}.
@var Triaxials.Amalthea: Triaxial(name='Amalthea', a=125000, b=73000, c=64000, e2ab=0.658944, e2bc=0.231375493, e2ac=0.737856, volume=2446253479595252, area=93239507787.490371704, area_p=93212299402.670425415)
@var Triaxials.Ariel: Triaxial(name='Ariel', a=581100, b=577900, c=577700, e2ab=0.01098327, e2bc=0.000692042, e2ac=0.011667711, volume=812633172614203904, area=4211301462766.580078125, area_p=4211301574065.829589844)
@var Triaxials.Earth: Triaxial(name='Earth', a=6378173.435, b=6378103.9, c=6356754.399999999, e2ab=0.000021804, e2bc=0.006683418, e2ac=0.006705077, volume=1083208241574987694080, area=510065911057441.0625, area_p=510065915922713.6875)
@var Triaxials.Enceladus: Triaxial(name='Enceladus', a=256600, b=251400, c=248300, e2ab=0.040119337, e2bc=0.024509841, e2ac=0.06364586, volume=67094551514082248, area=798618496278.596679688, area_p=798619018175.109863281)
@var Triaxials.Europa: Triaxial(name='Europa', a=1564130, b=1561230, c=1560930, e2ab=0.003704694, e2bc=0.000384275, e2ac=0.004087546, volume=15966575194402123776, area=30663773697323.51953125, area_p=30663773794562.45703125)
@var Triaxials.Io: Triaxial(name='Io', a=1829400, b=1819300, c=1815700, e2ab=0.011011391, e2bc=0.003953651, e2ac=0.014921506, volume=25313121117889765376, area=41691875849096.7421875, area_p=41691877397441.2109375)
@var Triaxials.Mars: Triaxial(name='Mars', a=3394600, b=3393300, c=3376300, e2ab=0.000765776, e2bc=0.009994646, e2ac=0.010752768, volume=162907283585817247744, area=144249140795107.4375, area_p=144249144150662.15625)
@var Triaxials.Mimas: Triaxial(name='Mimas', a=207400, b=196800, c=190600, e2ab=0.09960581, e2bc=0.062015624, e2ac=0.155444317, volume=32587072869017956, area=493855762247.691894531, area_p=493857714107.9375)
@var Triaxials.Miranda: Triaxial(name='Miranda', a=240400, b=234200, c=232900, e2ab=0.050915557, e2bc=0.011070811, e2ac=0.061422691, volume=54926187094835456, area=698880863325.756958008, area_p=698881306767.950317383)
@var Triaxials.Moon: Triaxial(name='Moon', a=1735550, b=1735324, c=1734898, e2ab=0.000260419, e2bc=0.000490914, e2ac=0.000751206, volume=21886698675223740416, area=37838824729886.09375, area_p=37838824733332.2265625)
@var Triaxials.Tethys: Triaxial(name='Tethys', a=535600, b=528200, c=525800, e2ab=0.027441672, e2bc=0.009066821, e2ac=0.036259685, volume=623086233855821440, area=3528073490771.394042969, area_p=3528074261832.738769531)
@var Triaxials.WGS84_35: Triaxial(name='WGS84_35', a=6378172, b=6378102, c=6356752.314245179, e2ab=0.00002195, e2bc=0.006683478, e2ac=0.006705281, volume=1083207319768789942272, area=510065621722018.125, area_p=510065626587483.3125)
'''
# make sure int/int division yields float quotient, see .basics
from __future__ import division as _; del _ # PYCHOK semicolon
from pygeodesy.basics import isscalar, map1, _zip, _ValueError
from pygeodesy.constants import EPS, EPS0, EPS02, EPS4, _EPS2e4, INT0, PI2, PI_3, PI4, \
_0_0, _0_5, _1_0, _N_2_0, float0_, isfinite, isnear1, \
_4_0 # PYCHOK used!
from pygeodesy.datums import Datum, _spherical_datum, _WGS84, Ellipsoid, Fmt
# from pygeodesy.dms import toDMS # _MODS
# from pygeodesy.ellipsoids import Ellipsoid # from .datums
# from pygeodesy.elliptic import Elliptic # _MODS
# from pygeodesy.errors import _ValueError # from .basics
from pygeodesy.fmath import Fdot, fdot, fmean_, hypot, hypot_, norm2
from pygeodesy.fsums import Fsum, fsumf_, fsum1f_
from pygeodesy.interns import NN, _a_, _b_, _beta_, _c_, _distant_, _finite_, \
_height_, _inside_, _near_, _not_, _NOTEQUAL_, _null_, \
_opposite_, _outside_, _SPACE_, _spherical_, _too_, \
_x_, _y_
# from pygeodesy.lazily import _ALL_LAZY, _ALL_MODS as _MODS # from .vector3d
from pygeodesy.named import _NamedEnum, _NamedEnumItem, _NamedTuple, _Pass, \
_lazyNamedEnumItem as _lazy
from pygeodesy.namedTuples import LatLon3Tuple, Vector3Tuple, Vector4Tuple
from pygeodesy.props import Property_RO, property_RO
# from pygeodesy.streprs import Fmt # from .datums
from pygeodesy.units import Degrees, Float, Height_, Meter, Meter2, Meter3, \
Radians, Radius, Scalar_
from pygeodesy.utily import asin1, atan2d, km2m, m2km, SinCos2, sincos2d_
from pygeodesy.vector3d import _otherV3d, Vector3d, _ALL_LAZY, _MODS
from math import atan2, fabs, sqrt
__all__ = _ALL_LAZY.triaxials
__version__ = '23.08.19'
_not_ordered_ = _not_('ordered')
_omega_ = 'omega'
_TRIPS = 537 # 52..58, Eberly 1074?
class _NamedTupleTo(_NamedTuple): # in .testNamedTuples
'''(INTERNAL) Base for C{-.toDegrees}, C{-.toRadians}.
'''
def _toDegrees(self, a, b, *c, **toDMS_kwds):
if toDMS_kwds:
toDMS = _MODS.dms.toDMS
a = toDMS(a.toDegrees(), **toDMS_kwds)
b = toDMS(b.toDegrees(), **toDMS_kwds)
elif isinstance(a, Degrees) and \
isinstance(b, Degrees):
return self
else:
a, b = a.toDegrees(), b.toDegrees()
return self.classof(a, b, *c, name=self.name)
def _toRadians(self, a, b, *c):
return self if isinstance(a, Radians) and \
isinstance(b, Radians) else \
self.classof(a.toRadians(), b.toRadians(),
*c, name=self.name)
class BetaOmega2Tuple(_NamedTupleTo):
'''2-Tuple C{(beta, omega)} with I{ellipsoidal} lat- and
longitude C{beta} and C{omega} both in L{Radians} (or
L{Degrees}).
'''
_Names_ = (_beta_, _omega_)
_Units_ = (_Pass, _Pass)
def toDegrees(self, **toDMS_kwds):
'''Convert this L{BetaOmega2Tuple} to L{Degrees} or C{toDMS}.
@return: L{BetaOmega2Tuple}C{(beta, omega)} with
C{beta} and C{omega} both in L{Degrees}
or as a L{toDMS} string provided some
B{C{toDMS_kwds}} keyword arguments are
specified.
'''
return _NamedTupleTo._toDegrees(self, *self, **toDMS_kwds)
def toRadians(self):
'''Convert this L{BetaOmega2Tuple} to L{Radians}.
@return: L{BetaOmega2Tuple}C{(beta, omega)} with
C{beta} and C{omega} both in L{Radians}.
'''
return _NamedTupleTo._toRadians(self, *self)
class BetaOmega3Tuple(_NamedTupleTo):
'''3-Tuple C{(beta, omega, height)} with I{ellipsoidal} lat- and
longitude C{beta} and C{omega} both in L{Radians} (or L{Degrees})
and the C{height}, rather the (signed) I{distance} to the triaxial's
surface (measured along the radial line to the triaxial's center)
in C{meter}, conventionally.
'''
_Names_ = BetaOmega2Tuple._Names_ + (_height_,)
_Units_ = BetaOmega2Tuple._Units_ + ( Meter,)
def toDegrees(self, **toDMS_kwds):
'''Convert this L{BetaOmega3Tuple} to L{Degrees} or C{toDMS}.
@return: L{BetaOmega3Tuple}C{(beta, omega, height)} with
C{beta} and C{omega} both in L{Degrees} or as a
L{toDMS} string provided some B{C{toDMS_kwds}}
keyword arguments are specified.
'''
return _NamedTupleTo._toDegrees(self, *self, **toDMS_kwds)
def toRadians(self):
'''Convert this L{BetaOmega3Tuple} to L{Radians}.
@return: L{BetaOmega3Tuple}C{(beta, omega, height)} with
C{beta} and C{omega} both in L{Radians}.
'''
return _NamedTupleTo._toRadians(self, *self)
def to2Tuple(self):
'''Reduce this L{BetaOmega3Tuple} to a L{BetaOmega2Tuple}.
'''
return BetaOmega2Tuple(*self[:2])
class Jacobi2Tuple(_NamedTupleTo):
'''2-Tuple C{(x, y)} with a Jacobi Conformal C{x} and C{y}
projection, both in L{Radians} (or L{Degrees}).
'''
_Names_ = (_x_, _y_)
_Units_ = (_Pass, _Pass)
def toDegrees(self, **toDMS_kwds):
'''Convert this L{Jacobi2Tuple} to L{Degrees} or C{toDMS}.
@return: L{Jacobi2Tuple}C{(x, y)} with C{x} and C{y}
both in L{Degrees} or as a L{toDMS} string
provided some B{C{toDMS_kwds}} keyword
arguments are specified.
'''
return _NamedTupleTo._toDegrees(self, *self, **toDMS_kwds)
def toRadians(self):
'''Convert this L{Jacobi2Tuple} to L{Radians}.
@return: L{Jacobi2Tuple}C{(x, y)} with C{x}
and C{y} both in L{Radians}.
'''
return _NamedTupleTo._toRadians(self, *self)
class Triaxial_(_NamedEnumItem):
'''I{Unordered} triaxial ellipsoid and base class.
Triaxial ellipsoids with right-handed semi-axes C{a}, C{b} and C{c}, oriented
such that the large principal ellipse C{ab} is the equator I{Z}=0, I{beta}=0,
while the small principal ellipse C{ac} is the prime meridian, plane I{Y}=0,
I{omega}=0.
The four umbilic points, C{abs}(I{omega}) = C{abs}(I{beta}) = C{PI/2}, lie on
the middle principal ellipse C{bc} in plane I{X}=0, I{omega}=C{PI/2}.
@note: I{Geodetic} C{lat}- and C{lon}gitudes are in C{degrees}, I{geodetic}
C{phi} and C{lam}bda are in C{radians}, but I{ellipsoidal} lat- and
longitude C{beta} and C{omega} are in L{Radians} by default (or in
L{Degrees} if converted).
'''
_ijk = _kji = None
_unordered = True
def __init__(self, a_triaxial, b=None, c=None, name=NN):
'''New I{unordered} L{Triaxial_}.
@arg a_triaxial: Large, C{X} semi-axis (C{scalar}, conventionally in
C{meter}) or an other L{Triaxial} or L{Triaxial_} instance.
@kwarg b: Middle, C{Y} semi-axis (C{meter}, same units as B{C{a}}), required
if C{B{a_triaxial} is scalar}, ignored otherwise.
@kwarg c: Small, C{Z} semi-axis (C{meter}, same units as B{C{a}}), required
if C{B{a_triaxial} is scalar}, ignored otherwise.
@kwarg name: Optional name (C{str}).
@raise TriaxialError: Invalid semi-axis or -axes.
'''
try:
a = a_triaxial
t = a._abc3 if isinstance(a, Triaxial_) else (
Radius(a=a), Radius(b=b), Radius(c=c))
except (TypeError, ValueError) as x:
raise TriaxialError(a=a, b=b, c=c, cause=x)
if name:
self.name = name
a, b, c = self._abc3 = t
if self._unordered: # == not isinstance(self, Triaxial)
s, _, t = sorted(t)
if not (isfinite(t) and s > 0):
raise TriaxialError(a=a, b=b, c=c) # txt=_invalid_
elif not (isfinite(a) and a >= b >= c > 0):
raise TriaxialError(a=a, b=b, c=c, txt=_not_ordered_)
elif not (a > c and self._a2c2 > 0 and self.e2ac > 0):
raise TriaxialError(a=a, c=c, e2ac=self.e2ac, txt=_spherical_)
def __str__(self):
return self.toStr()
@Property_RO
def a(self):
'''Get the (largest) C{x} semi-axis (C{meter}, conventionally).
'''
a, _, _ = self._abc3
return a
@Property_RO
def _a2b2(self):
'''(INTERNAL) Get C{a**2 - b**2} == E_sub_e**2.
'''
a, b, _ = self._abc3
return ((a - b) * (a + b)) if a != b else _0_0
@Property_RO
def _a2_b2(self):
'''(INTERNAL) Get C{(a/b)**2}.
'''
a, b, _ = self._abc3
return (a / b)**2 if a != b else _1_0
@Property_RO
def _a2c2(self):
'''(INTERNAL) Get C{a**2 - c**2} == E_sub_x**2.
'''
a, _, c = self._abc3
return ((a - c) * (a + c)) if a != c else _0_0
@Property_RO
def area(self):
'''Get the surface area (C{meter} I{squared}).
'''
c, b, a = sorted(self._abc3)
if a > c:
a = Triaxial(a, b, c).area if a > b else \
Ellipsoid(a, b=c).areax # a == b
else: # a == c == b
a = Meter2(area=a**2 * PI4)
return a
def area_p(self, p=1.6075):
'''I{Approximate} the surface area (C{meter} I{squared}).
@kwarg p: Exponent (C{scalar} > 0), 1.6 for near-spherical or 1.5849625007
for "near-flat" triaxials.
@see: U{Surface area<https://WikiPedia.org/wiki/Ellipsoid#Approximate_formula>}.
'''
a, b, c = self._abc3
if a == b == c:
a *= a
else:
_p = pow
a = _p(fmean_(_p(a * b, p), _p(a * c, p), _p(b * c, p)), _1_0 / p)
return Meter2(area_p=a * PI4)
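    # Illustrative sketch of the approximation implemented above, for semi-axes
    # a, b, c and exponent p > 0:
    #   area_p ~= 4 * PI * (((a*b)**p + (a*c)**p + (b*c)**p) / 3)**(1 / p)
    # e.g. (not executed):
    #   t = Triaxial(3.0, 2.0, 1.0)
    #   t.area_p()  # approximation, compare with the exact t.area above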
@Property_RO
def b(self):
'''Get the (middle) C{y} semi-axis (C{meter}, same units as B{C{a}}).
'''
_, b, _ = self._abc3
return b
@Property_RO
def _b2c2(self):
'''(INTERNAL) Get C{b**2 - c**2} == E_sub_y**2.
'''
_, b, c = self._abc3
return ((b - c) * (b + c)) if b != c else _0_0
@Property_RO
def c(self):
'''Get the (smallest) C{z} semi-axis (C{meter}, same units as B{C{a}}).
'''
_, _, c = self._abc3
return c
@Property_RO
def _c2_b2(self):
'''(INTERNAL) Get C{(c/b)**2}.
'''
_, b, c = self._abc3
return (c / b)**2 if b != c else _1_0
@Property_RO
def e2ab(self):
'''Get the C{ab} ellipse' I{(1st) eccentricity squared} (C{scalar}), M{1 - (b/a)**2}.
'''
return Float(e2ab=(_1_0 - self._1e2ab) or _0_0)
@Property_RO
def _1e2ab(self):
'''(INTERNAL) Get C{1 - e2ab} == C{(b/a)**2}.
'''
a, b, _ = self._abc3
return (b / a)**2 if a != b else _1_0
@Property_RO
def e2ac(self):
'''Get the C{ac} ellipse' I{(1st) eccentricity squared} (C{scalar}), M{1 - (c/a)**2}.
'''
return Float(e2ac=(_1_0 - self._1e2ac) or _0_0)
@Property_RO
def _1e2ac(self):
'''(INTERNAL) Get C{1 - e2ac} == C{(c/a)**2}.
'''
a, _, c = self._abc3
return (c / a)**2 if a != c else _1_0
@Property_RO
def e2bc(self):
'''Get the C{bc} ellipse' I{(1st) eccentricity squared} (C{scalar}), M{1 - (c/b)**2}.
'''
return Float(e2bc=(_1_0 - self._1e2bc) or _0_0)
_1e2bc = _c2_b2 # C{1 - e2bc} == C{(c/b)**2}
@property_RO
def _Elliptic(self):
'''(INTERNAL) Get class L{Elliptic}, I{once}.
'''
Triaxial_._Elliptic = E = _MODS.elliptic.Elliptic # overwrite property_RO
return E
def hartzell4(self, pov, los=None, name=NN):
'''Compute the intersection of this triaxial's surface with a Line-Of-Sight
from a Point-Of-View in space.
@see: Function L{pygeodesy.hartzell4} for further details.
'''
return hartzell4(pov, los=los, tri_biax=self, name=name)
def height4(self, x_xyz, y=None, z=None, normal=True, eps=EPS):
'''Compute the projection on and the height of a cartesian above or below
this triaxial's surface.
@arg x_xyz: X component (C{scalar}) or a cartesian (C{Cartesian},
L{Ecef9Tuple}, L{Vector3d}, L{Vector3Tuple} or L{Vector4Tuple}).
           @kwarg y: Y component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
           @kwarg z: Z component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
@kwarg normal: If C{True} the projection is perpendicular to (the nearest
point on) this triaxial's surface, otherwise the C{radial}
line to this triaxial's center (C{bool}).
@kwarg eps: Tolerance for root finding and validation (C{scalar}), use a
negative value to skip validation.
@return: L{Vector4Tuple}C{(x, y, z, h)} with the cartesian coordinates
C{x}, C{y} and C{z} of the projection on or the intersection
with and with the height C{h} above or below the triaxial's
surface in C{meter}, conventionally.
@raise TriaxialError: Non-cartesian B{C{xyz}}, invalid B{C{eps}}, no
convergence in root finding or validation failed.
@see: Method L{Ellipsoid.height4} and I{Eberly}'s U{Distance from a Point
to ... an Ellipsoid ...<https://www.GeometricTools.com/Documentation/
DistancePointEllipseEllipsoid.pdf>}.
'''
v, r = _otherV3d_(x_xyz, y, z), self.isSpherical
i, h = None, v.length
if h < EPS0: # EPS
x = y = z = _0_0
h -= min(self._abc3) # nearest
elif r: # .isSpherical
x, y, z = v.times(r / h).xyz
h -= r
else:
x, y, z = v.xyz
try:
if normal: # perpendicular to triaxial
x, y, z, h, i = _normalTo5(x, y, z, self, eps=eps)
else: # radially to triaxial's center
x, y, z = self._radialTo3(z, hypot(x, y), y, x)
h = v.minus_(x, y, z).length
except Exception as e:
raise TriaxialError(x=x, y=y, z=z, cause=e)
if h > 0 and self.sideOf(v, eps=EPS0) < 0:
h = -h # below the surface
return Vector4Tuple(x, y, z, h, iteration=i, name=self.height4.__name__)
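    # Illustrative usage sketch (hypothetical values, output not verified):
    #   t = Triaxials.Earth                 # pre-defined Triaxial, see the registry below
    #   p = t.height4(7000e3, 0, 0)         # a point on the +X axis, outside the surface
    #   # p is a Vector4Tuple(x, y, z, h): the nearest surface point and the
    #   # height h of the input point above (h > 0) or below (h < 0) the surface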
@Property_RO
def isOrdered(self):
'''Is this triaxial I{ordered} and I{not spherical} (C{bool})?
'''
a, b, c = self._abc3
return bool(a >= b > c) # b > c!
@Property_RO
def isSpherical(self):
'''Is this triaxial I{spherical} (C{Radius} or INT0)?
'''
a, b, c = self._abc3
return a if a == b == c else INT0
def normal3d(self, x_xyz, y=None, z=None, length=_1_0):
'''Get a 3-D vector perpendicular to at a cartesian on this triaxial's surface.
@arg x_xyz: X component (C{scalar}) or a cartesian (C{Cartesian},
L{Ecef9Tuple}, L{Vector3d}, L{Vector3Tuple} or L{Vector4Tuple}).
           @kwarg y: Y component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
           @kwarg z: Z component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
@kwarg length: Optional length and in-/outward direction (C{scalar}).
@return: A C{Vector3d(x_, y_, z_)} normalized to B{C{length}}, pointing
in- or outward for neg- respectively positive B{C{length}}.
@note: Cartesian location C{(B{x}, B{y}, B{z})} must be on this triaxial's
surface, use method L{Triaxial.sideOf} to validate.
'''
# n = 2 * (x / a2, y / b2, z / c2)
# == 2 * (x, y * a2 / b2, z * a2 / c2) / a2 # iff ordered
# == 2 * (x, y / _1e2ab, z / _1e2ac) / a2
# == unit(x, y / _1e2ab, z / _1e2ac).times(length)
n = self._normal3d.times_(*_otherV3d_(x_xyz, y, z).xyz)
if n.length < EPS0:
raise TriaxialError(x=x_xyz, y=y, z=z, txt=_null_)
return n.times(length / n.length)
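    # Illustrative sketch, continuing the height4 example above (not executed):
    #   x, y, z, _ = t.height4(7000e3, 0, 0)
    #   n_out = t.normal3d(x, y, z)               # outward unit normal Vector3d
    #   n_in = t.normal3d(x, y, z, length=-2.0)   # length 2, pointing inward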
@Property_RO
def _normal3d(self):
'''(INTERNAL) Get M{Vector3d((d/a)**2, (d/b)**2, (d/c)**2)}, M{d = max(a, b, c)}.
'''
d = max(self._abc3)
t = tuple(((d / x)**2 if x != d else _1_0) for x in self._abc3)
return Vector3d(*t, name=self.normal3d.__name__)
def _norm2(self, s, c, *a):
'''(INTERNAL) Normalize C{s} and C{c} iff not already.
'''
if fabs(_hypot21(s, c)) > EPS02:
s, c = norm2(s, c)
if a:
s, c = norm2(s * self.b, c * a[0])
return float0_(s, c)
def _order3(self, *abc, **reverse): # reverse=False
'''(INTERNAL) Un-/Order C{a}, C{b} and C{c}.
@return: 3-Tuple C{(a, b, c)} ordered by or un-ordered
(reverse-ordered) C{ijk} if C{B{reverse}=True}.
'''
ijk = self._order_ijk(**reverse)
return _getitems(abc, *ijk) if ijk else abc
def _order3d(self, v, **reverse): # reverse=False
'''(INTERNAL) Un-/Order a C{Vector3d}.
@return: Vector3d(x, y, z) un-/ordered.
'''
ijk = self._order_ijk(**reverse)
return v.classof(*_getitems(v.xyz, *ijk)) if ijk else v
@Property_RO
def _ordered4(self):
'''(INTERNAL) Helper for C{_hartzell3d2} and C{_normalTo5}.
'''
def _order2(reverse, a, b, c):
'''(INTERNAL) Un-Order C{a}, C{b} and C{c}.
@return: 2-Tuple C{((a, b, c), ijk)} with C{a} >= C{b} >= C{c}
and C{ijk} a 3-tuple with the initial indices.
'''
i, j, k = 0, 1, 2 # range(3)
if a < b:
a, b, i, j = b, a, j, i
if a < c:
a, c, i, k = c, a, k, i
if b < c:
b, c, j, k = c, b, k, j
# reverse (k, j, i) since (a, b, c) is reversed-sorted
ijk = (k, j, i) if reverse else (None if i < j < k else (i, j, k))
return (a, b, c), ijk
abc, T = self._abc3, self
if not self.isOrdered:
abc, ijk = _order2(False, *abc)
if ijk:
_, kji = _order2(True, *ijk)
T = Triaxial_(*abc)
T._ijk, T._kji = ijk, kji
return abc + (T,)
def _order_ijk(self, reverse=False):
'''(INTERNAL) Get the un-/order indices.
'''
return self._kji if reverse else self._ijk
def _radialTo3(self, sbeta, cbeta, somega, comega):
'''(INTERNAL) I{Unordered} helper for C{.height4}.
'''
def _rphi(a, b, sphi, cphi):
# <https://WikiPedia.org/wiki/Ellipse#Polar_form_relative_to_focus>
# polar form: radius(phi) = a * b / hypot(a * sphi, b * cphi)
return (b / hypot(sphi, b / a * cphi)) if a > b else (
(a / hypot(cphi, a / b * sphi)) if a < b else a)
sa, ca = self._norm2(sbeta, cbeta)
sb, cb = self._norm2(somega, comega)
a, b, c = self._abc3
if a != b:
a = _rphi(a, b, sb, cb)
if a != c:
c = _rphi(a, c, sa, ca)
z, r = c * sa, c * ca
x, y = r * cb, r * sb
return x, y, z
def sideOf(self, x_xyz, y=None, z=None, eps=EPS4):
'''Is a cartesian above, below or on the surface of this triaxial?
@arg x_xyz: X component (C{scalar}) or a cartesian (C{Cartesian},
L{Ecef9Tuple}, L{Vector3d}, L{Vector3Tuple} or L{Vector4Tuple}).
           @kwarg y: Y component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
           @kwarg z: Z component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
           @kwarg eps: Near surface tolerance (C{scalar}).
@return: C{INT0} if C{(B{x}, B{y}, B{z})} is near this triaxial's surface
within tolerance B{C{eps}}, otherwise a neg- or positive C{float}
if in- respectively outside this triaxial.
@see: Methods L{Triaxial.height4} and L{Triaxial.normal3d}.
'''
return _sideOf(_otherV3d_(x_xyz, y, z).xyz, self._abc3, eps=eps)
def _sqrt(self, x):
'''(INTERNAL) Helper, see L{pygeodesy.sqrt0}.
'''
if x < 0:
raise TriaxialError(Fmt.PAREN(sqrt=x))
return _0_0 if x < EPS02 else sqrt(x)
def toEllipsoid(self, name=NN):
'''Convert this triaxial to an L{Ellipsoid}, provided 2 axes match.
@return: An L{Ellipsoid} with north along this C{Z} axis if C{a == b},
this C{Y} axis if C{a == c} or this C{X} axis if C{b == c}.
@raise TriaxialError: This C{a != b}, C{b != c} and C{c != a}.
@see: Method L{Ellipsoid.toTriaxial}.
'''
a, b, c = self._abc3
if a == b:
b = c # N = c-Z
elif b == c: # N = a-X
a, b = b, a
elif a != c: # N = b-Y
t = _SPACE_(_a_, _NOTEQUAL_, _b_, _NOTEQUAL_, _c_)
raise TriaxialError(a=a, b=b, c=c, txt=t)
return Ellipsoid(a, b=b, name=name or self.name)
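    # Illustrative sketch (hypothetical, WGS84-like values): a triaxial with
    # two equal semi-axes reduces to an ellipsoid of revolution:
    #   t = Triaxial(6378137, 6378137, 6356752)   # a == b, oblate
    #   E = t.toEllipsoid()                       # Ellipsoid(a=6378137, b=6356752), north along Z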
def toStr(self, prec=9, name=NN, **unused): # PYCHOK signature
'''Return this C{Triaxial} as a string.
@kwarg prec: Precision, number of decimal digits (0..9).
@kwarg name: Override name (C{str}) or C{None} to exclude
this triaxial's name.
@return: This C{Triaxial}'s attributes (C{str}).
'''
T = Triaxial_
t = T.a,
J = JacobiConformalSpherical
t += (J.ab, J.bc) if isinstance(self, J) else (T.b, T.c)
t += T.e2ab, T.e2bc, T.e2ac
J = JacobiConformal
if isinstance(self, J):
t += J.xyQ2,
t += T.volume, T.area
return self._instr(name, prec, props=t, area_p=self.area_p())
@Property_RO
def volume(self):
'''Get the volume (C{meter**3}), M{4 / 3 * PI * a * b * c}.
'''
a, b, c = self._abc3
return Meter3(volume=a * b * c * PI_3 * _4_0)
class Triaxial(Triaxial_):
'''I{Ordered} triaxial ellipsoid.
@see: L{Triaxial_} for more information.
'''
_unordered = False
def __init__(self, a_triaxial, b=None, c=None, name=NN):
'''New I{ordered} L{Triaxial}.
@arg a_triaxial: Largest semi-axis (C{scalar}, conventionally in C{meter})
or an other L{Triaxial} or L{Triaxial_} instance.
@kwarg b: Middle semi-axis (C{meter}, same units as B{C{a}}), required
if C{B{a_triaxial} is scalar}, ignored otherwise.
@kwarg c: Smallest semi-axis (C{meter}, same units as B{C{a}}), required
if C{B{a_triaxial} is scalar}, ignored otherwise.
@kwarg name: Optional name (C{str}).
@note: The semi-axes must be ordered as C{B{a} >= B{b} >= B{c} > 0} and
must be ellipsoidal, C{B{a} > B{c}}.
@raise TriaxialError: Semi-axes not ordered, spherical or invalid.
'''
Triaxial_.__init__(self, a_triaxial, b=b, c=c, name=name)
@Property_RO
def _a2b2_a2c2(self):
'''@see: Methods C{.forwardBetaOmega} and C{._k2_kp2}.
'''
return self._a2b2 / self._a2c2
@Property_RO
def area(self):
'''Get the surface area (C{meter} I{squared}).
@see: U{Surface area<https://WikiPedia.org/wiki/Ellipsoid#Surface_area>}.
'''
a, b, c = self._abc3
if a != b:
kp2, k2 = self._k2_kp2 # swapped!
aE = self._Elliptic(k2, _0_0, kp2, _1_0)
c2 = self._1e2ac # cos(phi)**2 = (c/a)**2
s = sqrt(self.e2ac) # sin(phi)**2 = 1 - c2
r = asin1(s) # phi = atan2(sqrt(c2), s)
b *= fsum1f_(aE.fE(r) * s, c / a * c / b,
aE.fF(r) * c2 / s)
a = Meter2(area=a * b * PI2)
else: # a == b > c
a = Ellipsoid(a, b=c).areax
return a
def _exyz3(self, u):
        '''(INTERNAL) Helper for C{.forwardBetaOmega}.
'''
if u > 0:
u2 = u**2
x = u * self._sqrt(_1_0 + self._a2c2 / u2)
y = u * self._sqrt(_1_0 + self._b2c2 / u2)
else:
x = y = u = _0_0
return x, y, u
def forwardBetaOmega(self, beta, omega, height=0, name=NN):
'''Convert I{ellipsoidal} lat- and longitude C{beta}, C{omega}
and height to cartesian.
@arg beta: Ellipsoidal latitude (C{radians} or L{Degrees}).
@arg omega: Ellipsoidal longitude (C{radians} or L{Degrees}).
@arg height: Height above or below the ellipsoid's surface (C{meter}, same
units as this triaxial's C{a}, C{b} and C{c} semi-axes).
@kwarg name: Optional name (C{str}).
@return: A L{Vector3Tuple}C{(x, y, z)}.
@see: Method L{Triaxial.reverseBetaOmega} and U{Expressions (23-25)<https://
www.Topo.Auth.GR/wp-content/uploads/sites/111/2021/12/09_Panou.pdf>}.
'''
if height:
h = Height_(height=height, low=-self.c, Error=TriaxialError)
x, y, z = self._exyz3(h + self.c)
else:
x, y, z = self._abc3 # == self._exyz3(self.c)
if z: # and x and y:
sa, ca = SinCos2(beta)
sb, cb = SinCos2(omega)
r = self._a2b2_a2c2
x *= cb * self._sqrt(ca**2 + r * sa**2)
y *= ca * sb
z *= sa * self._sqrt(_1_0 - r * cb**2)
return Vector3Tuple(x, y, z, name=name)
def forwardBetaOmega_(self, sbeta, cbeta, somega, comega, name=NN):
'''Convert I{ellipsoidal} lat- and longitude C{beta} and C{omega}
to cartesian coordinates I{on the triaxial's surface}.
@arg sbeta: Ellipsoidal latitude C{sin(beta)} (C{scalar}).
@arg cbeta: Ellipsoidal latitude C{cos(beta)} (C{scalar}).
@arg somega: Ellipsoidal longitude C{sin(omega)} (C{scalar}).
@arg comega: Ellipsoidal longitude C{cos(omega)} (C{scalar}).
@kwarg name: Optional name (C{str}).
@return: A L{Vector3Tuple}C{(x, y, z)} on the surface.
@raise TriaxialError: This triaxial is near-spherical.
@see: Method L{Triaxial.reverseBetaOmega}, U{Triaxial ellipsoid coordinate
system<https://WikiPedia.org/wiki/Geodesics_on_an_ellipsoid#
Triaxial_ellipsoid_coordinate_system>} and U{expressions (23-25)<https://
www.Topo.Auth.GR/wp-content/uploads/sites/111/2021/12/09_Panou.pdf>}.
'''
t = self._radialTo3(sbeta, cbeta, somega, comega)
return Vector3Tuple(*t, name=name)
def forwardCartesian(self, x_xyz, y=None, z=None, name=NN, **normal_eps):
'''Project a cartesian on this triaxial.
@arg x_xyz: X component (C{scalar}) or a cartesian (C{Cartesian},
L{Ecef9Tuple}, L{Vector3d}, L{Vector3Tuple} or L{Vector4Tuple}).
           @kwarg y: Y component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
           @kwarg z: Z component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
@kwarg name: Optional name (C{str}).
@kwarg normal_eps: Optional keyword arguments C{B{normal}=True} and
C{B{eps}=EPS}, see method L{Triaxial.height4}.
@see: Method L{Triaxial.height4} for further information and method
L{Triaxial.reverseCartesian} to reverse the projection.
'''
t = self.height4(x_xyz, y, z, **normal_eps)
_ = t.rename(name)
return t
def forwardLatLon(self, lat, lon, height=0, name=NN):
        '''Convert I{geodetic} lat-, longitude and height to cartesian.
@arg lat: Geodetic latitude (C{degrees}).
@arg lon: Geodetic longitude (C{degrees}).
@arg height: Height above the ellipsoid (C{meter}, same units
as this triaxial's C{a}, C{b} and C{c} axes).
@kwarg name: Optional name (C{str}).
@return: A L{Vector3Tuple}C{(x, y, z)}.
@see: Method L{Triaxial.reverseLatLon} and U{Expressions (9-11)<https://
www.Topo.Auth.GR/wp-content/uploads/sites/111/2021/12/09_Panou.pdf>}.
'''
return self._forwardLatLon3(height, name, *sincos2d_(lat, lon))
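    # Illustrative usage sketch (hypothetical values, not executed):
    #   t = Triaxials.Earth
    #   v = t.forwardLatLon(45.0, 9.0, height=100)   # degrees, degrees, meter
    #   # v is a Vector3Tuple(x, y, z) in meter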
def forwardLatLon_(self, slat, clat, slon, clon, height=0, name=NN):
        '''Convert I{geodetic} lat-, longitude and height to cartesian.
@arg slat: Geodetic latitude C{sin(lat)} (C{scalar}).
@arg clat: Geodetic latitude C{cos(lat)} (C{scalar}).
@arg slon: Geodetic longitude C{sin(lon)} (C{scalar}).
@arg clon: Geodetic longitude C{cos(lon)} (C{scalar}).
@arg height: Height above the ellipsoid (C{meter}, same units
as this triaxial's axes C{a}, C{b} and C{c}).
@kwarg name: Optional name (C{str}).
@return: A L{Vector3Tuple}C{(x, y, z)}.
@see: Method L{Triaxial.reverseLatLon} and U{Expressions (9-11)<https://
www.Topo.Auth.GR/wp-content/uploads/sites/111/2021/12/09_Panou.pdf>}.
'''
sa, ca = self._norm2(slat, clat)
sb, cb = self._norm2(slon, clon)
return self._forwardLatLon3(height, name, sa, ca, sb, cb)
def _forwardLatLon3(self, h, name, sa, ca, sb, cb):
'''(INTERNAL) Helper for C{.forwardLatLon} and C{.forwardLatLon_}.
'''
ca_x_sb = ca * sb
# 1 - (1 - (c/a)**2) * sa**2 - (1 - (b/a)**2) * ca**2 * sb**2
t = fsumf_(_1_0, -self.e2ac * sa**2, -self.e2ab * ca_x_sb**2)
n = self.a / self._sqrt(t) # prime vertical
x = (h + n) * ca * cb
y = (h + n * self._1e2ab) * ca_x_sb
z = (h + n * self._1e2ac) * sa
return Vector3Tuple(x, y, z, name=name)
@Property_RO
def _k2_kp2(self):
'''(INTERNAL) Get C{k2} and C{kp2} for C{._xE}, C{._yE} and C{.area}.
'''
# k2 = a2b2 / a2c2 * c2_b2
# kp2 = b2c2 / a2c2 * a2_b2
# b2 = b**2
# xE = Elliptic(k2, -a2b2 / b2, kp2, a2_b2)
# yE = Elliptic(kp2, +b2c2 / b2, k2, c2_b2)
# aE = Elliptic(kp2, 0, k2, 1)
return (self._a2b2_a2c2 * self._c2_b2,
self._b2c2 / self._a2c2 * self._a2_b2)
def _radialTo3(self, sbeta, cbeta, somega, comega):
'''(INTERNAL) Convert I{ellipsoidal} lat- and longitude C{beta} and
C{omega} to cartesian coordinates I{on the triaxial's surface},
also I{ordered} helper for C{.height4}.
'''
sa, ca = self._norm2(sbeta, cbeta)
sb, cb = self._norm2(somega, comega)
b2_a2 = self._1e2ab # == (b/a)**2
c2_a2 = -self._1e2ac # == -(c/a)**2
a2c2_a2 = self. e2ac # (a**2 - c**2) / a**2 == 1 - (c/a)**2
x2 = Fsum(_1_0, -b2_a2 * sa**2, c2_a2 * ca**2).fover(a2c2_a2)
z2 = Fsum(c2_a2, sb**2, b2_a2 * cb**2).fover(a2c2_a2)
x, y, z = self._abc3
x *= cb * self._sqrt(x2)
y *= ca * sb
z *= sa * self._sqrt(z2)
return x, y, z
def reverseBetaOmega(self, x_xyz, y=None, z=None, name=NN):
'''Convert cartesian to I{ellipsoidal} lat- and longitude, C{beta}, C{omega}
and height.
@arg x_xyz: X component (C{scalar}) or a cartesian (C{Cartesian},
L{Ecef9Tuple}, L{Vector3d}, L{Vector3Tuple} or L{Vector4Tuple}).
           @kwarg y: Y component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
           @kwarg z: Z component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
@kwarg name: Optional name (C{str}).
@return: A L{BetaOmega3Tuple}C{(beta, omega, height)} with C{beta} and
C{omega} in L{Radians} and (radial) C{height} in C{meter}, same
units as this triaxial's axes.
@see: Methods L{Triaxial.forwardBetaOmega} and L{Triaxial.forwardBetaOmega_}
and U{Expressions (21-22)<https://www.Topo.Auth.GR/wp-content/uploads/
sites/111/2021/12/09_Panou.pdf>}.
'''
v = _otherV3d_(x_xyz, y, z)
a, b, h = self._reverseLatLon3(v, atan2, v, self.forwardBetaOmega_)
return BetaOmega3Tuple(Radians(beta=a), Radians(omega=b), h, name=name)
def reverseCartesian(self, x_xyz, y=None, z=None, h=0, normal=True, eps=_EPS2e4, name=NN):
'''"Unproject" a cartesian on to a cartesion I{off} this triaxial's surface.
@arg x_xyz: X component (C{scalar}) or a cartesian (C{Cartesian},
L{Ecef9Tuple}, L{Vector3d}, L{Vector3Tuple} or L{Vector4Tuple}).
           @kwarg y: Y component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
           @kwarg z: Z component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
@arg h: Height above or below this triaxial's surface (C{meter}, same units
as the axes).
@kwarg normal: If C{True} the height is C{normal} to the surface, otherwise
C{radially} to the center of this triaxial (C{bool}).
@kwarg eps: Tolerance for surface test (C{scalar}).
@kwarg name: Optional name (C{str}).
@return: A L{Vector3Tuple}C{(x, y, z)}.
           @raise TriaxialError: Cartesian C{(x, y, z)} not on this triaxial's surface.
@see: Methods L{Triaxial.forwardCartesian} and L{Triaxial.height4}.
'''
v = _otherV3d_(x_xyz, y, z, name=name)
s = _sideOf(v.xyz, self._abc3, eps=eps)
if s: # PYCHOK no cover
t = _SPACE_((_inside_ if s < 0 else _outside_), self.toRepr())
raise TriaxialError(eps=eps, sideOf=s, x=v.x, y=v.y, z=v.z, txt=t)
if h:
if normal:
v = v.plus(self.normal3d(*v.xyz, length=h))
elif v.length > EPS0:
v = v.times(_1_0 + (h / v.length))
return v.xyz # Vector3Tuple
def reverseLatLon(self, x_xyz, y=None, z=None, name=NN):
'''Convert cartesian to I{geodetic} lat-, longitude and height.
@arg x_xyz: X component (C{scalar}) or a cartesian (C{Cartesian},
L{Ecef9Tuple}, L{Vector3d}, L{Vector3Tuple} or L{Vector4Tuple}).
           @kwarg y: Y component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
           @kwarg z: Z component (C{scalar}), required if B{C{x_xyz}} is C{scalar}.
@kwarg name: Optional name (C{str}).
@return: A L{LatLon3Tuple}C{(lat, lon, height)} with C{lat} and C{lon}
in C{degrees} and (radial) C{height} in C{meter}, same units
as this triaxial's axes.
@see: Methods L{Triaxial.forwardLatLon} and L{Triaxial.forwardLatLon_}
and U{Expressions (4-5)<https://www.Topo.Auth.GR/wp-content/uploads/
sites/111/2021/12/09_Panou.pdf>}.
'''
v = _otherV3d_(x_xyz, y, z)
s = v.times_(self._1e2ac, # == 1 - e_sub_x**2
self._1e2bc, # == 1 - e_sub_y**2
_1_0)
t = self._reverseLatLon3(s, atan2d, v, self.forwardLatLon_)
return LatLon3Tuple(*t, name=name)
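    # Illustrative round-trip sketch: reverseLatLon should recover the inputs
    # of forwardLatLon, up to rounding (not executed):
    #   v = t.forwardLatLon(45.0, 9.0, height=100)
    #   lat, lon, h = t.reverseLatLon(v)   # LatLon3Tuple in degrees and meter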
def _reverseLatLon3(self, s, atan2_, v, forward_):
        '''(INTERNAL) Helper for C{.reverseBetaOmega} and C{.reverseLatLon}.
'''
x, y, z = s.xyz
d = hypot( x, y)
a = atan2_(z, d)
b = atan2_(y, x)
h = v.minus_(*forward_(z, d, y, x)).length
return a, b, h
class JacobiConformal(Triaxial):
'''This is a conformal projection of a triaxial ellipsoid to a plane in which the
C{X} and C{Y} grid lines are straight.
Ellipsoidal coordinates I{beta} and I{omega} are converted to Jacobi Conformal
I{y} respectively I{x} separately. Jacobi's coordinates have been multiplied
by C{sqrt(B{a}**2 - B{c}**2) / (2 * B{b})} so that the customary results are
returned in the case of an ellipsoid of revolution.
Copyright (C) U{Charles Karney<mailto:Karney@Alum.MIT.edu>} (2014-2023) and
licensed under the MIT/X11 License.
@note: This constructor can I{not be used to specify a sphere}, see alternate
L{JacobiConformalSpherical}.
@see: L{Triaxial}, C++ class U{JacobiConformal<https://GeographicLib.SourceForge.io/
C++/doc/classGeographicLib_1_1JacobiConformal.html#details>}, U{Jacobi's conformal
projection<https://GeographicLib.SourceForge.io/C++/doc/jacobi.html>} and Jacobi,
C. G. J. I{U{Vorlesungen über Dynamik<https://Books.Google.com/books?
id=ryEOAAAAQAAJ&pg=PA212>}}, page 212ff.
'''
@Property_RO
def _xE(self):
'''(INTERNAL) Get the x-elliptic function.
'''
k2, kp2 = self._k2_kp2
# -a2b2 / b2 == (b2 - a2) / b2 == 1 - a2 / b2 == 1 - a2_b2
return self._Elliptic(k2, _1_0 - self._a2_b2, kp2, self._a2_b2)
def xR(self, omega):
'''Compute a Jacobi Conformal C{x} projection.
@arg omega: Ellipsoidal longitude (C{radians} or L{Degrees}).
@return: The C{x} projection (L{Radians}).
'''
return self.xR_(*SinCos2(omega))
def xR_(self, somega, comega):
'''Compute a Jacobi Conformal C{x} projection.
@arg somega: Ellipsoidal longitude C{sin(omega)} (C{scalar}).
@arg comega: Ellipsoidal longitude C{cos(omega)} (C{scalar}).
@return: The C{x} projection (L{Radians}).
'''
s, c = self._norm2(somega, comega, self.a)
return Radians(x=self._xE.fPi(s, c) * self._a2_b2)
@Property_RO
def xyQ2(self):
'''Get the Jacobi Conformal quadrant size (L{Jacobi2Tuple}C{(x, y)}).
'''
return Jacobi2Tuple(Radians(x=self._a2_b2 * self._xE.cPi),
Radians(y=self._c2_b2 * self._yE.cPi),
name=JacobiConformal.xyQ2.name)
def xyR2(self, beta, omega, name=NN):
'''Compute a Jacobi Conformal C{x} and C{y} projection.
@arg beta: Ellipsoidal latitude (C{radians} or L{Degrees}).
@arg omega: Ellipsoidal longitude (C{radians} or L{Degrees}).
@kwarg name: Optional name (C{str}).
@return: A L{Jacobi2Tuple}C{(x, y)}.
'''
return self.xyR2_(*(SinCos2(beta) + SinCos2(omega)),
name=name or self.xyR2.__name__)
def xyR2_(self, sbeta, cbeta, somega, comega, name=NN):
'''Compute a Jacobi Conformal C{x} and C{y} projection.
@arg sbeta: Ellipsoidal latitude C{sin(beta)} (C{scalar}).
@arg cbeta: Ellipsoidal latitude C{cos(beta)} (C{scalar}).
@arg somega: Ellipsoidal longitude C{sin(omega)} (C{scalar}).
@arg comega: Ellipsoidal longitude C{cos(omega)} (C{scalar}).
@kwarg name: Optional name (C{str}).
@return: A L{Jacobi2Tuple}C{(x, y)}.
'''
return Jacobi2Tuple(self.xR_(somega, comega),
self.yR_(sbeta, cbeta),
name=name or self.xyR2_.__name__)
@Property_RO
def _yE(self):
        '''(INTERNAL) Get the y-elliptic function.
'''
kp2, k2 = self._k2_kp2 # swapped!
# b2c2 / b2 == (b2 - c2) / b2 == 1 - c2 / b2 == e2bc
return self._Elliptic(k2, self.e2bc, kp2, self._c2_b2)
def yR(self, beta):
'''Compute a Jacobi Conformal C{y} projection.
@arg beta: Ellipsoidal latitude (C{radians} or L{Degrees}).
@return: The C{y} projection (L{Radians}).
'''
return self.yR_(*SinCos2(beta))
def yR_(self, sbeta, cbeta):
'''Compute a Jacobi Conformal C{y} projection.
@arg sbeta: Ellipsoidal latitude C{sin(beta)} (C{scalar}).
@arg cbeta: Ellipsoidal latitude C{cos(beta)} (C{scalar}).
@return: The C{y} projection (L{Radians}).
'''
s, c = self._norm2(sbeta, cbeta, self.c)
return Radians(y=self._yE.fPi(s, c) * self._c2_b2)
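    # Illustrative sketch (hypothetical values, not executed): a JacobiConformal
    # can be built from an ordered, non-spherical Triaxial such as Triaxials.Earth:
    #   J = JacobiConformal(Triaxials.Earth)
    #   J.xyR2(0.5, 1.0)   # beta, omega in radians -> Jacobi2Tuple(x, y)
    #   J.xyQ2             # quadrant size, Jacobi2Tuple(x, y) in Radians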
class JacobiConformalSpherical(JacobiConformal):
'''An alternate, I{spherical} L{JacobiConformal} projection.
@see: L{JacobiConformal} for other and more details.
'''
_ab = _bc = 0
def __init__(self, radius_triaxial, ab=0, bc=0, name=NN):
'''New L{JacobiConformalSpherical}.
@arg radius_triaxial: Radius (C{scalar}, conventionally in
C{meter}) or an other L{JacobiConformalSpherical},
L{JacobiConformal} or ordered L{Triaxial}.
           @kwarg ab: Relative magnitude of C{B{a} - B{b}} (C{meter},
                      same units as C{scalar B{radius}}).
           @kwarg bc: Relative magnitude of C{B{b} - B{c}} (C{meter},
                      same units as C{scalar B{radius}}).
@kwarg name: Optional name (C{str}).
@raise TriaxialError: Invalid B{C{radius_triaxial}}, negative
B{C{ab}}, negative B{C{bc}} or C{(B{ab}
+ B{bc})} not positive.
@note: If B{C{radius_triaxial}} is a L{JacobiConformalSpherical}
and if B{C{ab}} and B{C{bc}} are both zero or C{None},
the B{C{radius_triaxial}}'s C{ab}, C{bc}, C{a}, C{b}
and C{c} are copied.
'''
try:
r, j = radius_triaxial, False
if isinstance(r, Triaxial): # ordered only
if (not (ab or bc)) and isinstance(r, JacobiConformalSpherical):
j = True
t = r._abc3
else:
t = (Radius(radius=r),) * 3
self._ab = r.ab if j else Scalar_(ab=ab) # low=0
self._bc = r.bc if j else Scalar_(bc=bc) # low=0
if (self.ab + self.bc) <= 0:
raise ValueError('(ab + bc)')
a, _, c = self._abc3 = t
if not (a >= c and isfinite(self._a2b2)
and isfinite(self._a2c2)):
raise ValueError(_not_(_finite_))
except (TypeError, ValueError) as x:
raise TriaxialError(radius_triaxial=r, ab=ab, bc=bc, cause=x)
if name:
self.name = name
@Property_RO
def ab(self):
'''Get relative magnitude C{ab} (C{meter}, same units as B{C{a}}).
'''
return self._ab
@Property_RO
def _a2b2(self):
'''(INTERNAL) Get C{a**2 - b**2} == ab * (a + b).
'''
a, b, _ = self._abc3
return self.ab * (a + b)
@Property_RO
def _a2c2(self):
'''(INTERNAL) Get C{a**2 - c**2} == a2b2 + b2c2.
'''
return self._a2b2 + self._b2c2
@Property_RO
def bc(self):
'''Get relative magnitude C{bc} (C{meter}, same units as B{C{a}}).
'''
return self._bc
@Property_RO
def _b2c2(self):
'''(INTERNAL) Get C{b**2 - c**2} == bc * (b + c).
'''
_, b, c = self._abc3
return self.bc * (b + c)
@Property_RO
def radius(self):
'''Get radius (C{meter}, conventionally).
'''
return self.a
class TriaxialError(_ValueError):
'''Raised for L{Triaxial} issues.
'''
pass # ...
class Triaxials(_NamedEnum):
'''(INTERNAL) L{Triaxial} registry, I{must} be a sub-class
to accommodate the L{_LazyNamedEnumItem} properties.
'''
def _Lazy(self, *abc, **name):
'''(INTERNAL) Instantiate the C{Triaxial}.
'''
a, b, c = map(km2m, abc)
return Triaxial(a, b, c, **name)
Triaxials = Triaxials(Triaxial, Triaxial_) # PYCHOK singleton
'''Some pre-defined L{Triaxial}s, all I{lazily} instantiated.'''
# <https://ArxIV.org/pdf/1909.06452.pdf> Table 1 Semi-axes in Km
# <https://www.JPS.NASA.gov/education/images/pdf/ss-moons.pdf>
# <https://link.Springer.com/article/10.1007/s00190-022-01650-9>
_E = _WGS84.ellipsoid
Triaxials._assert( # a (Km) b (Km) c (Km) planet
Amalthea = _lazy('Amalthea', 125.0, 73.0, 64), # Jupiter
Ariel = _lazy('Ariel', 581.1, 577.9, 577.7), # Uranus
Earth = _lazy('Earth', 6378.173435, 6378.1039, 6356.7544),
Enceladus = _lazy('Enceladus', 256.6, 251.4, 248.3), # Saturn
Europa = _lazy('Europa', 1564.13, 1561.23, 1560.93), # Jupiter
Io = _lazy('Io', 1829.4, 1819.3, 1815.7), # Jupiter
Mars = _lazy('Mars', 3394.6, 3393.3, 3376.3),
Mimas = _lazy('Mimas', 207.4, 196.8, 190.6), # Saturn
Miranda = _lazy('Miranda', 240.4, 234.2, 232.9), # Uranus
Moon = _lazy('Moon', 1735.55, 1735.324, 1734.898), # Earth
Tethys = _lazy('Tethys', 535.6, 528.2, 525.8), # Saturn
WGS84_35 = _lazy('WGS84_35', *map1(m2km, _E.a + 35, _E.a - 35, _E.b)))
del _E
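# Illustrative sketch: registry items are instantiated lazily on first access,
# with the Km semi-axes above converted to meter (not executed):
#   t = Triaxials.Moon       # Triaxial(a=1735550, b=1735324, c=1734898)
#   t.volume, t.area         # meter**3, meter**2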
def _getitems(items, *indices):
'''(INTERNAL) Get the C{items} at the given I{indices}.
@return: C{Type(items[i] for i in indices)} with
C{Type = type(items)}, any C{type} having
the special method C{__getitem__}.
'''
return type(items)(map(items.__getitem__, indices))
def _hartzell3d2(pov, los, Tun): # MCCABE 13 in .ellipsoidal.hartzell4, .formy.hartzell
'''(INTERNAL) Hartzell's "Satellite Line-of-Sight Intersection ...",
formula for I{un-/ordered} triaxials.
'''
a, b, c, T = Tun._ordered4
a2 = a**2 # largest, factored out
b2, p2 = (b**2, T._1e2ab) if b != a else (a2, _1_0)
c2, q2 = (c**2, T._1e2ac) if c != a else (a2, _1_0)
p3 = T._order3d(_otherV3d(pov=pov))
u3 = T._order3d(_otherV3d(los=los)) if los else p3.negate()
u3 = u3.unit() # unit vector, opposing signs
x2, y2, z2 = p3.x2y2z2 # p3.times_(p3).xyz
ux, vy, wz = u3.times_(p3).xyz
u2, v2, w2 = u3.x2y2z2 # u3.times_(u3).xyz
t = (p2 * c2), c2, b2
m = fdot(t, u2, v2, w2) # a2 factored out
if m < EPS0: # zero or near-null LOS vector
raise _ValueError(_near_(_null_))
r = fsumf_(b2 * w2, c2 * v2, -v2 * z2, vy * wz * 2,
-w2 * y2, b2 * u2 * q2, -u2 * z2 * p2, ux * wz * 2 * p2,
-w2 * x2 * p2, -u2 * y2 * q2, -v2 * x2 * q2, ux * vy * 2 * q2)
if r > 0: # a2 factored out
r = sqrt(r) * b * c # == a * a * b * c / a2
elif r < 0: # LOS pointing away from or missing the triaxial
raise _ValueError(_opposite_ if max(ux, vy, wz) > 0 else _outside_)
d = Fdot(t, ux, vy, wz).fadd_(r).fover(m) # -r for antipode, a2 factored out
if d > 0: # POV inside or LOS missing, outside the triaxial
s = fsumf_(_1_0, x2 / a2, y2 / b2, z2 / c2, _N_2_0) # like _sideOf
raise _ValueError(_outside_ if s > 0 else _inside_)
elif fsum1f_(x2, y2, z2) < d**2: # d past triaxial's center
raise _ValueError(_too_(_distant_))
v = p3.minus(u3.times(d)) # Vector3d
h = p3.minus(v).length # distance to triaxial
return T._order3d(v, reverse=True), h
def hartzell4(pov, los=None, tri_biax=_WGS84, name=NN):
'''Compute the intersection of a tri-/biaxial ellipsoid and a Line-Of-Sight
from a Point-Of-View outside.
@arg pov: Point-Of-View outside the tri-/biaxial (C{Cartesian}, L{Ecef9Tuple}
or L{Vector3d}).
@kwarg los: Line-Of-Sight, I{direction} to the tri-/biaxial (L{Vector3d}) or
C{None} to point to the tri-/biaxial's center.
@kwarg tri_biax: A triaxial (L{Triaxial}, L{Triaxial_}, L{JacobiConformal} or
L{JacobiConformalSpherical}) or biaxial ellipsoid (L{Datum},
L{Ellipsoid}, L{Ellipsoid2}, L{a_f2Tuple} or C{scalar} radius,
conventionally in C{meter}).
@kwarg name: Optional name (C{str}).
@return: L{Vector4Tuple}C{(x, y, z, h)} on the tri-/biaxial's surface, with
C{h} the distance from B{C{pov}} to C{(x, y, z)} along the B{C{los}},
all in C{meter}, conventionally.
@raise TriaxialError: Null B{C{pov}} or B{C{los}}, or B{C{pov}} is inside the
tri-/biaxial or B{C{los}} points outside the tri-/biaxial
or points in an opposite direction.
@raise TypeError: Invalid B{C{pov}} or B{C{los}}.
@see: Function L{pygeodesy.hartzell}, L{pygeodesy.tyr3d} for B{C{los}} and
U{I{Satellite Line-of-Sight Intersection with Earth}<https://StephenHartzell.
Medium.com/satellite-line-of-sight-intersection-with-earth-d786b4a6a9b6>}.
'''
if isinstance(tri_biax, Triaxial_):
T = tri_biax
else:
D = tri_biax if isinstance(tri_biax, Datum) else \
_spherical_datum(tri_biax, name=hartzell4.__name__)
T = D.ellipsoid._triaxial
try:
v, h = _hartzell3d2(pov, los, T)
except Exception as x:
raise TriaxialError(pov=pov, los=los, tri_biax=tri_biax, cause=x)
return Vector4Tuple(v.x, v.y, v.z, h, name=name or hartzell4.__name__)
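# Illustrative usage sketch (hypothetical values, not executed):
#   pov = Vector3d(7000e3, 1000e3, 500e3)   # point-of-view outside WGS84
#   x, y, z, h = hartzell4(pov)             # los=None: look toward the (default WGS84) center
#   # (x, y, z) is the surface intersection, h the distance from pov along the LOS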
def _hypot21(x, y, z=0):
'''(INTERNAL) Compute M{x**2 + y**2 + z**2 - 1} with C{max(fabs(x),
fabs(y), fabs(z))} rarely greater than 1.0.
'''
return fsumf_(_1_0, x**2, y**2, (z**2 if z else _0_0), _N_2_0)
def _normalTo4(x, y, a, b, eps=EPS):
'''(INTERNAL) Nearest point on and distance to a 2-D ellipse, I{unordered}.
@see: Function C{pygeodesy.ellipsoids._normalTo3} and I{Eberly}'s U{Distance
from a Point to ... an Ellipsoid ...<https://www.GeometricTools.com/
Documentation/DistancePointEllipseEllipsoid.pdf>}.
'''
if a < b:
b, a, d, i = _normalTo4(y, x, b, a, eps=eps)
return a, b, d, i
if not (b > 0 and isfinite(a)):
raise _ValueError(a=a, b=b)
i = None
if y:
if x:
u = fabs(x / a)
v = fabs(y / b)
g = _hypot21(u, v)
if g:
r = (a / b)**2
t, i = _rootXd(r, 0, u, 0, v, g, eps)
a = x / (t / r + _1_0)
b = y / (t + _1_0)
d = hypot(x - a, y - b)
else: # on the ellipse
a, b, d = x, y, _0_0
else: # x == 0
if y < 0:
b = -b
a, d = x, fabs(y - b)
else: # y == 0
n = a * x
d = (a + b) * (a - b)
if d > fabs(n): # PYCHOK no cover
r = n / d
a *= r
b *= sqrt(_1_0 - r**2)
d = hypot(x - a, b)
else:
if x < 0:
a = -a
b, d = y, fabs(x - a)
return a, b, d, i
def _normalTo5(x, y, z, Tun, eps=EPS): # MCCABE 19
'''(INTERNAL) Nearest point on and distance to an I{un-/ordered} triaxial.
@see: I{Eberly}'s U{Distance from a Point to ... an Ellipsoid ...<https://
www.GeometricTools.com/Documentation/DistancePointEllipseEllipsoid.pdf>}.
'''
a, b, c, T = Tun._ordered4
if Tun is not T: # T is ordered, Tun isn't
t = T._order3(x, y, z) + (T,)
a, b, c, d, i = _normalTo5(*t, eps=eps)
return T._order3(a, b, c, reverse=True) + (d, i)
if not (isfinite(a) and c > 0):
raise _ValueError(a=a, b=b, c=c)
if eps > 0:
val = max(eps * 1e8, EPS)
else: # no validation
val, eps = 0, -eps
i = None
if z:
if y:
if x:
u = fabs(x / a)
v = fabs(y / b)
w = fabs(z / c)
g = _hypot21(u, v, w)
if g:
r = T._1e2ac # (c / a)**2
s = T._1e2bc # (c / b)**2
t, i = _rootXd(_1_0 / r, _1_0 / s, u, v, w, g, eps)
a = x / (t * r + _1_0)
b = y / (t * s + _1_0)
c = z / (t + _1_0)
d = hypot_(x - a, y - b, z - c)
else: # on the ellipsoid
a, b, c, d = x, y, z, _0_0
else: # x == 0
a = x # 0
b, c, d, i = _normalTo4(y, z, b, c, eps=eps)
elif x: # y == 0
b = y # 0
a, c, d, i = _normalTo4(x, z, a, c, eps=eps)
else: # x == y == 0
if z < 0:
c = -c
a, b, d = x, y, fabs(z - c)
else: # z == 0
t = False
n = a * x
d = T._a2c2 # (a + c) * (a - c)
if d > fabs(n):
u = n / d
n = b * y
d = T._b2c2 # (b + c) * (b - c)
if d > fabs(n):
v = n / d
n = _hypot21(u, v)
if n < 0:
a *= u
b *= v
c *= sqrt(-n)
d = hypot_(x - a, y - b, c)
t = True
if not t:
c = z # 0
a, b, d, i = _normalTo4(x, y, a, b, eps=eps)
if val > 0: # validate
e = T.sideOf(a, b, c, eps=val)
if e: # not near the ellipsoid's surface
raise _ValueError(a=a, b=b, c=c, d=d,
sideOf=e, eps=val)
if d: # angle of delta and normal vector
m = Vector3d(x, y, z).minus_(a, b, c)
if m.euclid > val:
m = m.unit()
n = T.normal3d(a, b, c)
e = n.dot(m) # n.negate().dot(m)
if not isnear1(fabs(e), eps1=val):
raise _ValueError(n=n, m=m,
dot=e, eps=val)
return a, b, c, d, i
def _otherV3d_(x_xyz, y, z, **name):
'''(INTERNAL) Get a Vector3d from C{x_xyz}, C{y} and C{z}.
'''
return Vector3d(x_xyz, y, z, **name) if isscalar(x_xyz) else \
_otherV3d(x_xyz=x_xyz)
def _rootXd(r, s, u, v, w, g, eps):
'''(INTERNAL) Robust 2d- or 3d-root finder:
2d- if C{s == v == 0} otherwise 3d-root.
'''
_1, __2 = _1_0, _0_5
_a, _h2 = fabs, _hypot21
u *= r
v *= s # 0 for 2d-root
t0 = w - _1
t1 = _0_0 if g < 0 else _h2(u, w, v)
for i in range(1, _TRIPS):
e = _a(t0 - t1)
if e < eps:
break
t = (t0 + t1) * __2
if t in (t0, t1):
break
g = _h2(u / (t + r), w / (t + _1),
(v / (t + s)) if v else 0)
if g > 0:
t0 = t
elif g < 0:
t1 = t
else:
break
else: # PYCHOK no cover
t = Fmt.no_convergence(e, eps)
raise _ValueError(t, txt=_rootXd.__name__)
return t, i
def _sideOf(xyz, abc, eps=EPS): # in .formy
'''(INTERNAL) Helper for C{_hartzell3d2}, M{.sideOf} and M{.reverseCartesian}.
       @return: M{sum((x / a)**2 for x, a in zip(xyz, abc)) - 1} or C{INT0}.
'''
s = _hypot21(*((x / a) for x, a in _zip(xyz, abc) if a)) # strict=True
return s if fabs(s) > eps else INT0
if __name__ == '__main__':
from pygeodesy import printf
from pygeodesy.interns import _COMMA_, _NL_, _NLATvar_
# __doc__ of this file, force all into registery
t = [NN] + Triaxials.toRepr(all=True, asorted=True).split(_NL_)
printf(_NLATvar_.join(i.strip(_COMMA_) for i in t))
# **) MIT License
#
# Copyright (C) 2022-2023 -- mrJean1 at Gmail -- All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
|
blob_id: 66098cc7f759b1235a0678f0b8eb956f5be9c4bb
directory_id: 1742b6719b988e5519373002305e31d28b8bd691
path: /sdk/python/pulumi_aws/imagebuilder/get_distribution_configurations.py
content_id: ab2048968af4de2751b742084c4e18cabd480c53
detected_licenses: ["BSD-3-Clause", "MPL-2.0", "Apache-2.0"]
license_type: permissive
repo_name: pulumi/pulumi-aws
snapshot_id: 4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
revision_id: 42b0a0abdf6c14da248da22f8c4530af06e67b98
branch_name: refs/heads/master
visit_date: 2023-08-03T23:08:34.520280
revision_date: 2023-08-01T18:09:58
committer_date: 2023-08-01T18:09:58
github_id: 97,484,940
star_events_count: 384
fork_events_count: 171
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T14:48:40
gha_created_at: 2017-07-17T14:20:33
gha_language: Java
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,954
extension: py
filename: get_distribution_configurations.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetDistributionConfigurationsResult',
'AwaitableGetDistributionConfigurationsResult',
'get_distribution_configurations',
'get_distribution_configurations_output',
]
@pulumi.output_type
class GetDistributionConfigurationsResult:
"""
A collection of values returned by getDistributionConfigurations.
"""
def __init__(__self__, arns=None, filters=None, id=None, names=None):
if arns and not isinstance(arns, list):
raise TypeError("Expected argument 'arns' to be a list")
pulumi.set(__self__, "arns", arns)
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if names and not isinstance(names, list):
raise TypeError("Expected argument 'names' to be a list")
pulumi.set(__self__, "names", names)
@property
@pulumi.getter
def arns(self) -> Sequence[str]:
"""
Set of ARNs of the matched Image Builder Distribution Configurations.
"""
return pulumi.get(self, "arns")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetDistributionConfigurationsFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def names(self) -> Sequence[str]:
"""
Set of names of the matched Image Builder Distribution Configurations.
"""
return pulumi.get(self, "names")
class AwaitableGetDistributionConfigurationsResult(GetDistributionConfigurationsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDistributionConfigurationsResult(
arns=self.arns,
filters=self.filters,
id=self.id,
names=self.names)
def get_distribution_configurations(filters: Optional[Sequence[pulumi.InputType['GetDistributionConfigurationsFilterArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDistributionConfigurationsResult:
"""
Use this data source to get the ARNs and names of Image Builder Distribution Configurations matching the specified criteria.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.imagebuilder.get_distribution_configurations(filters=[aws.imagebuilder.GetDistributionConfigurationsFilterArgs(
name="name",
values=["example"],
)])
```
:param Sequence[pulumi.InputType['GetDistributionConfigurationsFilterArgs']] filters: Configuration block(s) for filtering. Detailed below.
"""
__args__ = dict()
__args__['filters'] = filters
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:imagebuilder/getDistributionConfigurations:getDistributionConfigurations', __args__, opts=opts, typ=GetDistributionConfigurationsResult).value
return AwaitableGetDistributionConfigurationsResult(
arns=pulumi.get(__ret__, 'arns'),
filters=pulumi.get(__ret__, 'filters'),
id=pulumi.get(__ret__, 'id'),
names=pulumi.get(__ret__, 'names'))
@_utilities.lift_output_func(get_distribution_configurations)
def get_distribution_configurations_output(filters: Optional[pulumi.Input[Optional[Sequence[pulumi.InputType['GetDistributionConfigurationsFilterArgs']]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDistributionConfigurationsResult]:
"""
Use this data source to get the ARNs and names of Image Builder Distribution Configurations matching the specified criteria.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.imagebuilder.get_distribution_configurations(filters=[aws.imagebuilder.GetDistributionConfigurationsFilterArgs(
name="name",
values=["example"],
)])
```
:param Sequence[pulumi.InputType['GetDistributionConfigurationsFilterArgs']] filters: Configuration block(s) for filtering. Detailed below.
"""
...
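# Illustrative sketch (not part of the generated SDK): consuming the data source
# in a Pulumi program; the lookup mirrors the docstring above and the export
# names are hypothetical.
#
#   example = aws.imagebuilder.get_distribution_configurations(filters=[
#       aws.imagebuilder.GetDistributionConfigurationsFilterArgs(name="name", values=["example"]),
#   ])
#   pulumi.export("distribution_configuration_arns", example.arns)
#   pulumi.export("distribution_configuration_names", example.names)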
|
blob_id: 22d2ef24553a3df2add79bae3a38965e31dc8d63
directory_id: df91e6d46cd520039f9366d601ae2be82cbb9a98
path: /tests/test_conv_gate.py
content_id: 9b4c8531236d92eb4a7c0a8c14713d29cf8611dc
detected_licenses: []
license_type: no_license
repo_name: AlexxIT/XiaomiGateway3
snapshot_id: aeb63a4bccb7f206140b65836a45fc7216e0bf79
revision_id: ff8a4a6d4f8c08803fb4c84e97d21f7bcc15eda3
branch_name: refs/heads/master
visit_date: 2023-08-29T23:41:34.257447
revision_date: 2023-08-26T16:37:16
committer_date: 2023-08-26T16:37:16
github_id: 291,484,700
star_events_count: 2,051
fork_events_count: 374
gha_license_id: null
gha_event_created_at: 2023-09-07T14:03:41
gha_created_at: 2020-08-30T14:19:34
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,885
extension: py
filename: test_conv_gate.py
|
from custom_components.xiaomi_gateway3.core.converters import GATEWAY
from custom_components.xiaomi_gateway3.core.device import XDevice
DID = "123456789"
MAC = "112233aabbcc"
def test_gateway():
device = XDevice(GATEWAY, "lumi.gateway.mgl03", DID, MAC)
assert device.info.name == "Xiaomi Multimode Gateway"
device.setup_converters()
p = device.decode_lumi([{"res_name": "8.0.2109", "value": 60}])
assert p == {"pair": True}
p = device.encode({"pair": False})
assert p == {"params": [{"res_name": "8.0.2109", "value": 0}]}
# old zigbee pairing
p = device.decode_lumi(
[
{
"res_name": "8.0.2111",
"value": {
"code": 0,
"install_code": "",
"mac": "",
"message": "no data",
},
"error_code": 0,
}
]
)
assert p
# _sync.zigbee3_get_install_code error
p = device.decode_lumi(
[
{
"res_name": "8.0.2111",
"value": {
"code": -4001002,
"install_code": "",
"mac": "",
"message": "no data",
},
"error_code": 0,
}
]
)
assert p
# zigbee3 pairing
p = device.decode_lumi(
[
{
"res_name": "8.0.2111",
"value": {"code": 0, "install_code": "<36 hex>", "mac": "<16 hex>"},
"error_code": 0,
}
]
)
assert p
p = device.decode_lumi(
[{"res_name": "8.0.2155", "value": '{"cloud_link":1,"tz_updated":"GMT3"}'}]
)
assert p == {"cloud_link": True}
p = device.decode_lumi([{"res_name": "8.0.2155", "value": 1}])
assert p == {"cloud_link": True}
|
blob_id: 89d12880bd52ee4676ea5c24bc81afaf8091e211
directory_id: 8988a329c571cb04a5d97c691d0cd8bc4caf81d4
path: /tests/test_trackingcomposite.py
content_id: a8564e08b01faa850c1959cb9b94bbf6c7b1eb13
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: dwavesystems/dimod
snapshot_id: 85329cbee86bdf5a73de05fa25884c877ea53002
revision_id: 8433f221a1e79101e1db0d80968ab5a2f59b865d
branch_name: refs/heads/main
visit_date: 2023-08-29T08:37:24.565927
revision_date: 2023-08-17T17:14:58
committer_date: 2023-08-17T17:14:58
github_id: 100,658,303
star_events_count: 118
fork_events_count: 93
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-13T18:15:37
gha_created_at: 2017-08-18T01:02:17
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,767
extension: py
filename: test_trackingcomposite.py
|
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import dimod
class TestConstruction(unittest.TestCase):
def test_construction(self):
sampler = dimod.TrackingComposite(dimod.ExactSolver())
dimod.testing.assert_sampler_api(sampler)
dimod.testing.assert_composite_api(sampler)
self.assertEqual(sampler.inputs, [])
self.assertEqual(sampler.outputs, [])
@dimod.testing.load_sampler_bqm_tests(dimod.TrackingComposite(dimod.ExactSolver()))
class TestSample(unittest.TestCase):
def test_clear(self):
sampler = dimod.TrackingComposite(dimod.ExactSolver())
h0 = {'a': -1}
J0 = {('a', 'b'): -1}
ss0 = sampler.sample_ising(h0, J0)
h1 = {'b': -1}
J1 = {('b', 'c'): 2}
ss1 = sampler.sample_ising(h1, J1)
sampler.clear()
self.assertEqual(sampler.inputs, [])
self.assertEqual(sampler.outputs, [])
def test_missing_inputs(self):
sampler = dimod.TrackingComposite(dimod.ExactSolver())
with self.assertRaises(ValueError):
sampler.input
with self.assertRaises(ValueError):
sampler.output
def test_sample(self):
sampler = dimod.TrackingComposite(dimod.ExactSolver())
bqm = dimod.BinaryQuadraticModel.from_ising({'a': -1}, {})
ss = sampler.sample(bqm)
self.assertEqual(sampler.input, dict(bqm=bqm))
self.assertEqual(sampler.output, ss)
def test_sample_ising(self):
sampler = dimod.TrackingComposite(dimod.ExactSolver())
h0 = {'a': -1}
J0 = {('a', 'b'): -1}
ss0 = sampler.sample_ising(h0, J0)
h1 = {'b': -1}
J1 = {('b', 'c'): 2}
ss1 = sampler.sample_ising(h1, J1)
self.assertEqual(sampler.input, dict(h=h1, J=J1))
self.assertEqual(sampler.output, ss1)
self.assertEqual(sampler.inputs, [dict(h=h0, J=J0), dict(h=h1, J=J1)])
self.assertEqual(sampler.outputs, [ss0, ss1])
def test_sample_ising_copy_true(self):
sampler = dimod.TrackingComposite(dimod.ExactSolver(), copy=True)
h0 = {'a': -1}
J0 = {('a', 'b'): -1}
ss0 = sampler.sample_ising(h0, J0)
self.assertIsNot(sampler.input['h'], h0)
self.assertIsNot(sampler.output, ss0)
def test_sample_ising_copy_false(self):
sampler = dimod.TrackingComposite(dimod.ExactSolver(), copy=False)
h0 = {'a': -1}
J0 = {('a', 'b'): -1}
ss0 = sampler.sample_ising(h0, J0)
self.assertIs(sampler.input['h'], h0)
self.assertIs(sampler.output, ss0)
def test_sample_ising_kwargs(self):
sampler = dimod.TrackingComposite(dimod.RandomSampler())
h = {'a': -1}
J = {('a', 'b'): -1}
ss = sampler.sample_ising(h, J, num_reads=5)
self.assertEqual(sampler.input, dict(h=h, J=J, num_reads=5))
self.assertEqual(sampler.output, ss)
def test_sample_qubo(self):
sampler = dimod.TrackingComposite(dimod.ExactSolver())
Q = {('a', 'b'): -1}
ss = sampler.sample_qubo(Q)
self.assertEqual(sampler.input, dict(Q=Q))
self.assertEqual(sampler.output, ss)
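# A minimal sketch of the tracking behaviour exercised above (illustrative,
# not part of the test module):
#
# sampler = dimod.TrackingComposite(dimod.ExactSolver())
# ss = sampler.sample_ising({'a': -1}, {('a', 'b'): -1})
# sampler.input # -> dict(h={'a': -1}, J={('a', 'b'): -1})
# sampler.output # -> ss
# sampler.inputs # -> list of every input seen since the last clear()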
|
f1be361afdb7125a9a30aff8c6203be6dd98dd5d
|
c710fa2a979b19d26a45c0821b1c4a46134ba0ae
|
/baton/autodiscover/admin.py
|
af64e3f9d6660987911f3f46a0d9507aa1a12664
|
[
"MIT"
] |
permissive
|
otto-torino/django-baton
|
4e1c6fb321e613ac8af9dba496e508fa6a194cb4
|
a20b2f84c41639c6626357c1486fa86f9ef935c1
|
refs/heads/master
| 2023-08-26T20:27:31.907955
| 2023-08-11T09:45:30
| 2023-08-11T09:45:30
| 81,565,159
| 827
| 103
|
MIT
| 2023-08-28T17:26:42
| 2017-02-10T12:54:24
|
Python
|
UTF-8
|
Python
| false
| false
| 938
|
py
|
admin.py
|
from django.contrib import admin
from ..config import get_config
class BatonAdminSite(admin.AdminSite):
site_header = get_config('SITE_HEADER')
site_title = get_config('SITE_TITLE')
index_title = get_config('INDEX_TITLE')
index_template = 'baton/index.html' if get_config(
'ANALYTICS') else 'admin/index.html'
enable_nav_sidebar = False
def __init__(self, *args, **kwargs):
""" Registers all apps with BatonAdminSite """
super(BatonAdminSite, self).__init__(*args, **kwargs)
# copy registered actions
self._actions = admin.site._actions
self._registry.update(admin.site._registry)
for model in admin.site._registry:
self.unregister([model])
self.register([model], type(admin.site._registry[model]))
site = BatonAdminSite()
# override otherwise in admindocs the default admin site is used showing
# the navbar
admin.site = site
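# Typical wiring in a project's urls.py (illustrative sketch; the import path
# assumes the usual django-baton setup, adjust to your project):
#
# from django.urls import path
# from baton.autodiscover import admin
#
# urlpatterns = [
# path('admin/', admin.site.urls),
# ]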
|
3c83656a8db8628df51e61777ca2375759a91acc
|
0545a8f13cde075572335d05dc74586965c62f8f
|
/examples/video.py
|
295f3714ffbde4707f68fdb42e5ffea97c5fcd09
|
[
"MIT"
] |
permissive
|
rm-hull/luma.examples
|
cabf85927f9dcad55fc627fdf105b94141a7e2ae
|
464812ea21c836b9f9047a4f78d5076cb0b110d1
|
refs/heads/master
| 2023-08-31T00:13:32.515806
| 2023-08-03T06:45:12
| 2023-08-03T06:45:12
| 78,778,717
| 335
| 161
|
MIT
| 2023-08-27T16:21:28
| 2017-01-12T19:20:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,343
|
py
|
video.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2023 Richard Hull and contributors
# See LICENSE.rst for details.
# PYTHON_ARGCOMPLETE_OK
"""
Display a video clip.
Make sure to install the av system packages:
$ sudo apt-get install -y libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libavresample-dev libavfilter-dev
And the pyav package (might take a while):
$ sudo -H pip install av
"""
import sys
from pathlib import Path
from demo_opts import get_device
import PIL
try:
import av
except ImportError:
print("The pyav library could not be found. Install it using 'sudo -H pip install av'.")
sys.exit()
def main():
video_path = str(Path(__file__).resolve().parent.joinpath('images', 'movie.mp4'))
print(f'Loading {video_path}...')
clip = av.open(video_path)
for frame in clip.decode(video=0):
print(f'{frame.index} ------')
img = frame.to_image()
if img.width != device.width or img.height != device.height:
# resize video to fit device
size = device.width, device.height
img = img.resize(size, PIL.Image.LANCZOS)
device.display(img.convert(device.mode))
if __name__ == "__main__":
try:
device = get_device()
main()
except KeyboardInterrupt:
pass
|
25ec5ca1a8c5f56cf4b14db40fcd0fdf042c0802
|
8188f026dcfa3ca6c4e2d58e6c56d04d24e37a18
|
/projectq/setups/decompositions/rz2rx_test.py
|
7418c5a1d52b66b82c5b2d8eb136ffabac47342a
|
[
"Apache-2.0"
] |
permissive
|
ProjectQ-Framework/ProjectQ
|
2e342da0622d4b5d513c15504556e95d3d0e2aea
|
67c660ca18725d23ab0b261a45e34873b6a58d03
|
refs/heads/develop
| 2023-09-04T02:18:25.581119
| 2023-03-09T16:03:57
| 2023-03-09T16:03:57
| 77,520,796
| 886
| 335
|
Apache-2.0
| 2023-07-24T07:07:15
| 2016-12-28T09:31:53
|
Python
|
UTF-8
|
Python
| false
| false
| 4,542
|
py
|
rz2rx_test.py
|
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Tests for projectq.setups.decompositions.rz2rx.py"
import math
import numpy as np
import pytest
from projectq import MainEngine
from projectq.backends import Simulator
from projectq.cengines import (
AutoReplacer,
DecompositionRuleSet,
DummyEngine,
InstructionFilter,
)
from projectq.meta import Control
from projectq.ops import Measure, Rz
from . import rz2rx
def test_recognize_correct_gates():
"""Test that recognize_RzNoCtrl recognizes ctrl qubits"""
saving_backend = DummyEngine(save_commands=True)
eng = MainEngine(backend=saving_backend)
qubit = eng.allocate_qubit()
ctrl_qubit = eng.allocate_qubit()
eng.flush()
Rz(0.3) | qubit
with Control(eng, ctrl_qubit):
Rz(0.4) | qubit
eng.flush(deallocate_qubits=True)
assert rz2rx._recognize_RzNoCtrl(saving_backend.received_commands[3])
assert not rz2rx._recognize_RzNoCtrl(saving_backend.received_commands[4])
def rz_decomp_gates(eng, cmd):
"""Test that cmd.gate is the gate Rz"""
g = cmd.gate
if isinstance(g, Rz):
return False
else:
return True
# ------------test_decomposition function-------------#
# Creates two engines, correct_eng and test_eng.
# correct_eng implements Rz(angle) gate.
# test_eng implements the decomposition of the Rz(angle) gate.
# correct_qb and test_qb represent results of these two engines, respectively.
#
# The decomposition only needs to produce the same state in a qubit up to a
# global phase.
# test_vector and correct_vector represent the final wave states of correct_qb
# and test_qb.
#
# The dot product of correct_vector and test_vector should have absolute value
# 1, if the two vectors are the same up to a global phase.
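# A minimal numeric sketch of the global-phase criterion (illustrative only,
# not part of the original tests): two state vectors that differ only by a
# global phase e^(i*phi) have an inner product of absolute value 1.
#
# a = np.array([1 / np.sqrt(2), 1j / np.sqrt(2)])
# b = np.exp(1j * 0.7) * a # same state, global phase 0.7
# abs(np.vdot(a, b)) # == 1.0 (vdot conjugates its first argument)
#
# For the basis-state inputs below only one amplitude is nonzero, so the plain
# dot product used in the test yields the same absolute value.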
@pytest.mark.parametrize("angle", [0, math.pi, 2 * math.pi, 4 * math.pi, 0.5])
def test_decomposition(angle):
"""
Test that this decomposition of Rz produces correct amplitudes
Note that this function tests each DecompositionRule in
rz2rx.all_defined_decomposition_rules
"""
decomposition_rule_list = rz2rx.all_defined_decomposition_rules
for rule in decomposition_rule_list:
for basis_state in ([1, 0], [0, 1]):
correct_dummy_eng = DummyEngine(save_commands=True)
correct_eng = MainEngine(backend=Simulator(), engine_list=[correct_dummy_eng])
rule_set = DecompositionRuleSet(rules=[rule])
test_dummy_eng = DummyEngine(save_commands=True)
test_eng = MainEngine(
backend=Simulator(),
engine_list=[
AutoReplacer(rule_set),
InstructionFilter(rz_decomp_gates),
test_dummy_eng,
],
)
correct_qb = correct_eng.allocate_qubit()
Rz(angle) | correct_qb
correct_eng.flush()
test_qb = test_eng.allocate_qubit()
Rz(angle) | test_qb
test_eng.flush()
# Create empty vectors for the wave vectors for the correct and
# test qubits
correct_vector = np.zeros((2, 1), dtype=np.complex_)
test_vector = np.zeros((2, 1), dtype=np.complex_)
i = 0
for fstate in ['0', '1']:
test = test_eng.backend.get_amplitude(fstate, test_qb)
correct = correct_eng.backend.get_amplitude(fstate, correct_qb)
correct_vector[i] = correct
test_vector[i] = test
i += 1
# Necessary to transpose vector to use matrix dot product
test_vector = test_vector.transpose()
# Remember that transposed vector should come first in product
vector_dot_product = np.dot(test_vector, correct_vector)
assert np.absolute(vector_dot_product) == pytest.approx(1, rel=1e-12, abs=1e-12)
Measure | test_qb
Measure | correct_qb
|
5a19f5133b3074b9804546bd5a969f839598ba50
|
f8215144c61ef88ed63ed536334a74abc53c5631
|
/keras_nlp/models/deberta_v3/relative_embedding.py
|
6c08424f3c8c77d5f2d0ad47895dfc1bb8bedd3e
|
[
"Apache-2.0"
] |
permissive
|
keras-team/keras-nlp
|
3906a35c64f543dc3713ed619eb5a790a6ff4a32
|
43cf146cb7670fc94f98ba88ed940f12d9848726
|
refs/heads/master
| 2023-08-16T05:12:06.003760
| 2023-08-15T22:51:58
| 2023-08-15T22:51:58
| 267,715,375
| 579
| 175
|
Apache-2.0
| 2023-09-14T19:33:47
| 2020-05-28T23:03:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,490
|
py
|
relative_embedding.py
|
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Relative embedding layer."""
from keras_nlp.backend import keras
from keras_nlp.backend import ops
class RelativeEmbedding(keras.layers.Layer):
"""Relative embedding layer.
This is an implementation of relative embedding as described in the
paper ["DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing"](https://arxiv.org/abs/2111.09543).
This layer initializes an embedding matrix (of shape
`(2 * bucket_size, hidden_dim)`) for relative position encoding. It then
applies layer normalization on the embedding matrix and returns the relative
embedding matrix.
Args:
hidden_dim: int. The size of the dense embedding.
bucket_size: int. The size of the relative position buckets.
layer_norm_epsilon: float. Epsilon value to initialize the layer
normalization layer.
kernel_initializer: string or `keras.initializers` initializer.
The kernel initializer for the dense embedding.
Defaults to `"glorot_uniform"`.
"""
def __init__(
self,
hidden_dim,
bucket_size,
layer_norm_epsilon=1e-05,
kernel_initializer="glorot_uniform",
**kwargs,
):
super().__init__(**kwargs)
self.hidden_dim = hidden_dim
self.bucket_size = bucket_size
self.layer_norm_epsilon = layer_norm_epsilon
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.rel_embeddings = self.add_weight(
shape=(self.bucket_size * 2, self.hidden_dim),
initializer=self.kernel_initializer,
name="rel_embedding",
)
self.layer_norm = keras.layers.LayerNormalization(
epsilon=layer_norm_epsilon, name="rel_embeddings_layer_norm"
)
def call(self, inputs):
batch_size = ops.shape(inputs)[0]
rel_embeddings = ops.expand_dims(
ops.convert_to_tensor(self.rel_embeddings), axis=0
)
rel_embeddings = self.layer_norm(rel_embeddings)
# Repeat `rel_embeddings` along axis = 0 `batch_size` times. The
# resultant shape is `(batch_size, bucket_size * 2, hidden_dim)`.
rel_embeddings = ops.repeat(rel_embeddings, repeats=batch_size, axis=0)
return rel_embeddings
def get_config(self):
config = super().get_config()
config.update(
{
"hidden_dim": self.hidden_dim,
"bucket_size": self.bucket_size,
"layer_norm_epsilon": self.layer_norm_epsilon,
"kernel_initializer": keras.initializers.serialize(
self.kernel_initializer
),
}
)
return config
def compute_output_shape(self, input_shape):
return (input_shape[0],) + (self.bucket_size * 2, self.hidden_dim)
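# Minimal usage sketch (illustrative; not part of the library source). The
# output shape follows from `call` above, which only reads the batch size:
#
# layer = RelativeEmbedding(hidden_dim=64, bucket_size=32)
# x = ops.zeros((2, 10, 64)) # (batch, seq, hidden)
# rel = layer(x) # shape (2, 2 * 32, 64)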
|
b695d365502a842ef2f5d5e1a2596de473b1898e
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CommonTools/TriggerUtils/test/GenericTriggerEventFlag_AlCaRecoTriggerBitsRcd_read_cfg.py
|
7acb905832b00ed86ac33b91ae7f75d8809ce9db
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,215
|
py
|
GenericTriggerEventFlag_AlCaRecoTriggerBitsRcd_read_cfg.py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process( "READ" )
process.load( "FWCore.MessageLogger.MessageLogger_cfi" )
process.MessageLogger.cerr.enable = False
process.MessageLogger.cout = cms.untracked.PSet(
INFO = cms.untracked.PSet(
reportEvery = cms.untracked.int32( 250 )
)
)
process.source = cms.Source( "EmptySource"
, numberEventsInRun = cms.untracked.uint32( 1 ) # do not change!
, firstRun = cms.untracked.uint32( 123000 )
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32( 2000 )
)
import CondCore.DBCommon.CondDBSetup_cfi
process.dbInput = cms.ESSource( "PoolDBESSource"
, CondCore.DBCommon.CondDBSetup_cfi.CondDBSetup
, connect = cms.string( 'sqlite_file:GenericTriggerEventFlag_AlCaRecoTriggerBits.db' )
, toGet = cms.VPSet(
cms.PSet(
record = cms.string( 'AlCaRecoTriggerBitsRcd' )
, tag = cms.string( 'AlCaRecoTriggerBits_v0_test' )
)
)
)
process.AlCaRecoTriggerBitsRcdRead = cms.EDAnalyzer( "AlCaRecoTriggerBitsRcdRead"
, outputType = cms.untracked.string( 'text' )
, rawFileName = cms.untracked.string( 'GenericTriggerEventFlag_AlCaRecoTriggerBits' )
)
process.p = cms.Path(
process.AlCaRecoTriggerBitsRcdRead
)
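# Illustrative note (not part of the original config): this job reads the
# AlCaRecoTriggerBits_v0_test tag from the local sqlite file configured above
# and dumps its contents as text via the AlCaRecoTriggerBitsRcdRead analyzer.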
|
1aea2338ddb5b8e4313d471e6b755087cb0c6b77
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/deskutils/bookworm/files/patch-data_scripts_mobi__lib_mobi__ncx.py
|
efc028ed0cd2b3776828b2042de37b4c6bbb9c16
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
Python
| false
| false
| 4,614
|
py
|
patch-data_scripts_mobi__lib_mobi__ncx.py
|
--- data/scripts/mobi_lib/mobi_ncx.py.orig 2021-08-16 04:25:11 UTC
+++ data/scripts/mobi_lib/mobi_ncx.py
@@ -34,8 +34,8 @@ class ncxExtract:
if self.ncxidx != 0xffffffff:
outtbl, ctoc_text = self.mi.getIndexData(self.ncxidx)
if DEBUG_NCX:
- print ctoc_text
- print outtbl
+ print(ctoc_text)
+ print(outtbl)
num = 0
for [text, tagMap] in outtbl:
tmp = {
@@ -68,16 +68,16 @@ class ncxExtract:
tmp['kind'] = ctoc_text.get(fieldvalue, 'Unknown Kind')
indx_data.append(tmp)
if DEBUG_NCX:
- print "record number: ", num
- print "name: ", tmp['name'],
- print "position", tmp['pos']," length: ", tmp['len']
- print "text: ", tmp['text']
- print "kind: ", tmp['kind']
- print "heading level: ", tmp['hlvl']
- print "parent:", tmp['parent']
- print "first child: ",tmp['child1']," last child: ", tmp['childn']
- print "pos_fid is ", tmp['pos_fid']
- print "\n\n"
+ print("record number: ", num)
+ print("name: ", tmp['name'])
+ print("position", tmp['pos']," length: ", tmp['len'])
+ print("text: ", tmp['text'])
+ print("kind: ", tmp['kind'])
+ print("heading level: ", tmp['hlvl'])
+ print("parent:", tmp['parent'])
+ print("first child: ",tmp['child1']," last child: ", tmp['childn'])
+ print("pos_fid is ", tmp['pos_fid'])
+ print("\n\n")
num += 1
num += 1
self.indx_data = indx_data
@@ -118,10 +118,10 @@ class ncxExtract:
#recursive part
def recursINDX(max_lvl=0, num=0, lvl=0, start=-1, end=-1):
if start>len(indx_data) or end>len(indx_data):
- print "Warning: missing INDX child entries", start, end, len(indx_data)
+ print("Warning: missing INDX child entries", start, end, len(indx_data))
return ''
if DEBUG_NCX:
- print "recursINDX lvl %d from %d to %d" % (lvl, start, end)
+ print("recursINDX lvl %d from %d to %d" % (lvl, start, end))
xml = ''
if start <= 0:
start = 0
@@ -155,13 +155,13 @@ class ncxExtract:
header = ncx_header % (ident, max_lvl + 1, title)
ncx = header + body + ncx_footer
if not len(indx_data) == num:
- print "Warning: different number of entries in NCX", len(indx_data), num
+ print("Warning: different number of entries in NCX", len(indx_data), num)
return ncx
def writeNCX(self, metadata):
# build the xml
self.isNCX = True
- print "Write ncx"
+ print("Write ncx")
htmlname = os.path.basename(self.files.outbase)
htmlname += '.html'
xml = self.buildNCX(htmlname, metadata['Title'][0], metadata['UniqueID'][0])
@@ -202,10 +202,10 @@ class ncxExtract:
#recursive part
def recursINDX(max_lvl=0, num=0, lvl=0, start=-1, end=-1):
if start>len(indx_data) or end>len(indx_data):
- print "Warning: missing INDX child entries", start, end, len(indx_data)
+ print("Warning: missing INDX child entries", start, end, len(indx_data))
return ''
if DEBUG_NCX:
- print "recursINDX lvl %d from %d to %d" % (lvl, start, end)
+ print("recursINDX lvl %d from %d to %d" % (lvl, start, end))
xml = ''
if start <= 0:
start = 0
@@ -244,13 +244,13 @@ class ncxExtract:
header = ncx_header % (ident, max_lvl + 1, title)
ncx = header + body + ncx_footer
if not len(indx_data) == num:
- print "Warning: different number of entries in NCX", len(indx_data), num
+ print("Warning: different number of entries in NCX", len(indx_data), num)
return ncx
def writeK8NCX(self, ncx_data, metadata):
# build the xml
self.isNCX = True
- print "Write K8 ncx"
+ print("Write K8 ncx")
xml = self.buildK8NCX(ncx_data, metadata['Title'][0], metadata['UniqueID'][0])
bname = 'toc.ncx'
ncxname = os.path.join(self.files.k8oebps,bname)
|
50c2e580bf749afe5f902565a450d2785b369141
|
a902290fb3b911676358ae4d93f83061a6c2bd0f
|
/InvenTree/plugin/samples/integration/broken_file.py
|
f56932e876c47909446faed8ae3671c6c13e1e48
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
inventree/InvenTree
|
a15e54182c9bfafdf5348cc9a66da1004e23e760
|
e88a8e99a5f0b201c67a95cba097c729f090d5e2
|
refs/heads/master
| 2023-09-03T19:32:35.438375
| 2023-08-30T00:25:40
| 2023-08-30T00:25:40
| 85,894,461
| 3,077
| 549
|
MIT
| 2023-09-14T14:21:01
| 2017-03-23T01:44:10
|
Python
|
UTF-8
|
Python
| false
| false
| 217
|
py
|
broken_file.py
|
"""Sample of a broken python file that will be ignored on import."""
from plugin import InvenTreePlugin
class BrokenFileIntegrationPlugin(InvenTreePlugin):
"""An very broken plugin."""
aaa = bb # noqa: F821
|
f3f6462f6ec8bf7102c57882a0a5263295b50b92
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/test/modules/test_fused_attention_ops.py
|
25929bb8b3b3f446320c451bd7548f109a9338bd
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 25,128
|
py
|
test_fused_attention_ops.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
import math
import itertools
import os
import oneflow as flow
def _ref(
query,
key,
value,
num_heads,
attn_mask_type="none",
attn_bias=None,
causal_diagonal_offset=0,
query_seq_len=None,
key_seq_len=None,
):
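    # Reference (unfused) attention, comment added for readability: permute
    # q/k/v from (batch, seq, heads, dim) into matmul-friendly layouts, scale
    # QK^T by 1/sqrt(dim), apply the optional causal mask, bias and per-batch
    # sequence-length masks, softmax over keys, then flatten heads back into
    # the channel dimension.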
query = query.permute(0, 2, 1, 3)
key = key.permute(0, 2, 3, 1)
value = value.permute(0, 2, 1, 3)
scores = flow.matmul(query, key) / math.sqrt(query.shape[-1])
if attn_mask_type == "causal_from_bottom_right":
causal_diagonal_offset += key.shape[-1] - query.shape[-2]
if (
attn_mask_type == "causal_from_top_left"
or attn_mask_type == "causal_from_bottom_right"
):
causal_mask = flow.triu(
flow.ones(
scores.shape[-2], scores.shape[-1], dtype=flow.bool, device="cuda"
),
causal_diagonal_offset + 1,
)
scores = flow.masked_fill(scores, causal_mask, float("-inf"))
if attn_bias is not None:
scores = scores + attn_bias
if query_seq_len is not None:
scores = flow.masked_fill(
scores,
flow.arange(scores.shape[-2], device=query_seq_len.device).view(
1, 1, scores.shape[-2], 1
)
>= query_seq_len.view(scores.shape[0], 1, 1, 1),
float("-inf"),
)
if key_seq_len is not None:
scores = flow.masked_fill(
scores,
flow.arange(scores.shape[-1], device=key_seq_len.device).view(
1, 1, 1, scores.shape[-1]
)
>= key_seq_len.view(scores.shape[0], 1, 1, 1),
float("-inf"),
)
attn = flow.softmax(scores, dim=-1)
out = flow.matmul(attn, value)
out = out.permute(0, 2, 1, 3)
out = out.reshape(out.shape[0], out.shape[1], -1)
return out
def _to_layout(ts, layout, tensor_index, seq_len=None):
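    # Comment added for readability: converts one of the stacked q/k/v tensors
    # (each stored as BMHK) into the layout named by `layout`, e.g. "BM(HK)"
    # flattens heads into the channel dim, "MB..." swaps batch and sequence,
    # "...(H3K)"/"...(H2K)" pack q,k,v or k,v together, and the "(BM)..."
    # variants drop padded positions when a per-batch `seq_len` is given.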
if layout == "BMHK":
return ts[tensor_index]
elif layout == "BM(HK)":
return ts[tensor_index].view(
ts[tensor_index].shape[0], ts[tensor_index].shape[1], -1
)
elif layout == "MB(HK)":
return (
ts[tensor_index]
.view(ts[tensor_index].shape[0], ts[tensor_index].shape[1], -1)
.transpose(0, 1)
)
elif layout == "BHMK":
return ts[tensor_index].transpose(1, 2)
elif layout == "MBHK":
return ts[tensor_index].transpose(0, 1)
elif layout == "BM(H3K)":
return flow.stack(ts, -2).view(ts[0].shape[0], ts[0].shape[1], -1)
elif layout == "MB(H3K)":
return (
flow.stack(ts, -2).view(ts[0].shape[0], ts[0].shape[1], -1).transpose(0, 1)
)
elif layout == "BM(H2K)":
return flow.stack(ts[1:], -2).view(ts[1].shape[0], ts[1].shape[1], -1)
elif layout == "MB(H2K)":
return (
flow.stack(ts[1:], -2)
.view(ts[1].shape[0], ts[1].shape[1], -1)
.transpose(0, 1)
)
elif layout == "(BM)HK":
t = ts[tensor_index]
if seq_len is None:
return t.view(-1, t.shape[-2], t.shape[-1])
mask = flow.arange(t.shape[1], device=t.device).view(
1, t.shape[1]
) < seq_len.view(t.shape[0], 1)
return flow.masked_select(
t, mask.view(mask.shape[0], mask.shape[1], 1, 1)
).view(-1, t.shape[-2], t.shape[-1])
elif layout == "(BM)(HK)":
t = ts[tensor_index]
if seq_len is None:
return t.view(-1, t.shape[-2] * t.shape[-1])
mask = flow.arange(t.shape[1], device=t.device).view(
1, t.shape[1]
) < seq_len.view(t.shape[0], 1)
return flow.masked_select(
t, mask.view(mask.shape[0], mask.shape[1], 1, 1)
).view(-1, t.shape[-2] * t.shape[-1])
elif layout == "(BM)(H2K)":
t = flow.stack(ts[1:], -2)
if seq_len is None:
return t.view(t.shape[0] * t.shape[1], -1)
mask = flow.arange(t.shape[1], device=t.device).view(
1, t.shape[1]
) < seq_len.view(t.shape[0], 1)
return flow.masked_select(
t, mask.view(mask.shape[0], mask.shape[1], 1, 1, 1)
).view(-1, t.shape[-3] * t.shape[-2] * t.shape[-1])
elif layout == "(BM)(H3K)":
t = flow.stack(ts, -2)
if seq_len is None:
return t.view(t.shape[0] * t.shape[1], -1)
mask = flow.arange(t.shape[1], device=t.device).view(
1, t.shape[1]
) < seq_len.view(t.shape[0], 1)
return flow.masked_select(
t, mask.view(mask.shape[0], mask.shape[1], 1, 1, 1)
).view(-1, t.shape[-3] * t.shape[-2] * t.shape[-1])
else:
raise NotImplementedError
def _fused_mha(
query,
key,
value,
num_heads,
attn_mask_type="none",
attn_bias=None,
causal_diagonal_offset=0,
query_layout="BM(HK)",
key_layout="BM(HK)",
value_layout="BM(HK)",
output_layout="MB(HK)",
query_seq_len=None,
key_seq_len=None,
use_kv_seq_len=False,
):
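    # Comment added for readability: drives
    # flow._C.fused_multi_head_attention_inference_v2. It converts the BMHK
    # inputs into the requested layouts, turns per-batch sequence lengths into
    # cumulative start offsets, pads the attention bias so its last dim is a
    # multiple of 8, and finally undoes the output layout.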
batch_size = query.shape[0]
query_max_seq_len = query.shape[1]
query_head_size = query.shape[-1]
key_max_seq_len = key.shape[1]
ts = [query, key, value]
query = _to_layout(ts, query_layout, 0, query_seq_len)
if use_kv_seq_len:
key = _to_layout(ts, key_layout, 1)
value = _to_layout(ts, value_layout, 2)
else:
key = _to_layout(ts, key_layout, 1, key_seq_len)
value = _to_layout(ts, value_layout, 2, key_seq_len)
if query_seq_len is not None:
query_seq_start = (
flow.cumsum(flow.pad(query_seq_len, (1, 0)), dim=-1)
.to(flow.int32)
.to(query.device)
)
else:
query_seq_start = None
query_max_seq_len = None
if key_seq_len is not None:
if use_kv_seq_len:
key_seq_start = flow.arange(
0,
key_max_seq_len * (batch_size + 1),
key_max_seq_len,
dtype=flow.int32,
device=key_seq_len.device,
)
else:
key_seq_start = (
flow.cumsum(flow.pad(key_seq_len, (1, 0)), dim=-1)
.to(flow.int32)
.to(query.device)
)
else:
key_seq_start = None
key_max_seq_len = None
if attn_bias is not None and attn_bias.shape[-1] % 8 != 0:
pad = 8 - attn_bias.shape[-1] % 8
attn_bias = flow.pad(attn_bias, (0, pad), "constant", 0)
output = flow._C.fused_multi_head_attention_inference_v2(
query=query,
key=key,
value=value,
query_head_size=query_head_size,
attn_mask_type=attn_mask_type,
attn_bias=attn_bias,
causal_diagonal_offset=causal_diagonal_offset,
query_layout=query_layout,
key_layout=key_layout,
value_layout=value_layout,
output_layout=output_layout,
query_seq_start=query_seq_start,
key_seq_start=key_seq_start,
key_seq_len=key_seq_len.to(flow.int32).to("cuda") if use_kv_seq_len else None,
query_max_seq_len=query_max_seq_len,
key_max_seq_len=key_max_seq_len,
)
if output_layout == "BM(HK)" or output_layout == "(BM)(HK)":
return output
elif output_layout == "MB(HK)":
return output.transpose(0, 1)
else:
raise NotImplementedError
def _test_fused_attention_concat_past_key_value(
test_case,
dtype,
b,
past_m,
m,
h,
k,
past_key_layout,
past_value_layout,
key_layout,
value_layout,
):
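    # Comment added for readability: checks
    # flow._C.fused_attention_concat_past_key_value against a plain flow.cat
    # along the sequence dim, across the supported past/new layouts
    # (past_m == 0 exercises the empty-cache case).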
if past_m > 0:
past_key = flow.randn((b, past_m, h, k), device="cuda", dtype=flow.float,).to(
dtype
)
past_value = flow.randn((b, past_m, h, k), device="cuda", dtype=flow.float,).to(
dtype
)
else:
past_key = None
past_value = None
key = flow.randn((b, m, h, k), device="cuda", dtype=flow.float,).to(dtype)
value = flow.randn((b, m, h, k), device="cuda", dtype=flow.float,).to(dtype)
(
fused_concated_key,
fused_concated_value,
) = flow._C.fused_attention_concat_past_key_value(
past_key=_to_layout([past_key, past_key, past_value], past_key_layout, 1),
past_key_layout=past_key_layout,
past_value=_to_layout([past_key, past_key, past_value], past_value_layout, 2),
past_value_layout=past_value_layout,
key=_to_layout([key, key, value], key_layout, 1),
key_layout=key_layout,
value=_to_layout([key, key, value], value_layout, 2),
value_layout=value_layout,
key_head_size=k,
)
if past_m > 0:
concated_key = flow.cat([past_key, key], dim=1)
concated_value = flow.cat([past_value, value], dim=1)
else:
concated_key = key
concated_value = value
ref_concated_key = _to_layout(
[concated_key, concated_key, concated_value], past_key_layout, 1
)
ref_concated_value = _to_layout(
[concated_key, concated_key, concated_value], past_value_layout, 2
)
test_case.assertTrue(
np.array_equal(fused_concated_key.numpy(), ref_concated_key.numpy())
)
test_case.assertTrue(
np.array_equal(fused_concated_value.numpy(), ref_concated_value.numpy())
)
def _test_fused_multi_head_attention_inference(
test_case,
batch_size,
num_heads,
query_seq_len,
kv_seq_len,
query_head_size,
value_head_size,
dtype,
attn_mask_type="none",
causal_diagonal_offset=0,
query_layout="BM(HK)",
key_layout="BM(HK)",
value_layout="BM(HK)",
output_layout="BM(HK)",
):
query = flow.randn(
(batch_size, query_seq_len, num_heads, query_head_size),
device="cuda",
dtype=flow.float,
).to(dtype)
key = flow.randn(
(batch_size, kv_seq_len, num_heads, query_head_size),
device="cuda",
dtype=flow.float,
).to(dtype)
value = flow.randn(
(batch_size, kv_seq_len, num_heads, value_head_size),
device="cuda",
dtype=flow.float,
).to(dtype)
fused_out = _fused_mha(
query,
key,
value,
num_heads,
attn_mask_type=attn_mask_type,
causal_diagonal_offset=causal_diagonal_offset,
query_layout=query_layout,
key_layout=key_layout,
value_layout=value_layout,
output_layout=output_layout,
).numpy()
ref_out = _ref(
query,
key,
value,
num_heads,
attn_mask_type=attn_mask_type,
causal_diagonal_offset=causal_diagonal_offset,
).numpy()
test_case.assertTrue(np.allclose(ref_out, fused_out, atol=1e-2, rtol=1e-2))
def _test_fused_multi_head_attention_inference_with_attn_bias(
test_case,
batch_size,
num_heads,
query_seq_len,
kv_seq_len,
query_head_size,
value_head_size,
dtype,
attn_mask_type="none",
):
query = flow.randn(
(batch_size, query_seq_len, num_heads, query_head_size),
device="cuda",
dtype=flow.float,
).to(dtype)
key = flow.randn(
(batch_size, kv_seq_len, num_heads, query_head_size),
device="cuda",
dtype=flow.float,
).to(dtype)
value = flow.randn(
(batch_size, kv_seq_len, num_heads, value_head_size),
device="cuda",
dtype=flow.float,
).to(dtype)
attn_bias = flow.randn((kv_seq_len,), device="cuda", dtype=flow.float).to(dtype)
ref_out = _ref(
query, key, value, num_heads, attn_bias=attn_bias, attn_mask_type=attn_mask_type
).numpy()
fused_out = _fused_mha(
query, key, value, num_heads, attn_bias=attn_bias, attn_mask_type=attn_mask_type
).numpy()
test_case.assertTrue(np.allclose(ref_out, fused_out, atol=1e-2, rtol=1e-2))
attn_bias = flow.randn(
(query_seq_len, kv_seq_len), device="cuda", dtype=flow.float
).to(dtype)
ref_out = _ref(
query, key, value, num_heads, attn_bias=attn_bias, attn_mask_type=attn_mask_type
).numpy()
fused_out = _fused_mha(
query, key, value, num_heads, attn_bias=attn_bias, attn_mask_type=attn_mask_type
).numpy()
test_case.assertTrue(np.allclose(ref_out, fused_out, atol=1e-2, rtol=1e-2))
attn_bias = flow.randn(
(num_heads, query_seq_len, kv_seq_len), device="cuda", dtype=flow.float
).to(dtype)
ref_out = _ref(
query, key, value, num_heads, attn_bias=attn_bias, attn_mask_type=attn_mask_type
).numpy()
fused_out = _fused_mha(
query, key, value, num_heads, attn_bias=attn_bias, attn_mask_type=attn_mask_type
).numpy()
test_case.assertTrue(np.allclose(ref_out, fused_out, atol=1e-2, rtol=1e-2))
attn_bias = flow.randn(
(batch_size, num_heads, query_seq_len, kv_seq_len),
device="cuda",
dtype=flow.float,
).to(dtype)
ref_out = _ref(
query, key, value, num_heads, attn_bias=attn_bias, attn_mask_type=attn_mask_type
).numpy()
fused_out = _fused_mha(
query, key, value, num_heads, attn_bias=attn_bias, attn_mask_type=attn_mask_type
).numpy()
test_case.assertTrue(np.allclose(ref_out, fused_out, atol=1e-2, rtol=1e-2))
attn_bias = flow.randn(
(num_heads, 1, kv_seq_len), device="cuda", dtype=flow.float
).to(dtype)
ref_out = _ref(
query, key, value, num_heads, attn_bias=attn_bias, attn_mask_type=attn_mask_type
).numpy()
fused_out = _fused_mha(
query, key, value, num_heads, attn_bias=attn_bias, attn_mask_type=attn_mask_type
).numpy()
test_case.assertTrue(np.allclose(ref_out, fused_out, atol=1e-2, rtol=1e-2))
def _test_fused_multi_head_attention_inference_variable_length(
test_case,
batch_size,
num_heads,
query_seq_len,
kv_seq_len,
query_head_size,
value_head_size,
dtype,
query_layout,
key_layout,
value_layout,
use_kv_seq_len,
attn_mask_type="none",
causal_diagonal_offset=0,
):
query = flow.randn(
(batch_size, query_seq_len, num_heads, query_head_size),
device="cuda",
dtype=flow.float,
).to(dtype)
key = flow.randn(
(batch_size, kv_seq_len, num_heads, query_head_size),
device="cuda",
dtype=flow.float,
).to(dtype)
value = flow.randn(
(batch_size, kv_seq_len, num_heads, value_head_size),
device="cuda",
dtype=flow.float,
).to(dtype)
query_seq_len_t = flow.randint(
low=1,
high=query.shape[1],
size=(query.shape[0],),
device="cuda",
dtype=flow.int32,
)
key_seq_len_t = flow.randint(
low=1, high=key.shape[1], size=(key.shape[0],), device="cuda", dtype=flow.int32
)
fused_out = _fused_mha(
query,
key,
value,
num_heads,
attn_mask_type=attn_mask_type,
causal_diagonal_offset=causal_diagonal_offset,
query_layout=query_layout,
key_layout=key_layout,
value_layout=value_layout,
output_layout="(BM)(HK)",
query_seq_len=query_seq_len_t,
key_seq_len=key_seq_len_t,
use_kv_seq_len=use_kv_seq_len,
)
ref_out = _ref(
query,
key,
value,
num_heads,
attn_mask_type=attn_mask_type,
causal_diagonal_offset=causal_diagonal_offset,
query_seq_len=query_seq_len_t,
key_seq_len=key_seq_len_t,
)
ref_out = ref_out.view(batch_size, query_seq_len, num_heads, value_head_size)
ref_out = _to_layout([ref_out], "(BM)HK", 0, seq_len=query_seq_len_t)
ref_out = ref_out.view(ref_out.shape[0], -1)
test_case.assertTrue(
np.allclose(ref_out.numpy(), fused_out.numpy(), atol=1e-2, rtol=1e-2)
)
@unittest.skipIf(True, "skip test")
@flow.unittest.skip_unless_1n1d()
class TestFusedMultiHeadAttentionInference(flow.unittest.TestCase):
def test_multi_head_attention_inference(test_case):
        # test_case, batch_size, num_heads, query_seq_len, kv_seq_len, query_head_size, value_head_size, dtype
_test_fused_multi_head_attention_inference(
test_case, 2, 8, 4096, 4096, 40, 40, flow.float16
)
_test_fused_multi_head_attention_inference(
test_case, 2, 8, 4096, 77, 40, 40, flow.float16
)
_test_fused_multi_head_attention_inference(
test_case, 2, 8, 1024, 1024, 80, 80, flow.float16
)
_test_fused_multi_head_attention_inference(
test_case, 2, 8, 1024, 77, 80, 80, flow.float16
)
_test_fused_multi_head_attention_inference(
test_case, 2, 8, 256, 256, 160, 160, flow.float16
)
_test_fused_multi_head_attention_inference(
test_case, 2, 8, 256, 77, 160, 160, flow.float16
)
_test_fused_multi_head_attention_inference(
test_case, 2, 8, 4096, 4096, 40, 40, flow.float
)
_test_fused_multi_head_attention_inference(
test_case, 2, 8, 4096, 77, 40, 40, flow.float
)
_test_fused_multi_head_attention_inference(
test_case, 2, 8, 1024, 1024, 80, 80, flow.float
)
_test_fused_multi_head_attention_inference(
test_case, 2, 8, 1024, 77, 80, 80, flow.float
)
_test_fused_multi_head_attention_inference(
test_case, 2, 8, 256, 256, 160, 160, flow.float
)
_test_fused_multi_head_attention_inference(
test_case, 2, 8, 256, 77, 160, 160, flow.float
)
_test_fused_multi_head_attention_inference(
test_case,
1,
8,
4,
8,
16,
16,
flow.float,
attn_mask_type="causal_from_top_left",
causal_diagonal_offset=4,
)
def test_multi_head_attention_inference_with_attn_bias(test_case):
        # test_case, batch_size, num_heads, query_seq_len, kv_seq_len, query_head_size, value_head_size, dtype
_test_fused_multi_head_attention_inference_with_attn_bias(
test_case, 2, 8, 4096, 4096, 40, 40, flow.float16
)
_test_fused_multi_head_attention_inference_with_attn_bias(
test_case, 2, 8, 4096, 4096, 40, 40, flow.float
)
_test_fused_multi_head_attention_inference_with_attn_bias(
test_case, 2, 8, 4096, 4096, 40, 40, flow.float16, "causal_from_top_left"
)
_test_fused_multi_head_attention_inference_with_attn_bias(
test_case, 2, 8, 4096, 4096, 40, 40, flow.float, "causal_from_bottom_right"
)
_test_fused_multi_head_attention_inference_with_attn_bias(
test_case, 2, 8, 4096, 80, 40, 40, flow.float16
)
_test_fused_multi_head_attention_inference_with_attn_bias(
test_case, 2, 8, 4096, 80, 40, 40, flow.float
)
_test_fused_multi_head_attention_inference_with_attn_bias(
test_case, 2, 8, 4096, 80, 40, 40, flow.float16, "causal_from_top_left"
)
_test_fused_multi_head_attention_inference_with_attn_bias(
test_case, 2, 8, 80, 4096, 40, 40, flow.float16, "causal_from_bottom_right"
)
_test_fused_multi_head_attention_inference_with_attn_bias(
test_case, 2, 8, 4096, 80, 40, 40, flow.float, "causal_from_top_left"
)
_test_fused_multi_head_attention_inference_with_attn_bias(
test_case, 2, 8, 4096, 77, 40, 40, flow.float, "causal_from_top_left"
)
def test_multi_head_attention_inference_with_layout(test_case):
layouts = [
"BM(HK)",
"BMHK",
"MBHK",
"BHMK",
"MB(HK)",
"BM(H3K)",
"BM(H2K)",
"MB(H3K)",
"MB(H2K)",
]
for query_layout, key_layout, value_layout in itertools.product(
layouts, layouts, layouts
):
if query_layout == "BM(H2K)" or query_layout == "MB(H2K)":
continue
_test_fused_multi_head_attention_inference(
test_case,
2,
8,
256,
256,
160,
160,
flow.float16,
query_layout=query_layout,
key_layout=key_layout,
value_layout=value_layout,
)
def test_multi_head_attention_inference_with_output_layout(test_case):
layouts = [
"BM(HK)",
"MB(HK)",
]
for output_layout in layouts:
_test_fused_multi_head_attention_inference(
test_case,
2,
8,
256,
256,
160,
160,
flow.float16,
output_layout=output_layout,
)
_test_fused_multi_head_attention_inference(
test_case,
1,
8,
256,
256,
160,
160,
flow.float16,
output_layout=output_layout,
)
def test_multi_head_attention_inference_variable_length(test_case):
        # test_case, batch_size, num_heads, query_seq_len, kv_seq_len, query_head_size, value_head_size, dtype
layouts = ["(BM)HK", "(BM)(HK)", "(BM)(H2K)", "(BM)(H3K)"]
for (
query_layout,
key_layout,
value_layout,
use_kv_seq_len,
) in itertools.product(layouts, layouts, layouts, (False, True)):
if query_layout == "(BM)(H2K)":
continue
_test_fused_multi_head_attention_inference_variable_length(
test_case,
2,
8,
16,
16,
40,
40,
flow.float16,
query_layout=query_layout,
key_layout=key_layout,
value_layout=value_layout,
use_kv_seq_len=use_kv_seq_len,
)
if (
query_layout == "(BM)(H3K)"
or key_layout == "(BM)(H3K)"
or value_layout == "(BM)(H3K)"
):
continue
_test_fused_multi_head_attention_inference_variable_length(
test_case,
2,
8,
16,
32,
40,
40,
flow.float16,
query_layout=query_layout,
key_layout=key_layout,
value_layout=value_layout,
use_kv_seq_len=use_kv_seq_len,
)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestFusedAttentionConcatPastKeyValue(flow.unittest.TestCase):
def test_fused_attention_concat_past_key_value(test_case):
kv_layouts = [
"BM(HK)",
"BMHK",
"MBHK",
"BHMK",
"MB(HK)",
"BM(H3K)",
# "BM(H2K)",
# "MB(H3K)",
"MB(H2K)",
]
past_layouts = [
"BM(HK)",
"BMHK",
# "MBHK",
# "BHMK",
"MB(HK)",
]
types = [flow.float16]
for (
past_key_layout,
past_value_layout,
key_layout,
value_layout,
dtype,
) in itertools.product(
past_layouts, past_layouts, kv_layouts, kv_layouts, types
):
_test_fused_attention_concat_past_key_value(
test_case,
dtype,
1,
127,
1,
40,
128,
past_key_layout=past_key_layout,
past_value_layout=past_value_layout,
key_layout=key_layout,
value_layout=value_layout,
)
_test_fused_attention_concat_past_key_value(
test_case,
flow.float,
1,
0,
1,
40,
128,
past_key_layout="BMHK",
past_value_layout="BMHK",
key_layout="BMHK",
value_layout="BMHK",
)
if __name__ == "__main__":
unittest.main()
|
af01ea02ca497177ee672bdabfba58684dc9607b
|
35b55815a7278fffbf05aedb256e84fbf9536b14
|
/modeltranslation/management/commands/update_translation_fields.py
|
751ac513b3cc56d1ae1307930f12df25eee4c2a8
|
[
"BSD-3-Clause"
] |
permissive
|
deschler/django-modeltranslation
|
1dadf1efca1ed573f0f2dbd16b2d11d6e65fbeaa
|
b1d32066ed46c9223c94053bb817beb04fec9522
|
refs/heads/master
| 2023-08-10T09:14:24.994377
| 2023-08-03T07:27:14
| 2023-08-09T06:45:53
| 6,241,977
| 1,107
| 278
|
BSD-3-Clause
| 2023-09-08T06:00:00
| 2012-10-16T09:33:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,016
|
py
|
update_translation_fields.py
|
from django.core.management.base import BaseCommand, CommandError
from django.db.models import F, ManyToManyField, Q
from modeltranslation.settings import AVAILABLE_LANGUAGES, DEFAULT_LANGUAGE
from modeltranslation.translator import translator
from modeltranslation.utils import build_localized_fieldname
COMMASPACE = ", "
class Command(BaseCommand):
help = (
'Updates empty values of translation fields using'
' values from original fields (in all translated models).'
)
def add_arguments(self, parser):
parser.add_argument(
'app_label',
nargs='?',
help='App label of an application to update empty values.',
)
parser.add_argument(
'model_name',
nargs='?',
help='Model name to update empty values of only this model.',
)
parser.add_argument(
'--language',
action='store',
help=(
                'Language of the translation field to be updated.'
                ' Defaults to the default language if not provided.'
),
)
def handle(self, *args, **options):
verbosity = options['verbosity']
if verbosity > 0:
self.stdout.write("Using default language: %s" % DEFAULT_LANGUAGE)
# get all models excluding proxy- and not managed models
models = translator.get_registered_models(abstract=False)
models = [m for m in models if not m._meta.proxy and m._meta.managed]
# optionally filter by given app_label
app_label = options['app_label']
if app_label:
models = [m for m in models if m._meta.app_label == app_label]
# optionally filter by given model_name
model_name = options['model_name']
if model_name:
model_name = model_name.lower()
models = [m for m in models if m._meta.model_name == model_name]
# optionally defining the translation field language
lang = options.get('language') or DEFAULT_LANGUAGE
if lang not in AVAILABLE_LANGUAGES:
raise CommandError(
"Cannot find language '%s'. Options are %s."
% (lang, COMMASPACE.join(AVAILABLE_LANGUAGES))
)
else:
lang = lang.replace('-', '_')
if verbosity > 0:
self.stdout.write(
"Working on models: %s"
% ', '.join(
["{app_label}.{object_name}".format(**m._meta.__dict__) for m in models]
)
)
for model in models:
if verbosity > 0:
self.stdout.write("Updating data of model '%s'" % model)
opts = translator.get_options_for_model(model)
for field_name in opts.fields.keys():
def_lang_fieldname = build_localized_fieldname(field_name, lang)
# We'll only update fields which do not have an existing value
q = Q(**{f"{def_lang_fieldname}__isnull": True})
field = model._meta.get_field(field_name)
if isinstance(field, ManyToManyField):
trans_field = getattr(model, def_lang_fieldname)
if not trans_field.through.objects.exists():
field_names = [f.name for f in trans_field.through._meta.fields]
trans_field.through.objects.bulk_create(
trans_field.through(
                                **{f: v for f, v in inst.__dict__.items() if f in field_names}
)
for inst in getattr(model, field_name).through.objects.all()
)
continue
if field.empty_strings_allowed:
q |= Q(**{def_lang_fieldname: ""})
model._default_manager.filter(q).rewrite(False).order_by().update(
**{def_lang_fieldname: F(field_name)}
)
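# Example invocation (illustrative; the app and model names are hypothetical):
#
# python manage.py update_translation_fields myapp mymodel --language de
#
# This copies values from each original field into the empty localized field
# (e.g. ``title`` -> ``title_de``) for the chosen language.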
|
a9992af589ed9d073a55851c1f4c12d9e23313fa
|
c56f70ff9e593118ada1f188e79b0f3015f91f24
|
/mesh_tensorflow/transformer/gin/models/lm_base.gin
|
a743ad4ea9aafc89fc308aa1b8268317646f0e85
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/mesh
|
44f567ec4d5fa0db58f1a41b818702603d781c64
|
fbf7b1e547e8b8cb134e81e1cd350c312c0b5a16
|
refs/heads/master
| 2023-08-15T09:18:33.205493
| 2023-05-16T13:01:43
| 2023-05-16T13:01:43
| 149,666,254
| 1,508
| 290
|
Apache-2.0
| 2023-05-16T13:01:48
| 2018-09-20T20:23:34
|
Python
|
UTF-8
|
Python
| false
| false
| 44
|
gin
|
lm_base.gin
|
# -*-Python-*-
utils.run.model_type = "lm"
|
1af0c0a9b01c8f7729a59998afeb8b2c2bf79e98
|
aef0a344e13f6a10f7145e8cd63a514adaa2f5a7
|
/example/fb2CG/fpga/tb/fpga_core/test_fpga_core.py
|
56b91ea37b68607babad12bd643bd03521d64251
|
[
"MIT"
] |
permissive
|
alexforencich/verilog-pcie
|
a0ff59662e2d9cac100295b43a9b4ad374bcd406
|
75126f133318b31f226ae13ebc46a40eb52cf3ac
|
refs/heads/master
| 2023-07-20T01:19:06.004282
| 2023-06-24T05:38:06
| 2023-06-24T05:38:06
| 164,569,208
| 765
| 223
|
MIT
| 2023-07-18T08:36:17
| 2019-01-08T05:28:51
|
Verilog
|
UTF-8
|
Python
| false
| false
| 20,544
|
py
|
test_fpga_core.py
|
"""
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
import cocotb_test.simulator
import cocotb
from cocotb.log import SimLog
from cocotb.triggers import RisingEdge, FallingEdge, Timer
from cocotbext.axi import AxiStreamBus
from cocotbext.pcie.core import RootComplex
from cocotbext.pcie.xilinx.us import UltraScalePlusPcieDevice
class TB(object):
def __init__(self, dut):
self.dut = dut
self.log = SimLog("cocotb.tb")
self.log.setLevel(logging.DEBUG)
# PCIe
self.rc = RootComplex()
self.dev = UltraScalePlusPcieDevice(
# configuration options
pcie_generation=3,
pcie_link_width=16,
user_clk_frequency=250e6,
alignment="dword",
cq_straddle=True,
cc_straddle=True,
rq_straddle=True,
rc_straddle=True,
rc_4tlp_straddle=True,
pf_count=1,
max_payload_size=1024,
enable_client_tag=True,
enable_extended_tag=True,
enable_parity=False,
enable_rx_msg_interface=False,
enable_sriov=False,
enable_extended_configuration=False,
pf0_msi_enable=False,
pf0_msi_count=1,
pf1_msi_enable=False,
pf1_msi_count=1,
pf2_msi_enable=False,
pf2_msi_count=1,
pf3_msi_enable=False,
pf3_msi_count=1,
pf0_msix_enable=True,
pf0_msix_table_size=31,
pf0_msix_table_bir=4,
pf0_msix_table_offset=0x00000000,
pf0_msix_pba_bir=4,
pf0_msix_pba_offset=0x00008000,
pf1_msix_enable=False,
pf1_msix_table_size=0,
pf1_msix_table_bir=0,
pf1_msix_table_offset=0x00000000,
pf1_msix_pba_bir=0,
pf1_msix_pba_offset=0x00000000,
pf2_msix_enable=False,
pf2_msix_table_size=0,
pf2_msix_table_bir=0,
pf2_msix_table_offset=0x00000000,
pf2_msix_pba_bir=0,
pf2_msix_pba_offset=0x00000000,
pf3_msix_enable=False,
pf3_msix_table_size=0,
pf3_msix_table_bir=0,
pf3_msix_table_offset=0x00000000,
pf3_msix_pba_bir=0,
pf3_msix_pba_offset=0x00000000,
# signals
# Clock and Reset Interface
user_clk=dut.clk,
user_reset=dut.rst,
# user_lnk_up
# sys_clk
# sys_clk_gt
# sys_reset
# phy_rdy_out
# Requester reQuest Interface
rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
# pcie_rq_tag0
# pcie_rq_tag1
# pcie_rq_tag_av
# pcie_rq_tag_vld0
# pcie_rq_tag_vld1
# Requester Completion Interface
rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),
# Completer reQuest Interface
cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
# pcie_cq_np_req
# pcie_cq_np_req_count
# Completer Completion Interface
cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),
# Transmit Flow Control Interface
# pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
# pcie_tfc_npd_av=dut.pcie_tfc_npd_av,
# Configuration Management Interface
cfg_mgmt_addr=dut.cfg_mgmt_addr,
cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
cfg_mgmt_write=dut.cfg_mgmt_write,
cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
cfg_mgmt_read=dut.cfg_mgmt_read,
cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
# cfg_mgmt_debug_access
# Configuration Status Interface
# cfg_phy_link_down
# cfg_phy_link_status
# cfg_negotiated_width
# cfg_current_speed
cfg_max_payload=dut.cfg_max_payload,
cfg_max_read_req=dut.cfg_max_read_req,
# cfg_function_status
# cfg_vf_status
# cfg_function_power_state
# cfg_vf_power_state
# cfg_link_power_state
# cfg_err_cor_out
# cfg_err_nonfatal_out
# cfg_err_fatal_out
# cfg_local_error_out
# cfg_local_error_valid
# cfg_rx_pm_state
# cfg_tx_pm_state
# cfg_ltssm_state
cfg_rcb_status=dut.cfg_rcb_status,
# cfg_obff_enable
# cfg_pl_status_change
# cfg_tph_requester_enable
# cfg_tph_st_mode
# cfg_vf_tph_requester_enable
# cfg_vf_tph_st_mode
# Configuration Received Message Interface
# cfg_msg_received
# cfg_msg_received_data
# cfg_msg_received_type
# Configuration Transmit Message Interface
# cfg_msg_transmit
# cfg_msg_transmit_type
# cfg_msg_transmit_data
# cfg_msg_transmit_done
# Configuration Flow Control Interface
cfg_fc_ph=dut.cfg_fc_ph,
cfg_fc_pd=dut.cfg_fc_pd,
cfg_fc_nph=dut.cfg_fc_nph,
cfg_fc_npd=dut.cfg_fc_npd,
cfg_fc_cplh=dut.cfg_fc_cplh,
cfg_fc_cpld=dut.cfg_fc_cpld,
cfg_fc_sel=dut.cfg_fc_sel,
# Configuration Control Interface
# cfg_hot_reset_in
# cfg_hot_reset_out
# cfg_config_space_enable
# cfg_dsn
# cfg_bus_number
# cfg_ds_port_number
# cfg_ds_bus_number
# cfg_ds_device_number
# cfg_ds_function_number
# cfg_power_state_change_ack
# cfg_power_state_change_interrupt
cfg_err_cor_in=dut.status_error_cor,
cfg_err_uncor_in=dut.status_error_uncor,
# cfg_flr_in_process
# cfg_flr_done
# cfg_vf_flr_in_process
# cfg_vf_flr_func_num
# cfg_vf_flr_done
# cfg_pm_aspm_l1_entry_reject
# cfg_pm_aspm_tx_l0s_entry_disable
# cfg_req_pm_transition_l23_ready
# cfg_link_training_enable
# Configuration Interrupt Controller Interface
# cfg_interrupt_int
# cfg_interrupt_sent
# cfg_interrupt_pending
# cfg_interrupt_msi_enable
# cfg_interrupt_msi_mmenable
# cfg_interrupt_msi_mask_update
# cfg_interrupt_msi_data
# cfg_interrupt_msi_select
# cfg_interrupt_msi_int
# cfg_interrupt_msi_pending_status
# cfg_interrupt_msi_pending_status_data_enable
# cfg_interrupt_msi_pending_status_function_num
# cfg_interrupt_msi_sent
# cfg_interrupt_msi_fail
cfg_interrupt_msix_enable=dut.cfg_interrupt_msix_enable,
cfg_interrupt_msix_mask=dut.cfg_interrupt_msix_mask,
cfg_interrupt_msix_vf_enable=dut.cfg_interrupt_msix_vf_enable,
cfg_interrupt_msix_vf_mask=dut.cfg_interrupt_msix_vf_mask,
cfg_interrupt_msix_address=dut.cfg_interrupt_msix_address,
cfg_interrupt_msix_data=dut.cfg_interrupt_msix_data,
cfg_interrupt_msix_int=dut.cfg_interrupt_msix_int,
cfg_interrupt_msix_vec_pending=dut.cfg_interrupt_msix_vec_pending,
cfg_interrupt_msix_vec_pending_status=dut.cfg_interrupt_msix_vec_pending_status,
cfg_interrupt_msix_sent=dut.cfg_interrupt_msix_sent,
cfg_interrupt_msix_fail=dut.cfg_interrupt_msix_fail,
# cfg_interrupt_msi_attr
# cfg_interrupt_msi_tph_present
# cfg_interrupt_msi_tph_type
# cfg_interrupt_msi_tph_st_tag
cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,
# Configuration Extend Interface
# cfg_ext_read_received
# cfg_ext_write_received
# cfg_ext_register_number
# cfg_ext_function_number
# cfg_ext_write_data
# cfg_ext_write_byte_enable
# cfg_ext_read_data
# cfg_ext_read_data_valid
)
# self.dev.log.setLevel(logging.DEBUG)
self.rc.make_port().connect(self.dev)
self.dev.functions[0].configure_bar(0, 2**len(dut.example_core_pcie_us_inst.core_pcie_inst.axil_ctrl_awaddr))
self.dev.functions[0].configure_bar(2, 2**len(dut.example_core_pcie_us_inst.core_pcie_inst.axi_ram_awaddr))
self.dev.functions[0].configure_bar(4, 2**len(dut.example_core_pcie_us_inst.core_pcie_inst.axil_msix_awaddr))
async def init(self):
await FallingEdge(self.dut.rst)
await Timer(100, 'ns')
await self.rc.enumerate()
dev = self.rc.find_device(self.dev.functions[0].pcie_id)
await dev.enable_device()
await dev.set_master()
await dev.alloc_irq_vectors(32, 32)
@cocotb.test()
async def run_test(dut):
tb = TB(dut)
await tb.init()
mem = tb.rc.mem_pool.alloc_region(16*1024*1024)
mem_base = mem.get_absolute_address(0)
dev = tb.rc.find_device(tb.dev.functions[0].pcie_id)
dev_pf0_bar0 = dev.bar_window[0]
dev_pf0_bar2 = dev.bar_window[2]
tb.log.info("Test memory write to BAR 2")
test_data = b'\x11\x22\x33\x44'
await dev_pf0_bar2.write(0, test_data)
await Timer(100, 'ns')
tb.log.info("Test memory read from BAR 2")
val = await dev_pf0_bar2.read(0, len(test_data), timeout=1000)
tb.log.info("Read data: %s", val)
assert val == test_data
tb.log.info("Test DMA")
# write packet data
mem[0:1024] = bytearray([x % 256 for x in range(1024)])
# enable DMA
await dev_pf0_bar0.write_dword(0x000000, 1)
# enable interrupts
await dev_pf0_bar0.write_dword(0x000008, 0x3)
# write pcie read descriptor
await dev_pf0_bar0.write_dword(0x000100, (mem_base+0x0000) & 0xffffffff)
await dev_pf0_bar0.write_dword(0x000104, (mem_base+0x0000 >> 32) & 0xffffffff)
await dev_pf0_bar0.write_dword(0x000108, 0x100)
await dev_pf0_bar0.write_dword(0x000110, 0x400)
await dev_pf0_bar0.write_dword(0x000114, 0xAA)
await Timer(2000, 'ns')
# read status
val = await dev_pf0_bar0.read_dword(0x000118)
tb.log.info("Status: 0x%x", val)
assert val == 0x800000AA
# write pcie write descriptor
await dev_pf0_bar0.write_dword(0x000200, (mem_base+0x1000) & 0xffffffff)
await dev_pf0_bar0.write_dword(0x000204, (mem_base+0x1000 >> 32) & 0xffffffff)
await dev_pf0_bar0.write_dword(0x000208, 0x100)
await dev_pf0_bar0.write_dword(0x000210, 0x400)
await dev_pf0_bar0.write_dword(0x000214, 0x55)
await Timer(2000, 'ns')
# read status
val = await dev_pf0_bar0.read_dword(0x000218)
tb.log.info("Status: 0x%x", val)
assert val == 0x80000055
tb.log.info("%s", mem.hexdump_str(0x1000, 64))
assert mem[0:1024] == mem[0x1000:0x1000+1024]
tb.log.info("Test immediate write")
# write pcie write descriptor
await dev_pf0_bar0.write_dword(0x000200, (mem_base+0x1000) & 0xffffffff)
await dev_pf0_bar0.write_dword(0x000204, (mem_base+0x1000 >> 32) & 0xffffffff)
await dev_pf0_bar0.write_dword(0x000208, 0x44332211)
await dev_pf0_bar0.write_dword(0x000210, 0x4)
await dev_pf0_bar0.write_dword(0x000214, 0x800000AA)
await Timer(2000, 'ns')
# read status
val = await dev_pf0_bar0.read_dword(0x000218)
tb.log.info("Status: 0x%x", val)
assert val == 0x800000AA
tb.log.info("%s", mem.hexdump_str(0x1000, 64))
assert mem[0x1000:0x1000+4] == b'\x11\x22\x33\x44'
tb.log.info("Test DMA block operations")
region_len = 0x2000
src_offset = 0x0000
dest_offset = 0x4000
block_size = 256
block_stride = block_size
block_count = 32
# write packet data
mem[src_offset:src_offset+region_len] = bytearray([x % 256 for x in range(region_len)])
# enable DMA
await dev_pf0_bar0.write_dword(0x000000, 1)
# disable interrupts
await dev_pf0_bar0.write_dword(0x000008, 0)
# configure operation (read)
# DMA base address
await dev_pf0_bar0.write_dword(0x001080, (mem_base+src_offset) & 0xffffffff)
await dev_pf0_bar0.write_dword(0x001084, (mem_base+src_offset >> 32) & 0xffffffff)
# DMA offset address
await dev_pf0_bar0.write_dword(0x001088, 0)
await dev_pf0_bar0.write_dword(0x00108c, 0)
# DMA offset mask
await dev_pf0_bar0.write_dword(0x001090, region_len-1)
await dev_pf0_bar0.write_dword(0x001094, 0)
# DMA stride
await dev_pf0_bar0.write_dword(0x001098, block_stride)
await dev_pf0_bar0.write_dword(0x00109c, 0)
# RAM base address
await dev_pf0_bar0.write_dword(0x0010c0, 0)
await dev_pf0_bar0.write_dword(0x0010c4, 0)
# RAM offset address
await dev_pf0_bar0.write_dword(0x0010c8, 0)
await dev_pf0_bar0.write_dword(0x0010cc, 0)
# RAM offset mask
await dev_pf0_bar0.write_dword(0x0010d0, region_len-1)
await dev_pf0_bar0.write_dword(0x0010d4, 0)
# RAM stride
await dev_pf0_bar0.write_dword(0x0010d8, block_stride)
await dev_pf0_bar0.write_dword(0x0010dc, 0)
# clear cycle count
await dev_pf0_bar0.write_dword(0x001008, 0)
await dev_pf0_bar0.write_dword(0x00100c, 0)
# block length
await dev_pf0_bar0.write_dword(0x001010, block_size)
# block count
await dev_pf0_bar0.write_dword(0x001018, block_count)
await dev_pf0_bar0.write_dword(0x00101c, 0)
# start
await dev_pf0_bar0.write_dword(0x001000, 1)
for k in range(10):
cnt = await dev_pf0_bar0.read_dword(0x001018)
await Timer(1000, 'ns')
if cnt == 0:
break
# configure operation (write)
# DMA base address
await dev_pf0_bar0.write_dword(0x001180, (mem_base+dest_offset) & 0xffffffff)
await dev_pf0_bar0.write_dword(0x001184, (mem_base+dest_offset >> 32) & 0xffffffff)
# DMA offset address
await dev_pf0_bar0.write_dword(0x001188, 0)
await dev_pf0_bar0.write_dword(0x00118c, 0)
# DMA offset mask
await dev_pf0_bar0.write_dword(0x001190, region_len-1)
await dev_pf0_bar0.write_dword(0x001194, 0)
# DMA stride
await dev_pf0_bar0.write_dword(0x001198, block_stride)
await dev_pf0_bar0.write_dword(0x00119c, 0)
# RAM base address
await dev_pf0_bar0.write_dword(0x0011c0, 0)
await dev_pf0_bar0.write_dword(0x0011c4, 0)
# RAM offset address
await dev_pf0_bar0.write_dword(0x0011c8, 0)
await dev_pf0_bar0.write_dword(0x0011cc, 0)
# RAM offset mask
await dev_pf0_bar0.write_dword(0x0011d0, region_len-1)
await dev_pf0_bar0.write_dword(0x0011d4, 0)
# RAM stride
await dev_pf0_bar0.write_dword(0x0011d8, block_stride)
await dev_pf0_bar0.write_dword(0x0011dc, 0)
# clear cycle count
await dev_pf0_bar0.write_dword(0x001108, 0)
await dev_pf0_bar0.write_dword(0x00110c, 0)
# block length
await dev_pf0_bar0.write_dword(0x001110, block_size)
# block count
await dev_pf0_bar0.write_dword(0x001118, block_count)
await dev_pf0_bar0.write_dword(0x00111c, 0)
# start
await dev_pf0_bar0.write_dword(0x001100, 1)
for k in range(10):
cnt = await dev_pf0_bar0.read_dword(0x001118)
await Timer(1000, 'ns')
if cnt == 0:
break
tb.log.info("%s", mem.hexdump_str(dest_offset, region_len))
assert mem[src_offset:src_offset+region_len] == mem[dest_offset:dest_offset+region_len]
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
pcie_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'pcie', 'rtl'))
def test_fpga_core(request):
dut = "fpga_core"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "common", "example_core_pcie_us.v"),
os.path.join(rtl_dir, "common", "example_core_pcie.v"),
os.path.join(rtl_dir, "common", "example_core.v"),
os.path.join(rtl_dir, "common", "axi_ram.v"),
os.path.join(pcie_rtl_dir, "pcie_us_if.v"),
os.path.join(pcie_rtl_dir, "pcie_us_if_rc.v"),
os.path.join(pcie_rtl_dir, "pcie_us_if_rq.v"),
os.path.join(pcie_rtl_dir, "pcie_us_if_cq.v"),
os.path.join(pcie_rtl_dir, "pcie_us_if_cc.v"),
os.path.join(pcie_rtl_dir, "pcie_us_cfg.v"),
os.path.join(pcie_rtl_dir, "pcie_axil_master.v"),
os.path.join(pcie_rtl_dir, "pcie_axi_master.v"),
os.path.join(pcie_rtl_dir, "pcie_axi_master_rd.v"),
os.path.join(pcie_rtl_dir, "pcie_axi_master_wr.v"),
os.path.join(pcie_rtl_dir, "pcie_tlp_demux_bar.v"),
os.path.join(pcie_rtl_dir, "pcie_tlp_demux.v"),
os.path.join(pcie_rtl_dir, "pcie_tlp_mux.v"),
os.path.join(pcie_rtl_dir, "pcie_tlp_fifo.v"),
os.path.join(pcie_rtl_dir, "pcie_tlp_fifo_raw.v"),
os.path.join(pcie_rtl_dir, "pcie_msix.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_wr.v"),
os.path.join(pcie_rtl_dir, "dma_psdpram.v"),
os.path.join(pcie_rtl_dir, "priority_encoder.v"),
os.path.join(pcie_rtl_dir, "pulse_merge.v"),
]
parameters = {}
parameters['AXIS_PCIE_DATA_WIDTH'] = 512
parameters['AXIS_PCIE_KEEP_WIDTH'] = parameters['AXIS_PCIE_DATA_WIDTH'] // 32
parameters['AXIS_PCIE_RQ_USER_WIDTH'] = 62 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 137
parameters['AXIS_PCIE_RC_USER_WIDTH'] = 75 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 161
parameters['AXIS_PCIE_CQ_USER_WIDTH'] = 88 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 183
parameters['AXIS_PCIE_CC_USER_WIDTH'] = 33 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 81
parameters['RC_STRADDLE'] = int(parameters['AXIS_PCIE_DATA_WIDTH'] >= 256)
parameters['RQ_STRADDLE'] = int(parameters['AXIS_PCIE_DATA_WIDTH'] >= 512)
parameters['CQ_STRADDLE'] = int(parameters['AXIS_PCIE_DATA_WIDTH'] >= 512)
parameters['CC_STRADDLE'] = int(parameters['AXIS_PCIE_DATA_WIDTH'] >= 512)
parameters['RQ_SEQ_NUM_WIDTH'] = 6
parameters['RQ_SEQ_NUM_ENABLE'] = 1
parameters['PCIE_TAG_COUNT'] = 64
parameters['BAR0_APERTURE'] = 24
parameters['BAR2_APERTURE'] = 24
parameters['BAR4_APERTURE'] = 16
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
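# Typical invocation (an assumption, not part of the original file): this test
# is discovered through pytest, e.g. "pytest -k test_fpga_core", with the
# simulator selected via the SIM environment variable used by cocotb-test.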
|
dcd9d39f3f38cd5f80f0da183def646769eb51b1
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/testapps/test_pillowtop/tests/test_sms_pillow.py
|
9f0308c1aeacd312dc9fe4fe2990ad233caae937
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,497
|
py
|
test_sms_pillow.py
|
from datetime import datetime
from unittest.mock import patch
from django.test import TestCase
from dimagi.utils.parsing import json_format_datetime
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import (
change_meta_from_kafka_message,
)
from corehq.apps.change_feed.tests.utils import get_test_kafka_consumer
from corehq.apps.change_feed.topics import get_topic_offset
from corehq.apps.es.client import manager
from corehq.apps.es.sms import SMSES, sms_adapter
from corehq.apps.es.tests.utils import es_test
from corehq.apps.sms.tests.data_generator import create_fake_sms
from corehq.pillows.sms import get_sql_sms_pillow
@patch('corehq.apps.sms.change_publishers.do_publish')
@es_test(requires=[sms_adapter])
class SqlSMSPillowTest(TestCase):
domain = 'sms-pillow-test-domain'
def _to_json(self, sms_dict, sms):
result = {
'_id': sms.couch_id,
'id': sms.pk,
'date_modified': json_format_datetime(sms.date_modified)
}
for k, v in sms_dict.items():
if k != 'couch_id':
value = json_format_datetime(v) if isinstance(v, datetime) else v
result[k] = value
return result
def test_sql_sms_pillow(self, mock_do_publish):
mock_do_publish.return_value = True
consumer = get_test_kafka_consumer(topics.SMS)
# get the seq id before the change is published
kafka_seq = get_topic_offset(topics.SMS)
# create an sms
sms_and_dict = create_fake_sms(self.domain)
self.sms = sms_and_dict.sms
sms_json = self._to_json(sms_and_dict.sms_dict, self.sms)
# test serialization
self.assertEqual(self.sms.to_json(), sms_json)
# publish the change and confirm it gets to kafka
self.sms.publish_change()
message = next(consumer)
change_meta = change_meta_from_kafka_message(message.value)
self.assertEqual(self.sms.couch_id, change_meta.document_id)
self.assertEqual(self.domain, change_meta.domain)
# send to elasticsearch
sms_pillow = get_sql_sms_pillow('SqlSMSPillow')
sms_pillow.process_changes(since=kafka_seq, forever=False)
manager.index_refresh(sms_adapter.index_name)
# confirm the change made it to Elasticsearch
results = SMSES().run()
self.assertEqual(1, results.total)
sms_doc = results.hits[0]
self.assertEqual(sms_doc, sms_json)
|
feb3f7d1bd0b479b698eebfa6bd81f836c334a5d
|
cb6c99094c81387c41aa85c80c9151f283e5307e
|
/validate_nb.py
|
8d2092b7e54528b4271395d535245d013100fb84
|
[
"Apache-2.0"
] |
permissive
|
bernhard-42/jupyter-cadquery
|
9789d2edfe47ea76fd16c28431531f7d040c280e
|
578b430af6e03ba05187e4fa22f64624b58a9cf5
|
refs/heads/master
| 2023-05-13T12:56:03.865420
| 2023-01-03T18:11:12
| 2023-01-03T18:11:12
| 180,015,957
| 266
| 41
|
Apache-2.0
| 2023-04-27T22:40:51
| 2019-04-07T19:52:57
|
Python
|
UTF-8
|
Python
| false
| false
| 181
|
py
|
validate_nb.py
|
import nbformat
import json
import sys
with open(sys.argv[1], "r") as fd:
nb = json.load(fd)
try:
nbformat.validate(nb)
print("==> OK")
except Exception:
print("==> ERROR")
|
851435578fa9abaa5deb27f88bc0a4bc67ebb963
|
67371573b8b88aa90c0623382e2e0196ca67acbd
|
/flowkit/tests/gating_strategy_reused_gates_tests.py
|
3e4f66e8a3d307ed985798e01eb76694ffa1e3f6
|
[
"BSD-3-Clause"
] |
permissive
|
whitews/FlowKit
|
3542c4fa56f1066a48b3a12d37fa2db5b9a6dd21
|
e539236c7b480582b80eeacbf47533ff12bb19de
|
refs/heads/master
| 2023-06-24T01:51:29.605400
| 2023-05-08T22:52:32
| 2023-05-08T22:52:32
| 138,655,889
| 142
| 33
|
BSD-3-Clause
| 2023-09-12T16:39:16
| 2018-06-25T22:19:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,727
|
py
|
gating_strategy_reused_gates_tests.py
|
"""
Tests for re-used gates in the GatingStrategy Class
"""
import unittest
import flowkit as fk
class GatingStrategyReusedGatesTestCase(unittest.TestCase):
def setUp(self):
"""
This TestCase tests more complex GatingStrategy use cases, particularly
the re-use of a gate in 2 different branches where the parent of each
gate is also re-used. For example:
root
╰── Gate_A
├── Gate_B
│ ╰── ReusedParent
│ ╰── ReusedChild
╰── Gate_C
╰── ReusedParent
╰── ReusedChild
:return: None
"""
self.gs = fk.GatingStrategy()
time_dim = fk.Dimension('Time', range_min=0.1, range_max=0.9)
dim_fsc_w = fk.Dimension('FSC-W')
dim_fsc_h = fk.Dimension('FSC-H')
dim_ssc_a = fk.Dimension('SSC-A')
dim_amine_a = fk.Dimension('Aqua Amine FLR-A')
dim_cd3_a = fk.Dimension('CD3 APC-H7 FLR-A')
gate_a = fk.gates.RectangleGate('Gate_A', [time_dim])
self.gs.add_gate(gate_a, ('root',))
gate_b_vertices = [
[0.328125, 0.1640625],
[0.296875, 0.1484375],
[0.30859375, 0.8515625],
[0.34765625, 0.3984375],
[0.3359375, 0.1875]
]
gate_b = fk.gates.PolygonGate(
'Gate_B', dimensions=[dim_fsc_w, dim_fsc_h], vertices=gate_b_vertices
)
self.gs.add_gate(gate_b, ('root', 'Gate_A'))
gate_c_vertices = [
[0.328125, 0.1640625],
[0.296875, 0.1484375],
[0.30859375, 0.8515625],
[0.34765625, 0.3984375],
[0.3359375, 0.1875]
]
gate_c = fk.gates.PolygonGate(
'Gate_C', dimensions=[dim_fsc_h, dim_fsc_w], vertices=gate_c_vertices
)
self.gs.add_gate(gate_c, ('root', 'Gate_A'))
reused_parent_vertices = [
[0.2629268137285685, 0.0625],
[0.24318837264468562, 0.03515625],
[0.21573453285608676, 0.0390625],
[0.29042797365869377, 0.24609375],
[0.29042797365869377, 0.1484375]
]
reused_parent_gate_1 = fk.gates.PolygonGate(
'ReusedParent', [dim_amine_a, dim_ssc_a], reused_parent_vertices
)
reused_parent_gate_2 = fk.gates.PolygonGate(
'ReusedParent', [dim_amine_a, dim_ssc_a], reused_parent_vertices
)
self.gs.add_gate(reused_parent_gate_1, ('root', 'Gate_A', 'Gate_B'))
self.gs.add_gate(reused_parent_gate_2, ('root', 'Gate_A', 'Gate_C'))
reused_child_vertices = [
[0.28415161867527605, 0.11328125],
[0.3132637699981912, 0.203125],
[0.6896802981119161, 0.05078125],
[0.5692952580886116, 0.01953125],
[0.3192472844795108, 0.01953125]
]
reused_child_gate = fk.gates.PolygonGate(
'ReusedChild', [dim_cd3_a, dim_ssc_a], reused_child_vertices
)
gate_path_1 = ('root', 'Gate_A', 'Gate_B', 'ReusedParent')
gate_path_2 = ('root', 'Gate_A', 'Gate_C', 'ReusedParent')
self.gs.add_gate(reused_child_gate, gate_path=gate_path_1)
self.gs.add_gate(reused_child_gate, gate_path=gate_path_2)
self.all_gate_ids = [
('Gate_A', ('root',)),
('Gate_B', ('root', 'Gate_A')),
('ReusedParent', ('root', 'Gate_A', 'Gate_B')),
('ReusedChild', ('root', 'Gate_A', 'Gate_B', 'ReusedParent')),
('Gate_C', ('root', 'Gate_A')),
('ReusedParent', ('root', 'Gate_A', 'Gate_C')),
('ReusedChild', ('root', 'Gate_A', 'Gate_C', 'ReusedParent'))
]
def test_gate_reuse_with_reused_parent(self):
self.assertListEqual(self.all_gate_ids, self.gs.get_gate_ids())
def test_get_gate(self):
# test getting all individual gates
for gate_item in self.all_gate_ids:
gate = self.gs.get_gate(gate_item[0], gate_item[1])
self.assertEqual(gate.gate_name, gate_item[0])
def test_get_child_gate_ids(self):
parent_gate_name = 'Gate_A'
parent_gate_path = ['root']
child_gate_names = ['Gate_B', 'Gate_C']
child_gate_ids = self.gs.get_child_gate_ids(parent_gate_name, parent_gate_path)
retrieved_gate_names = []
for gate_name, gate_path in child_gate_ids:
retrieved_gate_names.append(gate_name)
self.assertListEqual(child_gate_names, sorted(retrieved_gate_names))
def test_get_gate_fails_without_path(self):
self.assertRaises(fk.exceptions.GateReferenceError, self.gs.get_gate, 'ReusedParent')
|
d34c1376fa21dc52495215167da60a963416feda
|
5130754859e274cd06f63260439e5203c2000a11
|
/stubs/elasticsearch/client/__init__.pyi
|
ea1b2098ea2024416b24d5bbad8a8704943a1003
|
[
"Apache-2.0"
] |
permissive
|
oppia/oppia
|
8ebc9c7c7f2b336e9a79ce04533abe3956f48cbe
|
d16fdf23d790eafd63812bd7239532256e30a21d
|
refs/heads/develop
| 2023-09-04T07:50:13.661276
| 2023-09-03T09:21:32
| 2023-09-03T09:21:32
| 40,687,563
| 6,172
| 4,666
|
Apache-2.0
| 2023-09-14T18:25:11
| 2015-08-14T00:16:14
|
Python
|
UTF-8
|
Python
| false
| false
| 8,127
|
pyi
|
__init__.pyi
|
import logging
from typing import Any, MutableMapping, Optional, Type, Union, Collection
from .indices import IndicesClient
from ..transport import Transport
logger: logging.Logger
class Elasticsearch(object):
indices: IndicesClient
def __init__(
self, hosts: Any = ..., transport_class: Type[Transport] = ..., **kwargs: Any
) -> None: ...
def __repr__(self) -> str: ...
def __enter__(self) -> 'Elasticsearch': ...
def __exit__(self, *_: Any) -> None: ...
def index(
self,
index: Any,
body: Any,
doc_type: Optional[Any] = ...,
id: Optional[Any] = ...,
if_primary_term: Optional[Any] = ...,
if_seq_no: Optional[Any] = ...,
op_type: Optional[Any] = ...,
pipeline: Optional[Any] = ...,
refresh: Optional[Any] = ...,
require_alias: Optional[Any] = ...,
routing: Optional[Any] = ...,
timeout: Optional[Any] = ...,
version: Optional[Any] = ...,
version_type: Optional[Any] = ...,
wait_for_active_shards: Optional[Any] = ...,
pretty: Optional[bool] = ...,
human: Optional[bool] = ...,
error_trace: Optional[bool] = ...,
format: Optional[str] = ...,
filter_path: Optional[Union[str, Collection[str]]] = ...,
request_timeout: Optional[Union[int, float]] = ...,
ignore: Optional[Union[int, Collection[int]]] = ...,
opaque_id: Optional[str] = ...,
params: Optional[MutableMapping[str, Any]] = ...,
headers: Optional[MutableMapping[str, str]] = ...
) -> Any: ...
def exists(
self,
index: Any,
id: Any,
*,
doc_type: Optional[Any] = ...,
_source: Optional[Any] = ...,
_source_excludes: Optional[Any] = ...,
_source_includes: Optional[Any] = ...,
preference: Optional[Any] = ...,
realtime: Optional[Any] = ...,
refresh: Optional[Any] = ...,
routing: Optional[Any] = ...,
stored_fields: Optional[Any] = ...,
version: Optional[Any] = ...,
version_type: Optional[Any] = ...,
pretty: Optional[bool] = ...,
human: Optional[bool] = ...,
error_trace: Optional[bool] = ...,
format: Optional[str] = ...,
filter_path: Optional[Union[str, Collection[str]]] = ...,
request_timeout: Optional[Union[int, float]] = ...,
ignore: Optional[Union[int, Collection[int]]] = ...,
opaque_id: Optional[str] = ...,
params: Optional[MutableMapping[str, Any]] = ...,
headers: Optional[MutableMapping[str, str]] = ...
) -> bool: ...
def delete(
self,
index: Any,
id: Any,
*,
doc_type: Optional[Any] = ...,
if_primary_term: Optional[Any] = ...,
if_seq_no: Optional[Any] = ...,
refresh: Optional[Any] = ...,
routing: Optional[Any] = ...,
timeout: Optional[Any] = ...,
version: Optional[Any] = ...,
version_type: Optional[Any] = ...,
wait_for_active_shards: Optional[Any] = ...,
pretty: Optional[bool] = ...,
human: Optional[bool] = ...,
error_trace: Optional[bool] = ...,
format: Optional[str] = ...,
filter_path: Optional[Union[str, Collection[str]]] = ...,
request_timeout: Optional[Union[int, float]] = ...,
ignore: Optional[Union[int, Collection[int]]] = ...,
opaque_id: Optional[str] = ...,
params: Optional[MutableMapping[str, Any]] = ...,
headers: Optional[MutableMapping[str, str]] = ...
) -> Any: ...
def delete_by_query(
self,
index: Any,
body: Any,
doc_type: Optional[Any] = ...,
_source: Optional[Any] = ...,
_source_excludes: Optional[Any] = ...,
_source_includes: Optional[Any] = ...,
allow_no_indices: Optional[Any] = ...,
analyze_wildcard: Optional[Any] = ...,
analyzer: Optional[Any] = ...,
conflicts: Optional[Any] = ...,
default_operator: Optional[Any] = ...,
df: Optional[Any] = ...,
expand_wildcards: Optional[Any] = ...,
from_: Optional[Any] = ...,
ignore_unavailable: Optional[Any] = ...,
lenient: Optional[Any] = ...,
max_docs: Optional[Any] = ...,
preference: Optional[Any] = ...,
q: Optional[Any] = ...,
refresh: Optional[Any] = ...,
request_cache: Optional[Any] = ...,
requests_per_second: Optional[Any] = ...,
routing: Optional[Any] = ...,
scroll: Optional[Any] = ...,
scroll_size: Optional[Any] = ...,
search_timeout: Optional[Any] = ...,
search_type: Optional[Any] = ...,
size: Optional[Any] = ...,
slices: Optional[Any] = ...,
sort: Optional[Any] = ...,
stats: Optional[Any] = ...,
terminate_after: Optional[Any] = ...,
timeout: Optional[Any] = ...,
version: Optional[Any] = ...,
wait_for_active_shards: Optional[Any] = ...,
wait_for_completion: Optional[Any] = ...,
pretty: Optional[bool] = ...,
human: Optional[bool] = ...,
error_trace: Optional[bool] = ...,
format: Optional[str] = ...,
filter_path: Optional[Union[str, Collection[str]]] = ...,
request_timeout: Optional[Union[int, float]] = ...,
ignore: Optional[Union[int, Collection[int]]] = ...,
opaque_id: Optional[str] = ...,
params: Optional[MutableMapping[str, Any]] = ...,
headers: Optional[MutableMapping[str, str]] = ...
) -> Any: ...
def search(
self,
*,
body: Optional[Any] = ...,
index: Optional[Any] = ...,
doc_type: Optional[Any] = ...,
_source: Optional[Any] = ...,
_source_excludes: Optional[Any] = ...,
_source_includes: Optional[Any] = ...,
allow_no_indices: Optional[Any] = ...,
allow_partial_search_results: Optional[Any] = ...,
analyze_wildcard: Optional[Any] = ...,
analyzer: Optional[Any] = ...,
batched_reduce_size: Optional[Any] = ...,
ccs_minimize_roundtrips: Optional[Any] = ...,
default_operator: Optional[Any] = ...,
df: Optional[Any] = ...,
docvalue_fields: Optional[Any] = ...,
expand_wildcards: Optional[Any] = ...,
explain: Optional[Any] = ...,
from_: Optional[Any] = ...,
ignore_throttled: Optional[Any] = ...,
ignore_unavailable: Optional[Any] = ...,
lenient: Optional[Any] = ...,
max_concurrent_shard_requests: Optional[Any] = ...,
pre_filter_shard_size: Optional[Any] = ...,
preference: Optional[Any] = ...,
q: Optional[Any] = ...,
request_cache: Optional[Any] = ...,
rest_total_hits_as_int: Optional[Any] = ...,
routing: Optional[Any] = ...,
scroll: Optional[Any] = ...,
search_type: Optional[Any] = ...,
seq_no_primary_term: Optional[Any] = ...,
size: Optional[Any] = ...,
sort: Optional[Any] = ...,
stats: Optional[Any] = ...,
stored_fields: Optional[Any] = ...,
suggest_field: Optional[Any] = ...,
suggest_mode: Optional[Any] = ...,
suggest_size: Optional[Any] = ...,
suggest_text: Optional[Any] = ...,
terminate_after: Optional[Any] = ...,
timeout: Optional[Any] = ...,
track_scores: Optional[Any] = ...,
track_total_hits: Optional[Any] = ...,
typed_keys: Optional[Any] = ...,
version: Optional[Any] = ...,
pretty: Optional[bool] = ...,
human: Optional[bool] = ...,
error_trace: Optional[bool] = ...,
format: Optional[str] = ...,
filter_path: Optional[Union[str, Collection[str]]] = ...,
request_timeout: Optional[Union[int, float]] = ...,
ignore: Optional[Union[int, Collection[int]]] = ...,
opaque_id: Optional[str] = ...,
params: Optional[MutableMapping[str, Any]] = ...,
headers: Optional[MutableMapping[str, str]] = ...
) -> Any: ...
|
7eb6de8ed610650206a3bd24216a52730588d75c
|
46c0e11d52c6ebcd384a5759ce0868b0350d1c94
|
/algorithms/c04_tree/__init__.py
|
40e8162f774b0a2bdf52216c1699dc29cf6f67e2
|
[
"Apache-2.0"
] |
permissive
|
yidao620c/core-algorithm
|
7835580109fe3b33d7984762cff043abea7eb3dc
|
322cb388295e7085a3772b2dcd7b83c34165c98e
|
refs/heads/master
| 2023-08-31T01:25:22.303598
| 2023-08-06T10:09:05
| 2023-08-06T10:09:05
| 13,033,144
| 869
| 346
| null | 2018-09-21T15:34:23
| 2013-09-23T10:42:19
|
Python
|
UTF-8
|
Python
| false
| false
| 59
|
py
|
__init__.py
|
# -*- encoding: utf-8 -*-
"""
Tree and heap structures.
"""
|
64c61d1402dfd48a837d713acec9a94ded14db9e
|
f8215144c61ef88ed63ed536334a74abc53c5631
|
/keras_nlp/models/xlnet/xlnet_backbone.py
|
dd6abd577327077eb2d9ebd1bae5c24b83f469f7
|
[
"Apache-2.0"
] |
permissive
|
keras-team/keras-nlp
|
3906a35c64f543dc3713ed619eb5a790a6ff4a32
|
43cf146cb7670fc94f98ba88ed940f12d9848726
|
refs/heads/master
| 2023-08-16T05:12:06.003760
| 2023-08-15T22:51:58
| 2023-08-15T22:51:58
| 267,715,375
| 579
| 175
|
Apache-2.0
| 2023-09-14T19:33:47
| 2020-05-28T23:03:54
|
Python
|
UTF-8
|
Python
| false
| false
| 7,488
|
py
|
xlnet_backbone.py
|
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XLNet backbone model."""
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.models.backbone import Backbone
from keras_nlp.models.xlnet.xlnet_content_and_query_embedding import (
ContentAndQueryEmbedding,
)
from keras_nlp.models.xlnet.xlnet_encoder import XLNetAttentionMaskLayer
from keras_nlp.models.xlnet.xlnet_encoder import XLNetEncoder
from keras_nlp.models.xlnet.xlnet_encoder import XLNetSegmentMatrixLayer
@keras_nlp_export("keras_nlp.models.XLNetBackbone")
class XLNetBackbone(Backbone):
"""XLNet encoder network.
This class implements an XLNet Transformer.
The default constructor gives a fully customizable, randomly initialized
XLNet encoder with any number of layers, heads, and embedding dimensions.
To load preset architectures and weights, use the `from_preset` constructor.
Disclaimer: Pre-trained models are provided on an "as is" basis, without
warranties or conditions of any kind.
Attributes:
vocabulary_size: int. The size of the token vocabulary.
num_layers: int. The number of transformer encoder layers.
num_heads: int, the number of heads in the
`keras.layers.TwoStreamRelativeAttention` layer.
hidden_dim: int, the size of the hidden states.
intermediate_dim: int, the hidden size of the feedforward network.
dropout: float, defaults to 0.0. The dropout value, shared by
`keras.layers.TwoStreamRelativeAttention` and the feedforward network.
activation: string or `keras.activations`, defaults to "gelu". The
activation function of the feedforward network.
kernel_initializer_range: float, defaults to 0.02. The kernel initializer
range for the dense and relative attention layers.
bias_initializer: string or `keras.initializers` initializer,
defaults to "zeros". The bias initializer for
the dense and multiheaded relative attention layers.
Call Args:
token_ids: Indices of input sequence tokens in the vocabulary of shape
`[batch_size, sequence_length]`.
segment_ids: Segment token indices to indicate first and second portions
of the inputs of shape `[batch_size, sequence_length]`.
padding_mask: Mask to avoid performing attention on padding token indices
of shape `[batch_size, sequence_length]`.
Examples:
```python
import numpy as np
from keras_nlp.models import XLNetBackbone
input_data = {
"token_ids": np.array(
[460, 5272, 1758, 4905, 9, 4, 3], shape=(1, 7),
),
"segment_ids": np.array(
[0, 0, 0, 0, 0, 0, 2], shape=(1, 7),
),
"padding_mask": np.array(
[1, 1, 1, 1, 1, 1, 1], shape=(1, 7)
),
}
# Randomly initialized XLNet encoder with a custom config
model = XLNetBackbone(
vocabulary_size=32000,
num_layers=12,
num_heads=12,
hidden_dim=768,
intermediate_dim=3072,
)
output = model(input_data)
```
"""
def __init__(
self,
vocabulary_size,
num_layers,
num_heads,
hidden_dim,
intermediate_dim,
dropout=0.0,
activation="gelu",
kernel_initializer_range=0.02,
bias_initializer="zeros",
**kwargs,
):
# Inputs
token_id_input = keras.Input(
shape=(None,), dtype="int32", name="token_ids"
)
padding_mask = keras.Input(
shape=(None,), dtype="int32", name="padding_mask"
)
segment_ids = keras.Input(
shape=(None,), dtype="int32", name="segment_ids"
)
# Content and Query Embedding
word_emb, pos_emb = ContentAndQueryEmbedding(
vocabulary_size=vocabulary_size,
hidden_dim=hidden_dim,
dropout=dropout,
name="content_query_embedding",
)(token_id_input=token_id_input)
# Apply XLNetAttentionMaskLayer and XLNetSegmentMatrixLayer Layers
# to get the processed attention masks and segment matrix.
attn_mask_content, attn_mask_query = XLNetAttentionMaskLayer(
hidden_dim=hidden_dim,
kernel_initializer_range=kernel_initializer_range,
name="encoder_block_attn_mask_layer",
)(padding_mask)
seg_mat = XLNetSegmentMatrixLayer(name="encoder_block_seg_mat_layer")(
segment_ids
)
output_content = word_emb
# Encoders
head_dim = hidden_dim // num_heads
for i in range(num_layers):
output_content, output_query = XLNetEncoder(
num_heads=num_heads,
hidden_dim=hidden_dim,
head_dim=head_dim,
intermediate_dim=intermediate_dim,
dropout=dropout,
activation=activation,
layer_norm_epsilon=1e-12,
kernel_initializer_range=kernel_initializer_range,
bias_initializer=bias_initializer,
name=f"xlnet_encoder_{i}",
)(
output_content=output_content,
attn_mask_content=attn_mask_content,
attn_mask_query=attn_mask_query,
pos_emb=pos_emb,
seg_mat=seg_mat,
)
output = keras.layers.Dropout(dropout)(output_content)
super().__init__(
inputs={
"token_ids": token_id_input,
"padding_mask": padding_mask,
"segment_ids": segment_ids,
},
outputs=output,
**kwargs,
)
# All references to `self` below this line
self.vocabulary_size = vocabulary_size
self.num_layers = num_layers
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.intermediate_dim = intermediate_dim
self.dropout = dropout
self.activation = activation
self.kernel_initializer_range = kernel_initializer_range
self.bias_initializer = bias_initializer
def get_config(self):
config = super().get_config()
config.update(
{
"vocabulary_size": self.vocabulary_size,
"num_layers": self.num_layers,
"num_heads": self.num_heads,
"hidden_dim": self.hidden_dim,
"intermediate_dim": self.intermediate_dim,
"dropout": self.dropout,
"activation": self.activation,
"kernel_initializer_range": self.kernel_initializer_range,
"bias_initializer": self.bias_initializer,
}
)
return config
@property
def token_embedding(self):
return self.get_layer("content_query_embedding").word_embed
|
2258e91bf32e362c3f92d849f3db06e2a8e2b1ec
|
1e92cc7daabe240b374d73da462878e80814171b
|
/manila/api/views/scheduler_stats.py
|
a7e3505c8ca96527e89cff80761fbc5c875e23af
|
[
"Apache-2.0"
] |
permissive
|
openstack/manila
|
e211281dd16128ac5685cd7b1a13a09d9e6456e1
|
a93a844398a11a8a85f204782fb9456f7caccdbe
|
refs/heads/master
| 2023-08-19T04:23:24.084637
| 2023-08-17T14:55:58
| 2023-08-17T14:55:58
| 24,245,238
| 178
| 141
|
Apache-2.0
| 2023-08-03T10:43:19
| 2014-09-19T20:57:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,818
|
py
|
scheduler_stats.py
|
# Copyright (c) 2014 eBay Inc.
# Copyright (c) 2015 Rushil Chugh
# Copyright (c) 2015 Clinton Knight
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from manila.api import common
class ViewBuilder(common.ViewBuilder):
"""Model scheduler-stats API responses as a python dictionary."""
_collection_name = "scheduler-stats"
def pool_summary(self, pool):
"""Summary view of a single pool."""
return {
'pool': {
'name': pool.get('name'),
'host': pool.get('host'),
'backend': pool.get('backend'),
'pool': pool.get('pool'),
}
}
def pool_detail(self, pool):
"""Detailed view of a single pool."""
return {
'pool': {
'name': pool.get('name'),
'host': pool.get('host'),
'backend': pool.get('backend'),
'pool': pool.get('pool'),
'capabilities': pool.get('capabilities'),
}
}
def pools(self, pools, detail=False):
"""View of a list of pools seen by scheduler."""
view_method = self.pool_detail if detail else self.pool_summary
return {"pools": [view_method(pool)['pool'] for pool in pools]}
|
d19f8771e497b9818d5beea73ee4359365d7deab
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/insteon/schemas.py
|
e6b22a8cbb964e1627905d2a0fe9064bbc8a139c
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 6,889
|
py
|
schemas.py
|
"""Schemas used by insteon component."""
from __future__ import annotations
from binascii import Error as HexError, unhexlify
from pyinsteon.address import Address
from pyinsteon.constants import HC_LOOKUP
import voluptuous as vol
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE,
CONF_ENTITY_ID,
CONF_HOST,
CONF_PASSWORD,
CONF_PLATFORM,
CONF_PORT,
CONF_USERNAME,
ENTITY_MATCH_ALL,
)
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_CAT,
CONF_DIM_STEPS,
CONF_HOUSECODE,
CONF_OVERRIDE,
CONF_SUBCAT,
CONF_UNITCODE,
CONF_X10,
HOUSECODES,
PORT_HUB_V1,
PORT_HUB_V2,
SRV_ALL_LINK_GROUP,
SRV_ALL_LINK_MODE,
SRV_CONTROLLER,
SRV_HOUSECODE,
SRV_LOAD_DB_RELOAD,
SRV_RESPONDER,
X10_PLATFORMS,
)
ADD_ALL_LINK_SCHEMA = vol.Schema(
{
vol.Required(SRV_ALL_LINK_GROUP): vol.Range(min=0, max=255),
vol.Required(SRV_ALL_LINK_MODE): vol.In([SRV_CONTROLLER, SRV_RESPONDER]),
}
)
DEL_ALL_LINK_SCHEMA = vol.Schema(
{vol.Required(SRV_ALL_LINK_GROUP): vol.Range(min=0, max=255)}
)
LOAD_ALDB_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): vol.Any(cv.entity_id, ENTITY_MATCH_ALL),
vol.Optional(SRV_LOAD_DB_RELOAD, default=False): cv.boolean,
}
)
PRINT_ALDB_SCHEMA = vol.Schema({vol.Required(CONF_ENTITY_ID): cv.entity_id})
X10_HOUSECODE_SCHEMA = vol.Schema({vol.Required(SRV_HOUSECODE): vol.In(HOUSECODES)})
TRIGGER_SCENE_SCHEMA = vol.Schema(
{vol.Required(SRV_ALL_LINK_GROUP): vol.Range(min=0, max=255)}
)
ADD_DEFAULT_LINKS_SCHEMA = vol.Schema({vol.Required(CONF_ENTITY_ID): cv.entity_id})
def normalize_byte_entry_to_int(entry: int | bytes | str):
"""Format a hex entry value."""
if isinstance(entry, int):
if entry in range(0, 256):
return entry
raise ValueError("Must be single byte")
if isinstance(entry, str):
if entry[0:2].lower() == "0x":
entry = entry[2:]
if len(entry) != 2:
raise ValueError("Not a valid hex code")
try:
entry = unhexlify(entry)
except HexError as err:
raise ValueError("Not a valid hex code") from err
return int.from_bytes(entry, byteorder="big")
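# Illustrative behaviour of normalize_byte_entry_to_int (values below are
# assumptions for the example, not part of the original module):
#   normalize_byte_entry_to_int(0x49)   -> 73
#   normalize_byte_entry_to_int("0x49") -> 73
#   normalize_byte_entry_to_int("49")   -> 73
#   normalize_byte_entry_to_int(300)    -> raises ValueError ("Must be single byte")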
def add_device_override(config_data, new_override):
"""Add a new device override."""
try:
address = str(Address(new_override[CONF_ADDRESS]))
cat = normalize_byte_entry_to_int(new_override[CONF_CAT])
subcat = normalize_byte_entry_to_int(new_override[CONF_SUBCAT])
except ValueError as err:
raise ValueError("Incorrect values") from err
overrides = []
for override in config_data.get(CONF_OVERRIDE, []):
if override[CONF_ADDRESS] != address:
overrides.append(override)
curr_override = {}
curr_override[CONF_ADDRESS] = address
curr_override[CONF_CAT] = cat
curr_override[CONF_SUBCAT] = subcat
overrides.append(curr_override)
new_config = {}
if config_data.get(CONF_X10):
new_config[CONF_X10] = config_data[CONF_X10]
new_config[CONF_OVERRIDE] = overrides
return new_config
def add_x10_device(config_data, new_x10):
"""Add a new X10 device to X10 device list."""
x10_devices = []
for x10_device in config_data.get(CONF_X10, []):
if (
x10_device[CONF_HOUSECODE] != new_x10[CONF_HOUSECODE]
or x10_device[CONF_UNITCODE] != new_x10[CONF_UNITCODE]
):
x10_devices.append(x10_device)
curr_device = {}
curr_device[CONF_HOUSECODE] = new_x10[CONF_HOUSECODE]
curr_device[CONF_UNITCODE] = new_x10[CONF_UNITCODE]
curr_device[CONF_PLATFORM] = new_x10[CONF_PLATFORM]
curr_device[CONF_DIM_STEPS] = new_x10[CONF_DIM_STEPS]
x10_devices.append(curr_device)
new_config = {}
if config_data.get(CONF_OVERRIDE):
new_config[CONF_OVERRIDE] = config_data[CONF_OVERRIDE]
new_config[CONF_X10] = x10_devices
return new_config
def build_device_override_schema(
address=vol.UNDEFINED,
cat=vol.UNDEFINED,
subcat=vol.UNDEFINED,
firmware=vol.UNDEFINED,
):
"""Build the device override schema for config flow."""
return vol.Schema(
{
vol.Required(CONF_ADDRESS, default=address): str,
vol.Optional(CONF_CAT, default=cat): str,
vol.Optional(CONF_SUBCAT, default=subcat): str,
}
)
def build_x10_schema(
housecode=vol.UNDEFINED,
unitcode=vol.UNDEFINED,
platform=vol.UNDEFINED,
dim_steps=22,
):
"""Build the X10 schema for config flow."""
return vol.Schema(
{
vol.Required(CONF_HOUSECODE, default=housecode): vol.In(HC_LOOKUP.keys()),
vol.Required(CONF_UNITCODE, default=unitcode): vol.In(range(1, 17)),
vol.Required(CONF_PLATFORM, default=platform): vol.In(X10_PLATFORMS),
vol.Optional(CONF_DIM_STEPS, default=dim_steps): vol.In(range(1, 255)),
}
)
def _find_likely_port(ports):
"""Return the most likely USB port for a PLM."""
test_strings = ["FTDI", "0403:6001", "10BF:"]
for port, name in ports.items():
for test_string in test_strings:
if test_string in name:
return port
return vol.UNDEFINED
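# Illustrative behaviour of _find_likely_port (the port names are assumptions
# for the example, not part of the original module):
#   _find_likely_port({"/dev/ttyUSB0": "FTDI USB Serial", "/dev/ttyS0": "PCI Serial"})
#   -> "/dev/ttyUSB0"
# If no port name matches any test string, vol.UNDEFINED is returned so the
# config flow falls back to an empty default.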
def build_plm_schema(ports: dict[str, str], device=vol.UNDEFINED):
"""Build the PLM schema for config flow."""
if not device or device == vol.UNDEFINED:
device = _find_likely_port(ports)
return vol.Schema({vol.Required(CONF_DEVICE, default=device): vol.In(ports)})
def build_hub_schema(
hub_version,
host=vol.UNDEFINED,
port=vol.UNDEFINED,
username=vol.UNDEFINED,
password=vol.UNDEFINED,
):
"""Build the Hub schema for config flow."""
if port == vol.UNDEFINED:
port = PORT_HUB_V2 if hub_version == 2 else PORT_HUB_V1
schema = {
vol.Required(CONF_HOST, default=host): str,
vol.Required(CONF_PORT, default=port): int,
}
if hub_version == 2:
schema[vol.Required(CONF_USERNAME, default=username)] = str
schema[vol.Required(CONF_PASSWORD, default=password)] = str
return vol.Schema(schema)
def build_remove_override_schema(data):
"""Build the schema to remove device overrides in config flow options."""
selection = []
for override in data:
selection.append(override[CONF_ADDRESS])
return vol.Schema({vol.Required(CONF_ADDRESS): vol.In(selection)})
def build_remove_x10_schema(data):
"""Build the schema to remove an X10 device in config flow options."""
selection = []
for device in data:
housecode = device[CONF_HOUSECODE].upper()
unitcode = device[CONF_UNITCODE]
selection.append(f"Housecode: {housecode}, Unitcode: {unitcode}")
return vol.Schema({vol.Required(CONF_DEVICE): vol.In(selection)})
|
aeb814723079525aa01d216b4d57b8461609c955
|
1095cfe2e29ddf4e4c5e12d713bd12f45c9b6f7d
|
/src/python/m5/util/fdthelper.py
|
136936c51278e941e4cf893c30b2ce05382855f5
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
gem5/gem5
|
9ec715ae036c2e08807b5919f114e1d38d189bce
|
48a40cf2f5182a82de360b7efa497d82e06b1631
|
refs/heads/stable
| 2023-09-03T15:56:25.819189
| 2023-08-31T05:53:03
| 2023-08-31T05:53:03
| 27,425,638
| 1,185
| 1,177
|
BSD-3-Clause
| 2023-09-14T08:29:31
| 2014-12-02T09:46:00
|
C++
|
UTF-8
|
Python
| false
| false
| 10,684
|
py
|
fdthelper.py
|
# Copyright (c) 2016,2019 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Glenn Bergmans
from m5.ext.pyfdt import pyfdt
import re
import os
from m5.SimObject import SimObject
from m5.util import fatal
class FdtProperty(pyfdt.FdtProperty):
"""Create a property without values."""
pass
class FdtPropertyWords(pyfdt.FdtPropertyWords):
"""Create a property with word (32-bit unsigned) values."""
def __init__(self, name, words):
if type(words) != list:
words = [words]
# Make sure all values are ints (use automatic base detection if the
# type is str)
words = [int(w, base=0) if type(w) == str else int(w) for w in words]
super().__init__(name, words)
class FdtPropertyStrings(pyfdt.FdtPropertyStrings):
"""Create a property with string values."""
def __init__(self, name, strings):
if type(strings) == str:
strings = [strings]
strings = [
str(string) for string in strings
] # Make all values strings
super().__init__(name, strings)
class FdtPropertyBytes(pyfdt.FdtPropertyBytes):
"""Create a property with integer (8-bit signed) values."""
def __init__(self, name, values):
if type(values) != list:
values = [values]
# Make sure all values are ints (use automatic base detection if the
# type is str)
values = [
int(v, base=0) if isinstance(v, str) else int(v) for v in values
]
super().__init__(name, values)
class FdtState(object):
"""Class for maintaining state while recursively generating a flattened
device tree. The state tracks address, size and CPU address cell sizes, and
maintains a dictionary of allocated phandles."""
phandle_counter = 0
phandles = dict()
def __init__(self, **kwargs):
"""Instantiate values of this state. The state can only be initialized
once."""
self.addr_cells = kwargs.pop("addr_cells", 0)
self.size_cells = kwargs.pop("size_cells", 0)
self.cpu_cells = kwargs.pop("cpu_cells", 0)
self.interrupt_cells = kwargs.pop("interrupt_cells", 0)
def phandle(self, obj):
"""Return a unique phandle number for a key. The key can be a SimObject
or any value that is castable to a string. If the phandle doesn't exist
a new one is created, otherwise the existing one is returned."""
if isinstance(obj, SimObject):
key = str(id(obj))
else:
try:
key = str(obj)
except ValueError:
raise ValueError("Phandle keys must be castable to str")
if key not in FdtState.phandles:
FdtState.phandle_counter += 1
return FdtState.phandles.setdefault(key, FdtState.phandle_counter)
def resetPhandles(self):
FdtState.phandle_counter = 0
FdtState.phandles = dict()
def int_to_cells(self, value, cells):
"""Helper function for: generates a list of 32 bit cells from an int,
used to split up addresses in appropriate 32 bit chunks."""
value = int(value)
if (value >> (32 * cells)) != 0:
fatal("Value %d doesn't fit in %d cells" % (value, cells))
return [
(value >> 32 * (x - 1)) & 0xFFFFFFFF for x in range(cells, 0, -1)
]
def addrCells(self, addr):
"""Format an integer type according to the address_cells value of this
state."""
return self.int_to_cells(addr, self.addr_cells)
def CPUAddrCells(self, addr):
"""Format an integer type according to the cpu_cells value of this
state."""
return self.int_to_cells(addr, self.cpu_cells)
def sizeCells(self, size):
"""Format an integer type according to the size_cells value of this
state."""
return self.int_to_cells(size, self.size_cells)
def interruptCells(self, interrupt):
"""Format an integer type according to the interrupt_cells value
of this state."""
return self.int_to_cells(interrupt, self.interrupt_cells)
def addrCellsProperty(self):
"""Return an #address-cells property with the value of this state."""
return FdtPropertyWords("#address-cells", self.addr_cells)
def sizeCellsProperty(self):
"""Return an #size-cells property with the value of this state."""
return FdtPropertyWords("#size-cells", self.size_cells)
def CPUCellsProperty(self):
"""Return an #address-cells property for cpu nodes with the value
of this state."""
return FdtPropertyWords("#address-cells", self.cpu_cells)
def interruptCellsProperty(self):
"""Return an #interrupt-cells property for cpu nodes with the value
of this state."""
return FdtPropertyWords("#interrupt-cells", self.interrupt_cells)
class FdtNop(pyfdt.FdtNop):
"""Create an empty node."""
pass
class FdtNode(pyfdt.FdtNode):
def __init__(self, name, obj=None):
"""Create a new node and immediately set the phandle property, if obj
is supplied"""
super().__init__(name)
if obj != None:
self.appendPhandle(obj)
def append(self, subnodes):
"""Change the behavior of the normal append to override if a node with
the same name already exists or merge if the name exists and is a node
type. Can also take a list of subnodes, that each get appended."""
if not hasattr(subnodes, "__iter__"):
subnodes = [subnodes]
for subnode in subnodes:
try:
if not issubclass(type(subnode), pyfdt.FdtNop):
index = self.index(subnode.name)
item = self.pop(index)
else:
item = None
except ValueError:
item = None
if isinstance(item, pyfdt.FdtNode) and isinstance(
subnode, pyfdt.FdtNode
):
item.merge(subnode)
subnode = item
super().append(subnode)
def appendList(self, subnode_list):
"""Append all properties/nodes in the iterable."""
for subnode in subnode_list:
self.append(subnode)
def appendCompatible(self, compatible):
"""Append a compatible property with the supplied compatibility
strings."""
if isinstance(compatible, str):
compatible = [compatible]
self.append(FdtPropertyStrings("compatible", compatible))
def appendPhandle(self, obj):
"""Append a phandle property to this node with the phandle of the
supplied object."""
# Create a bogus state because we only need the Phandle dictionary
state = FdtState(addr_cells=1, size_cells=1, cpu_cells=1)
phandle = state.phandle(obj)
self.append(FdtPropertyWords("phandle", [phandle]))
class Fdt(pyfdt.Fdt):
def sortNodes(self, node):
"""Move all properties to the beginning and subnodes to the end
while maintaining the order of the subnodes. DTB files require the
properties to go before the nodes, but the PyFdt doesn't account for
defining nodes and properties in a random order."""
properties = FdtNode(node.name)
subnodes = FdtNode(node.name)
while len(node):
subnode = node.pop(0)
if issubclass(type(subnode), pyfdt.FdtNode):
subnode = self.sortNodes(subnode)
subnodes.append(subnode)
else:
properties.append(subnode)
properties.merge(subnodes)
return properties
def add_rootnode(self, rootnode, prenops=None, postnops=None):
"""First sort the device tree, so that properties are before nodes."""
rootnode = self.sortNodes(rootnode)
super().add_rootnode(rootnode, prenops, postnops)
def writeDtbFile(self, filename):
"""Convert the device tree to DTB and write to a file."""
filename = os.path.realpath(filename)
try:
with open(filename, "wb") as f:
f.write(self.to_dtb())
return filename
except IOError:
raise RuntimeError("Failed to open DTB output file")
def writeDtsFile(self, filename):
"""Convert the device tree to DTS and write to a file."""
filename = os.path.realpath(filename)
try:
with open(filename, "w") as f:
f.write(self.to_dts())
return filename
except IOError:
raise RuntimeError("Failed to open DTS output file")
|
b0b21148c077bc12a88c18e066d32fa1ca49cb44
|
ec7b8378698ed9dfc5e62b94c20524bf3aefc3c3
|
/tiatoolbox/tools/pyramid.py
|
2529a69802269ceb14102cad4af660642ec40074
|
[
"BSD-3-Clause"
] |
permissive
|
TissueImageAnalytics/tiatoolbox
|
52fe15704b396a055d9b4fccc678787ef489aed8
|
f26387f46f675a7b9a8a48c95dad26e819229f2f
|
refs/heads/develop
| 2023-08-16T15:47:19.282604
| 2023-08-14T16:50:45
| 2023-08-14T16:50:45
| 267,705,904
| 222
| 44
|
NOASSERTION
| 2023-09-14T16:57:15
| 2020-05-28T22:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 24,119
|
py
|
pyramid.py
|
"""Tile pyramid generation in standard formats.
Included methods are DeepZoom and Zoomify in addition to a generic
method.
These are generally intended for serialisation or streaming via a web
UI. The `get_tile` method returns a Pillow Image object which can be
easily serialised via the use of an io.BytesIO object or saved directly
to disk.
"""
from __future__ import annotations
import tarfile
import time
import zipfile
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, Iterator
import defusedxml
import numpy as np
from PIL import Image
from tiatoolbox import DuplicateFilter, logger
from tiatoolbox.utils.transforms import imresize, locsize2bounds
from tiatoolbox.utils.visualization import AnnotationRenderer, random_colors
if TYPE_CHECKING: # pragma: no cover
from tiatoolbox.annotation import AnnotationStore
from tiatoolbox.wsicore.wsireader import WSIMeta, WSIReader
defusedxml.defuse_stdlib()
class TilePyramidGenerator:
r"""Generic tile pyramid generator with sensible defaults.
Args:
wsi (WSIReader):
The WSI reader object. Must implement
`tiatoolbox.wsicore.wsireader.WSIReader.read_rect`.
tile_size (int):
The size of tiles to generate. Default is 256. Note that the
output tile size will be :math:`\text{tile size} + 2
\times\text{overlap}`.
downsample (int):
The downsample factor between levels. Default is 2.
overlap (int):
The number of extra pixel to add to each edge of the tile.
Default is 0.
"""
def __init__(
self: TilePyramidGenerator,
wsi: WSIReader,
tile_size: int = 256,
downsample: int = 2,
overlap: int = 0,
) -> None:
"""Initialize :class:`TilePyramidGenerator`."""
self.wsi = wsi
self.tile_size = tile_size
self.overlap = overlap
self.downsample = downsample
@property
def output_tile_size(self: TilePyramidGenerator) -> int:
r"""The size of the tile which will be returned.
This is equivalent to :math:`\text{tile size} + 2 \times \text{overlap}`.
"""
return self.tile_size + 2 * self.overlap
def level_downsample(self: TilePyramidGenerator, level: int) -> float:
"""Find the downsample factor for a level."""
return 2 ** (self.level_count - level - 1)
def level_dimensions(self: TilePyramidGenerator, level: int) -> tuple[int, int]:
"""The total pixel dimensions of the tile pyramid at a given level.
Args:
level (int):
The level to calculate the dimensions for.
"""
baseline_dims = self.wsi.info.slide_dimensions
level_dims = np.ceil(
np.divide(baseline_dims, self.level_downsample(level)),
).astype(int)
return tuple(level_dims)
def tile_grid_size(self: TilePyramidGenerator, level: int) -> tuple[int, int]:
"""Width and height of the minimal grid of tiles to cover the slide.
Args:
level (int):
The level to calculate the grid size for.
"""
if level < 0 or level >= self.level_count:
msg = "Invalid level."
raise IndexError(msg)
return tuple(
np.ceil(np.divide(self.level_dimensions(level), self.tile_size)).astype(
int,
),
)
@property
def sub_tile_level_count(self: TilePyramidGenerator) -> int:
"""The number of sub-tile levels in the pyramid."""
return 0
@property
def level_count(self: TilePyramidGenerator) -> int:
"""Number of levels in the tile pyramid.
The number of levels is such that level `level_count - 1` corresponds
1:1 to the slide baseline resolution (level 0 of the WSI).
"""
wsi_to_tile_ratio = np.divide(self.wsi.info.slide_dimensions, self.tile_size)
# Levels where a tile contains only part of the wsi
super_level_count = np.ceil(np.log2(wsi_to_tile_ratio)).max()
total_level_count = super_level_count + 1 + self.sub_tile_level_count
return int(total_level_count)
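# Worked example (illustrative; the slide size is an assumption): for a
# 100000 x 80000 pixel slide with tile_size=256, the per-axis ratio is about
# 390.6 x 312.5, ceil(log2(390.6)) == 9, so level_count == 9 + 1 == 10
# (plus any sub-tile levels).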
def get_thumb_tile(self: TilePyramidGenerator) -> Image:
"""Return a thumbnail which fits the whole slide in one tile.
The thumbnail output size has the longest edge equal to the tile
size. The other edge preserves the original aspect ratio.
"""
slide_dims = np.array(self.wsi.info.slide_dimensions)
tile_dim = self.tile_size + self.overlap
out_dims = np.round(slide_dims / slide_dims.max() * tile_dim).astype(int)
bounds = (0, 0, *slide_dims)
thumb = self.wsi.read_bounds(
bounds,
resolution=self.wsi.info.level_count - 1,
units="level",
)
thumb = imresize(thumb, output_size=out_dims)
return Image.fromarray(thumb)
def get_tile(
self: TilePyramidGenerator,
level: int,
x: int,
y: int,
res: int = 1,
pad_mode: str = "constant",
interpolation: str = "optimise",
) -> Image:
"""Get a tile at a given level and coordinate.
Note that levels are in the reverse order of those in WSIReader.
I.E. level 0 here corresponds to the lowest resolution whereas
level 0 in WSIReader corresponds to the maximum resolution
(baseline).
Args:
level (int):
The pyramid level of the tile starting from 0 (the whole
slide in one tile, 0-0-0).
x (int):
The tile index in the x direction.
y (int):
The tile index in the y direction.
res (int):
The resolution of the tile. Defaults to 1, can be set to 2 for
double resolution.
pad_mode (str):
Method for padding when reading areas outside the
input image. Default is constant (0 padding). This is
passed to `read_func` which defaults to
:func:`safe_padded_read`. See :func:`safe_padded_read`
for supported pad modes. Setting to "none" or None will
result in no padding being applied.
interpolation (str):
Interpolation mode to use. Defaults to optimise.
Possible values are: linear, cubic, lanczos, nearest,
area, optimise. Linear most closely matches OpenSlide.
Returns:
PIL.Image:
Pillow image of the tile.
Example:
>>> from tiatoolbox.tools.pyramid import TilePyramidGenerator
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open("sample.svs")
>>> tile_generator = TilePyramidGenerator(
... wsi=wsi,
... tile_size=256,
... )
>>> tile_0_0_0 = tile_generator.get_tile(level=0, x=0, y=0)
"""
if level < 0:
raise IndexError
if level > self.level_count:
msg = "Invalid level."
raise IndexError(msg)
scale = self.level_downsample(level)
baseline_x = (x * self.tile_size * scale) - (self.overlap * scale)
baseline_y = (y * self.tile_size * scale) - (self.overlap * scale)
output_size = [self.output_tile_size] * 2
coord = (int(baseline_x), int(baseline_y))
if level < self.sub_tile_level_count:
output_size = self.output_tile_size // 2 ** (
self.sub_tile_level_count - level
)
output_size = np.repeat(output_size, 2).astype(int)
thumb = self.get_thumb_tile()
thumb.thumbnail(output_size)
return thumb
slide_dimensions = np.array(self.wsi.info.slide_dimensions)
if all(slide_dimensions < [baseline_x, baseline_y]):
raise IndexError
# Don't print out multiple warnings about interpolation etc.
duplicate_filter = DuplicateFilter()
logger.addFilter(duplicate_filter)
tile = self.wsi.read_rect(
coord,
size=[v * res for v in output_size],
resolution=res / scale,
units="baseline",
pad_mode=pad_mode,
interpolation=interpolation,
)
logger.removeFilter(duplicate_filter)
return Image.fromarray(tile)
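# Worked example (illustrative): with tile_size=256 and overlap=0, the
# baseline level (level == level_count - 1) has scale == 1, so tile
# (x=3, y=2) is read from baseline coordinate (768, 512) with an output
# size of 256 x 256 pixels.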
def tile_path(self: TilePyramidGenerator, level: int, x: int, y: int) -> Path:
"""Generate the path for a specified tile.
Args:
level (int):
The pyramid level of the tile starting from 0 (the whole
slide in one tile, 0-0-0).
x (int):
The tile index in the x direction.
y (int):
The tile index in the y direction.
Returns:
pathlib.Path:
A pathlib path object with two parts.
"""
raise NotImplementedError
def dump(
self: TilePyramidGenerator,
path: str | Path,
container: str | None = None,
compression: str | None = None,
) -> None:
"""Write all tiles to disk.
Arguments:
path (str or Path):
The path to write the tiles to.
container (str):
Container to use. Defaults to None which saves to a
directory. Possible values are "zip", "tar".
compression (str):
Compression method. Defaults to None. Possible values
are None, "deflate", "gzip", "bz2", "lzma". Note that
tar does not support deflate and zip does not support
gzip.
Examples:
>>> from tiatoolbox.tools.pyramid import TilePyramidGenerator
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open("sample.svs")
>>> tile_generator = TilePyramidGenerator(
... wsi=wsi,
... tile_size=256,
... )
>>> tile_generator.dump(
... path="sample.gz.zip",
... container="zip",
... compression="gzip",
... )
"""
path = Path(path)
if container not in [None, "zip", "tar"]:
msg = "Unsupported container."
raise ValueError(msg)
if container is None:
path.mkdir(parents=False)
if compression is not None:
msg = "Unsupported compression for container None."
raise ValueError(msg)
def save_tile(tile_path: Path, tile: Image.Image) -> None:
"""Write the tile to the output directory."""
full_path = path / tile_path
full_path.parent.mkdir(parents=True, exist_ok=True)
tile.save(full_path)
elif container == "zip":
compression2enum = {
None: zipfile.ZIP_STORED,
"deflate": zipfile.ZIP_DEFLATED,
"bz2": zipfile.ZIP_BZIP2,
"lzma": zipfile.ZIP_LZMA,
}
if compression not in compression2enum:
msg = "Unsupported compression for zip."
raise ValueError(msg)
archive = zipfile.ZipFile(
path,
mode="w",
compression=compression2enum[compression],
)
def save_tile(tile_path: Path, tile: Image.Image) -> None:
"""Write the tile to the output tar."""
bio = BytesIO()
tile.save(bio, format="jpeg")
bio.seek(0)
data = bio.read()
archive.writestr(
str(tile_path),
data,
compress_type=compression2enum[compression],
)
else: # container == "tar":
compression2mode = {
None: "w",
"gzip": "w:gz",
"bz2": "w:bz2",
"lzma": "w:xz",
}
if compression not in compression2mode:
msg = "Unsupported compression for tar."
raise ValueError(msg)
archive = tarfile.TarFile.open(path, mode=compression2mode[compression])
def save_tile(tile_path: Path, tile: Image.Image) -> None:
"""Write the tile to the output zip."""
bio = BytesIO()
tile.save(bio, format="jpeg")
bio.seek(0)
tar_info = tarfile.TarInfo(name=str(tile_path))
tar_info.mtime = time.time()
tar_info.size = bio.tell()
archive.addfile(tarinfo=tar_info, fileobj=bio)
for level in range(self.level_count):
for x, y in np.ndindex(self.tile_grid_size(level)):
tile = self.get_tile(level=level, x=x, y=y)
tile_path = self.tile_path(level, x, y)
save_tile(tile_path, tile)
if container is not None:
archive.close()
def __len__(self: TilePyramidGenerator) -> int:
"""Return length of instance attributes."""
return sum(
np.prod(self.tile_grid_size(level)) for level in range(self.level_count)
)
def __iter__(self: TilePyramidGenerator) -> Iterator:
"""Return an iterator for the given object."""
for level in range(self.level_count):
for x, y in np.ndindex(self.tile_grid_size(level)):
yield self.get_tile(level=level, x=x, y=y)
class ZoomifyGenerator(TilePyramidGenerator):
r"""Pyramid tile generator with extra Zoomify specific methods.
Zoomify splits tiles into groups of 256 (due to old file system
limitations). The extra `tile_group` method here is for calculating
these tile groups when generating tile paths.
An old description of the Zoomify format can be found `here`_.
.. _here:
https://ecommons.cornell.edu/bitstream/handle/1813/5410/Introducing_Zoomify_Image.pdf
Args:
wsi (WSIReader):
The WSI reader object. Must implement
`tiatoolbox.wsicore.wsireader.WSIReader.read_rect`.
tile_size (int):
The size of tiles to generate. Default is 256. Note that the
output tile size will be :math:`\text{tile size} + 2
\times\text{overlap}`.
downsample (int):
The downsample factor between levels. Default is 2.
overlap (int):
The number of extra pixel to add to each edge of the tile.
Default is 0.
"""
def tile_group(self: ZoomifyGenerator, level: int, x: int, y: int) -> int:
"""Find the tile group for a tile index.
Tile groups are numbered from level 0 (tile 0-0-0) and increment
every 256 tiles in ZXY axis order.
Args:
level (int):
The pyramid level of the tile starting from 0 (the whole
slide in one tile, 0-0-0).
x (int):
The tile index in the x direction.
y (int):
The tile index in the y direction.
Raises:
IndexError:
If the level, x, y tile index is out of bounds.
Returns:
int:
The tile group for the specified tile.
"""
grid_size = np.array(self.tile_grid_size(level))
if any(grid_size <= [x, y]):
raise IndexError
cumulative_sum = sum(np.prod(self.tile_grid_size(n)) for n in range(level))
index_in_level = np.ravel_multi_index((y, x), self.tile_grid_size(level)[::-1])
tile_index = cumulative_sum + index_in_level
return tile_index // 256 # the tile group
def tile_path(self: ZoomifyGenerator, level: int, x: int, y: int) -> Path:
"""Generate the Zoomify path for a specified tile.
Args:
level (int):
The pyramid level of the tile starting from 0 (the whole
slide in one tile, 0-0-0).
x (int):
The tile index in the x direction.
y (int):
The tile index in the y direction.
Returns:
pathlib.Path:
A pathlib path object with two parts.
"""
g = self.tile_group(level, x, y)
z = level
return Path(f"TileGroup{g}") / f"{z}-{x}-{y}.jpg"
class AnnotationTileGenerator(ZoomifyGenerator):
r"""Define AnnotationTileGenerator for rendering AnnotationStore.
Tile generator using an AnnotationRenderer to render tiles
showing annotations held in an AnnotationStore.
Args:
info (WSIMeta):
    A WSIMeta object storing the metadata of the slide for which this
    generator renders tiles.
store (AnnotationStore):
    An AnnotationStore object containing the annotations to be rendered
    for the given slide.
renderer (AnnotationRenderer):
    An AnnotationRenderer object which renders the annotations belonging
    to a tile according to the specified parameters.
tile_size (int):
The size of tiles to generate. Default is 256. Note that the
output tile size will be :math:`\text{tile size} + 2
\times\text{overlap}`.
downsample (int):
The downsample factor between levels. Default is 2.
overlap (int):
The number of extra pixels to add to each edge of the tile.
Default is 0.
"""
def __init__(
self: AnnotationTileGenerator,
info: WSIMeta,
store: AnnotationStore,
renderer: AnnotationRenderer | None = None,
tile_size: int = 256,
downsample: int = 2,
overlap: int = 0,
) -> None:
"""Initialize :class:`AnnotationTileGenerator`."""
super().__init__(None, tile_size, downsample, overlap)
self.info = info
self.store = store
if renderer is None:
renderer = AnnotationRenderer()
self.renderer = renderer
# if using blur, render overlapping tiles to minimise edge effects.
# factor of 1.5 below was chosen empirically as a good balance
# between visual quality and added rendering time.
self.overlap = int(1.5 * renderer.blur_radius)
output_size = [self.output_tile_size] * 2
self.empty_img = Image.fromarray(
np.zeros((output_size[0], output_size[1], 4), dtype=np.uint8),
)
if self.renderer.mapper == "categorical":
# get the possible categories for given score_prop from store
types = self.store.pquery(f"props[{self.renderer.score_prop!r}]")
# make a random dictionary colour map
colors = random_colors(len(types), bright=True)
mapper = {key: (*color, 1) for key, color in zip(types, colors)}
self.renderer.mapper = lambda x: mapper[x]
def get_thumb_tile(self: AnnotationTileGenerator) -> Image:
"""Return a thumbnail which fits the whole slide in one tile.
The thumbnail output size has the longest edge equal to the tile
size. The other edge preserves the original aspect ratio.
"""
slide_dims = np.array(self.info.slide_dimensions)
scale = self.level_downsample(self.level_count - 1)
bounds = (0, 0, *slide_dims)
thumb = self.renderer.render_annotations(self.store, bounds, scale)
return Image.fromarray(thumb)
def level_dimensions(self: AnnotationTileGenerator, level: int) -> tuple[int, int]:
"""The total pixel dimensions of the tile pyramid at a given level.
Args:
level (int):
The level to calculate the dimensions for.
"""
baseline_dims = self.info.slide_dimensions
level_dims = np.ceil(
np.divide(baseline_dims, self.level_downsample(level)),
).astype(int)
return tuple(level_dims)
@property
def level_count(self: AnnotationTileGenerator) -> int:
"""Number of levels in the tile pyramid.
The number of levels is such that level ``level_count - 1`` renders at
a 1:1 scale with the slide baseline resolution (level 0 of the WSI).
"""
wsi_to_tile_ratio = np.divide(self.info.slide_dimensions, self.tile_size)
# Levels where a tile contains only part of the wsi
super_level_count = np.ceil(np.log2(wsi_to_tile_ratio)).max()
total_level_count = super_level_count + 1 + self.sub_tile_level_count
return int(total_level_count)
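# Illustrative arithmetic: a 100,000 x 80,000 slide with tile_size 256
# needs ceil(log2(100000 / 256)) = 9 halvings before one tile covers the
# slide, so level_count = 9 + 1 + sub_tile_level_count.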
def get_tile(
self: AnnotationTileGenerator,
level: int,
x: int,
y: int,
res: int = 1,
pad_mode: str | None = None,
interpolation: str | None = None,
) -> Image:
"""Render a tile at a given level and coordinate.
Note that levels are in the reverse order of those in WSIReader,
i.e. level 0 here corresponds to the lowest resolution, whereas
level 0 in WSIReader corresponds to the maximum resolution
(baseline).
Args:
level (int):
The pyramid level of the tile starting from 0 (the whole
slide in one tile, 0-0-0).
x (int):
The tile index in the x direction.
y (int):
The tile index in the y direction.
res (int):
The resolution of the tile. Defaults to 1, can be set to 2 for
double resolution.
pad_mode (str):
    Unused by AnnotationTileGenerator; accepted only for API
    compatibility with TilePyramidGenerator.
interpolation (str):
    Unused by AnnotationTileGenerator; accepted only for API
    compatibility with TilePyramidGenerator.
Returns:
PIL.Image:
Pillow image of the tile.
Example:
>>> from tiatoolbox.tools.pyramid import AnnotationTileGenerator
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> from tiatoolbox.annotation.storage import SQLiteStore
>>> wsi = WSIReader.open("sample.svs")
>>> SQ = SQLiteStore.from_geojson(geo_path)
>>> tile_generator = AnnotationTileGenerator(
... info=wsi.info,
... store=SQ,
... )
>>> tile_0_0_0 = tile_generator.get_tile(level=0, x=0, y=0)
"""
if pad_mode is not None or interpolation is not None:
logger.warning(
"interpolation, pad_mode are unused by AnnotationTileGenerator",
stacklevel=2,
)
if level < 0:
    msg = "Level must be non-negative."
    raise IndexError(msg)
if level >= self.level_count:  # valid levels are 0 .. level_count - 1
    msg = "Invalid level."
    raise IndexError(msg)
scale = self.level_downsample(level)
baseline_x = (x * self.tile_size * scale) - (self.overlap * scale)
baseline_y = (y * self.tile_size * scale) - (self.overlap * scale)
coord = [baseline_x, baseline_y]
if level < self.sub_tile_level_count:
output_size = self.output_tile_size // 2 ** (
self.sub_tile_level_count - level
)
output_size = np.repeat(output_size, 2).astype(int)
thumb = self.get_thumb_tile()
thumb.thumbnail(output_size)
return thumb
slide_dimensions = np.array(self.info.slide_dimensions)
if all(slide_dimensions < [baseline_x, baseline_y]):
    msg = "Tile coordinates fall outside the slide."
    raise IndexError(msg)
bounds = locsize2bounds(coord, [self.output_tile_size * scale] * 2)
tile = self.renderer.render_annotations(
self.store,
bounds,
scale,
res,
self.overlap,
)
return Image.fromarray(tile)
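# Minimal usage sketch (illustrative): build a Zoomify pyramid generator
# for a slide and render its lowest-resolution tile. Assumes tiatoolbox is
# installed and that a file named "sample.svs" exists.
if __name__ == "__main__":
    from tiatoolbox.wsicore.wsireader import WSIReader

    _wsi = WSIReader.open("sample.svs")
    _gen = ZoomifyGenerator(_wsi, tile_size=256)
    # Level 0 is the whole slide in a single tile (see get_tile docstring).
    _thumb = _gen.get_tile(level=0, x=0, y=0)
    _thumb.save("thumb.png")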
|
bd8021964bc08f806bb9a002590af41adc0e8b4b
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Learning_TF_Hope/05__text_and_visualizations/BasicRNNCell.py
|
a9101d0c646719453a491b25488b1df5b5892de4
|
[
"MIT"
] |
permissive
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,548
|
py
|
BasicRNNCell.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 20 17:34:43 2016
@author: tomhope
"""
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
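# Each 28x28 MNIST image is treated as a sequence: 28 time steps,
# each a row of 28 pixels.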
element_size = 28
time_steps = 28
num_classes = 10
batch_size = 128
hidden_layer_size = 128
_inputs = tf.placeholder(tf.float32,
                         shape=[None, time_steps, element_size],
                         name='inputs')
y = tf.placeholder(tf.float32, shape=[None, num_classes], name='labels')
# TensorFlow built-in RNN functions
rnn_cell = tf.contrib.rnn.BasicRNNCell(hidden_layer_size)
outputs, _ = tf.nn.dynamic_rnn(rnn_cell, _inputs, dtype=tf.float32)
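# outputs has shape (batch_size, time_steps, hidden_layer_size); only the
# final time step is used for classification below.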
Wl = tf.Variable(tf.truncated_normal([hidden_layer_size, num_classes],
                                     mean=0, stddev=.01))
bl = tf.Variable(tf.truncated_normal([num_classes], mean=0, stddev=.01))
def get_linear_layer(vector):
return tf.matmul(vector, Wl) + bl
last_rnn_output = outputs[:,-1,:]
final_output = get_linear_layer(last_rnn_output)
softmax = tf.nn.softmax_cross_entropy_with_logits(logits=final_output, labels=y)
cross_entropy = tf.reduce_mean(softmax)
train_step = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(final_output,1))
accuracy = (tf.reduce_mean(tf.cast(correct_prediction, tf.float32)))*100
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
test_data = mnist.test.images[:batch_size].reshape((-1, time_steps, element_size))
test_label = mnist.test.labels[:batch_size]
for i in range(3001):
batch_x, batch_y = mnist.train.next_batch(batch_size)
batch_x = batch_x.reshape((batch_size, time_steps, element_size))
    sess.run(train_step, feed_dict={_inputs: batch_x, y: batch_y})
if i % 1000 == 0:
        acc = sess.run(accuracy, feed_dict={_inputs: batch_x, y: batch_y})
        loss = sess.run(cross_entropy, feed_dict={_inputs: batch_x, y: batch_y})
print ("Iter " + str(i) + ", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc))
print ("Testing Accuracy:", \
sess.run(accuracy, feed_dict={_inputs: test_data, y: test_label}))
|
cb83cb0203e315352280e6f890fd1860f3adfd03
|
167c6226bc77c5daaedab007dfdad4377f588ef4
|
/python/ql/test/library-tests/frameworks/stdlib/pep249.py
|
6f601f5b6c28c206ec685bea0679285b78210954
|
[
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] |
permissive
|
github/codeql
|
1eebb449a34f774db9e881b52cb8f7a1b1a53612
|
d109637e2d7ab3b819812eb960c05cb31d9d2168
|
refs/heads/main
| 2023-08-20T11:32:39.162059
| 2023-08-18T14:33:32
| 2023-08-18T14:33:32
| 143,040,428
| 5,987
| 1,363
|
MIT
| 2023-09-14T19:36:50
| 2018-07-31T16:35:51
|
CodeQL
|
UTF-8
|
Python
| false
| false
| 427
|
py
|
pep249.py
|
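# CodeQL inline-expectation test: each `# $ getSql=...` comment appears to
# assert the SQL string that the library model should extract from the call
# on that line.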
import sqlite3
db = sqlite3.connect("example.db")
# non standard
db.execute("some sql", (42,)) # $ getSql="some sql"
cursor = db.cursor()
cursor.execute("some sql", (42,)) # $ getSql="some sql"
cursor.executescript("sql") # $ getSql="sql"
cursor.executescript(sql_script="sql") # $ getSql="sql"
import sqlite3.dbapi2
conn = sqlite3.dbapi2.connect()
cursor = conn.cursor()
cursor.execute("some sql") # $ getSql="some sql"
|
699fffb33966709950f977f24a8b9fa3b03a4854
|
0da4f9195a7e1c4651b709463a66308dc7cf771e
|
/python/examples/xgboost_regressor.py
|
49a43b2cd5fb7829cc35681aa2fc7577fd1b56d2
|
[
"MIT"
] |
permissive
|
NorskRegnesentral/shapr
|
6437ab6d4bb2de004dc5937397ab8e15ab2cf43c
|
25f207d2e20835cb114ca13224fac49b91443bdb
|
refs/heads/master
| 2023-08-17T22:12:15.012082
| 2023-08-17T14:02:38
| 2023-08-17T14:02:38
| 133,624,629
| 128
| 27
|
NOASSERTION
| 2023-09-11T11:13:53
| 2018-05-16T07:05:13
|
R
|
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
xgboost_regressor.py
|
import xgboost as xgb
from shaprpy import explain
from shaprpy.datasets import load_california_housing
dfx_train, dfx_test, dfy_train, dfy_test = load_california_housing()
## Fit model
model = xgb.XGBRegressor()
model.fit(dfx_train, dfy_train.values.flatten())
## Shapr
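# `prediction_zero` is the reference value (phi_0) that the Shapley
# decomposition sums from, here the mean training response; `approach`
# selects how feature dependence is estimated.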
df_shapley, pred_explain, internal, timing = explain(
model = model,
x_train = dfx_train,
x_explain = dfx_test,
approach = 'empirical',
prediction_zero = dfy_train.mean().item(),
)
print(df_shapley)
"""
none MedInc HouseAge AveRooms AveBedrms Population AveOccup \
1 2.205937 -0.697653 0.103323 -0.066003 0.115853 -0.057640 -0.292739
2 2.205938 -0.521995 0.064876 -0.445466 -0.230454 -0.019290 0.080655
3 2.205938 0.307681 0.563008 0.062743 -0.626912 0.050450 1.050069
4 2.205938 0.479900 -0.100035 0.030474 0.104301 -0.154396 -0.148057
5 2.205938 -0.088568 -0.101495 -0.121637 0.213535 0.169194 0.253711
Latitude Longitude
1 -0.573533 -0.237709
2 -0.265165 -0.090790
3 0.181936 0.412604
4 0.078605 0.186957
5 0.126759 0.376471
"""
|