Dataset schema (one column per field; ranges are observed min/max):

blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 4 to 721)
content_id: string (length 40)
detected_licenses: list (length 0 to 57)
license_type: string (2 classes)
repo_name: string (length 5 to 91)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (321 classes)
visit_date: timestamp[ns] (2016-08-12 09:31:09 to 2023-09-06 10:45:07)
revision_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
committer_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
github_id: int64 (426 to 681M)
star_events_count: int64 (101 to 243k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[ns] (2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable)
gha_created_at: timestamp[ns] (2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable)
gha_language: string (147 classes)
src_encoding: string (26 classes)
language: string (2 classes)
is_vendor: bool
is_generated: bool
length_bytes: int64 (6 to 10.2M)
extension: string (115 classes)
filename: string (length 3 to 113)
content: string (length 6 to 10.2M)
=== pytorch/botorch :: /botorch/sampling/get_sampler.py ===
blob_id: 44330ba8ca76f3de0d7720c4522e8aed6d50972d | directory_id: c058f51b99f91faebf27183b2b579e9f96e0d8f5 | content_id: 11bf8ceaf9b6aa309f585df8e26819bc0a577104
detected_licenses: ["MIT"] | license_type: permissive
snapshot_id: 255d62f698cc615c750e9343c278a63c7e96a586 | revision_id: 4cc5ed59b2e8a9c780f786830c548e05cc74d53c | branch_name: refs/heads/main
visit_date: 2023-08-22T15:23:51.071048 | revision_date: 2023-08-22T05:30:38 | committer_date: 2023-08-22T05:30:38
github_id: 142,940,093 | star_events_count: 2,891 | fork_events_count: 373
gha_license_id: MIT | gha_event_created_at: 2023-09-13T00:16:13 | gha_created_at: 2018-07-30T23:59:57 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,964 | extension: py | filename: get_sampler.py
content:
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Type, Union
import torch
from botorch.logging import logger
from botorch.posteriors.deterministic import DeterministicPosterior
from botorch.posteriors.ensemble import EnsemblePosterior
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.posteriors.posterior import Posterior
from botorch.posteriors.posterior_list import PosteriorList
from botorch.posteriors.torch import TorchPosterior
from botorch.posteriors.transformed import TransformedPosterior
from botorch.sampling.base import MCSampler
from botorch.sampling.deterministic import DeterministicSampler
from botorch.sampling.index_sampler import IndexSampler
from botorch.sampling.list_sampler import ListSampler
from botorch.sampling.normal import (
IIDNormalSampler,
NormalMCSampler,
SobolQMCNormalSampler,
)
from botorch.utils.dispatcher import Dispatcher
from gpytorch.distributions import MultivariateNormal
from torch.distributions import Distribution
from torch.quasirandom import SobolEngine
def _posterior_to_distribution_encoder(
posterior: Posterior,
) -> Union[Type[Distribution], Type[Posterior]]:
r"""An encoder returning the type of the distribution for `TorchPosterior`
and the type of the posterior for the rest.
"""
if isinstance(posterior, TorchPosterior):
return type(posterior.distribution)
return type(posterior)
GetSampler = Dispatcher("get_sampler", encoder=_posterior_to_distribution_encoder)
def get_sampler(
posterior: TorchPosterior, sample_shape: torch.Size, **kwargs: Any
) -> MCSampler:
r"""Get the sampler for the given posterior.
The sampler can be used as `sampler(posterior)` to produce samples
suitable for use in acquisition function optimization via SAA.
Args:
posterior: A `Posterior` to get the sampler for.
sample_shape: The sample shape of the samples produced by the
given sampler. The full shape of the resulting samples is
given by `posterior._extended_shape(sample_shape)`.
kwargs: Optional kwargs, passed down to the samplers during construction.
Returns:
The `MCSampler` object for the given posterior.
"""
kwargs["sample_shape"] = sample_shape
return GetSampler(posterior, **kwargs)
@GetSampler.register(MultivariateNormal)
def _get_sampler_mvn(
posterior: GPyTorchPosterior, sample_shape: torch.Size, **kwargs: Any
) -> NormalMCSampler:
r"""The Sobol normal sampler for the `MultivariateNormal` posterior.
If the output dim is too large, falls back to `IIDNormalSampler`.
"""
sampler = SobolQMCNormalSampler(sample_shape=sample_shape, **kwargs)
collapsed_shape = sampler._get_collapsed_shape(posterior=posterior)
base_collapsed_shape = collapsed_shape[len(sample_shape) :]
if base_collapsed_shape.numel() > SobolEngine.MAXDIM:
logger.warning(
f"Output dim {base_collapsed_shape.numel()} is too large for the "
"Sobol engine. Using IIDNormalSampler instead."
)
sampler = IIDNormalSampler(sample_shape=sample_shape, **kwargs)
return sampler
@GetSampler.register(TransformedPosterior)
def _get_sampler_derived(
posterior: TransformedPosterior, sample_shape: torch.Size, **kwargs: Any
) -> MCSampler:
r"""Get the sampler for the underlying posterior."""
return get_sampler(
posterior=posterior._posterior, sample_shape=sample_shape, **kwargs
)
@GetSampler.register(PosteriorList)
def _get_sampler_list(
posterior: PosteriorList, sample_shape: torch.Size, **kwargs: Any
) -> MCSampler:
r"""Get the `ListSampler` with the appropriate list of samplers."""
samplers = [
get_sampler(posterior=p, sample_shape=sample_shape, **kwargs)
for p in posterior.posteriors
]
return ListSampler(*samplers)
@GetSampler.register(DeterministicPosterior)
def _get_sampler_deterministic(
posterior: DeterministicPosterior, sample_shape: torch.Size, **kwargs: Any
) -> MCSampler:
r"""Get the dummy `DeterministicSampler` for the `DeterministicPosterior`."""
return DeterministicSampler(sample_shape=sample_shape, **kwargs)
@GetSampler.register(EnsemblePosterior)
def _get_sampler_ensemble(
posterior: EnsemblePosterior, sample_shape: torch.Size, **kwargs: Any
) -> MCSampler:
r"""Get the `IndexSampler` for the `EnsemblePosterior`."""
return IndexSampler(sample_shape=sample_shape, **kwargs)
@GetSampler.register(object)
def _not_found_error(
posterior: Posterior, sample_shape: torch.Size, **kwargs: Any
) -> None:
raise NotImplementedError(
f"A registered `MCSampler` for posterior {posterior} is not found. You can "
"implement and register one using `@GetSampler.register`."
)
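
A minimal usage sketch for the dispatcher above (SingleTaskGP and its posterior come from the wider BoTorch library, outside this file, and are assumptions here):

import torch
from botorch.models import SingleTaskGP
from botorch.sampling.get_sampler import get_sampler

train_X = torch.rand(8, 2, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)
posterior = model.posterior(torch.rand(4, 2, dtype=torch.double))
# A GPyTorchPosterior over a MultivariateNormal dispatches to
# SobolQMCNormalSampler via the MultivariateNormal registration above.
sampler = get_sampler(posterior, sample_shape=torch.Size([128]))
samples = sampler(posterior)  # shape: torch.Size([128, 4, 1])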

=== roskakori/pygount :: /pygount/xmldialect.py ===
blob_id: beaf434ca8424cfe2e2da829d5d69f5ef41aea52 | directory_id: 959b7d21c8baa677f2725fbef14ed53e702f3dcb | content_id: daaa8597dd9f310bf8a3a9b558697f284f68d377
detected_licenses: ["BSD-3-Clause"] | license_type: permissive
snapshot_id: 8e46d3b13465084349edbc07a1f6300f9f448951 | revision_id: aee552c1587f3a5843fa303722e7cc5cd6d7ac58 | branch_name: refs/heads/master
visit_date: 2023-08-24T02:30:13.878793 | revision_date: 2023-08-23T19:01:46 | committer_date: 2023-08-23T19:01:46
github_id: 9,413,691 | star_events_count: 129 | fork_events_count: 22
gha_license_id: BSD-3-Clause | gha_event_created_at: 2023-09-06T19:51:00 | gha_created_at: 2013-04-13T13:02:29 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,296 | extension: py | filename: xmldialect.py
content:
"""
Function to obtain the language dialect used by XML source code.
"""
# Copyright (c) 2016-2023, Thomas Aglassinger.
# All rights reserved. Distributed under the BSD License.
import logging
import re
import xml.sax
# TODO #10: Replace regex for DTD by working DTD handler.
#: Regular expression to obtain DTD.
_DTD_REGEX = re.compile(r'<!DOCTYPE\s+(?P<name>[a-zA-Z][a-zA-Z-]*)\s+PUBLIC\s+"(?P<public_id>.+)"')
_REGEX_PATTERNS_AND_DIALECTS = ((".*DocBook.*", "DocBook XML"),)
_REGEXES_AND_DIALECTS = [(re.compile(pattern), dialect) for pattern, dialect in _REGEX_PATTERNS_AND_DIALECTS]
for public_id_regex, dialect in _REGEX_PATTERNS_AND_DIALECTS:
assert public_id_regex is not None
assert dialect is not None
assert dialect.strip() != ""
#: Regex to detect Sax error messages with uninformative paths like '<unknown>'.
_SAX_MESSAGE_WITHOUT_PATH_PATTERN = re.compile(r"^<.+>(?P<message_without_path>:\d+:\d+.+)")
_log = logging.getLogger("pygount")
class SaxParserDone(Exception): # noqa: N818
"""
    Pseudo error to indicate that the Sax parser is done.
"""
pass
class XmlDialectHandler(xml.sax.ContentHandler, xml.sax.handler.DTDHandler):
def __init__(self, max_element_count=100):
super().__init__()
self.dialect = None
self._path = ""
self._element_count = 0
self._max_element_count = max_element_count
def _set_dialect_and_stop_parsing(self, dialect):
self.dialect = dialect
raise SaxParserDone(f"language detected: {dialect}")
def startElement(self, name, attrs): # noqa: N802
self._element_count += 1
if self._element_count == self._max_element_count:
raise SaxParserDone(f"no language found after parsing {self._element_count} elements")
self._path += "/" + name
xmlns = attrs.get("xmlns", "")
if (self._path == "/project") and ("name" in attrs):
self._set_dialect_and_stop_parsing("Ant")
elif (self._path in ("/book/title", "/chapter/title")) or (xmlns == "http://docbook.org/ns/docbook"):
self._set_dialect_and_stop_parsing("DocBook XML")
elif xmlns == "http://xmlns.jcp.org/xml/ns/javaee":
self._set_dialect_and_stop_parsing("JavaEE XML")
elif xmlns.startswith("http://maven.apache.org/POM"):
self._set_dialect_and_stop_parsing("Maven")
elif xmlns.startswith("http://www.netbeans.org/ns/project/"):
self._set_dialect_and_stop_parsing("NetBeans Project")
def endElement(self, name): # noqa: N802
self._path = self._path[: -len(name) - 1]
def xml_dialect(xml_path, xml_code):
# TODO #10: Remove hack to obtain DTD using a regex instead of a DTDHandler.
dtd_match = _DTD_REGEX.match(xml_code)
if dtd_match is not None:
public_id = dtd_match.group("public_id")
for public_id_regex, dialect in _REGEXES_AND_DIALECTS:
if public_id_regex.match(public_id):
return dialect
xml_dialect_handler = XmlDialectHandler()
parser = xml.sax.make_parser()
parser.setContentHandler(xml_dialect_handler)
parser.setFeature(xml.sax.handler.feature_external_ges, False)
parser.setFeature(xml.sax.handler.feature_external_pes, False)
parser.setFeature(xml.sax.handler.feature_validation, False)
try:
parser.feed(xml_code)
# NOTE: We can only call close() when the parser has finished,
# otherwise close() raises a SAXException('parser finished').
parser.close()
except SaxParserDone:
# Language has been determined or the parser has given up.
pass
except (ValueError, xml.sax.SAXException) as error:
# NOTE: ValueError is raised on unknown url type.
error_message = str(error)
message_without_path_match = _SAX_MESSAGE_WITHOUT_PATH_PATTERN.match(error_message)
if message_without_path_match is not None:
# HACK: Replace uninformative sax path like '<unknown>' with actual XML path.
error_message = xml_path + message_without_path_match.group("message_without_path")
_log.warning(error_message)
except OSError as error:
_log.warning("%s: cannot analyze XML dialect: %s", xml_path, error)
return xml_dialect_handler.dialect
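
A standalone usage sketch for xml_dialect, using a small hypothetical DocBook document (the file name is illustrative only):

docbook_code = (
    '<?xml version="1.0"?>'
    '<book xmlns="http://docbook.org/ns/docbook"><title>Example</title></book>'
)
# The handler sees xmlns="http://docbook.org/ns/docbook" on the root element.
print(xml_dialect("example.xml", docbook_code))  # -> 'DocBook XML'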

=== zokis/Python--Faster-Way :: /tests/test24/t1.py ===
blob_id: 0449a2ad00b2bdecff059345f71bcb31f15971c8 | directory_id: 849364a9b65ac32feab67dd3bb86119a0102f048 | content_id: d9fae6ddfce1ba03c60c7254c8ac37c4b7aea1f8
detected_licenses: [] | license_type: no_license
snapshot_id: 585b46e50cc70c6b4f3b026d3b82ba2705f6fc6b | revision_id: 8f11e9246198c6bc3c0c58668674d75188c966ae | branch_name: refs/heads/gh-pages
visit_date: 2022-07-28T18:50:54.342599 | revision_date: 2015-07-02T19:43:18 | committer_date: 2015-07-02T19:43:18
github_id: 12,438,963 | star_events_count: 200 | fork_events_count: 31
gha_license_id: null | gha_event_created_at: 2018-03-25T16:12:12 | gha_created_at: 2013-08-28T16:35:19 | gha_language: HTML
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 96 | extension: py | filename: t1.py
content:
def a():
a = [[1, 2, 3], [2, 3, 4], [4, 5, 6]]
b = {x[1]: x[2] for x in a}
return b
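
For reference, a worked evaluation of the snippet above:

# Each row [k0, k1, k2] contributes {k1: k2}, so:
assert a() == {2: 3, 3: 4, 5: 6}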

=== dguenms/Dawn-of-Civilization :: /Assets/Python/Contrib/CvStrategyOverlay.py ===
blob_id: 62150c010d82eba1c6bc8a54cb3a3a9bd117ef69 | directory_id: 6436d1e6c23f9f43a8025889dc4414a3ad66acf2 | content_id: b20a20e72db03d18c674aae1701945a468f6ff96
detected_licenses: ["MIT"] | license_type: permissive
snapshot_id: b710195c4f46fe11d9229182c3b1e07b77f42637 | revision_id: a305e7846d085d6edf1e9c472e8dfceee1c07dd4 | branch_name: refs/heads/develop
visit_date: 2023-09-04T04:57:00.086384 | revision_date: 2023-09-01T15:24:28 | committer_date: 2023-09-01T15:24:28
github_id: 45,362,597 | star_events_count: 116 | fork_events_count: 121
gha_license_id: MIT | gha_event_created_at: 2023-02-08T00:18:53 | gha_created_at: 2015-11-01T23:52:28 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 16,903 | extension: py | filename: CvStrategyOverlay.py
content:
#-------------------------------------------------------------------------------
# Name: CvStrategyOverlay.py
# Purpose: Draws the strategy overlay itself.
# CvOverlayScreen.py does the editing.
# Contains:
# -Dot Mapper
# -Categorized signs
#
# Author: Del69, EmperorFool
#
# Created: 11/12/2008
#-------------------------------------------------------------------------------
from CvPythonExtensions import *
import BugCore
import BugPath
import BugUtil
import CvOverlayScreenUtils
import PlayerUtil
import SdToolKit
COLOR_KEYS = None
PALETTE_WIDTH = None
gc = CyGlobalContext()
StratLayerOpt = BugCore.game.StrategyOverlay
g_layers = {}
def init(paletteWidth=3, paletteColors=None):
global COLOR_KEYS, PALETTE_WIDTH
# setup palette width
if paletteWidth:
PALETTE_WIDTH = paletteWidth
else:
PALETTE_WIDTH = 10
# setup palette colors
if paletteColors:
COLOR_KEYS = paletteColors
else:
PALETTE_WIDTH = 10 # override because it has 127 colors
COLOR_KEYS = []
try:
for index in range(200):
info = gc.getColorInfo(index)
COLOR_KEYS.append(info.getType())
except:
pass
# create layers
DotMapLayer()
def getLayer(id):
return g_layers[id]
def callEachLayer(func, *args):
for layer in g_layers.itervalues():
func(layer, *args)
## Event Handlers
def onGameStart(argsList):
def callReset(layer):
layer.reset()
callEachLayer(callReset)
def onLoad(argsList):
def callRead(layer):
layer.read()
callEachLayer(callRead)
if StratLayerOpt.isShowDotMap():
getDotMap().redrawCities()
def onPreSave(argsList):
def callWrite(layer):
layer.write()
callEachLayer(callWrite)
def onBeginActivePlayerTurn(args):
def callBeginActivePlayerTurn(layer, ePlayer):
layer.onBeginActivePlayerTurn(ePlayer)
callEachLayer(callBeginActivePlayerTurn, args[0])
def onSwitchHotSeatPlayer(args):
def callSwitchHotSeatPlayer(layer, ePlayer):
layer.onSwitchHotSeatPlayer(ePlayer)
callEachLayer(callSwitchHotSeatPlayer, args[0])
MSG_ADD_CITY = 500
MSG_REMOVE_CITY = 501
def onModNetMessage(args):
iData1, iData2, iData3, iData4, iData5 = args
if iData1 == MSG_ADD_CITY:
getDotMap().addCityMessage(iData2, iData3, iData4, iData5)
elif iData1 == MSG_REMOVE_CITY:
getDotMap().removeCityMessage(iData2, iData3)
else:
return 0
return 1
def onEnabledOptionChanged(option, value):
pass
## Base Strategy Layer Class
class StrategyLayer(object):
"""
Provides common functionality for all of the strategy layers.
"""
def __init__(self, id):
self.MOD_SAVE_ID = "StrategyOverlay"
self.INVISIBLE_COLOR = NiColorA(0, 0, 0, 0)
self.id = id
self.visible = False
self.editing = False
self.dirty = False
g_layers[id] = self
self.reset()
def reset(self):
"""
Resets the data to a blank state and clears the dirty flag.
"""
self.dirty = False
def read(self):
"""
Reads the data from the game and clears the dirty flag.
"""
self.dirty = False
def write(self):
"""
Writes the data to the game and clears the dirty flag.
"""
self.dirty = False
def toggleVisibility(self):
if self.visible:
self.hide()
else:
self.show()
def show(self):
if not self.visible:
self.visible = True
return True
return False
def hide(self):
if self.visible:
self.freeze()
self.visible = False
return True
return False
def toggleEditing(self):
if not self.editing:
self.edit()
else:
self.freeze()
def edit(self):
if not self.editing:
self.show()
self.editing = True
return True
return False
def freeze(self):
if self.editing:
self.editing = False
return True
return False
def onBeginActivePlayerTurn(self, ePlayer):
pass
def onSwitchHotSeatPlayer(self, ePlayer):
pass
## ----------------------------------------------------------------------
## DOT MAP
## ----------------------------------------------------------------------
DOTMAP_LAYER = "DotMap"
X, Y = 0, 1 # used in point tuples instead of creating a new class
g_DotMap = None
class City:
"""
Holds the data for a single dot-mapped city.
"""
def __init__(self, point, color, layer):
self.point = point
self.color = color
self.layer = layer
def __eq__(self, other):
return self.point == other.point and self.color == other.color
def __str__(self):
return "(%d,%d) on %d" % (self.point[X], self.point[Y], self.layer)
def isAt(self, point):
return self.point == point
def samePoint(self, other):
return self.point == other.point
def sameColor(self, other):
return self.color == other.color
def sameLayer(self, other):
return self.layer == other.layer
def getDotMap():
global g_DotMap
if g_DotMap is None:
BugUtil.error("CvStrategyOverlay has not been initialized")
return g_DotMap
def hideDotMap(args=None):
getDotMap().hide()
StratLayerOpt.setShowDotMap(False)
def toggleDotMapVisibility(args=None):
getDotMap().toggleVisibility()
StratLayerOpt.setShowDotMap(getDotMap().visible)
def toggleDotMapEditMode(args=None):
getDotMap().toggleEditing()
if not getDotMap().editing and not StratLayerOpt.isShowDotMap():
getDotMap().hide()
def onShowDotMapOptionChanged(option, value):
if value:
getDotMap().show()
else:
getDotMap().hide()
def onDotMapOptionChanged(option, value):
getDotMap().optionChanged(option, value)
class DotMapLayer(StrategyLayer):
"""
Draws city crosses of different colors so the user can create a dot-map.
"""
def __init__(self):
super(DotMapLayer, self).__init__(DOTMAP_LAYER)
global g_DotMap
g_DotMap = self
# constants
self.CITY_SAVE_ID = "CityDataDict"
self.HIGHLIGHT_CROSS_LAYER = 8
self.FIRST_CROSS_LAYER = 9
self.NUM_CROSS_LAYERS = 36 #len(COLOR_KEYS)
self.DOT_LAYER = PlotLandscapeLayers.PLOT_LANDSCAPE_LAYER_NUMPAD_HELP
self.NO_DOT_STYLE = PlotStyles.PLOT_STYLE_NONE
self.MAX_DOT_STYLE = PlotStyles.PLOT_STYLE_WAVES
self.BFC_OFFSETS = []
for x in range(-2, 3):
for y in range(-2, 3):
if abs(x) != 2 or abs(y) != 2:
self.BFC_OFFSETS.append((x, y))
# default options
self.CROSS_ALPHA = 50.0
self.DOT_ALPHA = 50.0
self.HIGHLIGHT_CROSS_ALPHA = 100.0
self.HIGHLIGHT_DOT_ALPHA = 100.0
self.DRAW_DOTS = True
self.DOT_STYLE = PlotStyles.PLOT_STYLE_DOT_TARGET
self.readOptions()
# state
self.highlightedCity = None
def reset(self):
self.cities = {}
self.dirty = False
def read(self):
data = SdToolKit.sdGetGlobal(self.MOD_SAVE_ID, self.CITY_SAVE_ID)
self.clearCityLayers()
if data is not None:
self.cities = self.updateData(data)
self.dirty = False
else:
self.reset()
def updateData(self, data):
"""
Upgrade previous data formats to latest format.
"""
if len(data) == 0:
# empty, don't care
return data
for key, value in data.iteritems():
if isinstance(key, int):
# data in latest format
return data
else:
# old format, convert below
break
# find first living, human player and assign all data to them
# if none found, assign to player 0
for player in PlayerUtil.players(alive=True, human=True):
ePlayer = player.getID()
break
else:
ePlayer = 0
newData = {}
cities = {}
newData[ePlayer] = cities
for point, (color, layer) in data.iteritems():
# use new point-based layer scheme
grid = 6
layer = (point[X] % grid) * grid + (point[Y] % grid)
cities[point] = City(point, color, layer)
return newData
def write(self):
if self.dirty:
SdToolKit.sdSetGlobal(self.MOD_SAVE_ID, self.CITY_SAVE_ID, self.cities)
self.dirty = False
def show(self):
if super(DotMapLayer, self).show():
self.redrawCities()
def hide(self):
if super(DotMapLayer, self).hide():
self.clearCityLayers()
def edit(self):
if super(DotMapLayer, self).edit():
CvOverlayScreenUtils.showOverlayScreen()
def freeze(self):
if super(DotMapLayer, self).freeze():
self.unhighlightCity()
CvOverlayScreenUtils.hideOverlayScreen()
def onBeginActivePlayerTurn(self, ePlayer):
if StratLayerOpt.isShowDotMap():
self.show()
def onSwitchHotSeatPlayer(self, ePlayer):
self.hide()
def hasCities(self, ePlayer):
return ePlayer in self.cities
def hasCity(self, ePlayer, point):
return self.hasCities(ePlayer) and point in self.cities[ePlayer]
def getCities(self, ePlayer):
if self.hasCities(ePlayer):
return self.cities[ePlayer]
cities = {}
self.cities[ePlayer] = cities
return cities
def getCity(self, ePlayer, point):
if self.hasCities(ePlayer):
cities = self.cities[ePlayer]
if point in cities:
return cities[point]
return None
def iterCities(self, ePlayer):
"""
Iterates over the player's cities.
"""
if self.hasCities(ePlayer):
for city in self.getCities(ePlayer).itervalues():
yield city
def addCityAt(self, point, color, layer):
"""
Sends a message to add a city for the active player at the given point.
"""
CyMessageControl().sendModNetMessage(MSG_ADD_CITY, PlayerUtil.getActivePlayerID(), point[X] * 1000 + point[Y], color, layer)
def addCityMessage(self, ePlayer, xy, color, layer):
"""
Processes a message to add a city.
"""
x = xy / 1000
y = xy % 1000
city = City((x, y), color, layer)
self.addCity(ePlayer, city)
def addCity(self, ePlayer, city):
"""
Adds the city to the data set and draws its dot and cross.
"""
if self.hasCity(ePlayer, city.point):
oldCity = self.getCity(ePlayer, city.point)
if city == oldCity:
return
BugUtil.debug("DotMap - replacing city at %s", city.point)
self.removeCity(ePlayer, oldCity)
BugUtil.debug("DotMap - adding city %s", city)
self.getCities(ePlayer)[city.point] = city
self.dirty = True
if ePlayer == PlayerUtil.getActivePlayerID():
self.drawCity(city, self.CROSS_ALPHA, self.DOT_ALPHA)
def removeCityAt(self, point):
"""
Sends a message to remove the active player's city at the given point.
"""
ePlayer = PlayerUtil.getActivePlayerID()
if self.hasCity(ePlayer, point):
CyMessageControl().sendModNetMessage(MSG_REMOVE_CITY, ePlayer, point[X] * 1000 + point[Y], -1, -1)
else:
self.freeze()
def removeCityMessage(self, ePlayer, xy):
"""
Processes a message to remove a city.
"""
x = xy / 1000
y = xy % 1000
self.removeCity(ePlayer, self.getCity(ePlayer, (x, y)))
def removeCity(self, ePlayer, city):
"""
Removes the city from the data set and erases its dot and cross.
"""
if city:
BugUtil.debug("DotMap - removing city %s", city)
del self.getCities(ePlayer)[city.point]
self.dirty = True
if ePlayer == PlayerUtil.getActivePlayerID():
self.redrawCrosses(city.layer)
self.eraseDot(city, self.DOT_ALPHA)
else:
BugUtil.warn("City doesn't exist")
def highlightCity(self, point, color):
"""
Highlights the given city location by drawing it using the given color on the highlight layer.
Unhighlights the currently highlighted city if there is one.
If there is no city there (N), the new city is drawn (C).
If the city is on the same layer (S), nothing is done (N). --> WC
If the city is on a different layer (D), the city's layer is redrawn without it (W) and the new city is drawn (C).
"""
city = City(point, color, self.HIGHLIGHT_CROSS_LAYER)
if self.highlightedCity:
if self.highlightedCity == city:
return
else:
self.unhighlightCity()
self.highlightedCity = city
ePlayer = PlayerUtil.getActivePlayerID()
existingCity = self.getCity(ePlayer, point)
if existingCity is not None:
self.redrawCrosses(existingCity.layer, point)
self.eraseDot(existingCity, self.DOT_ALPHA)
self.drawCross(city, self.HIGHLIGHT_CROSS_ALPHA)
def unhighlightCity(self):
"""
Removes the highlight from the existing city location if there is one.
If there is no city there (N), the current layer is redrawn (L) and the dot is erased (d).
If the city is on the same layer (S), nothing is done (N). --> LC
If the city is on a different layer (D), the current layer is redrawn (L) and the city is drawn (C).
"""
if self.highlightedCity:
point = self.highlightedCity.point
self.clearHighlightCrossLayer()
ePlayer = PlayerUtil.getActivePlayerID()
city = self.getCity(ePlayer, point)
if city is not None:
self.drawCity(city, self.CROSS_ALPHA, self.DOT_ALPHA)
self.highlightedCity = None
def redrawCities(self):
"""
Erases all city layers and draws all of the cities.
"""
self.clearCityLayers()
self.drawCities()
def redrawCrosses(self, layer, skip=None):
"""
Erases the given layer and draws all city crosses in that layer.
"""
self.clearCrossLayer(layer)
self.drawCrosses(layer, skip)
def redrawDots(self):
"""
Erases and redraws all city dots as they are all in the same layer.
"""
self.clearDotLayer()
self.drawDots()
def drawCities(self, skip=None):
"""
Draws all of the cities except skip, if given.
"""
crossAlpha = self.CROSS_ALPHA
dotAlpha = self.DOT_ALPHA
for city in self.iterCities(PlayerUtil.getActivePlayerID()):
if not city.isAt(skip):
self.drawCity(city, crossAlpha, dotAlpha)
def drawCrosses(self, layer=None, skip=None):
"""
Draws the cross for every city in the given layer.
"""
crossAlpha = self.CROSS_ALPHA
for city in self.iterCities(PlayerUtil.getActivePlayerID()):
if not city.isAt(skip):
if layer is None or layer == city.layer:
self.drawCross(city, crossAlpha)
def drawDots(self, skip=None):
"""
Draws the dot for every city.
"""
dotAlpha = self.DOT_ALPHA
for city in self.iterCities(PlayerUtil.getActivePlayerID()):
if not city.isAt(skip):
self.drawDot(city, dotAlpha)
def drawCity(self, city, crossAlpha, dotAlpha):
"""
Draws the cross and dot for a single city.
"""
self.drawCross(city, crossAlpha)
self.drawDot(city, dotAlpha)
def drawCross(self, city, alpha):
"""
Draws the cross for a single city.
"""
x, y = city.point
color = gc.getColorInfo(city.color).getType()
layer = city.layer
for dx, dy in self.BFC_OFFSETS:
CyEngine().fillAreaBorderPlotAlt(x + dx, y + dy, layer, color, alpha)
def drawDot(self, city, alpha):
"""
Draws the dot for a single city.
"""
if self.DRAW_DOTS:
x, y = city.point
colorInfo = gc.getColorInfo(city.color)
if BugPath.isMac():
color = colorInfo.getColor()
CyEngine().addColoredPlot(x, y, NiColorA(color.r, color.g, color.b, alpha), self.DOT_LAYER)
else:
CyEngine().addColoredPlotAlt(x, y, self.DOT_STYLE, self.DOT_LAYER, colorInfo.getType(), alpha)
def eraseDot(self, city, alpha):
"""
Erases the dot for a single city.
"""
if self.DRAW_DOTS:
x, y = city.point
if BugPath.isMac():
CyEngine().addColoredPlot(x, y, self.INVISIBLE_COLOR, self.DOT_LAYER)
else:
CyEngine().addColoredPlotAlt(x, y, self.NO_DOT_STYLE, self.DOT_LAYER, "COLOR_BLACK", alpha)
def clearCityLayers(self):
"""
Erases all city crosses and dots.
"""
self.clearHighlightCrossLayer()
for index in range(self.NUM_CROSS_LAYERS):
self.clearCrossLayer(index + self.FIRST_CROSS_LAYER)
self.clearDotLayer()
def clearHighlightCrossLayer(self):
"""
Clears the indexed border layer.
"""
self.clearCrossLayer(self.HIGHLIGHT_CROSS_LAYER)
def clearCrossLayer(self, layer):
"""
Clears the indexed border layer.
"""
CyEngine().clearAreaBorderPlots(layer)
def clearDotLayer(self):
"""
Clears all the dots from screen.
"""
CyEngine().clearColoredPlots(self.DOT_LAYER)
def percentToAlpha(self, percent):
return min(100, max(0, percent)) / 100.0
def readOptions(self):
self.CROSS_ALPHA = self.percentToAlpha(StratLayerOpt.getDotMapBrightness())
self.DOT_ALPHA = self.percentToAlpha(StratLayerOpt.getDotMapBrightness())
self.HIGHLIGHT_CROSS_ALPHA = self.percentToAlpha(StratLayerOpt.getDotMapHighlightBrightness())
self.HIGHLIGHT_DOT_ALPHA = self.percentToAlpha(StratLayerOpt.getDotMapHighlightBrightness())
self.DRAW_DOTS = StratLayerOpt.isDotMapDrawDots()
self.DOT_STYLE = min(self.MAX_DOT_STYLE, max(0, StratLayerOpt.getDotMapDotIcon()))
def optionChanged(self, option, value):
"""
Redraws the layer if it is currently visible.
"""
self.unhighlightCity()
self.readOptions()
if self.visible:
self.redrawCities()
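
The net messages above pack a plot coordinate into a single int as x * 1000 + y; a standalone sketch of the round trip (the file itself runs under Python 2, where the bare / in addCityMessage performs the same floor division):

def pack_point(x, y):
    # matches addCityAt: point[X] * 1000 + point[Y]
    return x * 1000 + y

def unpack_point(xy):
    # matches addCityMessage/removeCityMessage: xy / 1000, xy % 1000
    return xy // 1000, xy % 1000

assert unpack_point(pack_point(12, 34)) == (12, 34)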

=== altairbow/django-db-connection-pool :: /dj_db_conn_pool/backends/postgresql/base.py ===
blob_id: db8ca0ac6272924438e7f43731c29581f9556570 | directory_id: 5aef4e8880f9f12dbdde246140a836d3f2e5bb2a | content_id: 911d0956444ded6bfa5a5613370569e8000c6148
detected_licenses: ["MIT"] | license_type: permissive
snapshot_id: dd76159b6713c57eef42b57c7fb49e9c5813b1ae | revision_id: 08d4a5d7288979ef743674e52b9d1cbd796e3d86 | branch_name: refs/heads/master
visit_date: 2023-08-31T11:24:19.735528 | revision_date: 2023-08-20T10:15:02 | committer_date: 2023-08-20T10:15:02
github_id: 164,835,376 | star_events_count: 137 | fork_events_count: 26
gha_license_id: MIT | gha_event_created_at: 2023-06-02T21:01:27 | gha_created_at: 2019-01-09T09:50:42 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 439 | extension: py | filename: base.py
content:
# coding: utf-8
from django.db.backends.postgresql import base
from dj_db_conn_pool.backends.postgresql.mixins import PGDatabaseWrapperMixin
class DatabaseWrapper(PGDatabaseWrapperMixin, base.DatabaseWrapper):
def get_new_connection(self, conn_params):
connection = super().get_new_connection(conn_params)
if not connection.info:
connection.info = connection.connection.info
return connection
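
A hedged configuration sketch for selecting this pooled backend in Django settings (the POOL_OPTIONS keys follow django-db-connection-pool's documentation; treat the exact names and values as assumptions):

# settings.py sketch: route the default database through the pooled backend.
DATABASES = {
    "default": {
        "ENGINE": "dj_db_conn_pool.backends.postgresql",
        "NAME": "mydb",
        "USER": "postgres",
        "PASSWORD": "secret",  # placeholder credential
        "HOST": "127.0.0.1",
        "PORT": "5432",
        "POOL_OPTIONS": {"POOL_SIZE": 10, "MAX_OVERFLOW": 10},
    }
}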

=== ThoughtWorksInc/CD4ML-Scenarios :: /test/test_acceptance_arg_parsing.py ===
blob_id: 1d9e00a31506da14c054193bff1193f8d3b120c7 | directory_id: 25daa9604b83ddc199764309c39da106a5313c22 | content_id: 67135ec94a3532e6ca63abbdcd2079780ab4023b
detected_licenses: ["MIT"] | license_type: permissive
snapshot_id: 83d3f162a2ddbb7e02662d03f769feb8978b4de9 | revision_id: a9200df2f926f7e398dd820b99a11515c9a3eacb | branch_name: refs/heads/master
visit_date: 2023-05-12T07:11:39.677610 | revision_date: 2022-05-31T14:14:57 | committer_date: 2022-05-31T14:14:57
github_id: 246,649,538 | star_events_count: 127 | fork_events_count: 327
gha_license_id: MIT | gha_event_created_at: 2023-05-01T23:38:18 | gha_created_at: 2020-03-11T18:26:59 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,089 | extension: py | filename: test_acceptance_arg_parsing.py
content:
import datetime
import os
from pathlib import Path
from cd4ml.filenames import get_model_files
from scripts import acceptance as acceptance_script
def test_acceptance_with_model_id():
model_id = acceptance_script.parse_arguments(['my_test_model_id'])
assert model_id == 'my_test_model_id'
earlier_time = int(datetime.datetime(2020, 8, 29, 12, 0, 0).timestamp())
later_time = int(datetime.datetime(2020, 8, 29, 14, 0, 0).timestamp())
def test_acceptance_with_no_arguments(tmp_path):
os.environ["CD4ML_DATA_DIR"] = str(tmp_path)
files = get_model_files('', base_data_dir=tmp_path)
base_results_directory = files['results_folder']
earlier_folder = Path(base_results_directory, "earlier")
earlier_folder.mkdir(parents=True)
os.utime(earlier_folder, (earlier_time, earlier_time))
latest_folder = Path(base_results_directory, "later")
latest_folder.mkdir(parents=True)
    os.utime(latest_folder, (later_time, later_time))
model_id = acceptance_script.parse_arguments([])
del os.environ["CD4ML_DATA_DIR"]
assert model_id == 'later'

=== cpfair/tapiriik :: /tapiriik/services/rollback.py ===
blob_id: f56c9e93fd4b577091707466cadab16c9da7bb70 | directory_id: 33f805792e79a9ef1d577699b983031521d5b6c9 | content_id: 251f5f2caf175763bfcb2c8fe19c68893653ac1f
detected_licenses: ["Apache-2.0", "BSD-3-Clause"] | license_type: permissive
snapshot_id: 0dce9599400579d33acbbdaba16806256270d0a3 | revision_id: c67e9848e67f515e116bb19cd4dd479e8414de4d | branch_name: refs/heads/master
visit_date: 2023-08-28T10:17:11.070324 | revision_date: 2023-07-25T00:59:33 | committer_date: 2023-07-25T00:59:33
github_id: 7,812,229 | star_events_count: 1,519 | fork_events_count: 343
gha_license_id: Apache-2.0 | gha_event_created_at: 2022-10-24T16:52:34 | gha_created_at: 2013-01-25T02:43:42 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,062 | extension: py | filename: rollback.py
content:
from tapiriik.database import db
from tapiriik.auth import User
from tapiriik.services import Service
import datetime
import logging
import json
from bson.objectid import ObjectId
logger = logging.getLogger(__name__)
class RollbackTask:
def __new__(cls, dbRec):
if not dbRec:
return None
return super(RollbackTask, cls).__new__(cls)
def __init__(self, dbRec):
self.__dict__.update(dbRec)
def _create(user):
# Pull all the records that need to be rolled back
logger.info("Finding activities for %s" % user["_id"])
conns = User.GetConnectionRecordsByUser(user)
my_services = [conn.Service.ID for conn in conns]
my_ext_ids = [conn.ExternalID for conn in conns]
logger.info("Scanning uploads table for %s accounts with %s extids" % (my_services, my_ext_ids))
uploads = db.uploaded_activities.find({"Service": {"$in": my_services}, "UserExternalID": {"$in": my_ext_ids}})
pending_deletions = {}
for upload in uploads:
svc = upload["Service"]
upload_id = upload["ExternalID"]
svc_ext_id = upload["UserExternalID"]
# Filter back down to the pairing we actually need
if my_services.index(svc) != my_ext_ids.index(svc_ext_id):
continue
if svc not in pending_deletions:
pending_deletions[svc] = []
pending_deletions[svc].append(upload_id)
# Another case of "I should have an ORM"
return RollbackTask({"PendingDeletions": pending_deletions})
def Create(user):
task = RollbackTask._create(user)
uid = db.rollback_tasks.insert({"PendingDeletions": task.PendingDeletions, "Created": datetime.datetime.utcnow(), "UserID": user["_id"]})
logger.info("Created rollback task %s" % uid)
task._id = uid
return task
def Get(id):
dbRec = db.rollback_tasks.find_one({"_id": ObjectId(id)})
if not dbRec:
return
return RollbackTask(dbRec)
def json(self):
# Augment with the requisite URLs
self.ActivityURLs = {svc: {} for svc in self.PendingDeletions.keys()}
for svc_id, urls in self.ActivityURLs.items():
svc = Service.FromID(svc_id)
for upload in self.PendingDeletions[svc_id]:
try:
urls[upload] = svc.UserUploadedActivityURL(upload)
except NotImplementedError:
pass
self.PendingDeletionCount = sum([len(v) for k, v in self.PendingDeletions.items()])
dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date) else str(obj)
return json.dumps(self.__dict__, default=dthandler)
def Schedule(self):
db.rollback_tasks.update({"_id": self._id}, {"$set": {"Scheduled": datetime.datetime.utcnow()}})
from rollback_worker import schedule_rollback_task
schedule_rollback_task(str(self._id))
def Execute(self):
logger.info("Starting rollback %s" % self._id)
deletion_status = {}
user = User.Get(self.UserID)
for svc_id, upload_ids in self.PendingDeletions.items():
svcrec = User.GetConnectionRecord(user, svc_id)
deletion_status[svc_id] = {}
if not svcrec.Service.SupportsActivityDeletion:
continue
for upload_id in upload_ids:
logger.info("Deleting activity %s on %s" % (upload_id, svc_id))
try:
svcrec.Service.DeleteActivity(svcrec, upload_id)
except Exception as e:
deletion_status[svc_id][str(upload_id)] = False
logger.exception("Deletion failed - %s" % e)
else:
deletion_status[svc_id][str(upload_id)] = True
db.rollback_tasks.update({"_id": self._id}, {"$set": {"DeletionStatus": deletion_status}})
logger.info("Finished rollback %s" % self._id)

=== pytorch/serve :: /test/pytest/test_handler.py ===
blob_id: 758b5c905f6ab4347635dbb6141c8b1578697296 | directory_id: d2621d10d6d0aa4fcecbb11c281e3dd680b985fc | content_id: a14af050a4f102fc66f68636b895ef435bf16551
detected_licenses: ["Apache-2.0"] | license_type: permissive
snapshot_id: 7b562a4d6372e77ce28fc71a5b8d5455c6f02290 | revision_id: 242895c6b4596c4119ec09d6139e627c5dd696b6 | branch_name: refs/heads/master
visit_date: 2023-08-31T05:24:10.950144 | revision_date: 2023-08-31T02:49:22 | committer_date: 2023-08-31T02:49:22
github_id: 212,488,700 | star_events_count: 3,689 | fork_events_count: 895
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-13T22:34:31 | gha_created_at: 2019-10-03T03:17:43 | gha_language: Java
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 13,802 | extension: py | filename: test_handler.py
content:
import ast
import json
import logging
import os
import numpy as np
import pytest
import requests
import test_utils
import torch
REPO_ROOT = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../")
)
snapshot_file_kf = os.path.join(REPO_ROOT, "test", "config_kf.properties")
snapshot_file_tf = os.path.join(REPO_ROOT, "test", "config_ts.properties")
data_file_mnist = os.path.join(
REPO_ROOT, "examples", "image_classifier", "mnist", "test_data", "1.png"
)
input_json_mnist = os.path.join(
REPO_ROOT, "kubernetes", "kserve", "kf_request_json", "v1", "mnist.json"
)
input_json_mmf = os.path.join(
REPO_ROOT, "examples", "MMF-activity-recognition", "372CC.info.json"
)
logger = logging.getLogger(__name__)
def getAPIS(snapshot_file):
MANAGEMENT_API = "http://127.0.0.1:8081"
INFERENCE_API = "http://127.0.0.1:8080"
with open(snapshot_file, "r") as fp:
lines = fp.readlines()
for line in lines:
line = line.rstrip("\n")
if "management_address" in line:
MANAGEMENT_API = line.split("=")[1]
if "inference_address" in line:
INFERENCE_API = line.split("=")[1]
return (MANAGEMENT_API, INFERENCE_API)
KF_MANAGEMENT_API, KF_INFERENCE_API = getAPIS(snapshot_file_kf)
TF_MANAGEMENT_API, TF_INFERENCE_API = getAPIS(snapshot_file_tf)
def setup_module(module):
test_utils.torchserve_cleanup()
response = requests.get(
"https://torchserve.pytorch.org/mar_files/mnist.mar", allow_redirects=True
)
with open(os.path.join(test_utils.MODEL_STORE, "mnist.mar"), "wb") as f:
f.write(response.content)
def teardown_module(module):
test_utils.torchserve_cleanup()
def mnist_model_register_using_non_existent_handler_then_scale_up(synchronous=False):
"""
Validates that snapshot.cfg is created when management apis are invoked.
"""
response = requests.post(
TF_MANAGEMENT_API + "/models?handler=nehandler&url=mnist.mar"
)
# Scale up workers
if synchronous:
params = (
("min_worker", "2"),
("synchronous", "True"),
)
else:
params = (("min_worker", "2"),)
response = requests.put(TF_MANAGEMENT_API + "/models/mnist", params=params)
# Check if workers got scaled
response = requests.get(TF_MANAGEMENT_API + "/models/mnist")
return response
def mnist_model_register_and_scale_using_non_existent_handler_synchronous():
# Register & Scale model
response = mnist_model_register_using_non_existent_handler_then_scale_up(
synchronous=True
)
mnist_list = json.loads(response.content)
try:
# Workers should not scale up
assert len(mnist_list[0]["workers"]) == 0
finally:
# UnRegister mnist model
test_utils.unregister_model("mnist")
def mnist_model_register_and_scale_using_non_existent_handler_asynchronous():
# Register & Scale model
response = mnist_model_register_using_non_existent_handler_then_scale_up()
mnist_list = json.loads(response.content)
try:
# Workers should not scale up
assert len(mnist_list[0]["workers"]) == 0
finally:
# UnRegister mnist model
test_utils.unregister_model("mnist")
def run_inference_using_url_with_data(purl=None, pfiles=None, ptimeout=120):
print(f"purl={purl}")
print(f"pfiles={pfiles}")
if purl is None and pfiles is None:
return
print(f"purl1={purl}")
print(f"pfiles1={pfiles}")
try:
response = requests.post(url=purl, files=pfiles, timeout=ptimeout)
except:
print(f"sent echo_stream rep=none")
return None
else:
print(f"sent echo_stream rep={response}")
return response
def run_inference_using_url_with_data_json(purl=None, json_input=None, ptimeout=120):
    if purl is None and json_input is None:
return
try:
response = requests.post(url=purl, json=json_input, timeout=ptimeout)
except:
return None
else:
return response
def test_mnist_model_register_and_inference_on_valid_model():
"""
Validates that snapshot.cfg is created when management apis are invoked.
"""
test_utils.start_torchserve(no_config_snapshots=True)
test_utils.register_model("mnist", "mnist.mar")
files = {
"data": (data_file_mnist, open(data_file_mnist, "rb")),
}
response = run_inference_using_url_with_data(
TF_INFERENCE_API + "/predictions/mnist", files
)
assert (json.loads(response.content)) == 1
test_utils.unregister_model("mnist")
def test_mnist_model_register_using_non_existent_handler_with_nonzero_workers():
"""
Validates that a model cannot be registered with a non existent handler if
the initial number of workers is greater than zero.
"""
response = requests.post(
TF_MANAGEMENT_API
+ "/models?handler=nehandlermodels&initial_workers=1&url=mnist.mar"
)
if (
json.loads(response.content)["code"] == 500
and json.loads(response.content)["type"] == "InternalServerException"
):
assert True, (
"Internal Server Exception, "
"Cannot register model with non existent handler with non zero workers"
)
else:
assert False, (
"Something is not right!! Successfully registered model with "
"non existent handler with non zero workers"
)
test_utils.unregister_model("mnist")
def test_mnist_model_register_scale_inference_with_non_existent_handler():
response = mnist_model_register_using_non_existent_handler_then_scale_up()
mnist_list = json.loads(response.content)
assert len(mnist_list[0]["workers"]) > 1
files = {
"data": (data_file_mnist, open(data_file_mnist, "rb")),
}
response = run_inference_using_url_with_data(
TF_INFERENCE_API + "/predictions/mnist", files
)
if response is None:
assert True, "Inference failed as the handler is non existent"
else:
if json.loads(response.content) == 1:
assert False, (
"Something is not right!! Somehow Inference passed "
"despite passing non existent handler"
)
def test_mnist_model_register_and_inference_on_valid_model_explain():
"""
Validates that snapshot.cfg is created when management apis are invoked.
"""
test_utils.start_torchserve(no_config_snapshots=True)
test_utils.register_model("mnist", "mnist.mar")
files = {
"data": (data_file_mnist, open(data_file_mnist, "rb")),
}
response = run_inference_using_url_with_data(
TF_INFERENCE_API + "/explanations/mnist", files
)
assert np.array(json.loads(response.content)).shape == (1, 28, 28)
test_utils.unregister_model("mnist")
def test_kserve_mnist_model_register_and_inference_on_valid_model():
"""
Validates that snapshot.cfg is created when management apis are invoked for kserve.
"""
test_utils.start_torchserve(snapshot_file=snapshot_file_kf)
test_utils.register_model("mnist", "mnist.mar")
with open(input_json_mnist, "r") as f:
s = f.read()
s = s.replace("'", '"')
data = json.loads(s)
response = run_inference_using_url_with_data_json(
KF_INFERENCE_API + "/v1/models/mnist:predict", data
)
assert (json.loads(response.content)["predictions"][0]) == 2
test_utils.unregister_model("mnist")
def test_kserve_mnist_model_register_scale_inference_with_non_existent_handler():
response = mnist_model_register_using_non_existent_handler_then_scale_up()
mnist_list = json.loads(response.content)
assert len(mnist_list[0]["workers"]) > 1
with open(input_json_mnist, "r") as f:
s = f.read()
s = s.replace("'", '"')
data = json.loads(s)
response = run_inference_using_url_with_data_json(
KF_INFERENCE_API + "/v1/models/mnist:predict", data
)
if response is None:
assert True, "Inference failed as the handler is non existent"
else:
if json.loads(response.content) == 1:
assert False, (
"Something is not right!! Somehow Inference passed "
"despite passing non existent handler"
)
def test_kserve_mnist_model_register_and_inference_on_valid_model_explain():
"""
Validates the kserve model explanations.
"""
test_utils.start_torchserve(snapshot_file=snapshot_file_kf)
test_utils.register_model("mnist", "mnist.mar")
with open(input_json_mnist, "r") as f:
s = f.read()
s = s.replace("'", '"')
data = json.loads(s)
response = run_inference_using_url_with_data_json(
KF_INFERENCE_API + "/v1/models/mnist:explain", data
)
assert np.array(json.loads(response.content)["explanations"]).shape == (
1,
1,
28,
28,
)
test_utils.unregister_model("mnist")
@pytest.mark.skipif(
os.environ.get("TS_RUN_IN_DOCKER", False),
reason="Test to be run outside docker",
)
def test_huggingface_bert_batch_inference():
batch_size = 2
batch_delay = 10000 # 10 seconds
params = (
("model_name", "BERTSeqClassification"),
("url", "https://torchserve.pytorch.org/mar_files/BERTSeqClassification.mar"),
("initial_workers", "1"),
("batch_size", str(batch_size)),
("max_batch_delay", str(batch_delay)),
)
test_utils.start_torchserve(no_config_snapshots=True)
test_utils.register_model_with_params(params)
input_text = os.path.join(
REPO_ROOT,
"examples",
"Huggingface_Transformers",
"Seq_classification_artifacts",
"sample_text.txt",
)
# Make 2 curl requests in parallel with &
# curl --header \"X-Forwarded-For: 1.2.3.4\" won't work since you can't access local host anymore
response = os.popen(
f"curl http://127.0.0.1:8080/predictions/BERTSeqClassification -T {input_text} & curl http://127.0.0.1:8080/predictions/BERTSeqClassification -T {input_text}"
)
response = response.read()
## Assert that 2 responses are returned from the same batch
assert response == "Not AcceptedNot Accepted"
test_utils.unregister_model("BERTSeqClassification")
@pytest.mark.skip(reason="MMF doesn't support PT 1.10 yet")
def test_MMF_activity_recognition_model_register_and_inference_on_valid_model():
test_utils.start_torchserve(snapshot_file=snapshot_file_tf)
test_utils.register_model(
"MMF_activity_recognition_v2",
"https://torchserve.pytorch.org/mar_files/MMF_activity_recognition_v2.mar",
)
os.system(
"wget https://mmfartifacts.s3-us-west-2.amazonaws.com/372CC.mp4 -P ../../examples/MMF-activity-recognition"
)
input_json = "../../examples/MMF-activity-recognition/372CC.info.json"
with open(input_json) as jsonfile:
info = json.load(jsonfile)
files = {
"data": open("../../examples/MMF-activity-recognition/372CC.mp4", "rb"),
"script": info["script"],
"labels": info["action_labels"],
}
response = run_inference_using_url_with_data(
TF_INFERENCE_API + "/v1/models/MMF_activity_recognition_v2:predict",
pfiles=files,
)
response = response.content.decode("utf-8")
response = ast.literal_eval(response)
response = [n.strip() for n in response]
assert response == [
"Sitting at a table",
"Someone is sneezing",
"Watching a laptop or something on a laptop",
]
test_utils.unregister_model("MMF_activity_recognition_v2")
def test_huggingface_bert_model_parallel_inference():
number_of_gpus = torch.cuda.device_count()
check = os.popen(f"curl http://localhost:8081/models")
print(check)
if number_of_gpus > 1:
batch_size = 1
        batch_delay = 5000  # 5 seconds
params = (
("model_name", "Textgeneration"),
(
"url",
"https://bert-mar-file.s3.us-west-2.amazonaws.com/Textgeneration.mar",
),
("initial_workers", "1"),
("batch_size", str(batch_size)),
("max_batch_delay", str(batch_delay)),
)
test_utils.start_torchserve(no_config_snapshots=True)
test_utils.register_model_with_params(params)
input_text = os.path.join(
REPO_ROOT,
"examples",
"Huggingface_Transformers",
"Text_gen_artifacts",
"sample_text_captum_input.txt",
)
response = os.popen(
f"curl http://127.0.0.1:8080/predictions/Textgeneration -T {input_text}"
)
response = response.read()
assert (
"Bloomberg has decided to publish a new report on the global economy"
in response
)
test_utils.unregister_model("Textgeneration")
else:
        logger.info(
            "Running model parallel inference requires more than one GPU; "
            "number of available GPUs on this machine is: %s",
            number_of_gpus,
        )
def test_echo_stream_inference():
test_utils.start_torchserve(no_config_snapshots=True, gen_mar=False)
test_utils.register_model(
"echo_stream", "https://torchserve.pytorch.org/mar_files/echo_stream.mar"
)
response = requests.post(
TF_INFERENCE_API + "/predictions/echo_stream", data="foo", stream=True
)
assert response.headers["Transfer-Encoding"] == "chunked"
prediction = []
for chunk in response.iter_content(chunk_size=None):
if chunk:
prediction.append(chunk.decode("utf-8"))
assert str(" ".join(prediction)) == "hello hello hello hello world "
test_utils.unregister_model("echo_stream")

=== GenericMappingTools/pygmt :: /pygmt/src/solar.py ===
blob_id: c5dbfbcbbd7961e78535e3c629d5d0a18afb8738 | directory_id: d25cbd4234170c7f5443d7764943d316846c8ac4 | content_id: 130bb8d936aa30c97cbf70fd952102358f03cfe6
detected_licenses: ["BSD-3-Clause"] | license_type: permissive
snapshot_id: 6ca3af844cee51c7ec4cafaaba3918c35f871791 | revision_id: e4ee800e8045aa5f94ddaf7ad821421d007ab279 | branch_name: refs/heads/main
visit_date: 2023-08-19T04:39:41.666114 | revision_date: 2023-08-16T14:25:23 | committer_date: 2023-08-16T14:25:23
github_id: 85,352,251 | star_events_count: 490 | fork_events_count: 191
gha_license_id: BSD-3-Clause | gha_event_created_at: 2023-09-14T13:34:26 | gha_created_at: 2017-03-17T20:31:51 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,847 | extension: py | filename: solar.py
content:
"""
solar - Plot day-night terminators and twilight.
"""
import pandas as pd
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias
__doctest_skip__ = ["solar"]
@fmt_docstring
@use_alias(
B="frame",
G="fill",
J="projection",
R="region",
V="verbose",
W="pen",
c="panel",
p="perspective",
t="transparency",
)
@kwargs_to_strings(R="sequence", c="sequence_comma", p="sequence")
def solar(self, terminator="d", terminator_datetime=None, **kwargs):
r"""
    Plot day-night terminators or twilights.
This function plots the day-night terminator. Alternatively, it can plot
the terminators for civil twilight, nautical twilight, or astronomical
twilight.
Full option list at :gmt-docs:`solar.html`
{aliases}
Parameters
----------
terminator : str
Set the type of terminator displayed. Valid arguments are
``"day_night"``, ``"civil"``, ``"nautical"``, and ``"astronomical"``,
which can be set with either the full name or the first letter of the
name [Default is ``"day_night"``].
Refer to https://en.wikipedia.org/wiki/Twilight for the definitions of
different types of twilight.
terminator_datetime : str or datetime object
Set the UTC date and time of the displayed terminator
[Default is the current UTC date and time]. It can be
passed as a string or Python datetime object.
{region}
{projection}
{frame}
fill : str
Set color or pattern for filling terminators [Default is no fill].
pen : str
Set pen attributes for lines [Default is ``"0.25p,black,solid"``].
{verbose}
{panel}
{perspective}
{transparency}
Example
-------
>>> # import the Python module "datetime"
>>> import datetime
>>> import pygmt
>>> # create a datetime object at 8:52:18 on June 24, 1997 (time in UTC)
>>> date = datetime.datetime(
... year=1997, month=6, day=24, hour=8, minute=52, second=18
... )
>>> # create a new plot with pygmt.Figure()
>>> fig = pygmt.Figure()
>>> # create a map of the Earth with the coast method
>>> fig.coast(
... land="darkgreen", water="lightblue", projection="W10c", region="d"
... )
>>> fig.solar(
... # set the terminator to "day_night"
... terminator="day_night",
... # pass the datetime object
... terminator_datetime=date,
... # fill the night-section with navyblue at 75% transparency
... fill="navyblue@75",
... # draw the terminator with a 1-point black line
... pen="1p,black",
... )
>>> # show the plot
>>> fig.show()
"""
kwargs = self._preprocess(**kwargs) # pylint: disable=protected-access
if kwargs.get("T") is not None:
raise GMTInvalidInput(
"Use 'terminator' and 'terminator_datetime' instead of 'T'."
)
if terminator not in [
"day_night",
"civil",
"nautical",
"astronomical",
"d",
"c",
"n",
"a",
]:
raise GMTInvalidInput(
f"Unrecognized solar terminator type '{terminator}'. Valid values "
"are 'day_night', 'civil', 'nautical', and 'astronomical'."
)
kwargs["T"] = terminator[0]
if terminator_datetime:
try:
datetime_string = pd.to_datetime(terminator_datetime).strftime(
"%Y-%m-%dT%H:%M:%S.%f"
)
except ValueError as verr:
raise GMTInvalidInput("Unrecognized datetime format.") from verr
kwargs["T"] += f"+d{datetime_string}"
with Session() as lib:
lib.call_module(module="solar", args=build_arg_string(kwargs))

=== mfinzi/LieConv :: /examples/train_springs.py ===
blob_id: b8bdc2ffded36e168201d35e3ec1fd91c2b57e36 | directory_id: 0af79436ea048460c5ec5925ff29ed8a0a3914d2 | content_id: bd3c15a0e538b39a07d905854011253e8ebe25c8
detected_licenses: [] | license_type: no_license
snapshot_id: c85528155c0117e8c6e0f94c17d284c6bed9439b | revision_id: 93affb1806100a848fdf7065d52bc69aff91405e | branch_name: refs/heads/master
visit_date: 2023-05-14T06:31:31.443143 | revision_date: 2023-04-27T02:36:24 | committer_date: 2023-04-27T02:36:24
github_id: 242,906,925 | star_events_count: 260 | fork_events_count: 33
gha_license_id: null | gha_event_created_at: 2023-03-07T22:16:47 | gha_created_at: 2020-02-25T04:10:22 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,380 | extension: py | filename: train_springs.py
content:
import copy, warnings
from oil.tuning.args import argupdated_config
from oil.datasetup.datasets import split_dataset
from oil.tuning.study import train_trial
import torch
from torch.utils.data import DataLoader
from torch.optim import Adam
from oil.utils.utils import LoaderTo, islice, FixedNumpySeed, cosLr
from lie_conv.datasets import SpringDynamics
from lie_conv import datasets
from lie_conv.dynamicsTrainer import IntegratedDynamicsTrainer, FC, HLieResNet
import lie_conv.lieGroups as lieGroups
from lie_conv.lieGroups import Tx
from lie_conv import dynamicsTrainer
#from lie_conv.dynamics_trial import DynamicsTrial
try:
import lie_conv.graphnets as graphnets
except ImportError:
import lie_conv.lieConv as graphnets
warnings.warn('Failed to import graphnets. Please install using \
`pip install .[GN]` for this functionality', ImportWarning)
def makeTrainer(*,network=FC,net_cfg={},lr=1e-2,n_train=3000,regen=False,dataset=SpringDynamics,
dtype=torch.float32,device=torch.device('cuda'),bs=200,num_epochs=2,
trainer_config={}):
# Create Training set and model
splits = {'train':n_train,'val':200,'test':2000}
dataset = dataset(n_systems=10000,regen=regen)
with FixedNumpySeed(0):
datasets = split_dataset(dataset,splits)
model = network(sys_dim=dataset.sys_dim,d=dataset.space_dim,**net_cfg).to(device=device,dtype=dtype)
# Create train and Dev(Test) dataloaders and move elems to gpu
dataloaders = {k:LoaderTo(DataLoader(v,batch_size=min(bs,n_train),num_workers=0,shuffle=(k=='train')),
device=device,dtype=dtype) for k,v in datasets.items()}
dataloaders['Train'] = islice(dataloaders['train'],len(dataloaders['val']))
# Initialize optimizer and learning rate schedule
opt_constr = lambda params: Adam(params, lr=lr)
lr_sched = cosLr(num_epochs)
return IntegratedDynamicsTrainer(model,dataloaders,opt_constr,lr_sched,
log_args={'timeFrac':1/4,'minPeriod':0.0},**trainer_config)
Trial = train_trial(makeTrainer)
if __name__=='__main__':
defaults = copy.deepcopy(makeTrainer.__kwdefaults__)
defaults['save']=False
defaults['trainer_config']['early_stop_metric']='val_MSE'
print(Trial(argupdated_config(defaults,namespace=(dynamicsTrainer,lieGroups,datasets,graphnets))))
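
A hedged usage sketch: makeTrainer can be driven programmatically as well as through the CLI hook above (this assumes oil's Trainer exposes a train(num_epochs) method and that the spring-dynamics dataset can be generated locally; both are assumptions, not shown in this file):

trainer = makeTrainer(num_epochs=2, n_train=500, device=torch.device('cpu'))
trainer.train(2)  # train for two epochs on the spring-dynamics task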

=== NVIDIA/DeepLearningExamples :: /TensorFlow2/Segmentation/Contrib/UNet3P/data_generators/tf_data_generator.py ===
blob_id: 8582acce576a3baef291d4c85432eab53a7bf38e | directory_id: cad91ae76d2746a6c28ddda0f33a58f9d461378f | content_id: 8f35a1b84898ed7a7689de5d8f3510d42e7a41ad
detected_licenses: ["Apache-2.0"] | license_type: permissive
snapshot_id: fe677521e7e2a16e3cb0b77e358f9aab72f8c11a | revision_id: a5388a45f71a949639b35cc5b990bd130d2d8164 | branch_name: refs/heads/master
visit_date: 2023-08-31T20:57:08.798455 | revision_date: 2023-08-23T10:09:12 | committer_date: 2023-08-23T10:09:12
github_id: 131,881,622 | star_events_count: 11,838 | fork_events_count: 3,124
gha_license_id: null | gha_event_created_at: 2023-08-28T16:57:33 | gha_created_at: 2018-05-02T17:04:05 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,786 | extension: py | filename: tf_data_generator.py
content:
"""
Tensorflow data generator class.
"""
import tensorflow as tf
import numpy as np
from omegaconf import DictConfig
from utils.general_utils import get_data_paths
from utils.images_utils import prepare_image, prepare_mask
class DataGenerator(tf.keras.utils.Sequence):
"""
Generate batches of data for model by reading images and their
corresponding masks using TensorFlow Sequence Generator.
    There are two options: you can pass either a directory path or a list.
    In the directory case, the value should be the relative path of the
    images/mask folder from the project root.
    In the list case, every element should be the absolute path of an
    image or mask.
    Because this generator is also used for prediction, you can set the mask
    path to None during testing if masks are not available for visualization.
"""
def __init__(self, cfg: DictConfig, mode: str):
"""
Initialization
"""
self.cfg = cfg
self.mode = mode
self.batch_size = self.cfg.HYPER_PARAMETERS.BATCH_SIZE
# set seed for reproducibility
np.random.seed(cfg.SEED)
        # check whether masks are available
self.mask_available = False if cfg.DATASET[mode].MASK_PATH is None or str(
cfg.DATASET[mode].MASK_PATH).lower() == "none" else True
data_paths = get_data_paths(cfg, mode, self.mask_available)
self.images_paths = data_paths[0]
if self.mask_available:
self.mask_paths = data_paths[1]
# self.images_paths.sort() # no need for sorting
self.on_epoch_end()
def __len__(self):
"""
Denotes the number of batches per epoch
"""
# Tensorflow problem: on_epoch_end is not being called at the end
# of each epoch, so forcing on_epoch_end call
self.on_epoch_end()
return int(
np.floor(
len(self.images_paths) / self.batch_size
)
)
def on_epoch_end(self):
"""
Updates indexes after each epoch
"""
self.indexes = np.arange(len(self.images_paths))
if self.cfg.PREPROCESS_DATA.SHUFFLE[self.mode].VALUE:
np.random.shuffle(self.indexes)
def __getitem__(self, index):
"""
Generate one batch of data
"""
# Generate indexes of the batch
indexes = self.indexes[
index * self.batch_size:(index + 1) * self.batch_size
]
# Generate data
return self.__data_generation(indexes)
def __data_generation(self, indexes):
"""
Generates batch data
"""
# create empty array to store batch data
batch_images = np.zeros(
(
self.cfg.HYPER_PARAMETERS.BATCH_SIZE,
self.cfg.INPUT.HEIGHT,
self.cfg.INPUT.WIDTH,
self.cfg.INPUT.CHANNELS
)
).astype(np.float32)
if self.mask_available:
batch_masks = np.zeros(
(
self.cfg.HYPER_PARAMETERS.BATCH_SIZE,
self.cfg.INPUT.HEIGHT,
self.cfg.INPUT.WIDTH,
self.cfg.OUTPUT.CLASSES
)
).astype(np.float32)
for i, index in enumerate(indexes):
# extract path from list
img_path = self.images_paths[int(index)]
if self.mask_available:
mask_path = self.mask_paths[int(index)]
# prepare image for model by resizing and preprocessing it
image = prepare_image(
img_path,
self.cfg.PREPROCESS_DATA.RESIZE,
self.cfg.PREPROCESS_DATA.IMAGE_PREPROCESSING_TYPE,
)
if self.mask_available:
# prepare image for model by resizing and preprocessing it
mask = prepare_mask(
mask_path,
self.cfg.PREPROCESS_DATA.RESIZE,
self.cfg.PREPROCESS_DATA.NORMALIZE_MASK,
)
# numpy to tensorflow conversion
if self.mask_available:
image, mask = tf.numpy_function(
self.tf_func,
[image, mask],
[tf.float32, tf.int32]
)
else:
image = tf.numpy_function(
self.tf_func,
[image, ],
[tf.float32, ]
)
            # set shape attributes which were lost during TF conversion
image.set_shape(
[
self.cfg.INPUT.HEIGHT,
self.cfg.INPUT.WIDTH,
self.cfg.INPUT.CHANNELS
]
)
batch_images[i] = image
if self.mask_available:
# height x width --> height x width x output classes
if self.cfg.OUTPUT.CLASSES == 1:
mask = tf.expand_dims(mask, axis=-1)
else:
# convert mask into one hot vectors
mask = tf.one_hot(
mask,
self.cfg.OUTPUT.CLASSES,
dtype=tf.int32
)
mask.set_shape(
[
self.cfg.INPUT.HEIGHT,
self.cfg.INPUT.WIDTH,
self.cfg.OUTPUT.CLASSES
]
)
batch_masks[i] = mask
if self.mask_available:
return batch_images, batch_masks
else:
return batch_images,
@staticmethod
def tf_func(*args):
return args
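
A standalone sketch of the mask-shaping step in __data_generation: a height x width integer mask becomes H x W x 1 when OUTPUT.CLASSES is 1, or H x W x C one-hot otherwise:

import tensorflow as tf

mask = tf.constant([[0, 1], [2, 1]], dtype=tf.int32)  # 2 x 2 toy mask
binary = tf.expand_dims(mask, axis=-1)                # shape (2, 2, 1)
one_hot = tf.one_hot(mask, depth=3, dtype=tf.int32)   # shape (2, 2, 3)
print(binary.shape, one_hot.shape)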

=== rancher/cattle :: /tests/integration/cattletest/core/test_user_preferences.py ===
blob_id: 5b383d3f8fd40b1e362149e32a8e19f89301a5b3 | directory_id: 157d84f8aafc76ba9ea0dbbf08ede744966b4250 | content_id: 4b79b521f7f1923aafcbe7787fa9015b0d87a7ca
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-warranty-disclaimer"] | license_type: permissive
snapshot_id: 81d165a0339a41950561fe534c7529ec74203c56 | revision_id: 82d154a53f4089fecfb9f320caad826bb4f6055f | branch_name: refs/heads/v1.6
visit_date: 2023-08-27T20:19:31.989806 | revision_date: 2020-05-01T18:15:55 | committer_date: 2020-05-01T20:11:28
github_id: 18,023,059 | star_events_count: 487 | fork_events_count: 233
gha_license_id: Apache-2.0 | gha_event_created_at: 2022-01-03T18:07:33 | gha_created_at: 2014-03-23T00:19:52 | gha_language: Java
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,604 | extension: py | filename: test_user_preferences.py
content:
from common_fixtures import * # NOQA
from gdapi import ApiError
@pytest.fixture(scope='module')
def user_client(context):
return context.user_client
def _user_preference(client, name=None):
if name is None:
name = random_str()
preference = client.wait_success(client.create_user_preference(
name=name, value=random_str()))
got_preference = client.by_id('userPreference', preference.id)
assert preference.id == got_preference.id
assert name == got_preference.name
assert preference.value == got_preference.value
return got_preference
def make_prefs(client):
pref_ids = []
for x in range(0, 5):
pref_ids.append(
_user_preference(client, name=random_str()).id)
return set(pref_ids)
def get_prefs_ids(client, all=False):
pref_ids = []
for pref in client.list_user_preference(all=all):
pref_ids.append(pref.id)
return set(pref_ids)
def test_create_user_preference(user_client):
_user_preference(user_client)
def test_delete_user_preference(user_client):
preference = _user_preference(user_client)
preference = user_client.wait_success(preference.deactivate())
preference = user_client.wait_success(preference.remove())
preference = user_client.wait_success(preference.purge())
preference = user_client.by_id('userPreference', preference.id)
assert preference.state == 'purged'
preference = _user_preference(user_client)
preference = user_client.wait_success(preference.remove())
assert preference.state == 'removed'
preference = user_client.wait_success(preference.purge())
assert preference.state == 'purged'
def test_update_user_preference(user_client):
preference = _user_preference(user_client)
new_value = random_str()
user_client.update(preference, value=new_value)
got_preference = user_client.by_id('userPreference', preference.id)
assert got_preference.value == new_value
def test_update_user_preference_pass_name(user_client):
preference = _user_preference(user_client)
new_value = random_str()
user_client.update(preference, name=preference.name, value=new_value)
got_preference = user_client.by_id('userPreference', preference.id)
assert got_preference.value == new_value
def test_unique_user_preference(user_client, new_context):
rand_str = random_str()
_user_preference(user_client, name=rand_str)
with pytest.raises(ApiError) as e:
_user_preference(user_client, name=rand_str)
assert e.value.error.status == 422
_user_preference(new_context.user_client, name=rand_str)
with pytest.raises(ApiError) as e:
_user_preference(new_context.user_client, name=rand_str)
assert e.value.error.status == 422
def test_all_filter_user_preference(admin_user_client, request):
ctx1 = new_context(admin_user_client, request)
ctx2 = new_context(admin_user_client, request)
ctx1_prefs = make_prefs(ctx1.user_client)
ctx2_prefs = make_prefs(ctx2.user_client)
got_ctx1_prefs = get_prefs_ids(ctx1.user_client)
got_ctx2_prefs = get_prefs_ids(ctx2.user_client)
assert len(ctx1_prefs & got_ctx1_prefs) == len(ctx1_prefs)
assert len(ctx2_prefs & got_ctx2_prefs) == len(ctx2_prefs)
assert len(got_ctx1_prefs & got_ctx2_prefs) == 0
admin_prefs = get_prefs_ids(admin_user_client)
all_prefs = get_prefs_ids(admin_user_client, all=True)
assert len(admin_prefs) != len(all_prefs)
assert admin_prefs <= all_prefs
assert ctx1_prefs | ctx2_prefs <= all_prefs
assert len((ctx1_prefs | ctx2_prefs) & admin_prefs) == 0
|
0e1b7b26289fc6202093f7ca343050293a2e06e0
|
f16cf8b9123fcc03fa985285e97a46bbc2904eaa
|
/detectron/modeling/rfcn_heads.py
|
5b54addfcba549cae3b0e744e81237753753d7cb
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
facebookresearch/Detectron
|
0b9fd0ece5819d62c8ad6f5caa0f7322749eca26
|
2246d34b47b59d80ad4ef3df9c2964e6300a0740
|
refs/heads/main
| 2023-09-04T00:21:36.128859
| 2023-08-25T22:10:24
| 2023-08-25T22:10:24
| 105,919,803
| 28,477
| 6,452
|
Apache-2.0
| 2023-05-25T06:01:04
| 2017-10-05T17:32:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,265
|
py
|
rfcn_heads.py
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from detectron.core.config import cfg
from detectron.utils.c2 import const_fill
from detectron.utils.c2 import gauss_fill
# ---------------------------------------------------------------------------- #
# R-FCN outputs and losses
# ---------------------------------------------------------------------------- #
def add_rfcn_outputs(model, blob_in, dim_in, dim_reduce, spatial_scale):
if dim_reduce is not None:
# Optional dim reduction
blob_in = model.Conv(
blob_in,
'conv_dim_reduce',
dim_in,
dim_reduce,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
blob_in = model.Relu(blob_in, blob_in)
dim_in = dim_reduce
# Classification conv
model.Conv(
blob_in,
'conv_cls',
dim_in,
model.num_classes * cfg.RFCN.PS_GRID_SIZE**2,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
# Bounding-box regression conv
num_bbox_reg_classes = (
2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else model.num_classes
)
model.Conv(
blob_in,
'conv_bbox_pred',
dim_in,
4 * num_bbox_reg_classes * cfg.RFCN.PS_GRID_SIZE**2,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
# Classification PS RoI pooling
model.net.PSRoIPool(
['conv_cls', 'rois'], ['psroipooled_cls', '_mapping_channel_cls'],
group_size=cfg.RFCN.PS_GRID_SIZE,
output_dim=model.num_classes,
spatial_scale=spatial_scale
)
model.AveragePool(
'psroipooled_cls', 'cls_score_4d', kernel=cfg.RFCN.PS_GRID_SIZE
)
model.net.Reshape(
'cls_score_4d', ['cls_score', '_cls_scores_shape'],
shape=(-1, cfg.MODEL.NUM_CLASSES)
)
if not model.train:
model.Softmax('cls_score', 'cls_prob', engine='CUDNN')
# Bbox regression PS RoI pooling
model.net.PSRoIPool(
['conv_bbox_pred', 'rois'],
['psroipooled_bbox', '_mapping_channel_bbox'],
group_size=cfg.RFCN.PS_GRID_SIZE,
output_dim=4 * num_bbox_reg_classes,
spatial_scale=spatial_scale
)
model.AveragePool(
'psroipooled_bbox', 'bbox_pred', kernel=cfg.RFCN.PS_GRID_SIZE
)
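# --- Illustrative sketch (not part of the original file) ---
# These heads are normally wired in by Detectron's model builder; a manual
# call would look roughly like the following (the blob name, dims and scale
# are assumptions for a ResNet-50 trunk):
#
#   add_rfcn_outputs(model, blob_in='res5_2_sum', dim_in=2048,
#                    dim_reduce=1024, spatial_scale=1. / 16.)
#
# which produces the 'cls_score' / 'cls_prob' and 'bbox_pred' blobs via
# position-sensitive RoI pooling over a PS_GRID_SIZE x PS_GRID_SIZE grid.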
|
8ebb0fcbcf19303cf900659e2d91c263798cb230
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Autodesk/Revit/DB/__init___parts/IntersectionResultArray.py
|
9b1ad2e41c320de91858ad8aa5750d97fcfeb4c5
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,696
|
py
|
IntersectionResultArray.py
|
class IntersectionResultArray(APIObject,IDisposable,IEnumerable):
"""
An array that can contain any type of object.
IntersectionResultArray()
"""
def Append(self,item):
""" Append(self: IntersectionResultArray,item: IntersectionResult) """
pass
def Clear(self):
"""
Clear(self: IntersectionResultArray)
       Removes every item from the array, rendering it empty.
"""
pass
def Dispose(self):
""" Dispose(self: IntersectionResultArray,A_0: bool) """
pass
def ForwardIterator(self):
"""
ForwardIterator(self: IntersectionResultArray) -> IntersectionResultArrayIterator
Retrieve a forward moving iterator to the array.
Returns: Returns a forward moving iterator to the array.
"""
pass
def GetEnumerator(self):
"""
GetEnumerator(self: IntersectionResultArray) -> IEnumerator
Retrieve a forward moving iterator to the array.
Returns: Returns a forward moving iterator to the array.
"""
pass
def Insert(self,item,index):
""" Insert(self: IntersectionResultArray,item: IntersectionResult,index: int) """
pass
def ReleaseManagedResources(self,*args):
""" ReleaseManagedResources(self: APIObject) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: APIObject) """
pass
def ReverseIterator(self):
"""
ReverseIterator(self: IntersectionResultArray) -> IntersectionResultArrayIterator
Retrieve a backward moving iterator to the array.
Returns: Returns a backward moving iterator to the array.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
IsEmpty=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Test to see if the array is empty.
Get: IsEmpty(self: IntersectionResultArray) -> bool
"""
Size=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the number of objects that are in the array.
Get: Size(self: IntersectionResultArray) -> int
"""
|
3c2793c302080d6ef6305de497af835df42e5221
|
04963e5a7a49e4d938a652e96953fadcd21a2ef7
|
/tmrl/custom/custom_memories.py
|
01c3880c031219d56ecd0f745a74436d83efcd49
|
[
"MIT"
] |
permissive
|
trackmania-rl/tmrl
|
efe961224456c1553c6340b2daf7eddb8d4d6525
|
e3254888ae7e865c56236d72fb03311a41c10310
|
refs/heads/master
| 2023-07-27T08:27:27.541628
| 2023-07-23T20:34:10
| 2023-07-23T20:34:10
| 277,973,609
| 287
| 31
|
MIT
| 2023-07-18T08:56:59
| 2020-07-08T02:49:55
|
Python
|
UTF-8
|
Python
| false
| false
| 19,911
|
py
|
custom_memories.py
|
# third-party imports
import numpy as np
import cv2
# local imports
from tmrl.memory import TorchMemory
# LOCAL BUFFER COMPRESSION ==============================
def get_local_buffer_sample_lidar(prev_act, obs, rew, terminated, truncated, info):
"""
Input:
prev_act: action computed from a previous observation and applied to yield obs in the transition (but not influencing the unaugmented observation in real-time envs)
obs, rew, terminated, truncated, info: outcome of the transition
this function creates the object that will actually be stored in local buffers for networking
this is to compress the sample before sending it over the Internet/local network
buffers of such samples will be given as input to the append() method of the memory
the user must define both this function and the append() method of the memory
CAUTION: prev_act is the action that comes BEFORE obs (i.e. prev_obs, prev_act(prev_obs), obs(prev_act))
"""
obs_mod = (obs[0], obs[1][-19:]) # speed and most recent LIDAR only
rew_mod = np.float32(rew)
terminated_mod = terminated
truncated_mod = truncated
return prev_act, obs_mod, rew_mod, terminated_mod, truncated_mod, info
def get_local_buffer_sample_lidar_progress(prev_act, obs, rew, terminated, truncated, info):
"""
Input:
prev_act: action computed from a previous observation and applied to yield obs in the transition (but not influencing the unaugmented observation in real-time envs)
obs, rew, terminated, truncated, info: outcome of the transition
this function creates the object that will actually be stored in local buffers for networking
this is to compress the sample before sending it over the Internet/local network
buffers of such samples will be given as input to the append() method of the memory
the user must define both this function and the append() method of the memory
CAUTION: prev_act is the action that comes BEFORE obs (i.e. prev_obs, prev_act(prev_obs), obs(prev_act))
"""
    obs_mod = (obs[0], obs[1], obs[2][-19:])  # speed, progress and most recent LIDAR only
rew_mod = np.float32(rew)
terminated_mod = terminated
truncated_mod = truncated
return prev_act, obs_mod, rew_mod, terminated_mod, truncated_mod, info
def get_local_buffer_sample_tm20_imgs(prev_act, obs, rew, terminated, truncated, info):
"""
Sample compressor for MemoryTMFull
Input:
prev_act: action computed from a previous observation and applied to yield obs in the transition
obs, rew, terminated, truncated, info: outcome of the transition
this function creates the object that will actually be stored in local buffers for networking
this is to compress the sample before sending it over the Internet/local network
buffers of such samples will be given as input to the append() method of the memory
the user must define both this function and the append() method of the memory
CAUTION: prev_act is the action that comes BEFORE obs (i.e. prev_obs, prev_act(prev_obs), obs(prev_act))
"""
prev_act_mod = prev_act
obs_mod = (obs[0], obs[1], obs[2], (obs[3][-1] * 256.0).astype(np.uint8))
rew_mod = rew
terminated_mod = terminated
truncated_mod = truncated
info_mod = info
return prev_act_mod, obs_mod, rew_mod, terminated_mod, truncated_mod, info_mod
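# Illustrative note (not in the original file): in the TM20 "full" pipeline,
# an observation (speed, gear, rpm, img_history) is compressed by keeping only
# the most recent image, re-quantized to uint8 via (img * 256.0).astype(np.uint8);
# MemoryTMFull.load_imgs() reverses this later by dividing by 256.0 again.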
# FUNCTIONS ====================================================
def last_true_in_list(li):
for i in reversed(range(len(li))):
if li[i]:
return i
return None
def replace_hist_before_eoe(hist, eoe_idx_in_hist):
"""
Pads the history hist before the End Of Episode (EOE) index.
Previous entries in hist are padded with copies of the first element occurring after EOE.
"""
last_idx = len(hist) - 1
assert eoe_idx_in_hist <= last_idx, f"replace_hist_before_eoe: eoe_idx_in_hist:{eoe_idx_in_hist}, last_idx:{last_idx}"
if 0 <= eoe_idx_in_hist < last_idx:
for i in reversed(range(len(hist))):
if i <= eoe_idx_in_hist:
hist[i] = hist[i + 1]
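# Worked example (illustrative, not from the original file):
#   hist = ['o0', 'o1', 'o2', 'o3']
#   replace_hist_before_eoe(hist, eoe_idx_in_hist=1)
#   assert hist == ['o2', 'o2', 'o2', 'o3']
# Every entry at or before the EOE index is overwritten (back to front) by its
# successor, so the pre-EOE history is padded with the first post-EOE element.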
# SUPPORTED CUSTOM MEMORIES ============================================================================================
class MemoryTM(TorchMemory):
def __init__(self,
memory_size=None,
batch_size=None,
dataset_path="",
imgs_obs=4,
act_buf_len=1,
nb_steps=1,
sample_preprocessor: callable = None,
crc_debug=False,
device="cpu"):
self.imgs_obs = imgs_obs
self.act_buf_len = act_buf_len
self.min_samples = max(self.imgs_obs, self.act_buf_len)
self.start_imgs_offset = max(0, self.min_samples - self.imgs_obs)
self.start_acts_offset = max(0, self.min_samples - self.act_buf_len)
super().__init__(memory_size=memory_size,
batch_size=batch_size,
dataset_path=dataset_path,
nb_steps=nb_steps,
sample_preprocessor=sample_preprocessor,
crc_debug=crc_debug,
device=device)
def append_buffer(self, buffer):
raise NotImplementedError
def __len__(self):
if len(self.data) == 0:
return 0
res = len(self.data[0]) - self.min_samples - 1
if res < 0:
return 0
else:
return res
def get_transition(self, item):
raise NotImplementedError
class MemoryTMLidar(MemoryTM):
def get_transition(self, item):
"""
CAUTION: item is the first index of the 4 images in the images history of the OLD observation
CAUTION: in the buffer, a sample is (act, obs(act)) and NOT (obs, act(obs))
i.e. in a sample, the observation is what step returned after being fed act (and preprocessed)
therefore, in the RTRL setting, act is appended to obs
So we load 5 images from here...
Don't forget the info dict for CRC debugging
"""
idx_last = item + self.min_samples - 1
idx_now = item + self.min_samples
acts = self.load_acts(item)
last_act_buf = acts[:-1]
new_act_buf = acts[1:]
imgs = self.load_imgs(item)
imgs_last_obs = imgs[:-1]
imgs_new_obs = imgs[1:]
# if a reset transition has influenced the observation, special care must be taken
last_eoes = self.data[4][idx_now - self.min_samples:idx_now] # self.min_samples values
last_eoe_idx = last_true_in_list(last_eoes) # last occurrence of True
assert last_eoe_idx is None or last_eoes[last_eoe_idx], f"last_eoe_idx:{last_eoe_idx}"
if last_eoe_idx is not None:
replace_hist_before_eoe(hist=new_act_buf, eoe_idx_in_hist=last_eoe_idx - self.start_acts_offset - 1)
replace_hist_before_eoe(hist=last_act_buf, eoe_idx_in_hist=last_eoe_idx - self.start_acts_offset)
replace_hist_before_eoe(hist=imgs_new_obs, eoe_idx_in_hist=last_eoe_idx - self.start_imgs_offset - 1)
replace_hist_before_eoe(hist=imgs_last_obs, eoe_idx_in_hist=last_eoe_idx - self.start_imgs_offset)
imgs_new_obs = np.ndarray.flatten(imgs_new_obs)
imgs_last_obs = np.ndarray.flatten(imgs_last_obs)
last_obs = (self.data[2][idx_last], imgs_last_obs, *last_act_buf)
new_act = self.data[1][idx_now]
rew = np.float32(self.data[5][idx_now])
new_obs = (self.data[2][idx_now], imgs_new_obs, *new_act_buf)
terminated = self.data[7][idx_now]
truncated = self.data[8][idx_now]
info = self.data[6][idx_now]
return last_obs, new_act, rew, new_obs, terminated, truncated, info
def load_imgs(self, item):
res = self.data[3][(item + self.start_imgs_offset):(item + self.start_imgs_offset + self.imgs_obs + 1)]
return np.stack(res)
def load_acts(self, item):
res = self.data[1][(item + self.start_acts_offset):(item + self.start_acts_offset + self.act_buf_len + 1)]
return res
def append_buffer(self, buffer):
"""
buffer is a list of samples (act, obs, rew, terminated, truncated, info)
don't forget to keep the info dictionary in the sample for CRC debugging
"""
first_data_idx = self.data[0][-1] + 1 if self.__len__() > 0 else 0
d0 = [first_data_idx + i for i, _ in enumerate(buffer.memory)] # indexes
d1 = [b[0] for b in buffer.memory] # actions
d2 = [b[1][0] for b in buffer.memory] # speeds
d3 = [b[1][1] for b in buffer.memory] # lidar
d4 = [b[3] or b[4] for b in buffer.memory] # eoes (terminated or truncated)
d5 = [b[2] for b in buffer.memory] # rewards
d6 = [b[5] for b in buffer.memory] # infos
d7 = [b[3] for b in buffer.memory] # terminated
d8 = [b[4] for b in buffer.memory] # truncated
if self.__len__() > 0:
self.data[0] += d0
self.data[1] += d1
self.data[2] += d2
self.data[3] += d3
self.data[4] += d4
self.data[5] += d5
self.data[6] += d6
self.data[7] += d7
self.data[8] += d8
else:
self.data.append(d0)
self.data.append(d1)
self.data.append(d2)
self.data.append(d3)
self.data.append(d4)
self.data.append(d5)
self.data.append(d6)
self.data.append(d7)
self.data.append(d8)
to_trim = self.__len__() - self.memory_size
if to_trim > 0:
self.data[0] = self.data[0][to_trim:]
self.data[1] = self.data[1][to_trim:]
self.data[2] = self.data[2][to_trim:]
self.data[3] = self.data[3][to_trim:]
self.data[4] = self.data[4][to_trim:]
self.data[5] = self.data[5][to_trim:]
self.data[6] = self.data[6][to_trim:]
self.data[7] = self.data[7][to_trim:]
self.data[8] = self.data[8][to_trim:]
return self
class MemoryTMLidarProgress(MemoryTM):
def get_transition(self, item):
"""
CAUTION: item is the first index of the 4 images in the images history of the OLD observation
CAUTION: in the buffer, a sample is (act, obs(act)) and NOT (obs, act(obs))
i.e. in a sample, the observation is what step returned after being fed act (and preprocessed)
therefore, in the RTRL setting, act is appended to obs
So we load 5 images from here...
Don't forget the info dict for CRC debugging
"""
idx_last = item + self.min_samples - 1
idx_now = item + self.min_samples
acts = self.load_acts(item)
last_act_buf = acts[:-1]
new_act_buf = acts[1:]
imgs = self.load_imgs(item)
imgs_last_obs = imgs[:-1]
imgs_new_obs = imgs[1:]
# if a reset transition has influenced the observation, special care must be taken
last_eoes = self.data[4][idx_now - self.min_samples:idx_now] # self.min_samples values
last_eoe_idx = last_true_in_list(last_eoes) # last occurrence of True
assert last_eoe_idx is None or last_eoes[last_eoe_idx], f"last_eoe_idx:{last_eoe_idx}"
if last_eoe_idx is not None:
replace_hist_before_eoe(hist=new_act_buf, eoe_idx_in_hist=last_eoe_idx - self.start_acts_offset - 1)
replace_hist_before_eoe(hist=last_act_buf, eoe_idx_in_hist=last_eoe_idx - self.start_acts_offset)
replace_hist_before_eoe(hist=imgs_new_obs, eoe_idx_in_hist=last_eoe_idx - self.start_imgs_offset - 1)
replace_hist_before_eoe(hist=imgs_last_obs, eoe_idx_in_hist=last_eoe_idx - self.start_imgs_offset)
imgs_new_obs = np.ndarray.flatten(imgs_new_obs)
imgs_last_obs = np.ndarray.flatten(imgs_last_obs)
last_obs = (self.data[2][idx_last], self.data[7][idx_last], imgs_last_obs, *last_act_buf)
new_act = self.data[1][idx_now]
rew = np.float32(self.data[5][idx_now])
new_obs = (self.data[2][idx_now], self.data[7][idx_now], imgs_new_obs, *new_act_buf)
terminated = self.data[8][idx_now]
truncated = self.data[9][idx_now]
info = self.data[6][idx_now]
return last_obs, new_act, rew, new_obs, terminated, truncated, info
def load_imgs(self, item):
res = self.data[3][(item + self.start_imgs_offset):(item + self.start_imgs_offset + self.imgs_obs + 1)]
return np.stack(res)
def load_acts(self, item):
res = self.data[1][(item + self.start_acts_offset):(item + self.start_acts_offset + self.act_buf_len + 1)]
return res
def append_buffer(self, buffer):
"""
        buffer is a list of samples (act, obs, rew, terminated, truncated, info)
don't forget to keep the info dictionary in the sample for CRC debugging
"""
first_data_idx = self.data[0][-1] + 1 if self.__len__() > 0 else 0
d0 = [first_data_idx + i for i, _ in enumerate(buffer.memory)] # indexes
d1 = [b[0] for b in buffer.memory] # actions
d2 = [b[1][0] for b in buffer.memory] # speeds
d3 = [b[1][2] for b in buffer.memory] # lidar
d4 = [b[3] or b[4] for b in buffer.memory] # eoes
d5 = [b[2] for b in buffer.memory] # rewards
d6 = [b[5] for b in buffer.memory] # infos
d7 = [b[1][1] for b in buffer.memory] # progress
d8 = [b[3] for b in buffer.memory] # terminated
d9 = [b[4] for b in buffer.memory] # truncated
if self.__len__() > 0:
self.data[0] += d0
self.data[1] += d1
self.data[2] += d2
self.data[3] += d3
self.data[4] += d4
self.data[5] += d5
self.data[6] += d6
self.data[7] += d7
self.data[8] += d8
self.data[9] += d9
else:
self.data.append(d0)
self.data.append(d1)
self.data.append(d2)
self.data.append(d3)
self.data.append(d4)
self.data.append(d5)
self.data.append(d6)
self.data.append(d7)
self.data.append(d8)
self.data.append(d9)
to_trim = self.__len__() - self.memory_size
if to_trim > 0:
self.data[0] = self.data[0][to_trim:]
self.data[1] = self.data[1][to_trim:]
self.data[2] = self.data[2][to_trim:]
self.data[3] = self.data[3][to_trim:]
self.data[4] = self.data[4][to_trim:]
self.data[5] = self.data[5][to_trim:]
self.data[6] = self.data[6][to_trim:]
self.data[7] = self.data[7][to_trim:]
self.data[8] = self.data[8][to_trim:]
self.data[9] = self.data[9][to_trim:]
return self
class MemoryTMFull(MemoryTM):
def get_transition(self, item):
"""
CAUTION: item is the first index of the 4 images in the images history of the OLD observation
CAUTION: in the buffer, a sample is (act, obs(act)) and NOT (obs, act(obs))
i.e. in a sample, the observation is what step returned after being fed act (and preprocessed)
therefore, in the RTRL setting, act is appended to obs
So we load 5 images from here...
Don't forget the info dict for CRC debugging
"""
idx_last = item + self.min_samples - 1
idx_now = item + self.min_samples
acts = self.load_acts(item)
last_act_buf = acts[:-1]
new_act_buf = acts[1:]
imgs = self.load_imgs(item)
imgs_last_obs = imgs[:-1]
imgs_new_obs = imgs[1:]
# if a reset transition has influenced the observation, special care must be taken
last_eoes = self.data[4][idx_now - self.min_samples:idx_now] # self.min_samples values
last_eoe_idx = last_true_in_list(last_eoes) # last occurrence of True
assert last_eoe_idx is None or last_eoes[last_eoe_idx], f"last_eoe_idx:{last_eoe_idx}"
if last_eoe_idx is not None:
replace_hist_before_eoe(hist=new_act_buf, eoe_idx_in_hist=last_eoe_idx - self.start_acts_offset - 1)
replace_hist_before_eoe(hist=last_act_buf, eoe_idx_in_hist=last_eoe_idx - self.start_acts_offset)
replace_hist_before_eoe(hist=imgs_new_obs, eoe_idx_in_hist=last_eoe_idx - self.start_imgs_offset - 1)
replace_hist_before_eoe(hist=imgs_last_obs, eoe_idx_in_hist=last_eoe_idx - self.start_imgs_offset)
last_obs = (self.data[2][idx_last], self.data[7][idx_last], self.data[8][idx_last], imgs_last_obs, *last_act_buf)
new_act = self.data[1][idx_now]
rew = np.float32(self.data[5][idx_now])
new_obs = (self.data[2][idx_now], self.data[7][idx_now], self.data[8][idx_now], imgs_new_obs, *new_act_buf)
terminated = self.data[9][idx_now]
truncated = self.data[10][idx_now]
info = self.data[6][idx_now]
return last_obs, new_act, rew, new_obs, terminated, truncated, info
def load_imgs(self, item):
res = self.data[3][(item + self.start_imgs_offset):(item + self.start_imgs_offset + self.imgs_obs + 1)]
return np.stack(res).astype(np.float32) / 256.0
def load_acts(self, item):
res = self.data[1][(item + self.start_acts_offset):(item + self.start_acts_offset + self.act_buf_len + 1)]
return res
def append_buffer(self, buffer):
"""
        buffer is a list of samples (act, obs, rew, terminated, truncated, info)
don't forget to keep the info dictionary in the sample for CRC debugging
"""
first_data_idx = self.data[0][-1] + 1 if self.__len__() > 0 else 0
d0 = [first_data_idx + i for i, _ in enumerate(buffer.memory)] # indexes
d1 = [b[0] for b in buffer.memory] # actions
d2 = [b[1][0] for b in buffer.memory] # speeds
d3 = [b[1][3] for b in buffer.memory] # images
d4 = [b[3] or b[4] for b in buffer.memory] # eoes
d5 = [b[2] for b in buffer.memory] # rewards
d6 = [b[5] for b in buffer.memory] # infos
d7 = [b[1][1] for b in buffer.memory] # gears
d8 = [b[1][2] for b in buffer.memory] # rpms
d9 = [b[3] for b in buffer.memory] # terminated
d10 = [b[4] for b in buffer.memory] # truncated
if self.__len__() > 0:
self.data[0] += d0
self.data[1] += d1
self.data[2] += d2
self.data[3] += d3
self.data[4] += d4
self.data[5] += d5
self.data[6] += d6
self.data[7] += d7
self.data[8] += d8
self.data[9] += d9
self.data[10] += d10
else:
self.data.append(d0)
self.data.append(d1)
self.data.append(d2)
self.data.append(d3)
self.data.append(d4)
self.data.append(d5)
self.data.append(d6)
self.data.append(d7)
self.data.append(d8)
self.data.append(d9)
self.data.append(d10)
to_trim = self.__len__() - self.memory_size
if to_trim > 0:
self.data[0] = self.data[0][to_trim:]
self.data[1] = self.data[1][to_trim:]
self.data[2] = self.data[2][to_trim:]
self.data[3] = self.data[3][to_trim:]
self.data[4] = self.data[4][to_trim:]
self.data[5] = self.data[5][to_trim:]
self.data[6] = self.data[6][to_trim:]
self.data[7] = self.data[7][to_trim:]
self.data[8] = self.data[8][to_trim:]
self.data[9] = self.data[9][to_trim:]
self.data[10] = self.data[10][to_trim:]
return self
|
c900b2c5af6ea06db7502bafe0d5e4a47742b5a5
|
bbd69601912a3361d788efd03a47f9d4e3bac09e
|
/docs/sphinx/rest_substitutions/snippets/python/converted/wx.xml.XmlDocument.2.py
|
0b7377588bdec7f487281ddd6f9f6e13346a177c
|
[] |
no_license
|
wxWidgets/Phoenix
|
56929484460a0399a8f1d9582bc77c20aa14748d
|
a1184286703cf24c4b88e5bc14cf2979c1b1ea00
|
refs/heads/master
| 2023-09-01T07:10:17.437093
| 2023-08-31T05:38:01
| 2023-08-31T05:38:01
| 5,078,061
| 2,268
| 677
| null | 2023-09-09T17:06:59
| 2012-07-17T06:22:25
|
Python
|
UTF-8
|
Python
| false
| false
| 247
|
py
|
wx.xml.XmlDocument.2.py
|
doc = wx.xml.XmlDocument()
doc.Load("myfile.xml", "UTF-8", wx.xml.XMLDOC_KEEP_WHITESPACE_NODES)
# myfile2.xml will be identical to myfile.xml when saved this way:
doc.Save("myfile2.xml", wx.xml.XML_NO_INDENTATION)
|
08c6468b5f7ceaef9c0a11375bb7ddf6e8f00d94
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/motech/fhir/admin.py
|
f9be833356fc2cebbaa82b8a7fa528cb27e61c9d
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,276
|
py
|
admin.py
|
import json
from django.contrib import admin
from corehq.motech.fhir.models import (
FHIRImportConfig,
FHIRImportResourceProperty,
FHIRImportResourceType,
FHIRResourceProperty,
FHIRResourceType,
ResourceTypeRelationship,
)
class FHIRResourcePropertyInline(admin.TabularInline):
model = FHIRResourceProperty
verbose_name_plural = 'FHIR resource properties'
fields = ('calculated_value_source', 'value_source_config',)
readonly_fields = ('calculated_value_source',)
def calculated_value_source(self, obj):
if not (obj.case_property and obj.jsonpath):
return ''
value_source_config = {
'case_property': obj.case_property.name,
'jsonpath': obj.jsonpath,
}
if obj.value_map:
value_source_config['value_map'] = obj.value_map
return json.dumps(value_source_config, indent=2)
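# Illustrative note (not in the original file): calculated_value_source above
# renders read-only JSON such as
#   {
#     "case_property": "temperature",
#     "jsonpath": "$.valueQuantity.value"
#   }
# where "temperature" and the JSONPath are hypothetical example values.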
class FHIRResourceTypeAdmin(admin.ModelAdmin):
model = FHIRResourceType
list_display = (
'domain',
'name',
'case_type',
)
list_display_links = (
'domain',
'name',
'case_type',
)
list_filter = ('domain',)
# Allows for creating resource properties without having to deal
# with domains.
inlines = [FHIRResourcePropertyInline]
def has_add_permission(self, request):
# Domains are difficult to manage with this interface. Create
# using the Data Dictionary, and edit in Admin.
return False
class FHIRImportConfigAdmin(admin.ModelAdmin):
list_display = (
'domain',
'connection_settings',
'frequency',
)
list_display_links = (
'domain',
'connection_settings',
'frequency',
)
list_filter = ('domain',)
list_select_related = ('connection_settings',)
class FHIRImportResourcePropertyInline(admin.TabularInline):
model = FHIRImportResourceProperty
verbose_name_plural = 'FHIR Importer resource properties'
fields = ('value_source_config',)
class FHIRImportResourceTypeAdmin(admin.ModelAdmin):
model = FHIRImportResourceType
list_display = (
'domain',
'name',
'case_type',
)
list_display_links = (
'domain',
'name',
'case_type',
)
list_filter = ('import_config__domain',)
list_select_related = ('import_config',)
inlines = [FHIRImportResourcePropertyInline]
def domain(self, obj):
return obj.import_config.domain
class ResourceTypeRelationshipAdmin(admin.ModelAdmin):
model = ResourceTypeRelationship
list_display = (
'domain',
'resource_type',
'related_resource_type',
)
list_display_links = (
'domain',
'resource_type',
'related_resource_type',
)
list_filter = ('resource_type__import_config__domain',)
list_select_related = ('resource_type__import_config',)
def domain(self, obj):
return obj.resource_type.domain
admin.site.register(FHIRResourceType, FHIRResourceTypeAdmin)
admin.site.register(FHIRImportConfig, FHIRImportConfigAdmin)
admin.site.register(FHIRImportResourceType, FHIRImportResourceTypeAdmin)
admin.site.register(ResourceTypeRelationship, ResourceTypeRelationshipAdmin)
|
4f89070bd88ddcba1b89a0e5571571dc277fd9ee
|
10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8
|
/samcli/lib/iac/exceptions.py
|
ed84517f6e583a3cb14b6f4dcfa8a540f3bea50d
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] |
permissive
|
aws/aws-sam-cli
|
6d4411aacf7f861e75e5cf4882a32858797a276d
|
b297ff015f2b69d7c74059c2d42ece1c29ea73ee
|
refs/heads/develop
| 2023-08-30T23:28:36.179932
| 2023-08-30T21:58:26
| 2023-08-30T21:58:26
| 92,205,085
| 1,402
| 470
|
Apache-2.0
| 2023-09-14T21:14:23
| 2017-05-23T18:16:23
|
Python
|
UTF-8
|
Python
| false
| false
| 542
|
py
|
exceptions.py
|
"""
IaC Exceptions
"""
from typing import Optional
from samcli.commands.exceptions import UserException
class InvalidIaCPluginException(UserException):
def __init__(self, files: Optional[list] = None):
if files is None:
files = []
msg = "Could not determine the plugin type from the provided files:\n\n{files}"
UserException.__init__(self, msg.format(files=", ".join(files)))
class InvalidProjectTypeException(UserException):
def __init__(self, msg):
UserException.__init__(self, msg)
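# Illustrative usage (not in the original module):
#   raise InvalidIaCPluginException(files=["template.yaml", "Dockerfile"])
# produces the message:
#   "Could not determine the plugin type from the provided files:\n\ntemplate.yaml, Dockerfile"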
|
4ce1c71d46e791c499f87df45c0a5630db92e50e
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/blink/web_tests/wpt_internal/prerender/PRESUBMIT.py
|
a289442b61f38b14d16432f03504819b27a1318d
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
PRESUBMIT.py
|
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for prerender in Web Platform Tests.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import os
def _CheckFileTimeoutMetaTags(f):
"""Checks if the given file has timeout meta tags."""
new_contents = f.NewContents()
for line in new_contents:
if 'name="timeout" content="long"' in line:
return True
return False
def _CheckTimeoutMetaTags(input_api, output_api):
""" This function ensures that all WPTs for prerendering have meta tags
to lengthen test timeout as some tests can possibly run out of time
on windows platform.
"""
results = []
def file_filter(f):
return (f.LocalPath().endswith(('html'))
and (os.path.join('resources', '') not in f.LocalPath()))
for f in input_api.AffectedFiles(include_deletes=False,
file_filter=file_filter):
if not _CheckFileTimeoutMetaTags(f):
results.append(
output_api.PresubmitError(
('Missing long timeout. '
'Add `<meta name="timeout" content="long">` to %s.') %
f.LocalPath()))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CheckTimeoutMetaTags(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CheckTimeoutMetaTags(input_api, output_api))
return results
|
fe31e98d6102914b2601b7ee01917bf01fd49e0d
|
c50ee4849d767a4d94a4472d839dfdba790b2493
|
/client/bindings/nodejs/binding.gyp
|
2cb6226f2d34b280fd66e045c95d062d450cde76
|
[
"Apache-2.0"
] |
permissive
|
iotaledger/iota.rs
|
1c18aa48a54d033511e0fa14b1037f72e87412b4
|
e906ef3737f8c2b9ec910e466ec1d1c3be9a3b14
|
refs/heads/develop
| 2023-08-31T09:31:51.068322
| 2023-08-23T07:37:28
| 2023-08-23T07:37:28
| 135,950,775
| 222
| 143
|
Apache-2.0
| 2023-08-23T07:37:29
| 2018-06-04T00:12:51
|
Rust
|
UTF-8
|
Python
| false
| false
| 176
|
gyp
|
binding.gyp
|
{
"targets": [
{
"target_name": "index",
      "defines": [
        "NAPI_VERSION=<(napi_build_version)",
      ],
"win_delay_load_hook": "true",
}
]
}
|
b84cfafdef6297420a8bf42ac99f6b53094e4ba6
|
4198532417feb21d023d7ba525a74555186b3e89
|
/trailscraper/iam.py
|
ddf58bbfd1b5b56c855046a2c696a280f536edff
|
[
"Apache-2.0"
] |
permissive
|
flosell/trailscraper
|
375207753c668c78e166381e7e086d1adb950482
|
b9edb1483ca8e9ce6ac8540a63334c37abd6242d
|
refs/heads/master
| 2023-09-04T07:27:52.593647
| 2023-09-04T01:34:25
| 2023-09-04T01:40:20
| 112,079,872
| 726
| 32
|
Apache-2.0
| 2023-09-11T01:57:25
| 2017-11-26T12:15:52
|
Python
|
UTF-8
|
Python
| false
| false
| 6,054
|
py
|
iam.py
|
"""Classes to deal with IAM Policies"""
import json
import os
import re
import six
from toolz import pipe
from toolz.curried import groupby as groupbyz
from toolz.curried import map as mapz
BASE_ACTION_PREFIXES = ["Describe", "Create", "Delete", "Update", "Detach", "Attach", "List", "Put", "Get", ]
# pylint: disable=invalid-name
class BaseElement:
"""Base Class for all IAM Policy classes"""
def json_repr(self):
"""JSON representation of the class"""
raise NotImplementedError
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.json_repr() == other.json_repr()
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.json_repr())
def __repr__(self):
return str(self.json_repr())
class Action(BaseElement):
"""Action in an IAM Policy."""
def __init__(self, prefix, action):
self.action = action
self.prefix = prefix
def json_repr(self):
return ':'.join([self.prefix, self.action])
def _base_action(self):
without_prefix = self.action
for prefix in BASE_ACTION_PREFIXES:
without_prefix = re.sub(prefix, "", without_prefix)
without_plural = re.sub(r"s$", "", without_prefix)
return without_plural
def matching_actions(self, allowed_prefixes):
"""Return a matching create action for this Action"""
if not allowed_prefixes:
allowed_prefixes = BASE_ACTION_PREFIXES
potential_matches = [Action(prefix=self.prefix, action=action_prefix + self._base_action())
for action_prefix in allowed_prefixes]
potential_matches += [Action(prefix=self.prefix, action=action_prefix + self._base_action() + "s")
for action_prefix in allowed_prefixes]
return [potential_match
for potential_match in potential_matches
if potential_match in known_iam_actions(self.prefix) and potential_match != self]
class Statement(BaseElement):
"""Statement in an IAM Policy."""
def __init__(self, Action, Effect, Resource): # pylint: disable=redefined-outer-name
self.Action = Action # pylint: disable=invalid-name
self.Effect = Effect # pylint: disable=invalid-name
self.Resource = Resource # pylint: disable=invalid-name
def json_repr(self):
return {
'Action': self.Action,
'Effect': self.Effect,
'Resource': self.Resource,
}
def merge(self, other):
"""Merge two statements into one."""
if self.Effect != other.Effect:
raise ValueError(f"Trying to combine two statements with differing effects: {self.Effect} {other.Effect}")
effect = self.Effect
actions = list(sorted(set(self.Action + other.Action), key=lambda action: action.json_repr()))
resources = list(sorted(set(self.Resource + other.Resource)))
return Statement(
Effect=effect,
Action=actions,
Resource=resources,
)
def __action_list_strings(self):
return "-".join([a.json_repr() for a in self.Action])
def __lt__(self, other):
if self.Effect != other.Effect:
return self.Effect < other.Effect
if self.Action != other.Action:
# pylint: disable=W0212
return self.__action_list_strings() < other.__action_list_strings()
return "".join(self.Resource) < "".join(other.Resource)
class PolicyDocument(BaseElement):
"""IAM Policy Doument."""
def __init__(self, Statement, Version="2012-10-17"): # pylint: disable=redefined-outer-name
self.Version = Version # pylint: disable=invalid-name
self.Statement = Statement # pylint: disable=invalid-name
def json_repr(self):
return {
'Version': self.Version,
'Statement': self.Statement
}
def to_json(self):
"""Render object into IAM Policy JSON"""
return json.dumps(self.json_repr(), cls=IAMJSONEncoder, indent=4, sort_keys=True)
class IAMJSONEncoder(json.JSONEncoder):
"""JSON Encoder using the json_repr functions"""
def default(self, o): # pylint: disable=method-hidden
if hasattr(o, 'json_repr'):
return o.json_repr()
return json.JSONEncoder.default(self, o)
def _parse_action(action):
parts = action.split(":")
return Action(parts[0], parts[1])
def _parse_statement(statement):
return Statement(Action=[_parse_action(action) for action in statement['Action']],
Effect=statement['Effect'],
Resource=statement['Resource'])
def _parse_statements(json_data):
    # TODO: json_data could also be a dict, i.e. one statement; similar things happen in the rest of the policy pylint: disable=fixme
# https://github.com/flosell/iam-policy-json-to-terraform/blob/fafc231/converter/decode.go#L12-L22
return [_parse_statement(statement) for statement in json_data]
def parse_policy_document(stream):
"""Parse a stream of JSON data to a PolicyDocument object"""
if isinstance(stream, six.string_types):
json_dict = json.loads(stream)
else:
json_dict = json.load(stream)
return PolicyDocument(_parse_statements(json_dict['Statement']), Version=json_dict['Version'])
def all_known_iam_permissions():
"Return a list of all known IAM actions"
with open(os.path.join(os.path.dirname(__file__), 'known-iam-actions.txt'), encoding="UTF-8") as iam_file:
return {line.rstrip('\n') for line in iam_file.readlines()}
def known_iam_actions(prefix):
"""Return known IAM actions for a prefix, e.g. all ec2 actions"""
# This could be memoized for performance improvements
knowledge = pipe(all_known_iam_permissions(),
mapz(_parse_action),
groupbyz(lambda x: x.prefix))
return knowledge.get(prefix, [])
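# --- Illustrative sketch (not part of the original module) ---
# Merging two same-effect statements and rendering the result as policy JSON:
#
#   s1 = Statement(Action=[Action("ec2", "DescribeInstances")],
#                  Effect="Allow", Resource=["*"])
#   s2 = Statement(Action=[Action("ec2", "StartInstances")],
#                  Effect="Allow", Resource=["*"])
#   print(PolicyDocument(Statement=[s1.merge(s2)]).to_json())
#
# merge() deduplicates and sorts the combined actions and resources, and
# raises ValueError if the two statements' effects differ.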
|
43f290e32cd5a4e88d46596e5de8a780a24cb581
|
65d613ef216e674b6063e2716855aa47b3ad777d
|
/code/tests/test_message_property.py
|
a7f9d4f15384ab3b9ca957126a9c5fe4d63f4ef0
|
[
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dsuch/pymqi
|
c7adb7d199baa0a55265da6ac3e6866f0d2e907e
|
de161f70d71fac9380c9d5e11479d9d9fcd3873f
|
refs/heads/main
| 2023-01-20T12:06:36.844340
| 2023-01-08T04:42:31
| 2023-01-08T04:42:31
| 9,715,588
| 112
| 81
|
NOASSERTION
| 2022-10-15T14:35:17
| 2013-04-27T14:14:02
|
Python
|
UTF-8
|
Python
| false
| false
| 9,678
|
py
|
test_message_property.py
|
"""Test setting message properties and getting its values
"""
import unittest
import config
import utils
import pymqi
class TestMP(unittest.TestCase):
def setUp(self):
self.msg_prop_name = b"test_name"
self.msg_prop_value_str = "test_valuetest_valuetest_valuetest_valuetest_value"
self.msg_prop_value_bytes = b"test_valuetest_valuetest_valuetest_valuetest_value"
self.msg_prop_value_bool = True
self.msg_prop_value_int8 = -127
self.msg_prop_value_int16 = -32768
self.msg_prop_value_int32 = -2147483647
self.msg_prop_value_int64 = -9223372036854775808
self.msg_prop_value_float32 = 1.1754943508222875e-38
self.msg_prop_value_float64 = 2.2250738585072014e-308
# max length of queue names is 48 characters
self.queue_name = "{prefix}MSG.PROP.QUEUE".format(prefix=config.MQ.QUEUE.PREFIX)
self.queue_manager = config.MQ.QM.NAME
self.channel = config.MQ.QM.CHANNEL
self.host = config.MQ.QM.HOST
self.port = config.MQ.QM.PORT
self.user = config.MQ.QM.USER
self.password = config.MQ.QM.PASSWORD
self.conn_info = "{0}({1})".format(self.host, self.port)
self.qmgr = pymqi.QueueManager(None)
self.qmgr.connectTCPClient(self.queue_manager, pymqi.CD(), self.channel, self.conn_info, self.user, self.password)
self.create_queue(self.queue_name)
def tearDown(self):
"""Delete the created objects.
"""
if self.queue_name:
self.delete_queue(self.queue_name)
self.qmgr.disconnect()
def create_queue(self, queue_name):
queue_type = pymqi.CMQC.MQQT_LOCAL
max_depth = 5000
args = {pymqi.CMQC.MQCA_Q_NAME: utils.py3str2bytes(queue_name),
pymqi.CMQC.MQIA_Q_TYPE: queue_type,
pymqi.CMQC.MQIA_MAX_Q_DEPTH: max_depth,
pymqi.CMQCFC.MQIACF_REPLACE: pymqi.CMQCFC.MQRP_YES}
pcf = pymqi.PCFExecute(self.qmgr, response_wait_interval=120000)
pcf.MQCMD_CREATE_Q(args)
        pcf.disconnect()
def delete_queue(self, queue_name):
pcf = pymqi.PCFExecute(self.qmgr, response_wait_interval=120000)
args = {pymqi.CMQC.MQCA_Q_NAME: utils.py3str2bytes(queue_name),
pymqi.CMQCFC.MQIACF_PURGE: pymqi.CMQCFC.MQPO_YES}
pcf.MQCMD_DELETE_Q(args)
def get_value_length(self, property_type, property_value = ''):
value_length = 0
if property_type == pymqi.CMQC.MQTYPE_BOOLEAN:
value_length = 4
elif property_type == pymqi.CMQC.MQTYPE_BYTE_STRING:
value_length=len(property_value)
elif property_type == pymqi.CMQC.MQTYPE_INT8:
value_length = 1
elif property_type == pymqi.CMQC.MQTYPE_INT16:
value_length = 2
elif property_type == pymqi.CMQC.MQTYPE_INT32:
value_length = 4
elif property_type == pymqi.CMQC.MQTYPE_INT64:
value_length = 8
elif property_type == pymqi.CMQC.MQTYPE_FLOAT32:
value_length = 4
elif property_type == pymqi.CMQC.MQTYPE_FLOAT64:
value_length = 8
elif property_type == pymqi.CMQC.MQTYPE_STRING:
value_length=len(property_value)
elif property_type == pymqi.CMQC.MQTYPE_NULL:
            value_length = 0
return value_length
def work_with_property(self, property_value, property_type):
messageHandle_get = None
queue_get = None
queue_put = None
try:
value_length = self.get_value_length(property_type, property_value)
cmho_put = pymqi.CMHO()
messageHandle_put = pymqi.MessageHandle(self.qmgr, cmho_put)
messageHandle_put.properties.set(self.msg_prop_name, property_value,
value_length=value_length,
property_type=property_type)
pmo = pymqi.PMO(Version=pymqi.CMQC.MQPMO_CURRENT_VERSION)
pmo.OriginalMsgHandle = messageHandle_put.msg_handle
md_put = pymqi.MD(Version=pymqi.CMQC.MQMD_CURRENT_VERSION)
queue_put = pymqi.Queue(self.qmgr, self.queue_name, pymqi.CMQC.MQOO_OUTPUT)
queue_put.put(b'', md_put, pmo)
queue_put.close()
gmo = pymqi.GMO(Version=pymqi.CMQC.MQGMO_CURRENT_VERSION)
gmo.Options = pymqi.CMQC.MQGMO_NO_WAIT | pymqi.CMQC.MQGMO_PROPERTIES_IN_HANDLE
gmo.MatchOptions = pymqi.CMQC.MQMO_MATCH_MSG_ID
cmho_get = pymqi.CMHO(Version=pymqi.CMQC.MQCMHO_CURRENT_VERSION)
messageHandle_get = pymqi.MessageHandle(self.qmgr, cmho_get)
gmo.MsgHandle = messageHandle_get.msg_handle
md_get = pymqi.MD()
md_get.MsgId = md_put.MsgId
queue_get = pymqi.Queue(self.qmgr, self.queue_name, pymqi.CMQC.MQOO_INPUT_AS_Q_DEF)
queue_get.get(None, md_get, gmo)
finally:
if queue_put:
if queue_put.get_handle():
queue_put.close()
if queue_get:
if queue_get.get_handle():
queue_get.close()
return messageHandle_get
def get_property_value(self, messageHandle_get,
property_type=pymqi.CMQC.MQTYPE_AS_SET,
value_length=None,
property_modify=False):
if not value_length:
value_length = self.get_value_length(property_type)
if property_modify:
property_name = self.msg_prop_name*2
else:
property_name = self.msg_prop_name
return messageHandle_get.properties.get(property_name,
property_type=property_type,
max_value_length=value_length)
############################################################################
#
# Real Tests start here
#
############################################################################
def test_message_properties_short(self):
messageHandle_get = self.work_with_property(self.msg_prop_value_bytes, pymqi.CMQC.MQTYPE_BYTE_STRING)
        try:
            messageHandle_get.properties.get(self.msg_prop_name, max_value_length=len(self.msg_prop_value_bytes)//2)
        except pymqi.MQMIError as e:
            self.assertEqual(e.reason, pymqi.CMQC.MQRC_PROPERTY_VALUE_TOO_BIG, e)
        else:
            self.fail('Expected MQMIError with reason MQRC_PROPERTY_VALUE_TOO_BIG, but no exception was raised')
def test_message_properties_byte(self):
messageHandle_get = self.work_with_property(self.msg_prop_value_bytes, pymqi.CMQC.MQTYPE_BYTE_STRING)
value = messageHandle_get.properties.get(self.msg_prop_name, max_value_length=len(self.msg_prop_value_bytes))
self.assertEqual(self.msg_prop_value_bytes, value)
def test_message_properties_str(self):
messageHandle_get = self.work_with_property(self.msg_prop_value_str, pymqi.CMQC.MQTYPE_STRING)
value = messageHandle_get.properties.get(self.msg_prop_name, max_value_length=len(self.msg_prop_value_str))
self.assertEqual(self.msg_prop_value_str, value)
def test_message_properties_bool(self):
messageHandle_get = self.work_with_property(self.msg_prop_value_bool, pymqi.CMQC.MQTYPE_BOOLEAN)
value = self.get_property_value(messageHandle_get)
self.assertEqual(self.msg_prop_value_bool, value)
def test_message_properties_int8(self):
messageHandle_get = self.work_with_property(self.msg_prop_value_int8, pymqi.CMQC.MQTYPE_INT8)
value = self.get_property_value(messageHandle_get)
self.assertEqual(self.msg_prop_value_int8, value)
def test_message_properties_int16(self):
messageHandle_get = self.work_with_property(self.msg_prop_value_int16, pymqi.CMQC.MQTYPE_INT16)
value = self.get_property_value(messageHandle_get)
self.assertEqual(self.msg_prop_value_int16, value)
def test_message_properties_int32(self):
messageHandle_get = self.work_with_property(self.msg_prop_value_int32, pymqi.CMQC.MQTYPE_INT32)
value = self.get_property_value(messageHandle_get)
self.assertEqual(self.msg_prop_value_int32, value)
def test_message_properties_int64(self):
messageHandle_get = self.work_with_property(self.msg_prop_value_int64, pymqi.CMQC.MQTYPE_INT64)
value = self.get_property_value(messageHandle_get)
self.assertEqual(self.msg_prop_value_int64, value)
def test_message_properties_float32(self):
messageHandle_get = self.work_with_property(self.msg_prop_value_float32, pymqi.CMQC.MQTYPE_FLOAT32)
value = self.get_property_value(messageHandle_get)
self.assertEqual(self.msg_prop_value_float32, value)
def test_message_properties_float64(self):
messageHandle_get = self.work_with_property(self.msg_prop_value_float64, pymqi.CMQC.MQTYPE_FLOAT64)
value = self.get_property_value(messageHandle_get)
self.assertEqual(self.msg_prop_value_float64, value)
def test_message_properties_null(self):
messageHandle_get = self.work_with_property(None, pymqi.CMQC.MQTYPE_NULL)
value = self.get_property_value(messageHandle_get)
self.assertEqual(None, value)
def test_message_properties_nonexist(self):
messageHandle_get = self.work_with_property(None, pymqi.CMQC.MQTYPE_NULL)
        try:
            self.get_property_value(messageHandle_get, property_modify=True)
        except pymqi.MQMIError as e:
            self.assertEqual(e.reason, pymqi.CMQC.MQRC_PROPERTY_NOT_AVAILABLE, e)
        else:
            self.fail('Expected MQMIError with reason MQRC_PROPERTY_NOT_AVAILABLE, but no exception was raised')
if __name__ == "__main__":
unittest.main()
|
3491e8f7bf9bec1c5dd0a3371152e7604c0c41e3
|
c618bbf2719431999b1007461df0865bab60c883
|
/dali/python/nvidia/dali/plugin/pytorch.py
|
9a7b6ccb0c847f6b0c84cd3b64541058801517a6
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/DALI
|
3d0d061135d19e092647e6522046b2ff23d4ef03
|
92ebbe5c20e460050abd985acb590e6c27199517
|
refs/heads/main
| 2023-09-04T01:53:59.033608
| 2023-09-01T13:45:03
| 2023-09-01T13:45:03
| 135,768,037
| 4,851
| 648
|
Apache-2.0
| 2023-09-12T18:00:22
| 2018-06-01T22:18:01
|
C++
|
UTF-8
|
Python
| false
| false
| 37,512
|
py
|
pytorch.py
|
# Copyright (c) 2017-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.backend import TensorGPU, TensorListGPU
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
from nvidia.dali import types
from nvidia.dali.plugin.base_iterator import _DaliBaseIterator
from nvidia.dali.plugin.base_iterator import LastBatchPolicy
import torch
import torch.utils.dlpack as torch_dlpack
import ctypes
import numpy as np
to_torch_type = {
types.DALIDataType.FLOAT: torch.float32,
types.DALIDataType.FLOAT64: torch.float64,
types.DALIDataType.FLOAT16: torch.float16,
types.DALIDataType.UINT8: torch.uint8,
types.DALIDataType.INT8: torch.int8,
types.DALIDataType.BOOL: torch.bool,
types.DALIDataType.INT16: torch.int16,
types.DALIDataType.INT32: torch.int32,
types.DALIDataType.INT64: torch.int64
}
def feed_ndarray(dali_tensor, arr, cuda_stream=None):
"""
Copy contents of DALI tensor to PyTorch's Tensor.
Parameters
----------
`dali_tensor` : nvidia.dali.backend.TensorCPU or nvidia.dali.backend.TensorGPU
Tensor from which to copy
`arr` : torch.Tensor
Destination of the copy
`cuda_stream` : torch.cuda.Stream, cudaStream_t or any value that can be cast to cudaStream_t.
CUDA stream to be used for the copy
(if not provided, an internal user stream will be selected)
In most cases, using pytorch's current stream is expected (for example,
if we are copying to a tensor allocated with torch.zeros(...))
"""
dali_type = to_torch_type[dali_tensor.dtype]
assert dali_type == arr.dtype, ("The element type of DALI Tensor/TensorList"
" doesn't match the element type of the target PyTorch Tensor: "
"{} vs {}".format(dali_type, arr.dtype))
assert dali_tensor.shape() == list(arr.size()), \
("Shapes do not match: DALI tensor has size {0}, but PyTorch Tensor has size {1}".
format(dali_tensor.shape(), list(arr.size())))
cuda_stream = types._raw_cuda_stream(cuda_stream)
# turn raw int to a c void pointer
c_type_pointer = ctypes.c_void_p(arr.data_ptr())
if isinstance(dali_tensor, (TensorGPU, TensorListGPU)):
stream = None if cuda_stream is None else ctypes.c_void_p(cuda_stream)
dali_tensor.copy_to_external(c_type_pointer, stream, non_blocking=True)
else:
dali_tensor.copy_to_external(c_type_pointer)
return arr
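# --- Illustrative sketch (not part of the original module) ---
# Copying a dense GPU batch produced by a DALI pipeline into a pre-allocated
# torch tensor on the current CUDA stream (assumes `pipe` is a built
# nvidia.dali.Pipeline whose first output is dense and lives on the GPU):
#
#   outputs = pipe.run()
#   dali_tensor = outputs[0].as_tensor()
#   dst = torch.empty(dali_tensor.shape(),
#                     dtype=to_torch_type[dali_tensor.dtype],
#                     device=torch.device('cuda', pipe.device_id))
#   feed_ndarray(dali_tensor, dst, cuda_stream=torch.cuda.current_stream())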
class DALIGenericIterator(_DaliBaseIterator):
"""
General DALI iterator for PyTorch. It can return any number of
outputs from the DALI pipeline in the form of PyTorch's Tensors.
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
output_map : list of str
List of strings which maps consecutive outputs
of DALI pipelines to user specified name.
Outputs will be returned from iterator as dictionary
of those names.
Each name should be distinct
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one it is a sum)
        Providing -1 means that the iterator will work until StopIteration is raised
        from the inside of iter_setup(). The options `last_batch_policy` and
        `last_batch_padded` don't work in such a case. This mode works only with a
        single pipeline inside the iterator.
Mutually exclusive with `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried to the shard size, number of shards and
all other properties necessary to count properly the number of relevant and padded
samples that iterator needs to deal with. It automatically sets
`last_batch_padded` accordingly to match the reader's configuration.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically
dynamic_shape : any, optional,
Parameter used only for backward compatibility.
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
        or it just wraps up. In conjunction with ``last_batch_policy`` it tells
        whether the iterator returning the last batch, only partially filled with
        data from the current epoch, is dropping padding samples or samples from
        the next epoch. If set to ``False``, the next
        epoch will end sooner, as data from it was consumed but dropped. If set to
        ``True``, the next epoch will be the same length as the first one. For this to happen,
the option `pad_last_batch` in the reader needs to be set to True as well.
It is overwritten when `reader_name` argument is provided
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = True -> last batch = ``[7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = False -> last batch = ``[7]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True -> last batch = ``[7, 7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False -> last batch = ``[7, 1]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True -> last batch = ``[5, 6]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False -> last batch = ``[5, 6]``,
next iteration will return ``[2, 3]``
"""
def __init__(self,
pipelines,
output_map,
size=-1,
reader_name=None,
auto_reset=False,
fill_last_batch=None,
dynamic_shape=False,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
# check the assert first as _DaliBaseIterator would run the prefetch
assert len(set(output_map)) == len(output_map), "output_map names should be distinct"
self._output_categories = set(output_map)
self.output_map = output_map
_DaliBaseIterator.__init__(self,
pipelines,
size,
reader_name,
auto_reset,
fill_last_batch,
last_batch_padded,
last_batch_policy,
prepare_first_batch=prepare_first_batch)
self._first_batch = None
if self._prepare_first_batch:
try:
self._first_batch = DALIGenericIterator.__next__(self)
                # the call to `next` sets _ever_consumed to True, but since we are just
                # calling it from here, we should set it back to False
self._ever_consumed = False
except StopIteration:
assert False, "It seems that there is no data in the pipeline. This may happen " \
"if `last_batch_policy` is set to PARTIAL and the requested batch size is " \
"greater than the shard size."
def __next__(self):
self._ever_consumed = True
if self._first_batch is not None:
batch = self._first_batch
self._first_batch = None
return batch
# Gather outputs
outputs = self._get_outputs()
data_batches = [None for i in range(self._num_gpus)]
for i in range(self._num_gpus):
dev_id = self._pipes[i].device_id
# initialize dict for all output categories
category_outputs = dict()
# segregate outputs into categories
for j, out in enumerate(outputs[i]):
category_outputs[self.output_map[j]] = out
# Change DALI TensorLists into Tensors
category_tensors = dict()
category_shapes = dict()
for category, out in category_outputs.items():
category_tensors[category] = out.as_tensor()
category_shapes[category] = category_tensors[category].shape()
category_torch_type = dict()
category_device = dict()
torch_gpu_device = None
torch_cpu_device = torch.device('cpu')
# check category and device
for category in self._output_categories:
category_torch_type[category] = to_torch_type[category_tensors[category].dtype]
if type(category_tensors[category]) is TensorGPU:
if not torch_gpu_device:
torch_gpu_device = torch.device('cuda', dev_id)
category_device[category] = torch_gpu_device
else:
category_device[category] = torch_cpu_device
pyt_tensors = dict()
for category in self._output_categories:
pyt_tensors[category] = torch.empty(category_shapes[category],
dtype=category_torch_type[category],
device=category_device[category])
data_batches[i] = pyt_tensors
# Copy data from DALI Tensors to torch tensors
for category, tensor in category_tensors.items():
if isinstance(tensor, (TensorGPU, TensorListGPU)):
                    # copy on the current torch CUDA stream so the copy is
                    # ordered with torch's own work on this tensor
stream = torch.cuda.current_stream(device=pyt_tensors[category].device)
feed_ndarray(tensor, pyt_tensors[category], cuda_stream=stream)
else:
feed_ndarray(tensor, pyt_tensors[category])
self._schedule_runs()
self._advance_and_check_drop_last()
if self._reader_name:
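            # the reader knows the exact shard size, so trim any samples that
            # were only added to pad the last batch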
if_drop, left = self._remove_padded()
if np.any(if_drop):
output = []
for batch, to_copy in zip(data_batches, left):
batch = batch.copy()
for category in self._output_categories:
batch[category] = batch[category][0:to_copy]
output.append(batch)
return output
else:
if self._last_batch_policy == LastBatchPolicy.PARTIAL and (
self._counter > self._size) and self._size > 0:
# First calculate how much data is required to return exactly self._size entries.
diff = self._num_gpus * self.batch_size - (self._counter - self._size)
# Figure out how many GPUs to grab from.
numGPUs_tograb = int(np.ceil(diff / self.batch_size))
# Figure out how many results to grab from the last GPU
# (as a fractional GPU batch may be required to bring us
# right up to self._size).
mod_diff = diff % self.batch_size
data_fromlastGPU = mod_diff if mod_diff else self.batch_size
# Grab the relevant data.
# 1) Grab everything from the relevant GPUs.
# 2) Grab the right data from the last GPU.
# 3) Append data together correctly and return.
output = data_batches[0:numGPUs_tograb]
output[-1] = output[-1].copy()
for category in self._output_categories:
output[-1][category] = output[-1][category][0:data_fromlastGPU]
return output
return data_batches
class DALIClassificationIterator(DALIGenericIterator):
"""
DALI iterator for classification tasks for PyTorch. It returns 2 outputs
(data and label) in the form of PyTorch's Tensor.
Calling
.. code-block:: python
DALIClassificationIterator(pipelines, reader_name)
is equivalent to calling
.. code-block:: python
DALIGenericIterator(pipelines, ["data", "label"], reader_name)
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one it is a sum)
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy` and
`last_batch_padded` don't work in such a case. It works with only one pipeline inside
the iterator.
Mutually exclusive with `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried for the shard size, number of shards and
all other properties necessary to correctly count the number of relevant and padded
samples that the iterator needs to deal with. It automatically sets
`last_batch_padded` accordingly to match the reader's configuration.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically
dynamic_shape : any, optional,
Parameter used only for backward compatibility.
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :class:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In conjunction with ``last_batch_policy`` it tells
whether the iterator, when returning a last batch only partially filled with
data from the current epoch, drops the padding samples or samples from
the next epoch. If set to ``False`` the next
epoch will end sooner, as data from it was consumed but dropped. If set to
``True`` the next epoch will be the same length as the first one. For this to happen,
the option `pad_last_batch` in the reader needs to be set to ``True`` as well.
It is overwritten when the `reader_name` argument is provided
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = True -> last batch = ``[7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = False -> last batch = ``[7]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True -> last batch = ``[7, 7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False -> last batch = ``[7, 1]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True -> last batch = ``[5, 6]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False -> last batch = ``[5, 6]``,
next iteration will return ``[2, 3]``
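A minimal consumption sketch (assuming ``pipe`` is a built pipeline with a reader
named ``"Reader"``):

.. code-block:: python

    for batch in DALIClassificationIterator([pipe], reader_name="Reader"):
        images, labels = batch[0]["data"], batch[0]["label"]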
"""
def __init__(self,
pipelines,
size=-1,
reader_name=None,
auto_reset=False,
fill_last_batch=None,
dynamic_shape=False,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
super(DALIClassificationIterator, self).__init__(pipelines, ["data", "label"],
size,
reader_name=reader_name,
auto_reset=auto_reset,
fill_last_batch=fill_last_batch,
dynamic_shape=dynamic_shape,
last_batch_padded=last_batch_padded,
last_batch_policy=last_batch_policy,
prepare_first_batch=prepare_first_batch)
class DALIRaggedIterator(_DaliBaseIterator):
"""
General DALI iterator for PyTorch with ragged tensors.
It can return any number of outputs from the DALI pipeline
in the form of per GPU dictionaries.
These dictionaries consisting of PyTorch Tensors
(for outputs marked as DALIRaggedIterator.DENSE_TAG),
sparse COO PyTorch Tensors
(for outputs marked as DALIRaggedIterator.SPARSE_COO_TAG)
and list of PyTorch Tensors
(for outputs marked as DALIRaggedIterator.SPARSE_LIST_TAG).
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one it is a sum)
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy` and
`last_batch_padded` don't work in such a case. It works with only one pipeline inside
the iterator.
Mutually exclusive with `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried for the shard size, number of shards and
all other properties necessary to correctly count the number of relevant and padded
samples that the iterator needs to deal with. It automatically sets `last_batch_policy`
to PARTIAL when the FILL is used, and `last_batch_padded` accordingly to match
the reader's configuration
output_types : list of str, optional, default = None
List of tags indicating whether the pipeline(s) output batch is
uniform (all the samples have the same size) or not. Batch output marked
as the former will be returned as a single PyTorch Tensor, the latter
will be returned as a specified sparse PyTorch Tensor format.
Must be either DALIRaggedIterator.DENSE_TAG
or DALIRaggedIterator.SPARSE_LIST_TAG
or DALIRaggedIterator.SPARSE_COO_TAG
Length of output_types must match the number of output of the pipeline(s).
If not set, all outputs are considered to be marked with
DALIRaggedIterator.DENSE_TAG.
For now sparse mode supports only list of tensors and COO sparse tensor format.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :class:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In conjunction with ``last_batch_policy`` it tells
whether the iterator, when returning a last batch only partially filled with
data from the current epoch, drops the padding samples or samples from
the next epoch. If set to ``False`` the next
epoch will end sooner, as data from it was consumed but dropped. If set to
``True`` the next epoch will be the same length as the first one. For this to happen,
the option `pad_last_batch` in the reader needs to be set to ``True`` as well.
It is overwritten when the `reader_name` argument is provided
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = True ->
last batch = ``[7]``, next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = False ->
last batch = ``[7]``, next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True ->
last batch = ``[7, 7]``, next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False ->
last batch = ``[7, 1]``, next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True ->
last batch = ``[5, 6]``, next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False ->
last batch = ``[5, 6]``, next iteration will return ``[2, 3]``
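A minimal usage sketch (assuming ``pipe`` is a built pipeline whose second output
has a different shape for every sample):

.. code-block:: python

    it = DALIRaggedIterator(
        [pipe], ["images", "boxes"],
        output_types=[DALIRaggedIterator.DENSE_TAG,
                      DALIRaggedIterator.SPARSE_LIST_TAG],
        reader_name="Reader")
    batch = next(it)
    dense = batch[0]["images"]    # a single torch.Tensor
    ragged = batch[0]["boxes"]    # a list of torch.Tensors, one per sample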
"""
def __init__(self,
pipelines,
output_map,
size=-1,
reader_name=None,
output_types=None,
auto_reset=False,
fill_last_batch=None,
dynamic_shape=False,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
# check the assert first as _DaliBaseIterator would run the prefetch
self._output_tags = {
DALIRaggedIterator.DENSE_TAG,
DALIRaggedIterator.SPARSE_LIST_TAG,
DALIRaggedIterator.SPARSE_COO_TAG
}
assert len(set(output_map)) == len(output_map), "output_map names should be distinct"
assert output_types is None or set(output_types) <= self._output_tags, \
"Only DENSE_TAG, SPARSE_LIST_TAG and SPARSE_COO_TAG are allowed"
self.output_map = output_map
self._outputs_types = output_types
super(DALIRaggedIterator, self).__init__(
pipelines,
size,
reader_name,
auto_reset,
fill_last_batch,
last_batch_padded,
last_batch_policy,
prepare_first_batch
)
self._first_batch = None
if self._prepare_first_batch:
try:
                self._first_batch = DALIRaggedIterator.__next__(self)
                # the call to `next` sets _ever_consumed to True, but since we are just
                # calling it from here, we should set it back to False
self._ever_consumed = False
except StopIteration:
assert False, "It seems that there is no data in the pipeline. This may happen " \
"if `last_batch_policy` is set to PARTIAL and the requested batch size is " \
"greater than the shard size."
def __next__(self):
self._ever_consumed = True
if self._first_batch is not None:
batch = self._first_batch
self._first_batch = None
return batch
# Gather outputs
dali_outputs = self._get_outputs()
data_batches = [None for i in range(self._num_gpus)]
for i in range(self._num_gpus):
dev_id = self._pipes[i].device_id
# initialize dict for all output categories
category_outputs = dict()
# segregate outputs into categories
for j, out in enumerate(dali_outputs[i]):
category_outputs[self.output_map[j]] = out
# Change DALI TensorLists into Tensors
category_tensors = dict()
category_shapes = dict()
category_torch_type = dict()
category_device = dict()
torch_gpu_device = None
torch_cpu_device = torch.device('cpu')
for j, (category, out) in enumerate(category_outputs.items()):
if self._outputs_types is None or \
self._outputs_types[j] == DALIRaggedIterator.DENSE_TAG:
category_tensors[category] = out.as_tensor()
category_shapes[category] = category_tensors[category].shape()
else:
category_tensors[category] = [x for x in out]
category_shapes[category] = [x.shape() for x in out]
# check dtype
category_torch_type[category] = to_torch_type[out.dtype]
# check device
if type(out) is TensorListGPU:
if not torch_gpu_device:
torch_gpu_device = torch.device('cuda', dev_id)
category_device[category] = torch_gpu_device
else:
category_device[category] = torch_cpu_device
pyt_tensors = dict()
for j, category in enumerate(self.output_map):
if self._outputs_types is None or \
self._outputs_types[j] == DALIRaggedIterator.DENSE_TAG:
pyt_tensors[category] = torch.empty(category_shapes[category],
dtype=category_torch_type[category],
device=category_device[category])
else:
pyt_tensors[category] = [
torch.empty(shape,
dtype=category_torch_type[category],
device=category_device[category])
for shape in category_shapes[category]
]
data_batches[i] = pyt_tensors
# Copy data from DALI Tensors to torch tensors
for j, (category, tensor) in enumerate(category_tensors.items()):
if self._outputs_types is None or \
self._outputs_types[j] == DALIRaggedIterator.DENSE_TAG:
if isinstance(tensor, (TensorGPU, TensorListGPU)):
                        # copy on the current torch CUDA stream so the copy is
                        # ordered with torch's own work on this tensor
stream = torch.cuda.current_stream(device=pyt_tensors[category].device)
feed_ndarray(tensor, pyt_tensors[category], cuda_stream=stream)
else:
feed_ndarray(tensor, pyt_tensors[category])
else:
for k, single_tensor in enumerate(tensor):
                        if isinstance(single_tensor, (TensorGPU, TensorListGPU)):
                            # copy on the current torch CUDA stream so the copy is
                            # ordered with torch's own work on this tensor
stream = torch.cuda.current_stream(
device=pyt_tensors[category][k].device
)
feed_ndarray(
single_tensor, pyt_tensors[category][k], cuda_stream=stream
)
else:
feed_ndarray(single_tensor, pyt_tensors[category][k])
if self._outputs_types[j] == DALIRaggedIterator.SPARSE_COO_TAG:
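                        # flatten the per-sample value tensors into one vector and
                        # build matching (sample, element) coordinates for the COO layout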
values = torch.hstack(pyt_tensors[category])
                        indices = [
                            [(sample_idx, elem_idx) for elem_idx in range(shape[0])]
                            for sample_idx, shape in enumerate(category_shapes[category])
                        ]
                        indices = [index for el_indices in indices for index in el_indices]
                        # the legacy torch.LongTensor constructor cannot place the data on
                        # a CUDA device, so build the index tensor with torch.tensor instead
                        indices = torch.tensor(indices, dtype=torch.long, device=values.device)
                        pyt_tensors[category] = torch.sparse_coo_tensor(indices.T, values)
self._schedule_runs()
self._advance_and_check_drop_last()
if self._reader_name:
if_drop, left = self._remove_padded()
if np.any(if_drop):
output = []
for batch, to_copy in zip(data_batches, left):
batch = batch.copy()
for category in self.output_map:
batch[category] = batch[category][0:to_copy]
output.append(batch)
return output
else:
if self._last_batch_policy == LastBatchPolicy.PARTIAL and (
self._counter > self._size) and self._size > 0:
# First calculate how much data is required to return exactly self._size entries.
diff = self._num_gpus * self.batch_size - (self._counter - self._size)
# Figure out how many GPUs to grab from.
numGPUs_tograb = int(np.ceil(diff / self.batch_size))
# Figure out how many results to grab from the last GPU
# (as a fractional GPU batch may be required to bring us
# right up to self._size).
mod_diff = diff % self.batch_size
data_fromlastGPU = mod_diff if mod_diff else self.batch_size
# Grab the relevant data.
# 1) Grab everything from the relevant GPUs.
# 2) Grab the right data from the last GPU.
# 3) Append data together correctly and return.
output = data_batches[0:numGPUs_tograb]
output[-1] = output[-1].copy()
            # DALIRaggedIterator keeps its categories in ``output_map``
            for category in self.output_map:
output[-1][category] = output[-1][category][0:data_fromlastGPU]
return output
return data_batches
DENSE_TAG = "dense"
SPARSE_LIST_TAG = "sparse_list"
SPARSE_COO_TAG = "sparse_coo"
class TorchPythonFunction(ops.PythonFunctionBase):
schema_name = "TorchPythonFunction"
ops.register_cpu_op('TorchPythonFunction')
ops.register_gpu_op('TorchPythonFunction')
def _torch_stream_wrapper(self, function, *ins):
with torch.cuda.stream(self.stream):
out = function(*ins)
self.stream.synchronize()
return out
def torch_wrapper(self, batch_processing, function, device, *args):
func = function if device == 'cpu' else \
lambda *ins: self._torch_stream_wrapper(function, *ins)
if batch_processing:
            return ops.PythonFunction.function_wrapper_batch(func,
                                                             self.num_outputs,
                                                             torch_dlpack.from_dlpack,
                                                             torch_dlpack.to_dlpack,
                                                             *args)
else:
return ops.PythonFunction.function_wrapper_per_sample(func,
self.num_outputs,
torch_dlpack.from_dlpack,
torch_dlpack.to_dlpack,
*args)
def __call__(self, *inputs, **kwargs):
pipeline = Pipeline.current()
if pipeline is None:
Pipeline._raise_no_current_pipeline("TorchPythonFunction")
if self.stream is None:
self.stream = torch.cuda.Stream(device=pipeline.device_id)
return super(TorchPythonFunction, self).__call__(*inputs, **kwargs)
def __init__(self, function, num_outputs=1, device='cpu', batch_processing=False, **kwargs):
self.stream = None
super(TorchPythonFunction, self).__init__(impl_name="DLTensorPythonFunctionImpl",
function=lambda *ins:
self.torch_wrapper(batch_processing,
function, device,
*ins),
num_outputs=num_outputs, device=device,
batch_processing=batch_processing, **kwargs)
ops._wrap_op(TorchPythonFunction, "fn", __name__)
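# A minimal usage sketch: ``ops._wrap_op`` above also exposes this operator as
# ``torch_python_function`` under this module's ``fn`` namespace. The pipeline
# below is hypothetical:
#
#     import nvidia.dali.fn as fn
#     from nvidia.dali import pipeline_def
#     from nvidia.dali.plugin.pytorch import fn as torch_fn
#
#     @pipeline_def
#     def flip_pipe():
#         imgs = fn.external_source(name="images", device="gpu")
#         return torch_fn.torch_python_function(
#             imgs, function=lambda t: t.flip(-1), num_outputs=1)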
--- /blog/urls.py (sajib1066/django-ecommerce, no license) ---
from django.urls import path
from . import views
urlpatterns = [
path('', views.blog_page, name='blog'),
path('details/<post_id>', views.post_details, name='post-details'),
path(
'category/<ctg_name>', views.post_by_category, name='post-by-category'
)
]
--- /src/otx/algorithms/detection/adapters/mmdet/task.py (openvinotoolkit/training_extensions, Apache-2.0) ---
"""Task of OTX Detection using mmdetection training backend."""
# Copyright (C) 2023 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import glob
import io
import os
import time
from contextlib import nullcontext
from copy import deepcopy
from functools import partial
from typing import Any, Dict, Optional, Union
import torch
from mmcv.runner import wrap_fp16_model
from mmcv.utils import Config, ConfigDict, get_git_hash
from mmdet import __version__
from mmdet.apis import single_gpu_test, train_detector
from mmdet.datasets import build_dataloader, build_dataset, replace_ImageToTensor
from mmdet.models.detectors import DETR, TwoStageDetector
from mmdet.utils import collect_env
from otx.algorithms.common.adapters.mmcv.hooks import LossDynamicsTrackingHook
from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import (
ActivationMapHook,
BaseRecordingForwardHook,
EigenCamHook,
FeatureVectorHook,
)
from otx.algorithms.common.adapters.mmcv.utils import (
adapt_batch_size,
build_data_parallel,
patch_data_pipeline,
patch_from_hyperparams,
)
from otx.algorithms.common.adapters.mmcv.utils.config_utils import (
MPAConfig,
update_or_add_custom_hook,
)
from otx.algorithms.common.adapters.torch.utils import convert_sync_batchnorm
from otx.algorithms.common.configs.configuration_enums import BatchSizeAdaptType
from otx.algorithms.common.configs.training_base import TrainType
from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask
from otx.algorithms.common.utils.data import get_dataset
from otx.algorithms.common.utils.logger import get_logger
from otx.algorithms.detection.adapters.mmdet.configurer import (
DetectionConfigurer,
IncrDetectionConfigurer,
SemiSLDetectionConfigurer,
)
from otx.algorithms.detection.adapters.mmdet.datasets import ImageTilingDataset
from otx.algorithms.detection.adapters.mmdet.hooks.det_class_probability_map_hook import (
DetClassProbabilityMapHook,
MaskRCNNRecordingForwardHook,
)
from otx.algorithms.detection.adapters.mmdet.utils import (
patch_input_preprocessing,
patch_input_shape,
patch_ir_scale_factor,
patch_tiling,
)
from otx.algorithms.detection.adapters.mmdet.utils.builder import build_detector
from otx.algorithms.detection.adapters.mmdet.utils.config_utils import (
should_cluster_anchors,
)
from otx.algorithms.detection.adapters.mmdet.utils.exporter import DetectionExporter
from otx.algorithms.detection.task import OTXDetectionTask
from otx.api.configuration import cfg_helper
from otx.api.configuration.helper.utils import ids_to_strings
from otx.api.entities.datasets import DatasetEntity
from otx.api.entities.explain_parameters import ExplainParameters
from otx.api.entities.inference_parameters import InferenceParameters
from otx.api.entities.model import (
ModelEntity,
ModelPrecision,
)
from otx.api.entities.subset import Subset
from otx.api.entities.task_environment import TaskEnvironment
from otx.api.serialization.label_mapper import label_schema_to_bytes
from otx.api.usecases.tasks.interfaces.export_interface import ExportType
from otx.core.data import caching
logger = get_logger()
# TODO Remove unnecessary pylint disable
# pylint: disable=too-many-lines
class MMDetectionTask(OTXDetectionTask):
"""Task class for OTX detection using mmdetection training backend."""
# pylint: disable=too-many-instance-attributes
def __init__(self, task_environment: TaskEnvironment, output_path: Optional[str] = None):
super().__init__(task_environment, output_path)
self._data_cfg: Optional[Config] = None
self._recipe_cfg: Optional[Config] = None
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
def _init_task(self, dataset: Optional[DatasetEntity] = None, export: bool = False): # noqa
"""Initialize task."""
self._recipe_cfg = MPAConfig.fromfile(os.path.join(self._model_dir, "model.py"))
self._recipe_cfg.domain = self._task_type.domain
self._config = self._recipe_cfg
self.set_seed()
        # The following may be moved to the configure function
patch_data_pipeline(self._recipe_cfg, self.data_pipeline_path)
# Patch tiling parameters
patch_tiling(self._recipe_cfg, self._hyperparams, dataset)
if not export:
patch_from_hyperparams(self._recipe_cfg, self._hyperparams)
if "custom_hooks" in self.override_configs:
override_custom_hooks = self.override_configs.pop("custom_hooks")
for override_custom_hook in override_custom_hooks:
update_or_add_custom_hook(self._recipe_cfg, ConfigDict(override_custom_hook))
if len(self.override_configs) > 0:
logger.info(f"before override configs merging = {self._recipe_cfg}")
self._recipe_cfg.merge_from_dict(self.override_configs)
logger.info(f"after override configs merging = {self._recipe_cfg}")
# add Cancel training hook
update_or_add_custom_hook(
self._recipe_cfg,
ConfigDict(type="CancelInterfaceHook", init_callback=self.on_hook_initialized),
)
if self._time_monitor is not None:
update_or_add_custom_hook(
self._recipe_cfg,
ConfigDict(
type="OTXProgressHook",
time_monitor=self._time_monitor,
verbose=True,
priority=71,
),
)
self._recipe_cfg.log_config.hooks.append({"type": "OTXLoggerHook", "curves": self._learning_curves})
# Update recipe with caching modules
self._update_caching_modules(self._recipe_cfg.data)
# Loss dynamics tracking
if getattr(self._hyperparams.algo_backend, "enable_noisy_label_detection", False):
LossDynamicsTrackingHook.configure_recipe(self._recipe_cfg, self._output_path)
logger.info("initialized.")
def build_model(
self,
cfg: Config,
fp16: bool = False,
**kwargs,
) -> torch.nn.Module:
"""Build model from model_builder."""
model_builder = getattr(self, "model_builder", build_detector)
model = model_builder(cfg, **kwargs)
if bool(fp16):
wrap_fp16_model(model)
return model
# pylint: disable=too-many-arguments
def configure(self, training=True, ir_options=None, train_dataset=None):
"""Patch mmcv configs for OTX detection settings."""
# deepcopy all configs to make sure
        # changes made by MPA and below do not affect OTX, keeping a clear distinction
recipe_cfg = deepcopy(self._recipe_cfg)
assert recipe_cfg is not None, "'recipe_cfg' is not initialized."
if self._data_cfg is not None:
data_classes = [label.name for label in self._labels]
else:
data_classes = None
model_classes = [label.name for label in self._model_label_schema]
recipe_cfg.work_dir = self._output_path
recipe_cfg.resume = self._resume
if self._train_type == TrainType.Incremental:
configurer = IncrDetectionConfigurer("detection", training)
elif self._train_type == TrainType.Semisupervised:
configurer = SemiSLDetectionConfigurer("detection", training)
else:
configurer = DetectionConfigurer("detection", training)
cfg = configurer.configure(
recipe_cfg,
train_dataset,
self._model_ckpt,
self._data_cfg,
ir_options,
data_classes,
model_classes,
self._hyperparams.learning_parameters.input_size,
)
if should_cluster_anchors(self._recipe_cfg):
if train_dataset is not None:
self._anchors = cfg.model.bbox_head.anchor_generator
elif self._anchors is not None:
self._update_anchors(cfg.model.bbox_head.anchor_generator, self._anchors)
self._config = cfg
return cfg
# pylint: disable=too-many-branches, too-many-statements
def _train_model(
self,
dataset: DatasetEntity,
):
"""Train function in MMDetectionTask."""
logger.info("init data cfg.")
self._data_cfg = ConfigDict(data=ConfigDict())
for cfg_key, subset in zip(
["train", "val", "unlabeled"],
[Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED],
):
subset = get_dataset(dataset, subset)
if subset and self._data_cfg is not None:
self._data_cfg.data[cfg_key] = ConfigDict(
otx_dataset=subset,
labels=self._labels,
)
self._is_training = True
self._init_task(dataset)
cfg = self.configure(True, None, get_dataset(dataset, Subset.TRAINING))
logger.info("train!")
timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
# Environment
logger.info(f"cfg.gpu_ids = {cfg.gpu_ids}, distributed = {cfg.distributed}")
env_info_dict = collect_env()
env_info = "\n".join([(f"{k}: {v}") for k, v in env_info_dict.items()])
dash_line = "-" * 60 + "\n"
logger.info(f"Environment info:\n{dash_line}{env_info}\n{dash_line}")
# Data
datasets = [build_dataset(cfg.data.train)]
# Target classes
if "task_adapt" in cfg:
target_classes = cfg.task_adapt.get("final", [])
else:
target_classes = datasets[0].CLASSES
# Metadata
meta = dict()
meta["env_info"] = env_info
# meta['config'] = cfg.pretty_text
meta["seed"] = cfg.seed
meta["exp_name"] = cfg.work_dir
if cfg.checkpoint_config is not None:
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=target_classes,
)
# Model
model = self.build_model(cfg, fp16=cfg.get("fp16", False))
model.train()
model.CLASSES = target_classes
if cfg.distributed:
convert_sync_batchnorm(model)
validate = bool(cfg.data.get("val", None))
if self._hyperparams.learning_parameters.auto_adapt_batch_size != BatchSizeAdaptType.NONE:
train_func = partial(train_detector, meta=deepcopy(meta), model=deepcopy(model), distributed=False)
adapt_batch_size(
train_func,
cfg,
datasets,
isinstance(self, NNCFBaseTask), # nncf needs eval hooks
not_increase=(self._hyperparams.learning_parameters.auto_adapt_batch_size == BatchSizeAdaptType.SAFE),
)
train_detector(
model,
datasets,
cfg,
distributed=cfg.distributed,
validate=validate,
timestamp=timestamp,
meta=meta,
)
# Save outputs
output_ckpt_path = os.path.join(cfg.work_dir, "latest.pth")
best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, "best_*.pth"))
if len(best_ckpt_path) > 0:
output_ckpt_path = best_ckpt_path[0]
return dict(
final_ckpt=output_ckpt_path,
)
def _infer_model(
self,
dataset: DatasetEntity,
inference_parameters: Optional[InferenceParameters] = None,
):
"""Main infer function."""
original_subset = dataset[0].subset
for item in dataset:
item.subset = Subset.TESTING
self._data_cfg = ConfigDict(
data=ConfigDict(
train=ConfigDict(
otx_dataset=None,
labels=self._labels,
),
test=ConfigDict(
otx_dataset=dataset,
labels=self._labels,
),
)
)
dump_features = True
dump_saliency_map = not inference_parameters.is_evaluation if inference_parameters else True
self._init_task(dataset)
cfg = self.configure(False, None)
logger.info("infer!")
# Data loader
mm_dataset = build_dataset(cfg.data.test)
samples_per_gpu = cfg.data.test_dataloader.get("samples_per_gpu", 1)
# If the batch size and the number of data are not divisible, the metric may score differently.
# To avoid this, use 1 if they are not divisible.
samples_per_gpu = samples_per_gpu if len(mm_dataset) % samples_per_gpu == 0 else 1
dataloader = build_dataloader(
mm_dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.test_dataloader.get("workers_per_gpu", 0),
num_gpus=len(cfg.gpu_ids),
dist=cfg.distributed,
seed=cfg.get("seed", None),
shuffle=False,
)
# Target classes
if "task_adapt" in cfg:
target_classes = cfg.task_adapt.final
if len(target_classes) < 1:
raise KeyError(
f"target_classes={target_classes} is empty check the metadata from model ckpt or recipe "
"configuration"
)
else:
target_classes = mm_dataset.CLASSES
# Model
model = self.build_model(cfg, fp16=cfg.get("fp16", False))
model.CLASSES = target_classes
model.eval()
feature_model = model.model_t if self._train_type == TrainType.Semisupervised else model
model = build_data_parallel(model, cfg, distributed=False)
# InferenceProgressCallback (Time Monitor enable into Infer task)
time_monitor = None
if cfg.get("custom_hooks", None):
time_monitor = [hook.time_monitor for hook in cfg.custom_hooks if hook.type == "OTXProgressHook"]
time_monitor = time_monitor[0] if time_monitor else None
if time_monitor is not None:
# pylint: disable=unused-argument
def pre_hook(module, inp):
time_monitor.on_test_batch_begin(None, None)
def hook(module, inp, outp):
time_monitor.on_test_batch_end(None, None)
model.register_forward_pre_hook(pre_hook)
model.register_forward_hook(hook)
# Check and unwrap ImageTilingDataset object from TaskAdaptEvalDataset
while hasattr(mm_dataset, "dataset") and not isinstance(mm_dataset, ImageTilingDataset):
mm_dataset = mm_dataset.dataset
        # Use a class-wise saliency map for single-stage detectors,
        # otherwise fall back to a class-agnostic saliency map.
if not dump_saliency_map:
saliency_hook: Union[nullcontext, BaseRecordingForwardHook] = nullcontext()
else:
raw_model = feature_model
if isinstance(raw_model, TwoStageDetector):
height, width, _ = mm_dataset[0]["img_metas"][0].data["img_shape"]
saliency_hook = MaskRCNNRecordingForwardHook(
feature_model,
input_img_shape=(height, width),
normalize=not isinstance(mm_dataset, ImageTilingDataset),
)
elif isinstance(raw_model, DETR):
saliency_hook = ActivationMapHook(feature_model)
else:
saliency_hook = DetClassProbabilityMapHook(
feature_model,
use_cls_softmax=not isinstance(mm_dataset, ImageTilingDataset),
normalize=not isinstance(mm_dataset, ImageTilingDataset),
)
if not dump_features:
feature_vector_hook: Union[nullcontext, BaseRecordingForwardHook] = nullcontext()
else:
feature_vector_hook = FeatureVectorHook(feature_model)
eval_predictions = []
# pylint: disable=no-member
with feature_vector_hook:
with saliency_hook:
eval_predictions = single_gpu_test(model, dataloader)
if isinstance(feature_vector_hook, nullcontext):
feature_vectors = [None] * len(mm_dataset)
else:
feature_vectors = feature_vector_hook.records
if isinstance(saliency_hook, nullcontext):
saliency_maps = [None] * len(mm_dataset)
else:
saliency_maps = saliency_hook.records
for key in ["interval", "tmpdir", "start", "gpu_collect", "save_best", "rule", "dynamic_intervals"]:
cfg.evaluation.pop(key, None)
if isinstance(mm_dataset, ImageTilingDataset):
eval_predictions = mm_dataset.merge(eval_predictions)
            # average tile feature vectors for each image
feature_vectors = mm_dataset.merge_vectors(feature_vectors, dump_features)
saliency_maps = mm_dataset.merge_maps(saliency_maps, dump_saliency_map)
metric = None
if inference_parameters and inference_parameters.is_evaluation:
if isinstance(mm_dataset, ImageTilingDataset):
metric = mm_dataset.dataset.evaluate(eval_predictions, **cfg.evaluation)
else:
metric = mm_dataset.evaluate(eval_predictions, **cfg.evaluation)
metric = metric["mAP"] if isinstance(cfg.evaluation.metric, list) else metric[cfg.evaluation.metric]
assert len(eval_predictions) == len(feature_vectors) == len(saliency_maps), (
"Number of elements should be the same, however, number of outputs are "
f"{len(eval_predictions)}, {len(feature_vectors)}, and {len(saliency_maps)}"
)
results = dict(
outputs=dict(
classes=target_classes,
detections=eval_predictions,
metric=metric,
feature_vectors=feature_vectors,
saliency_maps=saliency_maps,
)
)
# TODO: InferenceProgressCallback register
output = results["outputs"]
metric = output["metric"]
predictions = output["detections"]
assert len(output["detections"]) == len(output["feature_vectors"]) == len(output["saliency_maps"]), (
"Number of elements should be the same, however, number of outputs are "
f"{len(output['detections'])}, {len(output['feature_vectors'])}, and {len(output['saliency_maps'])}"
)
prediction_results = zip(predictions, output["feature_vectors"], output["saliency_maps"])
# FIXME. This is temporary solution.
# All task(e.g. classification, segmentation) should change item's type to Subset.TESTING
# when the phase is inference.
for item in dataset:
item.subset = original_subset
return prediction_results, metric
# pylint: disable=too-many-statements
def _export_model(
self,
precision: ModelPrecision,
export_format: ExportType,
dump_features: bool,
):
"""Main export function of OTX MMDetection Task."""
self._data_cfg = ConfigDict(
data=ConfigDict(
train=ConfigDict(
otx_dataset=None,
labels=self._labels,
),
test=ConfigDict(
otx_dataset=None,
labels=self._labels,
),
)
)
self._init_task(export=True)
cfg = self.configure(False, None)
self._precision[0] = precision
export_options: Dict[str, Any] = {}
export_options["deploy_cfg"] = self._init_deploy_cfg(cfg)
assert len(self._precision) == 1
export_options["precision"] = str(self._precision[0])
export_options["type"] = str(export_format)
export_options["deploy_cfg"]["dump_features"] = dump_features
if dump_features:
output_names = export_options["deploy_cfg"]["ir_config"]["output_names"]
if "feature_vector" not in output_names:
output_names.append("feature_vector")
if export_options["deploy_cfg"]["codebase_config"]["task"] != "Segmentation":
if "saliency_map" not in output_names:
output_names.append("saliency_map")
# disable softmax and normalization to merge saliency map for tiles and postprocess them altogether
tiling_detection = "tile_cfg" in cfg
export_options["deploy_cfg"]["softmax_saliency_maps"] = not tiling_detection
export_options["deploy_cfg"]["normalize_saliency_maps"] = not tiling_detection
export_options["model_builder"] = getattr(self, "model_builder", build_detector)
if self._precision[0] == ModelPrecision.FP16:
export_options["deploy_cfg"]["backend_config"]["mo_options"]["flags"].append("--compress_to_fp16")
backend_cfg_backup = {}
if export_format == ExportType.ONNX:
backend_cfg_backup = export_options["deploy_cfg"]["backend_config"]
export_options["deploy_cfg"]["backend_config"] = {"type": "onnxruntime"}
export_options["deploy_cfg"]["ir_config"]["dynamic_axes"]["image"] = {0: "batch"}
exporter = DetectionExporter()
results = exporter.run(
cfg,
**export_options,
)
if export_format == ExportType.ONNX:
results["inference_parameters"] = {}
results["inference_parameters"]["mean_values"] = " ".join(
map(str, backend_cfg_backup["mo_options"]["args"]["--mean_values"])
)
results["inference_parameters"]["scale_values"] = " ".join(
map(str, backend_cfg_backup["mo_options"]["args"]["--scale_values"])
)
return results
def _explain_model(
self,
dataset: DatasetEntity,
explain_parameters: Optional[ExplainParameters] = None,
) -> Dict[str, Any]:
"""Main explain function of MMDetectionTask."""
for item in dataset:
item.subset = Subset.TESTING
self._data_cfg = ConfigDict(
data=ConfigDict(
train=ConfigDict(
otx_dataset=None,
labels=self._labels,
),
test=ConfigDict(
otx_dataset=dataset,
labels=self._labels,
),
)
)
self._init_task()
cfg = self.configure(False, None)
samples_per_gpu = cfg.data.test_dataloader.get("samples_per_gpu", 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
# Data loader
mm_dataset = build_dataset(cfg.data.test)
dataloader = build_dataloader(
mm_dataset,
samples_per_gpu=cfg.data.get("samples_per_gpu", 1),
workers_per_gpu=cfg.data.get("workers_per_gpu", 0),
num_gpus=len(cfg.gpu_ids),
dist=cfg.distributed,
seed=cfg.get("seed", None),
shuffle=False,
)
# Target classes
if "task_adapt" in cfg:
target_classes = cfg.task_adapt.final
if len(target_classes) < 1:
raise KeyError(
f"target_classes={target_classes} is empty check the metadata from model ckpt or recipe "
"configuration"
)
else:
target_classes = mm_dataset.CLASSES
# TODO: Check Inference FP16 Support
model = self.build_model(cfg, fp16=cfg.get("fp16", False))
model.CLASSES = target_classes
model.eval()
feature_model = model.model_t if self._train_type == TrainType.Semisupervised else model
model = build_data_parallel(model, cfg, distributed=False)
# InferenceProgressCallback (Time Monitor enable into Infer task)
time_monitor = None
if cfg.get("custom_hooks", None):
time_monitor = [hook.time_monitor for hook in cfg.custom_hooks if hook.type == "OTXProgressHook"]
time_monitor = time_monitor[0] if time_monitor else None
if time_monitor is not None:
# pylint: disable=unused-argument
def pre_hook(module, inp):
time_monitor.on_test_batch_begin(None, None)
def hook(module, inp, outp):
time_monitor.on_test_batch_end(None, None)
model.register_forward_pre_hook(pre_hook)
model.register_forward_hook(hook)
# Check and unwrap ImageTilingDataset object from TaskAdaptEvalDataset
while hasattr(mm_dataset, "dataset") and not isinstance(mm_dataset, ImageTilingDataset):
mm_dataset = mm_dataset.dataset
per_class_xai_algorithm: Union[partial[MaskRCNNRecordingForwardHook], partial[DetClassProbabilityMapHook]]
if isinstance(feature_model, TwoStageDetector):
height, width, _ = mm_dataset[0]["img_metas"][0].data["img_shape"]
            # keep (height, width) ordering consistent with the hook construction in _infer_model
            per_class_xai_algorithm = partial(
                MaskRCNNRecordingForwardHook, input_img_shape=(height, width), normalize=True
            )
else:
per_class_xai_algorithm = partial(
DetClassProbabilityMapHook,
use_cls_softmax=not isinstance(mm_dataset, ImageTilingDataset),
normalize=not isinstance(mm_dataset, ImageTilingDataset),
)
explainer_hook_selector = {
"classwisesaliencymap": per_class_xai_algorithm,
"eigencam": EigenCamHook,
"activationmap": ActivationMapHook,
}
explainer = explain_parameters.explainer if explain_parameters else None
if explainer is not None:
explainer_hook = explainer_hook_selector.get(explainer.lower(), None)
else:
explainer_hook = None
if explainer_hook is None:
raise NotImplementedError(f"Explainer algorithm {explainer} not supported!")
logger.info(f"Explainer algorithm: {explainer}")
eval_predictions = []
with explainer_hook(feature_model) as saliency_hook: # type: ignore
for data in dataloader:
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
eval_predictions.extend(result)
saliency_maps = saliency_hook.records
# In the tiling case, merge saliency map from each tile into united map for image
if isinstance(mm_dataset, ImageTilingDataset):
saliency_maps = mm_dataset.merge_maps(saliency_maps, dump_maps=True)
outputs = dict(detections=eval_predictions, saliency_maps=saliency_maps)
return outputs
# This should be removed
def update_override_configurations(self, config):
"""Update override_configs."""
logger.info(f"update override config with: {config}")
config = ConfigDict(**config)
self.override_configs.update(config)
    # This should be moved somewhere
def _init_deploy_cfg(self, cfg) -> Union[Config, None]:
base_dir = os.path.abspath(os.path.dirname(self._task_environment.model_template.model_template_path))
if self._hyperparams.tiling_parameters.enable_tile_classifier:
deploy_cfg_path = os.path.join(base_dir, "deployment_tile_classifier.py")
else:
deploy_cfg_path = os.path.join(base_dir, "deployment.py")
deploy_cfg = None
if os.path.exists(deploy_cfg_path):
deploy_cfg = MPAConfig.fromfile(deploy_cfg_path)
patch_input_preprocessing(cfg, deploy_cfg)
patch_input_shape(cfg, deploy_cfg)
patch_ir_scale_factor(deploy_cfg, self._hyperparams)
return deploy_cfg
def save_model(self, output_model: ModelEntity):
"""Save best model weights in DetectionTrainTask."""
logger.info("called save_model")
buffer = io.BytesIO()
hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True))
labels = {label.name: label.color.rgb_tuple for label in self._labels}
model_ckpt = torch.load(self._model_ckpt)
modelinfo = {
"model": model_ckpt,
"config": hyperparams_str,
"labels": labels,
"confidence_threshold": self.confidence_threshold,
"VERSION": 1,
}
if self.config is not None and should_cluster_anchors(self.config):
modelinfo["anchors"] = {}
self._update_anchors(modelinfo["anchors"], self.config.model.bbox_head.anchor_generator)
torch.save(modelinfo, buffer)
output_model.set_data("weights.pth", buffer.getvalue())
output_model.set_data(
"label_schema.json",
label_schema_to_bytes(self._task_environment.label_schema),
)
output_model.precision = self._precision
@staticmethod
def _update_anchors(origin, new):
logger.info("Updating anchors")
origin["heights"] = new["heights"]
origin["widths"] = new["widths"]
# These need to be moved somewhere
def _update_caching_modules(self, data_cfg: Config) -> None:
def _find_max_num_workers(cfg: dict):
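            # recursively scan the (possibly nested) data config for the largest
            # ``workers_per_gpu`` value; returns 0 if none is found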
num_workers = [0]
for key, value in cfg.items():
if key == "workers_per_gpu" and isinstance(value, int):
num_workers += [value]
elif isinstance(value, dict):
num_workers += [_find_max_num_workers(value)]
return max(num_workers)
def _get_mem_cache_size():
if not hasattr(self._hyperparams.algo_backend, "mem_cache_size"):
return 0
return self._hyperparams.algo_backend.mem_cache_size
max_num_workers = _find_max_num_workers(data_cfg)
mem_cache_size = _get_mem_cache_size()
mode = "multiprocessing" if max_num_workers > 0 else "singleprocessing"
caching.MemCacheHandlerSingleton.create(mode, mem_cache_size)
update_or_add_custom_hook(
self._recipe_cfg,
ConfigDict(type="MemCacheHook", priority="VERY_LOW"),
)
--- /alipay/aop/api/domain/AnttechBlockchainFinancePfPaymentQueryModel.py (alipay/alipay-sdk-python-all, Apache-2.0) ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AnttechBlockchainFinancePfPaymentQueryModel(object):
def __init__(self):
self._buss_refr_no = None
self._financing_id = None
self._platform_id = None
@property
def buss_refr_no(self):
return self._buss_refr_no
@buss_refr_no.setter
def buss_refr_no(self, value):
self._buss_refr_no = value
@property
def financing_id(self):
return self._financing_id
@financing_id.setter
def financing_id(self, value):
self._financing_id = value
@property
def platform_id(self):
return self._platform_id
@platform_id.setter
def platform_id(self, value):
self._platform_id = value
def to_alipay_dict(self):
params = dict()
if self.buss_refr_no:
if hasattr(self.buss_refr_no, 'to_alipay_dict'):
params['buss_refr_no'] = self.buss_refr_no.to_alipay_dict()
else:
params['buss_refr_no'] = self.buss_refr_no
if self.financing_id:
if hasattr(self.financing_id, 'to_alipay_dict'):
params['financing_id'] = self.financing_id.to_alipay_dict()
else:
params['financing_id'] = self.financing_id
if self.platform_id:
if hasattr(self.platform_id, 'to_alipay_dict'):
params['platform_id'] = self.platform_id.to_alipay_dict()
else:
params['platform_id'] = self.platform_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AnttechBlockchainFinancePfPaymentQueryModel()
if 'buss_refr_no' in d:
o.buss_refr_no = d['buss_refr_no']
if 'financing_id' in d:
o.financing_id = d['financing_id']
if 'platform_id' in d:
o.platform_id = d['platform_id']
return o
--- /utils/HtmlToImgPdfKit.py (aeasringnar/django-RESTfulAPI, MIT) ---
import logging
import os
import time
from enum import Enum
from io import BytesIO
from shutil import copy
from tempfile import mkdtemp
from typing import Any, Tuple, Union
from uuid import uuid4
try:
import imgkit
import pdfkit
except Exception as e:
raise ImportError(
        'imgkit or pdfkit not found, please run: pip install imgkit pdfkit')
class DataType(int, Enum):
FILE = 1
STRING = 2
URL = 3
@classmethod
def keys(cls) -> set:
return set(cls.__members__.keys())
@classmethod
def values(cls) -> list:
return list(cls.__members__.values())
class HtmlToImgPdfKit:
IMGKIT_FUC = {
1: imgkit.from_file,
2: imgkit.from_string,
3: imgkit.from_url
}
PDFKIT_FUC = {
1: pdfkit.from_file,
2: pdfkit.from_string,
3: pdfkit.from_url
}
def __init__(self, kit_path: Union[str, None] = None, is_debug: bool = False) -> None:
if kit_path and not os.path.exists(kit_path):
raise FileNotFoundError(
                'kit_path is not a valid file path or the file was not found')
self.imgkit_conf = imgkit.config(
wkhtmltoimage=kit_path) if kit_path else None
self.pdfkit_conf = pdfkit.configuration(
wkhtmltopdf=kit_path) if kit_path else None
self.default_imgkit_options = {
'format': 'jpg',
            # 'crop-w': '832',  # set image width
'crop-y': '0',
'crop-x': '0',
'encoding': "UTF-8",
}
# options doc https://wkhtmltopdf.org/usage/wkhtmltopdf.txt
self.default_pdfkit_options = {
"encoding": "UTF-8",
# 'margin-top': '0',
# 'margin-right': '0', # Set the page right margin (default 10mm)
# 'margin-bottom': '0',
# 'margin-left': '0', # Set the page left margin (default 10mm)
}
self.debug = is_debug
self.tmp_path = mkdtemp()
def to_img(self, data: str, data_type: DataType = DataType.FILE, out_path: str = '', options: dict = {}) -> Tuple[bool, Union[BytesIO, None], str]:
        '''Render HTML to an image.
        args:
            data str: the input data; may be a file path, HTML string content, or a URL. Required.
            data_type int: type of the input data; 1 = file path, 2 = string content, 3 = URL. Required.
            out_path str: the output file path. Optional.
            options dict: option dict used to control the generated image format. Optional.
        returns:
            (bool, BytesIO/None, str): success flag, the file stream (or None), and a status/error message
        '''
        img_format = options.get(
            'format', self.default_imgkit_options.get('format', 'jpg'))
out_path = f"{os.path.splitext(out_path)[0]}.{img_format}"
self.default_imgkit_options.update(options)
tmp_file_path = os.path.join(self.tmp_path, f"{uuid4()}.{img_format}")
if self.debug:
print(tmp_file_path)
try:
if data_type not in DataType.values():
raise Exception('this data type not found')
handle_fuc = self.IMGKIT_FUC[data_type]
handle_fuc(data, tmp_file_path,
options=self.default_imgkit_options, config=self.imgkit_conf)
if out_path:
copy(tmp_file_path, out_path)
with open(tmp_file_path, 'rb') as f:
return True, BytesIO(f.read()), 'ok'
except Exception as e:
logging.error(e)
logging.exception(e)
return False, None, str(e)
finally:
if os.path.exists(tmp_file_path) and not self.debug:
os.remove(tmp_file_path)
def to_pdf(self, data: str, data_type: DataType = DataType.FILE, out_path: str = '', options: dict = {}) -> Tuple[bool, Union[BytesIO, None], str]:
        '''Render HTML to a PDF.
        args:
            data str: the input data; may be a file path, HTML string content, or a URL. Required.
            data_type int: type of the input data; 1 = file path, 2 = string content, 3 = URL. Required.
            out_path str: the output file path. Optional.
            options dict: option dict used to control the generated PDF format. Optional.
        returns:
            (bool, BytesIO/None, str): success flag, the file stream (or None), and a status/error message
        '''
self.default_pdfkit_options.update(options)
tmp_file_path = os.path.join(self.tmp_path, f"{uuid4()}.pdf")
if self.debug:
print(tmp_file_path)
try:
if data_type not in DataType.values():
raise Exception('this data type not found')
handle_fuc = self.PDFKIT_FUC[data_type]
handle_fuc(data, tmp_file_path, options=self.default_pdfkit_options,
configuration=self.pdfkit_conf)
if out_path:
copy(tmp_file_path, out_path)
with open(tmp_file_path, 'rb') as f:
return True, BytesIO(f.read()), 'ok'
except Exception as e:
logging.error(e)
logging.exception(e)
return False, None, str(e)
finally:
if os.path.exists(tmp_file_path) and not self.debug:
os.remove(tmp_file_path)
def __del__(self):
try:
if os.path.exists(self.tmp_path) and not self.debug:
os.removedirs(self.tmp_path)
except Exception as e:
logging.error(e)
logging.exception(e)
if __name__ == "__main__":
kit = HtmlToImgPdfKit()
kit.to_img('card.html', DataType.FILE, options={
'crop-w': '526'}, out_path='current.jpg')
kit.to_pdf('card.html', DataType.FILE, out_path='current.pdf')
--- /pyro/ops/gamma_gaussian.py (pyro-ppl/pyro, Apache-2.0) ---
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
from torch.distributions.utils import lazy_property
from torch.nn.functional import pad
from pyro.distributions.multivariate_studentt import MultivariateStudentT
from pyro.distributions.torch import MultivariateNormal
from pyro.distributions.util import broadcast_shape
from pyro.ops.tensor_utils import precision_to_scale_tril
class Gamma:
"""
Non-normalized Gamma distribution.
Gamma(concentration, rate) ~ (concentration - 1) * log(s) - rate * s
"""
def __init__(self, log_normalizer, concentration, rate):
self.log_normalizer = log_normalizer
self.concentration = concentration
self.rate = rate
def log_density(self, s):
"""
Non-normalized log probability of Gamma distribution.
This is mainly used for testing.
"""
return self.log_normalizer + (self.concentration - 1) * s.log() - self.rate * s
def logsumexp(self):
"""
Integrates out the latent variable.
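This uses the Gamma integral
``int_0^inf s^(concentration - 1) * exp(-rate * s) ds = Gamma(concentration) / rate^concentration``.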
"""
return (
self.log_normalizer
+ torch.lgamma(self.concentration)
- self.concentration * self.rate.log()
)
class GammaGaussian:
"""
Non-normalized GammaGaussian distribution:
GammaGaussian(x, s) ~ (concentration + 0.5 * dim - 1) * log(s)
- rate * s - s * 0.5 * info_vec.T @ inv(precision) @ info_vec
- s * 0.5 * x.T @ precision @ x + s * x.T @ info_vec,
which will be reparameterized as
GammaGaussian(x, s) =: alpha * log(s) + s * (-0.5 * x.T @ precision @ x + x.T @ info_vec - beta).
The `s` variable plays the role of a mixing variable such that
p(x | s) ~ Gaussian(s * info_vec, s * precision).
Conditioned on `s`, this represents an arbitrary semidefinite quadratic function,
which can be interpreted as a rank-deficient Gaussian distribution.
The precision matrix may have zero eigenvalues, thus it may be impossible
to work directly with the covariance matrix.
:param torch.Tensor log_normalizer: a normalization constant, which is mainly used to keep
track of normalization terms during contractions.
:param torch.Tensor info_vec: information vector, which is a scaled version of the mean
``info_vec = precision @ mean``. We use this representation to make Gaussian contraction
fast and stable.
:param torch.Tensor precision: precision matrix of this gaussian.
:param torch.Tensor alpha: reparameterized shape parameter of the marginal Gamma distribution of
`s`. The shape parameter Gamma.concentration is reparameterized by:
alpha = Gamma.concentration + 0.5 * dim - 1
:param torch.Tensor beta: reparameterized rate parameter of the marginal Gamma distribution of
`s`. The rate parameter Gamma.rate is reparameterized by:
beta = Gamma.rate + 0.5 * info_vec.T @ inv(precision) @ info_vec
"""
def __init__(self, log_normalizer, info_vec, precision, alpha, beta):
# NB: using info_vec instead of mean to deal with rank-deficient problem
assert info_vec.dim() >= 1
assert precision.dim() >= 2
assert precision.shape[-2:] == info_vec.shape[-1:] * 2
self.log_normalizer = log_normalizer
self.info_vec = info_vec
self.precision = precision
self.alpha = alpha
self.beta = beta
def dim(self):
return self.info_vec.size(-1)
@lazy_property
def batch_shape(self):
return broadcast_shape(
self.log_normalizer.shape,
self.info_vec.shape[:-1],
self.precision.shape[:-2],
self.alpha.shape,
self.beta.shape,
)
def expand(self, batch_shape):
n = self.dim()
log_normalizer = self.log_normalizer.expand(batch_shape)
info_vec = self.info_vec.expand(batch_shape + (n,))
precision = self.precision.expand(batch_shape + (n, n))
alpha = self.alpha.expand(batch_shape)
beta = self.beta.expand(batch_shape)
return GammaGaussian(log_normalizer, info_vec, precision, alpha, beta)
def reshape(self, batch_shape):
n = self.dim()
log_normalizer = self.log_normalizer.reshape(batch_shape)
info_vec = self.info_vec.reshape(batch_shape + (n,))
precision = self.precision.reshape(batch_shape + (n, n))
alpha = self.alpha.reshape(batch_shape)
beta = self.beta.reshape(batch_shape)
return GammaGaussian(log_normalizer, info_vec, precision, alpha, beta)
def __getitem__(self, index):
"""
Index into the batch_shape of a GammaGaussian.
"""
assert isinstance(index, tuple)
log_normalizer = self.log_normalizer[index]
info_vec = self.info_vec[index + (slice(None),)]
precision = self.precision[index + (slice(None), slice(None))]
alpha = self.alpha[index]
beta = self.beta[index]
return GammaGaussian(log_normalizer, info_vec, precision, alpha, beta)
@staticmethod
def cat(parts, dim=0):
"""
Concatenate a list of GammaGaussians along a given batch dimension.
"""
if dim < 0:
dim += len(parts[0].batch_shape)
args = [
torch.cat([getattr(g, attr) for g in parts], dim=dim)
for attr in ["log_normalizer", "info_vec", "precision", "alpha", "beta"]
]
return GammaGaussian(*args)
def event_pad(self, left=0, right=0):
"""
Pad along event dimension.
"""
lr = (left, right)
info_vec = pad(self.info_vec, lr)
precision = pad(self.precision, lr + lr)
# no change for alpha, beta because we are working with reparameterized version;
        # otherwise, we would need to change concentration (similarly for rate) to
        # keep the term (concentration + 0.5 * dim - 1) * log(s) constant
# (note that `dim` has been changed due to padding)
return GammaGaussian(
self.log_normalizer, info_vec, precision, self.alpha, self.beta
)
def event_permute(self, perm):
"""
Permute along event dimension.
"""
assert isinstance(perm, torch.Tensor)
assert perm.shape == (self.dim(),)
info_vec = self.info_vec[..., perm]
precision = self.precision[..., perm][..., perm, :]
return GammaGaussian(
self.log_normalizer, info_vec, precision, self.alpha, self.beta
)
def __add__(self, other):
"""
Adds two GammaGaussians in log-density space.
"""
assert isinstance(other, GammaGaussian)
assert self.dim() == other.dim()
return GammaGaussian(
self.log_normalizer + other.log_normalizer,
self.info_vec + other.info_vec,
self.precision + other.precision,
self.alpha + other.alpha,
self.beta + other.beta,
)
def log_density(self, value, s):
"""
Evaluate the log density of this GammaGaussian at a point value::
alpha * log(s) + s * (-0.5 * value.T @ precision @ value + value.T @ info_vec - beta) + log_normalizer
This is mainly used for testing.
"""
if value.size(-1) == 0:
batch_shape = broadcast_shape(value.shape[:-1], s.shape, self.batch_shape)
return (
self.alpha * s.log()
- self.beta * s
+ self.log_normalizer.expand(batch_shape)
)
result = (-0.5) * self.precision.matmul(value.unsqueeze(-1)).squeeze(-1)
result = result + self.info_vec
result = (value * result).sum(-1)
return self.alpha * s.log() + (result - self.beta) * s + self.log_normalizer
def condition(self, value):
"""
Condition the Gaussian component on a trailing subset of its state.
This should satisfy::
g.condition(y).dim() == g.dim() - y.size(-1)
Note that since this is a non-normalized Gaussian, we include the
density of ``y`` in the result. Thus :meth:`condition` is similar to a
``functools.partial`` binding of arguments::
left = x[..., :n]
right = x[..., n:]
g.log_density(x, s) == g.condition(right).log_density(left, s)
"""
assert isinstance(value, torch.Tensor)
assert value.size(-1) <= self.info_vec.size(-1)
n = self.dim() - value.size(-1)
info_a = self.info_vec[..., :n]
info_b = self.info_vec[..., n:]
P_aa = self.precision[..., :n, :n]
P_ab = self.precision[..., :n, n:]
P_bb = self.precision[..., n:, n:]
b = value
info_vec = info_a - P_ab.matmul(b.unsqueeze(-1)).squeeze(-1)
precision = P_aa
log_normalizer = self.log_normalizer
alpha = self.alpha
beta = (
self.beta
+ 0.5 * P_bb.matmul(b.unsqueeze(-1)).squeeze(-1).mul(b).sum(-1)
- b.mul(info_b).sum(-1)
)
return GammaGaussian(log_normalizer, info_vec, precision, alpha, beta)
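    # Sketch of the identity documented above (hypothetical `g` with dim() == 5):
    #   x = torch.randn(5); s = torch.tensor(0.5)
    #   left, right = x[:3], x[3:]
    #   assert torch.allclose(g.log_density(x, s),
    #                         g.condition(right).log_density(left, s))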
def marginalize(self, left=0, right=0):
"""
Marginalizing out variables on either side of the event dimension::
g.marginalize(left=n).event_logsumexp() = g.event_logsumexp()
g.marginalize(right=n).event_logsumexp() = g.event_logsumexp()
and for data ``x``:
g.condition(x).event_logsumexp().log_density(s)
= g.marginalize(left=g.dim() - x.size(-1)).log_density(x, s)
"""
# NB: the easiest way to think about this process is to consider GammaGaussian
# as a Gaussian with precision and info_vec scaled by `s`.
if left == 0 and right == 0:
return self
if left > 0 and right > 0:
raise NotImplementedError
n = self.dim()
n_b = left + right
a = slice(left, n - right) # preserved
b = slice(None, left) if left else slice(n - right, None)
P_aa = self.precision[..., a, a]
P_ba = self.precision[..., b, a]
P_bb = self.precision[..., b, b]
P_b = torch.linalg.cholesky(P_bb)
P_a = torch.linalg.solve_triangular(P_b, P_ba, upper=False)
P_at = P_a.transpose(-1, -2)
precision = P_aa - P_at.matmul(P_a)
info_a = self.info_vec[..., a]
info_b = self.info_vec[..., b]
b_tmp = torch.linalg.solve_triangular(P_b, info_b.unsqueeze(-1), upper=False)
info_vec = info_a
if n_b < n:
info_vec = info_vec - P_at.matmul(b_tmp).squeeze(-1)
alpha = self.alpha - 0.5 * n_b
beta = self.beta - 0.5 * b_tmp.squeeze(-1).pow(2).sum(-1)
log_normalizer = (
self.log_normalizer
+ 0.5 * n_b * math.log(2 * math.pi)
- P_b.diagonal(dim1=-2, dim2=-1).log().sum(-1)
)
return GammaGaussian(log_normalizer, info_vec, precision, alpha, beta)
def compound(self):
"""
Integrates out the latent multiplier `s`. The result will be a
Student-T distribution.
"""
concentration = self.alpha - 0.5 * self.dim() + 1
scale_tril = precision_to_scale_tril(self.precision)
scale_tril_t_u = (
scale_tril.transpose(-1, -2).matmul(self.info_vec.unsqueeze(-1)).squeeze(-1)
)
u_Pinv_u = scale_tril_t_u.pow(2).sum(-1)
rate = self.beta - 0.5 * u_Pinv_u
loc = scale_tril.matmul(scale_tril_t_u.unsqueeze(-1)).squeeze(-1)
scale_tril = scale_tril * (rate / concentration).sqrt().unsqueeze(-1).unsqueeze(
-1
)
return MultivariateStudentT(2 * concentration, loc, scale_tril)
def event_logsumexp(self):
"""
Integrates out all latent state (i.e. operating on event dimensions) of Gaussian component.
"""
n = self.dim()
chol_P = torch.linalg.cholesky(self.precision)
chol_P_u = torch.linalg.solve_triangular(
chol_P, self.info_vec.unsqueeze(-1), upper=False
).squeeze(-1)
u_P_u = chol_P_u.pow(2).sum(-1)
# considering GammaGaussian as a Gaussian with precision = s * precision, info_vec = s * info_vec,
# marginalize x variable, we get
# logsumexp(s) = alpha * log(s) - s * beta + 0.5 n * log(2 pi) + \
# 0.5 s * uPu - 0.5 * log|P| - 0.5 n * log(s)
        # using the original parameterization of Gamma, we get
        # logsumexp(s) = (concentration - 1) * log(s) - s * rate + 0.5 n * log(2 pi) - 0.5 * log|P|
# Note that `(concentration - 1) * log(s) - s * rate` is unnormalized log_prob of
# Gamma(concentration, rate)
concentration = self.alpha - 0.5 * n + 1
rate = self.beta - 0.5 * u_P_u
log_normalizer_tmp = 0.5 * n * math.log(2 * math.pi) - chol_P.diagonal(
dim1=-2, dim2=-1
).log().sum(-1)
return Gamma(self.log_normalizer + log_normalizer_tmp, concentration, rate)
def gamma_and_mvn_to_gamma_gaussian(gamma, mvn):
"""
Convert a pair of Gamma and Gaussian distributions to a GammaGaussian.
p(x | s) ~ Gaussian(s * info_vec, s * precision)
p(s) ~ Gamma(alpha, beta)
p(x, s) ~ GammaGaussian(info_vec, precision, alpha, beta)
:param ~pyro.distributions.Gamma gamma: the mixing distribution
:param ~pyro.distributions.MultivariateNormal mvn: the conditional distribution
when mixing is 1.
:return: A GammaGaussian object.
    :rtype: ~pyro.ops.gamma_gaussian.GammaGaussian
"""
assert isinstance(gamma, torch.distributions.Gamma)
assert isinstance(mvn, torch.distributions.MultivariateNormal)
n = mvn.loc.size(-1)
precision = mvn.precision_matrix
info_vec = precision.matmul(mvn.loc.unsqueeze(-1)).squeeze(-1)
    # reparameterized version of concentration, rate in GammaGaussian
alpha = gamma.concentration + (0.5 * n - 1)
beta = gamma.rate + 0.5 * (info_vec * mvn.loc).sum(-1)
gaussian_logsumexp = 0.5 * n * math.log(2 * math.pi) + mvn.scale_tril.diagonal(
dim1=-2, dim2=-1
).log().sum(-1)
log_normalizer = -Gamma(
gaussian_logsumexp, gamma.concentration, gamma.rate
).logsumexp()
return GammaGaussian(log_normalizer, info_vec, precision, alpha, beta)
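# Usage sketch (illustrative, not from the original file): the resulting
# GammaGaussian compounds to a multivariate Student-T.
#
#   gamma = torch.distributions.Gamma(torch.tensor(4.0), torch.tensor(4.0))
#   mvn = torch.distributions.MultivariateNormal(torch.zeros(2), torch.eye(2))
#   gg = gamma_and_mvn_to_gamma_gaussian(gamma, mvn)
#   student_t = gg.compound()  # df = 2 * concentration = 8 for these numbers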
def scale_mvn(mvn, s):
"""
    Transforms an MVN distribution into another MVN distribution according to
scale(mvn(loc, precision), s) := mvn(loc, s * precision).
"""
assert isinstance(mvn, torch.distributions.MultivariateNormal)
assert isinstance(s, torch.Tensor)
batch_shape = broadcast_shape(s.shape, mvn.batch_shape)
loc = mvn.loc.expand(batch_shape + (-1,))
# XXX: we might use mvn._unbroadcasted_scale_tril here
scale_tril = mvn.scale_tril / s.sqrt().unsqueeze(-1).unsqueeze(-1)
return MultivariateNormal(loc, scale_tril=scale_tril)
def matrix_and_mvn_to_gamma_gaussian(matrix, mvn):
"""
Convert a noisy affine function to a GammaGaussian, where the noise precision
is scaled by an auxiliary variable `s`. The noisy affine function (conditioned
on `s`) is defined as::
y = x @ matrix + scale(mvn, s).sample()
:param ~torch.Tensor matrix: A matrix with rightmost shape ``(x_dim, y_dim)``.
:param ~pyro.distributions.MultivariateNormal mvn: A multivariate normal distribution.
:return: A GammaGaussian with broadcasted batch shape and ``.dim() == x_dim + y_dim``.
    :rtype: ~pyro.ops.gamma_gaussian.GammaGaussian
"""
assert isinstance(mvn, torch.distributions.MultivariateNormal)
assert isinstance(matrix, torch.Tensor)
x_dim, y_dim = matrix.shape[-2:]
assert mvn.event_shape == (y_dim,)
batch_shape = broadcast_shape(matrix.shape[:-2], mvn.batch_shape)
matrix = matrix.expand(batch_shape + (x_dim, y_dim))
mvn = mvn.expand(batch_shape)
P_yy = mvn.precision_matrix
neg_P_xy = matrix.matmul(P_yy)
P_xy = -neg_P_xy
P_yx = P_xy.transpose(-1, -2)
P_xx = neg_P_xy.matmul(matrix.transpose(-1, -2))
precision = torch.cat(
[torch.cat([P_xx, P_xy], -1), torch.cat([P_yx, P_yy], -1)], -2
)
info_y = P_yy.matmul(mvn.loc.unsqueeze(-1)).squeeze(-1)
info_x = -matrix.matmul(info_y.unsqueeze(-1)).squeeze(-1)
info_vec = torch.cat([info_x, info_y], -1)
log_normalizer = -0.5 * y_dim * math.log(2 * math.pi) - mvn.scale_tril.diagonal(
dim1=-2, dim2=-1
).log().sum(-1)
beta = 0.5 * (info_y * mvn.loc).sum(-1)
alpha = beta.new_full(beta.shape, 0.5 * y_dim)
result = GammaGaussian(log_normalizer, info_vec, precision, alpha, beta)
assert result.batch_shape == batch_shape
assert result.dim() == x_dim + y_dim
return result
def gamma_gaussian_tensordot(x, y, dims=0):
"""
Computes the integral over two GammaGaussians:
`(x @ y)((a,c),s) = log(integral(exp(x((a,b),s) + y((b,c),s)), b))`,
    where ``x`` is a GammaGaussian over variables (a,b), ``y`` is a GammaGaussian over variables
(b,c), (a,b,c) can each be sets of zero or more variables, and `dims` is the size of b.
:param x: a GammaGaussian instance
:param y: a GammaGaussian instance
:param dims: number of variables to contract
"""
assert isinstance(x, GammaGaussian)
assert isinstance(y, GammaGaussian)
na = x.dim() - dims
nb = dims
nc = y.dim() - dims
assert na >= 0
assert nb >= 0
assert nc >= 0
device = x.info_vec.device
perm = torch.cat(
[
torch.arange(na, device=device),
torch.arange(x.dim(), x.dim() + nc, device=device),
torch.arange(na, x.dim(), device=device),
]
)
return (
(x.event_pad(right=nc) + y.event_pad(left=na))
.event_permute(perm)
.marginalize(right=nb)
)
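# Usage sketch (illustrative): contracting two GammaGaussians that share `nb`
# middle variables, e.g. one message-passing step in a Kalman-style filter:
#
#   xy = gamma_gaussian_tensordot(x, y, dims=nb)  # x over (a, b), y over (b, c)
#   assert xy.dim() == x.dim() + y.dim() - 2 * nb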
|
b2316bf7ed13240d48406d0237f8c34a82246bd6
|
8b5d61f17ab2e4c158270cf6dda79f9a47870df1
|
/sknetwork/linalg/__init__.py
|
7dd00bda7ce8c28682737d60c5d0ba145ac6c02a
|
[
"BSD-3-Clause"
] |
permissive
|
sknetwork-team/scikit-network
|
55a5ecbbbd2dfc78095aa74f3953c770357cadbb
|
95cec38d56b086b95616d2f1d13a9b98c6c8b534
|
refs/heads/master
| 2023-09-03T21:56:42.345214
| 2023-05-22T14:12:57
| 2023-05-22T14:12:57
| 135,287,970
| 581
| 73
|
NOASSERTION
| 2023-07-21T05:42:25
| 2018-05-29T11:45:42
|
Python
|
UTF-8
|
Python
| false
| false
| 536
|
py
|
__init__.py
|
"""Module of linear algebra."""
from sknetwork.linalg.basics import safe_sparse_dot
from sknetwork.linalg.eig_solver import EigSolver, LanczosEig
from sknetwork.linalg.laplacian import get_laplacian
from sknetwork.linalg.normalization import diagonal_pseudo_inverse, get_norms, normalize
from sknetwork.linalg.operators import Regularizer, Laplacian, Normalizer, CoNeighbor
from sknetwork.linalg.polynome import Polynome
from sknetwork.linalg.sparse_lowrank import SparseLR
from sknetwork.linalg.svd_solver import SVDSolver, LanczosSVD
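# Minimal usage sketch (illustrative, not part of the module): row-normalize a
# sparse adjacency matrix and multiply it safely.
#
#   import numpy as np
#   from scipy import sparse
#   adjacency = sparse.csr_matrix(np.ones((3, 3)))
#   transition = normalize(adjacency)              # each row sums to 1
#   product = safe_sparse_dot(transition, adjacency)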
|
f263a316f84985a5abcd45d3c512dafe44b21dae
|
e75c5412063078c9ea3e7c71a8dc7a2026083a34
|
/astropy/cosmology/_io/tests/test_yaml.py
|
95b2816181e7053fe51f38b55c5182da8c1893cb
|
[
"BSD-3-Clause"
] |
permissive
|
astropy/astropy
|
d6636f24acdf2b18fc3e413ca0c4b1162a63dd41
|
53188c39a23c33b72df5850ec59e31886f84e29d
|
refs/heads/main
| 2023-08-27T18:16:44.061375
| 2023-08-27T16:07:35
| 2023-08-27T16:07:35
| 2,081,289
| 3,922
| 1,935
|
BSD-3-Clause
| 2023-09-14T09:23:26
| 2011-07-21T01:33:49
|
Python
|
UTF-8
|
Python
| false
| false
| 6,910
|
py
|
test_yaml.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import astropy.units as u
from astropy.cosmology import Cosmology, FlatLambdaCDM, Planck18
from astropy.cosmology import units as cu
from astropy.cosmology._io.yaml import (
from_yaml,
to_yaml,
yaml_constructor,
yaml_representer,
)
from astropy.io.misc.yaml import AstropyDumper, dump, load
from .base import ToFromDirectTestBase, ToFromTestMixinBase
##############################################################################
# Test Serializer
def test_yaml_representer():
"""Test :func:`~astropy.cosmology._io.yaml.yaml_representer`."""
# test function `representer`
representer = yaml_representer("!astropy.cosmology.flrw.LambdaCDM")
assert callable(representer)
# test the normal method of dumping to YAML
yml = dump(Planck18)
assert isinstance(yml, str)
assert yml.startswith("!astropy.cosmology.flrw.FlatLambdaCDM")
def test_yaml_constructor():
"""Test :func:`~astropy.cosmology._io.yaml.yaml_constructor`."""
# test function `constructor`
constructor = yaml_constructor(FlatLambdaCDM)
assert callable(constructor)
# it's too hard to manually construct a node, so we only test dump/load
# this is also a good round-trip test
yml = dump(Planck18)
with u.add_enabled_units(cu): # needed for redshift units
cosmo = load(yml)
assert isinstance(cosmo, FlatLambdaCDM)
assert cosmo == Planck18
assert cosmo.meta == Planck18.meta
##############################################################################
# Test Unified I/O
class ToFromYAMLTestMixin(ToFromTestMixinBase):
"""
Tests for a Cosmology[To/From]Format with ``format="yaml"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
"""
@pytest.fixture
def xfail_if_not_registered_with_yaml(self, cosmo_cls):
"""
YAML I/O only works on registered classes. So the thing to check is
if this class is registered. If not, :func:`pytest.xfail` this test.
Some of the tests define custom cosmologies. They are not registered.
"""
if cosmo_cls not in AstropyDumper.yaml_representers:
pytest.xfail(
f"Cosmologies of type {cosmo_cls} are not registered with YAML."
)
# ===============================================================
def test_to_yaml(self, cosmo_cls, to_format, xfail_if_not_registered_with_yaml):
"""Test cosmology -> YAML."""
yml = to_format("yaml")
assert isinstance(yml, str) # test type
assert yml.startswith("!" + ".".join(cosmo_cls.__module__.split(".")[:2]))
# e.g. "astropy.cosmology" for built-in cosmologies, or "__main__" for the test
# SubCosmology class defined in ``astropy.cosmology.tests.test_core``.
def test_from_yaml_default(
self, cosmo, to_format, from_format, xfail_if_not_registered_with_yaml
):
"""Test cosmology -> YAML -> cosmology."""
yml = to_format("yaml")
got = from_format(yml, format="yaml") # (cannot autoidentify)
assert got.name == cosmo.name
assert got.meta == cosmo.meta
# it won't error if everything matches up
got = from_format(yml, format="yaml")
assert got == cosmo
assert got.meta == cosmo.meta
# auto-identify test moved because it doesn't work.
# see test_from_yaml_autoidentify
def test_from_yaml_autoidentify(
self, cosmo, to_format, from_format, xfail_if_not_registered_with_yaml
):
"""As a non-path string, it does NOT auto-identifies 'format'.
TODO! this says there should be different types of I/O registries.
not just hacking object conversion on top of file I/O.
"""
assert self.can_autodentify("yaml") is False
# Showing the specific error. The str is interpreted as a file location
# but is too long a file name.
yml = to_format("yaml")
with pytest.raises((FileNotFoundError, OSError)): # OSError in Windows
from_format(yml)
# # TODO! this is a challenging test to write. It's also unlikely to happen.
# def test_fromformat_subclass_partial_info_yaml(self, cosmo):
# """
# Test writing from an instance and reading from that class.
# This works with missing information.
# """
# -----------------------------------------------------
@pytest.mark.parametrize("format", [True, False, None])
def test_is_equivalent_to_yaml(
self, cosmo, to_format, format, xfail_if_not_registered_with_yaml
):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a YAML string. YAML can't be identified without "format" specified.
"""
obj = to_format("yaml")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is False
def test_is_equivalent_to_yaml_specify_format(
self, cosmo, to_format, xfail_if_not_registered_with_yaml
):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
Same as ``test_is_equivalent_to_yaml`` but with ``format="yaml"``.
"""
assert cosmo.is_equivalent(to_format("yaml"), format="yaml") is True
class TestToFromYAML(ToFromDirectTestBase, ToFromYAMLTestMixin):
"""
Directly test ``to/from_yaml``.
These are not public API and are discouraged from use, in favor of
``Cosmology.to/from_format(..., format="yaml")``, but should be tested
    regardless because third-party packages might use these in their Cosmology I/O.
Also, it's cheap to test.
"""
def setup_class(self):
"""Set up fixtures to use ``to/from_yaml``, not the I/O abstractions."""
self.functions = {"to": to_yaml, "from": from_yaml}
@pytest.fixture(scope="class", autouse=True)
def setup(self):
"""
Setup and teardown for tests.
This overrides from super because `ToFromDirectTestBase` adds a custom
Cosmology ``CosmologyWithKwargs`` that is not registered with YAML.
"""
yield # run tests
def test_from_yaml_autoidentify(self, cosmo, to_format, from_format):
"""
If directly calling the function there's no auto-identification.
So this overrides the test from `ToFromYAMLTestMixin`
"""
|
2be251e82fa318f80af49533683d5db93e800b16
|
6c88b2cea38b2cead9e2402d46a8fc64949c53df
|
/tests/integration/secret_outputs/python/__main__.py
|
7681abd20df31c9ba1684300b0d85a5ce3aaabb3
|
[
"Apache-2.0"
] |
permissive
|
pulumi/pulumi
|
a9b36c32f0cdd445c22f9ca64ce26c9ae5147575
|
46e2753d02d46a1c077930eeccdfe6738f46c0d2
|
refs/heads/master
| 2023-08-19T10:25:49.849189
| 2023-08-16T04:59:07
| 2023-08-16T04:59:07
| 72,477,752
| 17,553
| 1,082
|
Apache-2.0
| 2023-09-14T21:05:35
| 2016-10-31T21:02:47
|
Go
|
UTF-8
|
Python
| false
| false
| 995
|
py
|
__main__.py
|
# Copyright 2016-2020, Pulumi Corporation. All rights reserved.
from pulumi import export, Input, Output, ResourceOptions
from pulumi.dynamic import Resource, ResourceProvider, CreateResult
class Provider(ResourceProvider):
def create(self, props):
return CreateResult("1", {"prefix": props["prefix"]})
class R(Resource):
prefix: Output[str]
def __init__(self, name, prefix: Input[str], opts: ResourceOptions = None):
super().__init__(Provider(), name, {"prefix": prefix}, opts)
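# Three variants are exercised below: a plain (non-secret) output, an input
# wrapped with Output.secret, and a resource whose "prefix" output is marked
# secret via ResourceOptions(additional_secret_outputs=...).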
without_secret = R("without_secret", prefix=Output.from_input("it's a secret to everybody"))
with_secret = R("with_secret", prefix=Output.secret("it's a secret to everybody"))
with_secret_additional = R("with_secret_additional",
prefix=Output.from_input("it's a secret to everybody"),
opts=ResourceOptions(additional_secret_outputs=["prefix"]))
export("withoutSecret", without_secret)
export("withSecret", with_secret)
export("withSecretAdditional", with_secret_additional)
|
066d0a9430cdabbb1d6f3f9b7c683a09e705ba94
|
e5e0d729f082999a9bec142611365b00f7bfc684
|
/tensorflow/python/framework/constant_op_test.py
|
da0fb64fde6f2b01b9f46f34e503bcb95a5480fd
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/tensorflow
|
ed6294098c7354dfc9f09631fc5ae22dbc278138
|
7cbba04a2ee16d21309eefad5be6585183a2d5a9
|
refs/heads/r1.15.5+nv23.03
| 2023-08-16T22:25:18.037979
| 2023-08-03T22:09:23
| 2023-08-03T22:09:23
| 263,748,045
| 763
| 117
|
Apache-2.0
| 2023-07-03T15:45:19
| 2020-05-13T21:34:32
|
C++
|
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
constant_op_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.constant_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class ConstantOpTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(
dtypes.bfloat16,
dtypes.complex128,
dtypes.complex64,
dtypes.double,
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.half,
dtypes.int16,
dtypes.int32,
dtypes.int64,
dtypes.int8,
dtypes.qint16,
dtypes.qint32,
dtypes.qint8,
dtypes.quint16,
dtypes.quint8,
dtypes.uint16,
dtypes.uint32,
dtypes.uint64,
dtypes.uint8,
)
def test_convert_string_to_number(self, dtype):
with self.assertRaises(TypeError):
constant_op.constant("hello", dtype)
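    # For contrast (illustrative): without a numeric dtype the same value is
    # accepted, e.g. constant_op.constant("hello") yields a tf.string tensor.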
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
78881c0901a5359c429b26dbfbb1426acfb8fe09
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/tune/tests/test_tuner_restore.py
|
57d74d0f8d77fbf8fea1f54164f0c05334bccea9
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 36,730
|
py
|
test_tuner_restore.py
|
import json
import logging
import os
import shutil
import time
import unittest
import pytest
import ray
from ray import train, tune
from ray.train import (
Checkpoint,
CheckpointConfig,
FailureConfig,
RunConfig,
ScalingConfig,
)
from ray.air._internal.uri_utils import URI
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.train._internal.storage import (
get_fs_and_path,
_download_from_fs_path,
_upload_to_fs_path,
)
from ray.tune import Callback, Trainable
from ray.tune.execution.experiment_state import _find_newest_experiment_checkpoint
from ray.tune.experiment import Trial
from ray.tune.result_grid import ResultGrid
from ray.tune.schedulers.async_hyperband import ASHAScheduler
from ray.tune.search.optuna import OptunaSearch
from ray.tune.tune_config import TuneConfig
from ray.tune.tuner import Tuner
from ray.train.tests.util import create_dict_checkpoint, load_dict_checkpoint
@pytest.fixture
def propagate_logs():
    # Ensure that logs are propagated to ancestor handlers. This is required if using the
# caplog fixture with Ray's logging.
# NOTE: This only enables log propagation in the driver process, not the workers!
logger = logging.getLogger("ray")
logger.propagate = True
yield
logger.propagate = False
@pytest.fixture
def ray_start_2_cpus():
address_info = ray.init(num_cpus=2, configure_logging=False)
yield address_info
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def ray_shutdown():
yield
ray.shutdown()
@pytest.fixture(scope="module")
def ray_start_4_cpus():
address_info = ray.init(num_cpus=4, configure_logging=False)
yield address_info
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def chdir_tmpdir(tmpdir):
old_cwd = os.getcwd()
os.chdir(tmpdir)
yield tmpdir
os.chdir(old_cwd)
def _dummy_train_fn(config):
return 1
def _dummy_train_fn_with_report(config):
train.report({"score": 1})
def _train_fn_sometimes_failing(config):
# Fails if failing is set and marker file exists.
# Hangs if hanging is set and marker file exists.
failing, hanging = config["failing_hanging"]
checkpoint = train.get_checkpoint()
if checkpoint:
checkpoint_dict = load_dict_checkpoint(checkpoint)
state = {"it": checkpoint_dict["it"]}
else:
state = {"it": 0}
for i in range(config.get("num_epochs", 1)):
state["it"] += 1
with create_dict_checkpoint(state) as checkpoint:
train.report(state, checkpoint=checkpoint)
# We fail after reporting num_epochs checkpoints.
if failing and failing.exists():
raise RuntimeError("I am failing")
if hanging and hanging.exists():
time.sleep(60)
state["it"] += 1
with create_dict_checkpoint(state) as checkpoint:
train.report(state, checkpoint=checkpoint)
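# Typical driver pattern for this helper (as the tests below do): write the
# marker file so the first `tuner.fit()` errors, delete the marker, then
# `Tuner.restore(..., resume_errored=True)` so the retried trials resume from
# their latest checkpoints instead of starting over.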
class _FailOnStats(Callback):
"""Fail when at least num_trials exist and num_finished have finished."""
def __init__(self, num_trials: int, num_finished: int = 0, delay: int = 1):
self.num_trials = num_trials
self.num_finished = num_finished
self.delay = delay
self.fail_at = None
def on_step_begin(self, iteration: int, trials: list, **info):
if self.fail_at and iteration >= self.fail_at:
print(
"Actually failing after delay:",
[(t.status, t.last_result.get("it")) for t in trials],
)
raise RuntimeError("Failing")
if len(trials) < self.num_trials:
return
if (
len([t for t in trials if t.status in [Trial.TERMINATED, Trial.ERROR]])
>= self.num_finished
):
self.fail_at = iteration + self.delay
print(
f"Triggering fail in {self.delay} iterations:",
[(t.status, t.last_result.get("it")) for t in trials],
)
else:
print("Not failing:", [(t.status, t.last_result.get("it")) for t in trials])
class MockData:
def __init__(self):
import numpy as np
self.data = np.random.rand((2 * 1024 * 1024))
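        # `self.data` is ~16 MB of float64 values; used by
        # test_restore_with_parameters below to exercise `tune.with_parameters`,
        # which ships large objects through the Ray object store.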
def test_tuner_restore_num_trials(ray_start_2_cpus, tmpdir):
"""Number of trials after restoring a finished run should be the same"""
tuner = Tuner(
_dummy_train_fn,
tune_config=TuneConfig(num_samples=4, metric="_metric", mode="max"),
run_config=RunConfig(
name="test_tuner_restore_num_trials", storage_path=str(tmpdir)
),
)
results = tuner.fit()
assert len(results) == 4
assert results.get_best_result().metrics["_metric"] == 1
del tuner
tuner = Tuner.restore(
str(tmpdir / "test_tuner_restore_num_trials"), trainable=_dummy_train_fn
)
# Check restored results
results = tuner.get_results()
assert len(results) == 4
assert results.get_best_result().metrics["_metric"] == 1
results = tuner.fit()
assert len(results) == 4
assert results.get_best_result().metrics["_metric"] == 1
def test_tuner_restore_resume_errored(ray_start_2_cpus, tmpdir):
"""Resuming errored trials should pick up from previous state"""
fail_marker = tmpdir / "fail_marker"
fail_marker.write_text("", encoding="utf-8")
failing_hanging = [
(None, None),
(fail_marker, None),
(None, None),
(fail_marker, None),
]
tuner = Tuner(
_train_fn_sometimes_failing,
tune_config=TuneConfig(
num_samples=1,
),
run_config=RunConfig(
name="test_tuner_restore_resume_errored", storage_path=str(tmpdir)
),
param_space={
"id": tune.grid_search([0, 1, 2, 3]),
            # Second and fourth trial fail
"failing_hanging": tune.sample_from(
lambda config: failing_hanging[config["id"]]
),
},
)
results = tuner.fit()
assert len(results) == 4
assert len(results.errors) == 2
ordered_results = sorted(results, key=lambda r: r.config["id"])
    # Second and fourth trials are at iter 1 because they failed after the first report
assert [r.metrics["it"] for r in ordered_results] == [2, 1, 2, 1]
del tuner
fail_marker.remove(ignore_errors=True)
tuner = Tuner.restore(
str(tmpdir / "test_tuner_restore_resume_errored"),
trainable=_train_fn_sometimes_failing,
resume_errored=True,
)
# Check restored results
results = tuner.get_results()
assert len(results) == 4
assert len(results.errors) == 2
    # Second and fourth trials are at iter 1 because they failed after the first report
ordered_results = sorted(results, key=lambda r: r.config["id"])
assert [r.metrics["it"] for r in ordered_results] == [2, 1, 2, 1]
# Get new results
results = tuner.fit()
assert len(results) == 4
assert len(results.errors) == 0
ordered_results = sorted(results, key=lambda r: r.config["id"])
# Since the errored trials are being resumed from previous state and then report
# two more times, we should observe 3 here.
assert [r.metrics["it"] for r in ordered_results] == [2, 3, 2, 3]
def test_tuner_restore_restart_errored(ray_start_2_cpus, tmpdir):
"""Restarting errored trials should re-start from scratch"""
fail_marker = tmpdir / "fail_marker"
fail_marker.write_text("", encoding="utf-8")
failing_hanging = [
(None, None),
(fail_marker, None),
(None, None),
(fail_marker, None),
]
tuner = Tuner(
_train_fn_sometimes_failing,
tune_config=TuneConfig(num_samples=1),
run_config=RunConfig(
name="test_tuner_restore_restart_errored",
storage_path=str(tmpdir),
),
param_space={
"id": tune.grid_search([0, 1, 2, 3]),
            # Second and fourth trial fail
"failing_hanging": tune.sample_from(
lambda config: failing_hanging[config["id"]]
),
},
)
results = tuner.fit()
assert len(results) == 4
assert len(results.errors) == 2
ordered_results = sorted(results, key=lambda r: r.config["id"])
assert [r.metrics["it"] for r in ordered_results] == [2, 1, 2, 1]
del tuner
fail_marker.remove(ignore_errors=True)
tuner = Tuner.restore(
str(tmpdir / "test_tuner_restore_restart_errored"),
trainable=_train_fn_sometimes_failing,
restart_errored=True,
)
# Check restored results
results = tuner.get_results()
assert len(results) == 4
assert len(results.errors) == 2
ordered_results = sorted(results, key=lambda r: r.config["id"])
assert [r.metrics["it"] for r in ordered_results] == [2, 1, 2, 1]
# Get new results
results = tuner.fit()
assert len(results) == 4
assert len(results.errors) == 0
ordered_results = sorted(results, key=lambda r: r.config["id"])
# Since the errored trials are being restarted from scratch, they should report 2
assert [r.metrics["it"] for r in ordered_results] == [2, 2, 2, 2]
def test_tuner_resume_unfinished(ray_start_2_cpus, tmpdir, monkeypatch):
"""Resuming unfinished trials should pick up existing state"""
monkeypatch.setenv("TUNE_GLOBAL_CHECKPOINT_S", "0")
# TODO(justinvyu): Setting storage_path to this tempdir causes this test to fail.
# This is because the error raised by the driver callback doesn't let the
# experiment sync happen (from ~/ray_results -> tmpdir). This would also
# be the case for real cloud syncing.
monkeypatch.setenv("RAY_AIR_LOCAL_CACHE_DIR", str(tmpdir))
fail_marker = tmpdir / "fail_marker"
fail_marker.write_text("", encoding="utf-8")
hang_marker = tmpdir / "hang_marker"
hang_marker.write_text("", encoding="utf-8")
tuner = Tuner(
_train_fn_sometimes_failing,
tune_config=TuneConfig(num_samples=1),
run_config=RunConfig(
name="test_tuner_resume_unfinished",
storage_path=str(tmpdir),
failure_config=FailureConfig(fail_fast=False),
callbacks=[_FailOnStats(num_trials=4, num_finished=2, delay=1)],
),
param_space={
# First trial succeeds, second hangs, third fails, fourth hangs
"failing_hanging": tune.grid_search(
[
(None, None),
(None, hang_marker),
(fail_marker, None),
(None, hang_marker),
]
),
},
)
# Catch the FailOnStats error
with pytest.raises(RuntimeError):
tuner.fit()
# After this run we have the following trial states (status, metric):
# [('TERMINATED', 2), ('RUNNING', 1), ('ERROR', 1), ('PENDING', None)]
# Restarting without hanging/failing should lead to the results:
# [2, 3, 1, 2], because:
# the TERMINATED trial is finished (state = 2),
# the RUNNING trial is continued (and picks up from state = 1 for 2 iterations),
# the ERROR trial is not continued (remains at 1 and errored)
    # and the PENDING trial has no state yet.
del tuner
fail_marker.remove(ignore_errors=True)
hang_marker.remove(ignore_errors=True)
tuner = Tuner.restore(
str(tmpdir / "test_tuner_resume_unfinished"),
trainable=_train_fn_sometimes_failing,
)
tuner._local_tuner._run_config.callbacks = None
results = tuner.fit()
assert len(results) == 4
assert len(results.errors) == 1
assert sorted([r.metrics["it"] for r in results]) == sorted([2, 3, 1, 2])
def test_tuner_resume_errored_only(ray_start_2_cpus, tmpdir, monkeypatch):
"""Not resuming unfinished trials (but only errored and pending) should work"""
monkeypatch.setenv("TUNE_GLOBAL_CHECKPOINT_S", "0")
# TODO(justinvyu): Same as above.
monkeypatch.setenv("RAY_AIR_LOCAL_CACHE_DIR", str(tmpdir))
fail_marker = tmpdir / "fail_marker"
fail_marker.write_text("", encoding="utf-8")
hang_marker = tmpdir / "hang_marker"
hang_marker.write_text("", encoding="utf-8")
tuner = Tuner(
_train_fn_sometimes_failing,
tune_config=TuneConfig(num_samples=1),
run_config=RunConfig(
name="test_tuner_resume_errored_only",
failure_config=FailureConfig(fail_fast=False),
callbacks=[_FailOnStats(num_trials=4, num_finished=2, delay=1)],
),
param_space={
# First trial succeeds, second hangs, third fails, fourth hangs.
"failing_hanging": tune.grid_search(
[
(None, None),
(None, hang_marker),
(fail_marker, None),
(None, hang_marker),
]
),
},
)
# Catch the FailOnStats error
with pytest.raises(RuntimeError):
tuner.fit()
# After this run we have the following trial states (status, metric):
# [('TERMINATED', 2), ('RUNNING', 1), ('ERROR', 1), ('PENDING', None)]
# Restarting without continuing existing trials should lead to the results
# [2, 1, 3, 0], because
# the TERMINATED trial is finished (state = 2),
# the RUNNING trial is not continued (marked as terminated),
# the ERROR trial is not continued (remains at 1 and errored)
# and the PENDING trial is not continued (marked as terminated).
del tuner
fail_marker.remove(ignore_errors=True)
hang_marker.remove(ignore_errors=True)
tuner = Tuner.restore(
str(tmpdir / "test_tuner_resume_errored_only"),
trainable=_train_fn_sometimes_failing,
resume_unfinished=False,
resume_errored=True,
)
tuner._local_tuner._run_config.callbacks = None
results = tuner.fit()
assert len(results) == 4
assert len(results.errors) == 0
assert sorted([r.metrics.get("it", 0) for r in results]) == sorted([2, 1, 3, 0])
def _test_tuner_restore_from_cloud(
tmpdir, configure_storage_path, storage_path, monkeypatch
):
"""Check that restoring Tuner() objects from cloud storage works"""
monkeypatch.setenv("RAY_AIR_LOCAL_CACHE_DIR", str(tmpdir / "ray_results"))
tuner = Tuner(
_dummy_train_fn,
run_config=RunConfig(name="exp_dir", storage_path=configure_storage_path),
)
tuner.fit()
check_path = tmpdir / "check_save"
fs, fs_path = get_fs_and_path(storage_path)
_download_from_fs_path(fs=fs, fs_path=fs_path, local_path=str(check_path))
remote_contents = os.listdir(check_path / "exp_dir")
assert "tuner.pkl" in remote_contents
prev_cp = _find_newest_experiment_checkpoint(str(check_path / "exp_dir"))
prev_lstat = os.lstat(prev_cp)
(tmpdir / "ray_results").remove(ignore_errors=True)
tuner2 = Tuner.restore(
str(URI(storage_path) / "exp_dir"), trainable=_dummy_train_fn
)
results = tuner2.fit()
assert results[0].metrics["_metric"] == 1
local_contents = os.listdir(tmpdir / "ray_results" / "exp_dir")
assert "tuner.pkl" in local_contents
after_cp = _find_newest_experiment_checkpoint(
str(tmpdir / "ray_results" / "exp_dir")
)
after_lstat = os.lstat(after_cp)
# Experiment checkpoint was updated
assert os.path.basename(prev_cp) != os.path.basename(after_cp)
# Old experiment checkpoint still exists in dir
assert os.path.basename(prev_cp) in local_contents
# Contents changed
assert prev_lstat.st_size != after_lstat.st_size
# Overwriting should work
tuner3 = Tuner.restore(
str(URI(storage_path) / "exp_dir"), trainable=_dummy_train_fn
)
tuner3.fit()
def test_tuner_restore_from_cloud_manual_path(
ray_start_2_cpus, tmpdir, mock_s3_bucket_uri, monkeypatch
):
_test_tuner_restore_from_cloud(
tmpdir,
configure_storage_path=mock_s3_bucket_uri,
storage_path=mock_s3_bucket_uri,
monkeypatch=monkeypatch,
)
@pytest.mark.skip("Hanging due to some problem with ray storage.")
def test_tuner_restore_from_cloud_ray_storage(
ray_shutdown, tmpdir, mock_s3_bucket_uri, monkeypatch
):
ray.init(num_cpus=2, configure_logging=False, storage=mock_s3_bucket_uri)
_test_tuner_restore_from_cloud(
tmpdir / "local",
configure_storage_path=None,
storage_path=mock_s3_bucket_uri,
monkeypatch=monkeypatch,
)
# TODO(justinvyu): [fallback_to_latest]
@pytest.mark.skip("Fallback to latest checkpoint is not implemented.")
@pytest.mark.parametrize(
"storage_path",
[None, "/tmp/ray_results"],
)
def test_tuner_restore_latest_available_checkpoint(
ray_start_2_cpus, monkeypatch, tmpdir, storage_path
):
"""Resuming errored trials should pick up from previous state"""
@pytest.mark.parametrize("retry_num", [0, 2])
def test_restore_retry(ray_start_2_cpus, tmpdir, monkeypatch, retry_num):
"""Test retrying restore on a trial level by setting `TUNE_RESTORE_RETRY_NUM`."""
class MockTrainable(Trainable):
"""A trainable that can generate one failure during training and
another `config["retry_num_to_fail"]` times during restoring."""
def setup(self, config):
self.idx = 0
self.tag_file_path = config["tag_file_path"]
self.retry_num_to_fail = config.get("retry_num_to_fail", 2)
self._is_restored = False
def step(self):
time.sleep(1)
if self.idx == 0 and self._is_restored:
raise RuntimeError(
"===== Restored trial cannot start from scratch ====="
)
elif self.idx == 2 and not self._is_restored:
raise RuntimeError("===== First run fails at idx=2 =====")
self.idx += 1
return {"score": self.idx}
def save_checkpoint(self, checkpoint_dir):
path = os.path.join(checkpoint_dir, "checkpoint")
with open(path, "w") as f:
f.write(json.dumps({"idx": self.idx}))
def load_checkpoint(self, checkpoint_dir):
self._is_restored = True
with open(self.tag_file_path, "r") as f:
retried_num = json.loads(f.read())["retried_num"]
with open(self.tag_file_path, "w") as f:
f.write(json.dumps({"retried_num": retried_num + 1}))
if retried_num < self.retry_num_to_fail:
raise RuntimeError(f"===== Failing restore #{retried_num + 1} =====")
with open(os.path.join(checkpoint_dir, "checkpoint"), "r") as f:
self.idx = json.loads(f.read())["idx"]
# Set environment variable just for this test
with unittest.mock.patch.dict(
os.environ, {"TUNE_RESTORE_RETRY_NUM": str(retry_num)}
):
tag_file = os.path.join(tmpdir, "tag")
# set up tag file
with open(tag_file, "w") as f:
f.write(json.dumps({"retried_num": 0}))
tuner = Tuner(
MockTrainable,
run_config=RunConfig(
name="tryout_restore",
stop={"training_iteration": 5},
storage_path=str(tmpdir),
failure_config=FailureConfig(max_failures=1),
checkpoint_config=CheckpointConfig(checkpoint_frequency=1),
),
param_space={"tag_file_path": tag_file},
)
results = tuner.fit()
[result] = list(results)
if retry_num > 0:
assert result.metrics["score"] == 5
else:
assert result.metrics["score"] == 2
def test_restore_overwrite_trainable(ray_start_2_cpus, tmpdir):
"""Test validation for trainable compatibility, when re-specifying a trainable
on restore."""
def train_func_1(config):
data = {"data": config["data"]}
with create_dict_checkpoint(data) as checkpoint:
train.report(data, checkpoint=checkpoint)
raise RuntimeError("Failing!")
tuner = Tuner(
train_func_1,
run_config=RunConfig(name="overwrite_trainable", storage_path=str(tmpdir)),
param_space={"data": 1},
)
tuner.fit()
del tuner
# Can't overwrite with a different Trainable type
with pytest.raises(ValueError):
tuner = Tuner.restore(
str(tmpdir / "overwrite_trainable"),
trainable="__fake",
resume_errored=True,
)
# Can't overwrite with a different Trainable name
def train_func_2(config):
raise RuntimeError("Should not run...")
with pytest.raises(ValueError):
tuner = Tuner.restore(
str(tmpdir / "overwrite_trainable"),
trainable=train_func_2,
resume_errored=True,
)
# Can technically change trainable code (not recommended!)
def train_func_1(config):
checkpoint = train.get_checkpoint()
assert checkpoint and load_dict_checkpoint(checkpoint)["data"] == config["data"]
tuner = Tuner.restore(
str(tmpdir / "overwrite_trainable"),
trainable=train_func_1,
resume_errored=True,
)
results = tuner.fit()
assert not results.errors
@pytest.mark.parametrize("use_function_trainable", [True, False])
def test_restore_with_parameters(ray_start_2_cpus, tmp_path, use_function_trainable):
"""Tests Tuner restoration for a `tune.with_parameters` wrapped trainable."""
if not use_function_trainable:
# TODO(justinvyu): [class_trainable]
pytest.skip("Class trainable not supported yet if FF is enabled.")
def train_func(config, data_str=None, data_obj=None):
assert data_str is not None and data_obj is not None
fail_marker = config.pop("fail_marker", None)
config["failing_hanging"] = (fail_marker, None)
_train_fn_sometimes_failing(config)
class FailingTrainable(Trainable):
def setup(self, config, data_str=None, data_obj=None):
assert data_str is not None and data_obj is not None
self.idx = 0
self.fail_marker = config.get("fail_marker", None)
def step(self):
if self.fail_marker and self.fail_marker.exists():
raise RuntimeError("==== Run is failing ====")
self.idx += 1
return {"score": self.idx}
def save_checkpoint(self, checkpoint_dir):
return {"idx": self.idx}
def load_checkpoint(self, checkpoint_dict):
self.idx = checkpoint_dict["idx"]
trainable = train_func if use_function_trainable else FailingTrainable
def create_trainable_with_params():
data = MockData()
trainable_with_params = tune.with_parameters(
trainable, data_str="data", data_obj=data
)
return trainable_with_params
exp_name = "restore_with_params"
fail_marker = tmp_path / "fail_marker"
fail_marker.write_text("", encoding="utf-8")
tuner = Tuner(
create_trainable_with_params(),
run_config=RunConfig(
name=exp_name,
storage_path=str(tmp_path),
stop={"training_iteration": 3},
failure_config=FailureConfig(max_failures=0),
checkpoint_config=CheckpointConfig(
checkpoint_frequency=0 if use_function_trainable else 1
),
),
param_space={"fail_marker": fail_marker},
)
results = tuner.fit()
assert results.errors
fail_marker.unlink()
tuner = Tuner.restore(
str(tmp_path / exp_name),
trainable=create_trainable_with_params(),
resume_errored=True,
)
results = tuner.fit()
assert not results.errors
# TODO(justinvyu): [handle_moved_storage_path]
@pytest.mark.skip("Restoring from a moved storage path is not supported yet.")
@pytest.mark.parametrize("use_tune_run", [True, False])
def test_tuner_restore_from_moved_experiment_path(
ray_start_2_cpus, tmp_path, use_tune_run
):
"""Check that restoring a Tuner from a moved experiment directory works."""
# Create a fail_marker dummy file that causes the first Tune run to fail and
# the second run to succeed
fail_marker = tmp_path / "fail_marker"
fail_marker.write_text("", encoding="utf-8")
old_local_dir = tmp_path / "ray_results"
old_exp_name = "exp_dir"
new_local_dir = tmp_path / "new_ray_results"
new_exp_name = "new_exp_dir"
# Initial training run (that errors out in the middle)
num_to_keep = 2
tuner = Tuner(
_train_fn_sometimes_failing,
tune_config=TuneConfig(
num_samples=1,
),
run_config=RunConfig(
name=old_exp_name,
storage_path=str(old_local_dir),
checkpoint_config=CheckpointConfig(num_to_keep=num_to_keep),
),
param_space={
"failing_hanging": (fail_marker, None),
},
)
results = tuner.fit()
assert len(results.errors) == 1
training_iteration = results[0].metrics["training_iteration"]
assert (
training_iteration == 1
), f"Should only have 1 train.report before erroring, got {training_iteration}"
# Move experiment from `tmp_path/ray_results/exp_dir`
# to `tmp_path/moved_ray_results/new_exp_dir`, changing both `local_dir` and
# the experiment `name`
shutil.move(str(old_local_dir), str(new_local_dir))
os.rename(str(new_local_dir / old_exp_name), str(new_local_dir / new_exp_name))
del tuner
# Remove fail_marker so that the restored Tuner doesn't error again
fail_marker.unlink()
# Restore from moved experiment directory location, and launch resumed training
if use_tune_run:
analysis = tune.run(
_train_fn_sometimes_failing,
name=new_exp_name,
storage_path=str(new_local_dir),
resume="AUTO+ERRORED",
)
results = ResultGrid(analysis)
else:
restore_path = str(new_local_dir / new_exp_name)
tuner = Tuner.restore(
restore_path, trainable=_train_fn_sometimes_failing, resume_errored=True
)
results = tuner.fit()
assert len(results.errors) == 0
# Check that we restored iter=1, then made 2 calls to train.report -> iter=3
training_iteration = results[0].metrics["training_iteration"]
assert training_iteration == 3, training_iteration
# Make sure that checkpoints are loaded properly
assert results[0].checkpoint
assert len(results[0].best_checkpoints) == num_to_keep
checkpoint_dirs = [
path
for path in os.listdir(results[0].log_dir)
if path.startswith("checkpoint_")
]
assert sorted(checkpoint_dirs) == ["checkpoint_000001", "checkpoint_000002"]
# Make sure that we did not create a logdir in the old location
assert not old_local_dir.exists()
# TODO(justinvyu): [handle_moved_storage_path]
@pytest.mark.skip("Restoring from a moved storage path is not supported yet.")
def test_tuner_restore_from_moved_cloud_uri(ray_start_2_cpus, tmp_path):
"""Test that restoring an experiment that was moved to a new remote URI
resumes and continues saving new results at that URI."""
def test_custom_searcher_and_scheduler_restore(ray_start_2_cpus, tmpdir):
"""Check that a restored Tune experiment uses the original searcher/scheduler."""
fail_marker = tmpdir / "fail_marker"
fail_marker.write_text("", encoding="utf-8")
class MockSearcher(OptunaSearch):
def on_trial_result(self, trial_id: str, result: dict):
super().on_trial_result(trial_id, result)
if not hasattr(self, "_test_result_counter"):
self._test_result_counter = 0
self._test_result_counter += 1
class MockScheduler(ASHAScheduler):
def on_trial_result(self, runner, trial, result):
decision = super().on_trial_result(runner, trial, result)
if not hasattr(self, "_test_result_counter"):
self._test_result_counter = 0
self._test_result_counter += 1
return decision
tuner = Tuner(
_train_fn_sometimes_failing,
run_config=RunConfig(storage_path=str(tmpdir), name="exp_name"),
tune_config=TuneConfig(
search_alg=MockSearcher(),
scheduler=MockScheduler(),
metric="it",
mode="max",
),
param_space={"a": tune.uniform(0, 1), "failing_hanging": (fail_marker, None)},
)
tuner.fit()
del tuner
fail_marker.remove(ignore_errors=True)
tuner = Tuner.restore(
str(tmpdir / "exp_name"),
trainable=_train_fn_sometimes_failing,
resume_errored=True,
)
tuner.fit()
searcher = tuner._local_tuner._tune_config.search_alg
scheduler = tuner._local_tuner._tune_config.scheduler
assert isinstance(searcher, MockSearcher)
assert isinstance(scheduler, MockScheduler)
# Searcher state should get loaded correctly
# Total of 3 reported results (1 from before failure, 2 after restore)
assert searcher._test_result_counter == 3
# Make sure that the restored scheduler is at least used
assert (
hasattr(scheduler, "_test_result_counter")
and scheduler._test_result_counter > 0
)
@pytest.mark.parametrize("use_air_trainer", [True, False])
def test_checkpoints_saved_after_resume(ray_start_2_cpus, tmp_path, use_air_trainer):
"""Checkpoints saved after experiment restore should pick up at the correct
iteration and should not overwrite the checkpoints from the original run.
Old checkpoints should still be deleted if the total number of checkpoints
(old + new) exceeds `num_to_keep`.
In this test, `num_to_keep=4`:
- Initial run saves checkpoint_000000 and checkpoint_000001
- Restored run saves checkpoint_000002, checkpoint_000003, and checkpoint_000004
- Checkpoint 000000 should be deleted.
"""
def get_checkpoints(experiment_dir):
checkpoint_dirs = [
path
for path in os.listdir(experiment_dir)
if path.startswith("checkpoint_")
]
sorted_checkpoint_dirs = sorted(checkpoint_dirs)
checkpoints = [
Checkpoint.from_directory(os.path.join(experiment_dir, d))
for d in sorted_checkpoint_dirs
]
return sorted_checkpoint_dirs, checkpoints
fail_marker = tmp_path / "fail_marker"
fail_marker.write_text("", encoding="utf-8")
trainable = (
DataParallelTrainer(
_train_fn_sometimes_failing, scaling_config=ScalingConfig(num_workers=1)
)
if use_air_trainer
else _train_fn_sometimes_failing
)
param_space = {
"failing_hanging": (fail_marker, None),
"num_epochs": 2,
}
if use_air_trainer:
param_space = {"train_loop_config": param_space}
num_to_keep = 4
tuner = Tuner(
trainable,
tune_config=TuneConfig(num_samples=1),
run_config=RunConfig(
name="exp_name",
storage_path=str(tmp_path),
checkpoint_config=CheckpointConfig(num_to_keep=num_to_keep),
),
param_space=param_space,
)
results = tuner.fit()
training_iteration = results[0].metrics["training_iteration"]
assert (
training_iteration == 2
), f"Should be at 2 iters before erroring, got {training_iteration}"
# Initial run saves the first 2 checkpoints
checkpoint_dirs, checkpoints = get_checkpoints(results[0].path)
assert checkpoint_dirs == ["checkpoint_000000", "checkpoint_000001"]
assert [load_dict_checkpoint(ckpt)["it"] for ckpt in checkpoints] == [1, 2]
fail_marker.unlink()
tuner = Tuner.restore(
str(tmp_path / "exp_name"), trainable=trainable, resume_errored=True
)
results = tuner.fit()
assert len(results.errors) == 0
training_iteration = results[0].metrics["training_iteration"]
# Restored at it=2, reported 3 more times -> should have it=5
assert training_iteration == 5
# Restored run saves the 3 more checkpoints, and first checkpoint should be deleted
checkpoint_dirs, checkpoints = get_checkpoints(results[0].path)
assert checkpoint_dirs == [f"checkpoint_00000{i}" for i in range(1, 5)]
assert [load_dict_checkpoint(ckpt)["it"] for ckpt in checkpoints] == [2, 3, 4, 5]
def test_tuner_can_restore(tmp_path, monkeypatch):
"""Make sure that `can_restore` detects an existing experiment at a
path and only returns True if it's at the experiment dir root.
"""
monkeypatch.setenv("RAY_AIR_LOCAL_CACHE_DIR", str(tmp_path))
name = "exp_name"
Tuner(lambda _: print("dummy"), run_config=RunConfig(name=name))
fs, fs_path = get_fs_and_path("mock:///bucket/exp_name")
_upload_to_fs_path(local_path=str(tmp_path / name), fs=fs, fs_path=fs_path)
assert Tuner.can_restore(tmp_path / name)
assert not Tuner.can_restore(tmp_path)
assert not Tuner.can_restore(tmp_path / name / "other")
assert Tuner.can_restore("/bucket/exp_name", storage_filesystem=fs)
assert not Tuner.can_restore("/bucket", storage_filesystem=fs)
assert not Tuner.can_restore("/bucket/exp_name/other", storage_filesystem=fs)
def testParamSpaceOverwriteValidation(ray_start_4_cpus, tmp_path):
"""Check that validation on restore fails if we try adding or removing
hyperparameters to the param_space."""
name = "test_param_space_valid"
param_space = {"a": 1, "b": {"c": tune.choice([0, 1])}, "d": tune.uniform(0, 1)}
tuner = Tuner(
lambda _: print("dummy"),
param_space=param_space,
run_config=RunConfig(storage_path=str(tmp_path), name=name),
)
tuner.fit()
bad_param_spaces = [
{},
{"a": 1, "b": {}, "d": 2},
{"a": 1, "b": {"c": 2, "e": 3}, "d": 4},
]
for bad_param_space in bad_param_spaces:
with pytest.raises(ValueError):
Tuner.restore(
str(tmp_path / name),
lambda _: print("dummy"),
param_space=bad_param_space,
)
# Should work with the original param space
Tuner.restore(
str(tmp_path / name),
trainable=lambda _: print("dummy"),
param_space=param_space,
)
def testParamSpaceOverwrite(ray_start_4_cpus, tmp_path, monkeypatch):
"""Test that overwriting param space on restore propagates new refs to existing
trials and newly generated trials."""
# Limit the number of generated trial configs -- so restore tests
# newly generated trials.
monkeypatch.setenv("TUNE_MAX_PENDING_TRIALS_PG", "1")
class FakeDataset:
def __init__(self, name):
self.name = name
def __repr__(self):
return f"<FakeDataset {self.name}>"
def train_fn(config):
raise RuntimeError("Failing!")
param_space = {
"test": tune.grid_search(
[FakeDataset("1"), FakeDataset("2"), FakeDataset("3")]
),
"test2": tune.grid_search(
[
FakeDataset("4"),
FakeDataset("5"),
FakeDataset("6"),
FakeDataset("7"),
]
),
}
tuner = Tuner(
train_fn,
param_space=param_space,
tune_config=TuneConfig(num_samples=1),
run_config=RunConfig(
storage_path=str(tmp_path),
name="param_space_overwrite",
callbacks=[_FailOnStats(num_trials=4, num_finished=2)],
),
)
with pytest.raises(RuntimeError):
tuner.fit()
# Just suppress the error this time with a new trainable
def train_fn(config):
pass
param_space = {
"test": tune.grid_search(
[FakeDataset("8"), FakeDataset("9"), FakeDataset("10")]
),
"test2": tune.grid_search(
[
FakeDataset("11"),
FakeDataset("12"),
FakeDataset("13"),
FakeDataset("14"),
]
),
}
tuner = Tuner.restore(
str(tmp_path / "param_space_overwrite"),
trainable=train_fn,
param_space=param_space,
resume_errored=True,
)
tuner._local_tuner._run_config.callbacks = None
result_grid = tuner.fit()
assert not result_grid.errors
assert len(result_grid) == 12
for r in result_grid:
# Make sure that test and test2 are updated.
assert r.config["test"].name in ["8", "9", "10"]
assert r.config["test2"].name in ["11", "12", "13", "14"]
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
118e902b08fbd909c7212add7f62966f756429c8
|
e8ba4105967ade74b55e7943586eb533a16f908f
|
/plex_mpv_shim/player.py
|
3a45c82a8c18b26462b562c479d276f6c37cbd4e
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
iwalton3/plex-mpv-shim
|
54d97470688bf0fdf49873ce670990c889f22b30
|
a1545861bc3bd9ced71620ff0529202898f0c3cd
|
refs/heads/master
| 2023-06-11T08:21:34.513868
| 2023-06-01T00:38:46
| 2023-06-01T00:38:46
| 202,855,346
| 330
| 30
|
MIT
| 2023-08-19T21:23:11
| 2019-08-17T08:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 23,030
|
py
|
player.py
|
import logging
import os
import sys
import requests
import urllib.parse
from threading import RLock, Lock
from queue import Queue
from collections import OrderedDict
from . import conffile
from .utils import synchronous, Timer, get_resource
from .conf import settings
from .menu import OSDMenu
from .media import MediaType
log = logging.getLogger('player')
mpv_log = logging.getLogger('mpv')
python_mpv_available=True
is_using_ext_mpv=False
if not settings.mpv_ext:
try:
import mpv
log.info("Using libmpv1 playback backend.")
except OSError:
log.warning("Could not find libmpv1.")
python_mpv_available=False
if settings.mpv_ext or not python_mpv_available:
import python_mpv_jsonipc as mpv
log.info("Using external mpv playback backend.")
is_using_ext_mpv=True
APP_NAME = 'plex-mpv-shim'
SUBTITLE_POS = {
"top": 0,
"bottom": 100,
"middle": 80,
}
mpv_log_levels = {
"fatal": mpv_log.error,
"error": mpv_log.error,
"warn": mpv_log.warning,
"info": mpv_log.info
}
def mpv_log_handler(level, prefix, text):
if level in mpv_log_levels:
mpv_log_levels[level]("{0}: {1}".format(prefix, text))
else:
mpv_log.debug("{0}: {1}".format(prefix, text))
win_utils = None
if sys.platform.startswith("win32") or sys.platform.startswith("cygwin"):
try:
from . import win_utils
except ModuleNotFoundError:
log.warning("win_utils is not available.")
# Q: What is with the put_task call?
# A: Some calls into python-mpv trigger event processing. put_task defers
#    work originating from the event thread, which would deadlock if it
#    ran there directly.
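# For example, a key handler running on mpv's event thread must not call
# play_next() directly; it hands the call off instead (the same pattern the
# key handlers below use):
#
#     self.put_task(self.play_next)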
class PlayerManager(object):
"""
Manages the relationship between a ``Player`` instance and a ``Media``
item. This is designed to be used as a singleton via the ``playerManager``
instance in this module. All communication between a caller and either the
current ``player`` or ``media`` instance should be done through this class
for thread safety reasons as all methods that access the ``player`` or
``media`` are thread safe.
"""
def __init__(self):
mpv_options = OrderedDict()
self._media_item = None
self._lock = RLock()
self._finished_lock = Lock()
self.last_update = Timer()
self.__part = 1
self.timeline_trigger = None
self.action_trigger = None
self.external_subtitles = {}
self.external_subtitles_rev = {}
self.url = None
self.evt_queue = Queue()
self.is_in_intro = False
self.is_in_credits = False
self.intro_has_triggered = False
self.credits_has_triggered = False
if is_using_ext_mpv:
mpv_options.update(
{
"start_mpv": settings.mpv_ext_start,
"ipc_socket": settings.mpv_ext_ipc,
"mpv_location": settings.mpv_ext_path,
"player-operation-mode": "cplayer"
}
)
if settings.menu_mouse:
if is_using_ext_mpv:
mpv_options["script"] = get_resource("mouse.lua")
else:
mpv_options["scripts"] = get_resource("mouse.lua")
if not (settings.mpv_ext and settings.mpv_ext_no_ovr):
mpv_options["include"] = conffile.get(APP_NAME, "mpv.conf", True)
mpv_options["input_conf"] = conffile.get(APP_NAME, "input.conf", True)
self._player = mpv.MPV(input_default_bindings=True, input_vo_keyboard=True,
input_media_keys=True, log_handler=mpv_log_handler,
loglevel=settings.mpv_log_level, **mpv_options)
self.menu = OSDMenu(self)
if hasattr(self._player, 'osc'):
self._player.osc = settings.enable_osc
else:
log.warning("This mpv version doesn't support on-screen controller.")
# Wrapper for on_key_press that ignores None.
def keypress(key):
def wrapper(func):
if key is not None:
self._player.on_key_press(key)(func)
return func
return wrapper
@self._player.on_key_press('CLOSE_WIN')
@self._player.on_key_press('STOP')
@keypress(settings.kb_stop)
def handle_stop():
self.stop()
self.timeline_handle()
@keypress(settings.kb_prev)
def handle_prev():
self.put_task(self.play_prev)
@keypress(settings.kb_next)
def handle_next():
self.put_task(self.play_next)
@self._player.on_key_press('PREV')
@self._player.on_key_press('XF86_PREV')
def handle_media_prev():
if settings.media_key_seek:
self._player.command("seek", -15)
else:
self.put_task(self.play_prev)
@self._player.on_key_press('NEXT')
@self._player.on_key_press('XF86_NEXT')
def handle_media_next():
if settings.media_key_seek:
if self.is_in_intro:
self.skip_intro()
elif self.is_in_credits:
self.skip_credits()
else:
self._player.command("seek", 30)
else:
self.put_task(self.play_next)
@keypress(settings.kb_watched)
def handle_watched():
self.put_task(self.watched_skip)
@keypress(settings.kb_unwatched)
def handle_unwatched():
self.put_task(self.unwatched_quit)
@keypress(settings.kb_menu)
def menu_open():
if not self.menu.is_menu_shown:
self.menu.show_menu()
else:
self.menu.hide_menu()
@keypress(settings.kb_menu_esc)
def menu_back():
if self.menu.is_menu_shown:
self.menu.menu_action('back')
else:
self._player.command('set', 'fullscreen', 'no')
@keypress(settings.kb_menu_ok)
def menu_ok():
self.menu.menu_action('ok')
@keypress(settings.kb_menu_left)
def menu_left():
if self.menu.is_menu_shown:
self.menu.menu_action('left')
else:
self._player.command("seek", settings.seek_left)
@keypress(settings.kb_menu_right)
def menu_right():
if self.menu.is_menu_shown:
self.menu.menu_action('right')
else:
if self.is_in_intro:
self.skip_intro()
elif self.is_in_credits:
self.skip_credits()
else:
self._player.command("seek", settings.seek_right)
@keypress(settings.kb_menu_up)
def menu_up():
if self.menu.is_menu_shown:
self.menu.menu_action('up')
else:
if self.is_in_intro:
self.skip_intro()
elif self.is_in_credits:
self.skip_credits()
else:
self._player.command("seek", settings.seek_up)
@keypress(settings.kb_menu_down)
def menu_down():
if self.menu.is_menu_shown:
self.menu.menu_action('down')
else:
self._player.command("seek", settings.seek_down)
@keypress(settings.kb_pause)
def handle_pause():
if self.menu.is_menu_shown:
self.menu.menu_action('ok')
else:
self.toggle_pause()
# This gives you an interactive python debugger prompt.
@keypress(settings.kb_debug)
def handle_debug():
import pdb
pdb.set_trace()
# Fires between episodes.
@self._player.property_observer('eof-reached')
def handle_end(_name, reached_end):
if self._media_item and reached_end:
has_lock = self._finished_lock.acquire(False)
self.put_task(self.finished_callback, has_lock)
# Fires at the end.
@self._player.property_observer("playback-abort")
def handle_end_idle(_name, value):
if self._media_item and value:
has_lock = self._finished_lock.acquire(False)
self.put_task(self.finished_callback, has_lock)
@self._player.event_callback('client-message')
def handle_client_message(event):
try:
# Python-MPV 1.0 uses a class/struct combination now
if hasattr(event, "as_dict"):
event = event.as_dict()
if 'event' in event:
event['event'] = event['event'].decode('utf-8')
if 'args' in event:
event['args'] = [d.decode('utf-8') for d in event['args']]
if "event_id" in event:
args = event["event"]["args"]
else:
args = event["args"]
if len(args) == 0:
return
if args[0] == "shim-menu-select":
# Apparently this can happen...
if args[1] == "inf":
return
self.menu.mouse_select(int(args[1]))
elif args[0] == "shim-menu-click":
self.menu.menu_action("ok")
except Exception:
log.warning("Error when processing client-message.", exc_info=True)
    # Put a task on the event queue.
    # This ensures the task executes outside of an event
    # handler, where running it directly would cause a crash.
def put_task(self, func, *args):
self.evt_queue.put([func, args])
if self.action_trigger:
self.action_trigger.set()
# Trigger the timeline to update all
# clients immediately.
def timeline_handle(self):
if self.timeline_trigger:
self.timeline_trigger.set()
def skip_marker(self, end_point):
if self._media_item.media_type == MediaType.VIDEO:
self._player.playback_time = end_point
self.timeline_handle()
return True
return False
def skip_intro(self):
end_point = self._media_item.intro_end
if self.skip_marker(end_point):
self.is_in_intro = False
def skip_credits(self):
end_point = self._media_item.credits_end
if self.skip_marker(end_point):
self.is_in_credits = False
def check_intro_or_credits(self):
if ((settings.skip_intro_always or settings.skip_intro_prompt)
and self._media_item is not None and self._media_item.media_type == MediaType.VIDEO and self._media_item.intro_start is not None
and self._player.playback_time is not None
and self._player.playback_time > self._media_item.intro_start
and self._player.playback_time < self._media_item.intro_end):
if not self.is_in_intro:
if settings.skip_intro_always and not self.intro_has_triggered:
self.intro_has_triggered = True
self.skip_intro()
self._player.show_text("Skipped Intro", 3000, 1)
elif settings.skip_intro_prompt:
self._player.show_text("Seek to Skip Intro", 3000, 1)
self.is_in_intro = True
else:
self.is_in_intro = False
# TODO de-duplicate this code in some way - it's ugly
if ((settings.skip_credits_always or settings.skip_credits_prompt)
and self._media_item is not None and self._media_item.media_type == MediaType.VIDEO and self._media_item.credits_start is not None
and self._player.playback_time is not None
and self._player.playback_time > self._media_item.credits_start
and self._player.playback_time < self._media_item.credits_end):
if not self.is_in_credits:
if settings.skip_credits_always and not self.credits_has_triggered:
self.credits_has_triggered = True
self.skip_credits()
self._player.show_text("Skipped Credits", 3000, 1)
elif settings.skip_credits_prompt:
self._player.show_text("Seek to Skip Credits", 3000, 1)
self.is_in_credits = True
else:
self.is_in_credits = False
@synchronous('_lock')
def update(self):
self.check_intro_or_credits()
while not self.evt_queue.empty():
func, args = self.evt_queue.get()
func(*args)
if self._media_item and not self._player.playback_abort:
if not self.is_paused():
self.last_update.restart()
def play(self, media_item, offset=0):
url = media_item.get_playback_url()
if not url:
log.error("PlayerManager::play no URL found")
return
self._play_media(media_item, url, offset)
@synchronous('_lock')
def _play_media(self, media_item, url, offset=0):
self.url = url
self.menu.hide_menu()
if settings.log_decisions:
log.debug("Playing: {0}".format(url))
self._player.play(self.url)
self._player.wait_for_property("duration")
if settings.fullscreen:
self._player.fs = True
self._player.force_media_title = media_item.get_proper_title()
self._media_item = media_item
self.is_in_intro = False
self.is_in_credits = False
self.intro_has_triggered = False
self.credits_has_triggered = False
self.update_subtitle_visuals(False)
self.upd_player_hide()
self.external_subtitles = {}
self.external_subtitles_rev = {}
if win_utils:
win_utils.raise_mpv()
if offset > 0:
self._player.playback_time = offset
if media_item.media_type == MediaType.VIDEO and not media_item.is_transcode:
audio_idx = media_item.get_audio_idx()
if audio_idx is not None:
log.debug("PlayerManager::play selecting audio stream index=%s" % audio_idx)
self._player.audio = audio_idx
sub_idx = media_item.get_subtitle_idx()
xsub_id = media_item.get_external_sub_id()
if sub_idx is not None:
log.debug("PlayerManager::play selecting subtitle index=%s" % sub_idx)
self._player.sub = sub_idx
elif xsub_id is not None:
log.debug("PlayerManager::play selecting external subtitle id=%s" % xsub_id)
self.load_external_sub(xsub_id)
else:
self._player.sub = 'no'
self._player.pause = False
self.timeline_handle()
if self._finished_lock.locked():
self._finished_lock.release()
def exec_stop_cmd(self):
if settings.stop_cmd:
os.system(settings.stop_cmd)
@synchronous('_lock')
def stop(self, playend=False):
if not playend and (not self._media_item or self._player.playback_abort):
self.exec_stop_cmd()
return
if not playend:
log.debug("PlayerManager::stop stopping playback of %s" % self._media_item)
if self._media_item.media_type == MediaType.VIDEO:
self._media_item.terminate_transcode()
self._media_item = None
self._player.command("stop")
self._player.pause = False
self.timeline_handle()
if not playend:
self.exec_stop_cmd()
@synchronous('_lock')
def get_volume(self, percent=False):
if self._player:
if not percent:
return self._player.volume / 100
return self._player.volume
@synchronous('_lock')
def toggle_pause(self):
if not self._player.playback_abort:
self._player.pause = not self._player.pause
self.timeline_handle()
@synchronous('_lock')
def seek(self, offset):
"""
Seek to ``offset`` seconds
"""
if not self._player.playback_abort:
if self.is_in_intro and offset > self._player.playback_time:
self.skip_intro()
elif self.is_in_credits and offset > self._player.playback_time:
self.skip_credits()
else:
self._player.playback_time = offset
self.timeline_handle()
@synchronous('_lock')
def set_volume(self, pct):
if not self._player.playback_abort:
self._player.volume = pct
self.timeline_handle()
@synchronous('_lock')
def get_state(self):
if self._player.playback_abort:
return "stopped"
if self._player.pause:
return "paused"
return "playing"
@synchronous('_lock')
def is_paused(self):
if not self._player.playback_abort:
return self._player.pause
return False
@synchronous('_lock')
def finished_callback(self, has_lock):
if not self._media_item:
return
self._media_item.set_played()
if self._media_item.is_multipart():
if has_lock:
log.debug("PlayerManager::finished_callback media is multi-part, checking for next part")
# Try to select the next part
next_part = self.__part+1
if self._media_item.select_part(next_part):
self.__part = next_part
log.debug("PlayerManager::finished_callback starting next part")
self.play(self._media_item)
else:
log.debug("PlayerManager::finished_callback No lock, skipping...")
elif self._media_item.parent.has_next and settings.auto_play:
if has_lock:
log.debug("PlayerManager::finished_callback starting next episode")
self.play(self._media_item.parent.get_next().get_media_item(0))
else:
log.debug("PlayerManager::finished_callback No lock, skipping...")
else:
if settings.media_ended_cmd:
os.system(settings.media_ended_cmd)
log.debug("PlayerManager::finished_callback reached end")
self.stop(playend=True)
@synchronous('_lock')
def watched_skip(self):
if not self._media_item:
return
self._media_item.set_played()
self.play_next()
@synchronous('_lock')
def unwatched_quit(self):
if not self._media_item:
return
self._media_item.set_played(False)
self.stop()
@synchronous('_lock')
def play_next(self):
if self._media_item.parent.has_next:
self.play(self._media_item.parent.get_next().get_media_item(0))
return True
return False
@synchronous('_lock')
def skip_to(self, key):
media = self._media_item.parent.get_from_key(key)
if media:
self.play(media.get_media_item(0))
return True
return False
@synchronous('_lock')
def play_prev(self):
if self._media_item.parent.has_prev:
self.play(self._media_item.parent.get_prev().get_media_item(0))
return True
return False
@synchronous('_lock')
def restart_playback(self):
current_time = self._player.playback_time
self.play(self._media_item, current_time)
return True
@synchronous('_lock')
def get_media_item_attr(self, attr, default=None):
if self._media_item:
return self._media_item.get_media_item_attr(attr, default)
return default
@synchronous('_lock')
def set_streams(self, audio_uid, sub_uid):
if not self._media_item.is_transcode:
if audio_uid is not None:
log.debug("PlayerManager::play selecting audio stream index=%s" % audio_uid)
self._player.audio = self._media_item.audio_seq[audio_uid]
if sub_uid == '0':
log.debug("PlayerManager::play selecting subtitle stream (none)")
self._player.sub = 'no'
elif sub_uid is not None:
log.debug("PlayerManager::play selecting subtitle stream index=%s" % sub_uid)
if sub_uid in self._media_item.subtitle_seq:
self._player.sub = self._media_item.subtitle_seq[sub_uid]
else:
log.debug("PlayerManager::play selecting external subtitle id=%s" % sub_uid)
self.load_external_sub(sub_uid)
self._media_item.set_streams(audio_uid, sub_uid)
if self._media_item.is_transcode:
self.restart_playback()
self.timeline_handle()
@synchronous('_lock')
def load_external_sub(self, sub_id):
if sub_id in self.external_subtitles:
self._player.sub = self.external_subtitles[sub_id]
else:
try:
sub_url = self._media_item.get_external_sub(sub_id)
if settings.log_decisions:
log.debug("Load External Subtitle: {0}".format(sub_url))
self._player.sub_add(sub_url)
self.external_subtitles[sub_id] = self._player.sub
self.external_subtitles_rev[self._player.sub] = sub_id
except SystemError:
log.debug("PlayerManager::could not load external subtitle")
def get_track_ids(self):
if self._media_item.is_transcode:
return self._media_item.get_transcode_streams()
else:
aid, sid = None, None
if self._player.sub and self._player.sub != 'no':
if self._player.sub in self.external_subtitles_rev:
sid = self.external_subtitles_rev.get(self._player.sub, '')
else:
sid = self._media_item.subtitle_uid.get(self._player.sub, '')
if self._player.audio != 'no':
aid = self._media_item.audio_uid.get(self._player.audio, '')
return aid, sid
def update_subtitle_visuals(self, restart_transcode=True):
if self._media_item.is_transcode:
if restart_transcode:
self.restart_playback()
else:
self._player.sub_pos = SUBTITLE_POS[settings.subtitle_position]
self._player.sub_scale = settings.subtitle_size / 100
self._player.sub_color = settings.subtitle_color
self.timeline_handle()
def upd_player_hide(self):
self._player.keep_open = self._media_item.parent.has_next
def terminate(self):
self.stop()
if is_using_ext_mpv:
self._player.terminate()
playerManager = PlayerManager()
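# Illustrative usage (commented out; `media_item` stands in for a .media
# Media object supplied by the caller):
#
#     playerManager.play(media_item, offset=0)         # thread-safe via _lock
#     playerManager.toggle_pause()
#     playerManager.put_task(playerManager.play_next)  # safe from event thread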
|
3089e6022ded46647dba60973f6ec9db9e3fc7f6
|
38fff7bdefd8d62a740d51329b50d0e1e49258bb
|
/infra/build/functions/project_sync.py
|
c689aa7867aee2ab6db97b655df1dfce05c369bd
|
[
"Apache-2.0"
] |
permissive
|
google/oss-fuzz
|
026384c2ada61ef68b147548e830f60730c5e738
|
f0275421f84b8f80ee767fb9230134ac97cb687b
|
refs/heads/master
| 2023-08-31T23:30:28.157702
| 2023-08-31T21:49:30
| 2023-08-31T21:49:30
| 63,809,205
| 9,438
| 2,315
|
Apache-2.0
| 2023-09-14T20:32:19
| 2016-07-20T19:39:50
|
Shell
|
UTF-8
|
Python
| false
| false
| 9,647
|
py
|
project_sync.py
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Cloud functions for build scheduling."""
from collections import namedtuple
import io
import logging
import os
import re
import tempfile
import urllib.request
import zipfile
from google.api_core import exceptions
from google.cloud import ndb
from google.cloud import scheduler_v1
import yaml
import build_and_run_coverage
import build_project
from datastore_entities import Project
VALID_PROJECT_NAME = re.compile(r'^[a-zA-Z0-9_-]+$')
DEFAULT_BUILDS_PER_DAY = 1
MAX_BUILDS_PER_DAY = 4
COVERAGE_SCHEDULE = '0 6 * * *'
INTROSPECTOR_SCHEDULE = '0 10 * * *'
FUZZING_BUILD_TOPIC = 'request-build'
COVERAGE_BUILD_TOPIC = 'request-coverage-build'
INTROSPECTOR_BUILD_TOPIC = 'request-introspector-build'
ProjectMetadata = namedtuple(
'ProjectMetadata', 'schedule project_yaml_contents dockerfile_contents')
Content = namedtuple('Content', 'type path name decoded_content')
logging.basicConfig(level=logging.INFO)
# pylint: disable=too-few-public-methods
class OssFuzzRepo:
"""OSS-Fuzz repo."""
_MASTER_ZIP_LINK = (
'https://github.com/google/oss-fuzz/archive/refs/heads/master.zip')
def __init__(self, out_dir):
with urllib.request.urlopen(self._MASTER_ZIP_LINK) as response:
zip_contents = response.read()
with zipfile.ZipFile(io.BytesIO(zip_contents)) as zip_file:
zip_file.extractall(out_dir)
self._out_dir = out_dir
@property
def _repo_dir(self):
return os.path.join(self._out_dir, 'oss-fuzz-master')
def get_contents(self, path):
"""Gets contents of path."""
contents = []
list_path = os.path.join(self._repo_dir, path)
for item in os.listdir(list_path):
full_path = os.path.join(list_path, item)
rel_path = os.path.relpath(full_path, self._repo_dir)
if os.path.isdir(full_path):
file_type = 'dir'
decoded_content = None
else:
file_type = 'file'
with open(full_path, mode='rb') as file:
decoded_content = file.read()
contents.append(
Content(file_type, rel_path, os.path.basename(rel_path),
decoded_content))
return contents
class ProjectYamlError(Exception):
"""Error in project.yaml format."""
def create_scheduler(cloud_scheduler_client, project_name, schedule, tag,
topic):
"""Creates schedulers for new projects."""
project_id = os.environ.get('GCP_PROJECT')
location_id = os.environ.get('FUNCTION_REGION')
parent = cloud_scheduler_client.location_path(project_id, location_id)
job = {
'name': parent + '/jobs/' + project_name + '-scheduler-' + tag,
'pubsub_target': {
'topic_name': 'projects/' + project_id + '/topics/' + topic,
'data': project_name.encode()
},
'schedule': schedule
}
try:
existing_job = cloud_scheduler_client.get_job(job['name'])
except exceptions.NotFound:
existing_job = None
if existing_job:
if existing_job.schedule != schedule:
update_mask = {'paths': ['schedule']}
cloud_scheduler_client.update_job(job, update_mask)
else:
cloud_scheduler_client.create_job(parent, job)
def delete_scheduler(cloud_scheduler_client, project_name, tag):
"""Deletes schedulers for projects that were removed."""
project_id = os.environ.get('GCP_PROJECT')
location_id = os.environ.get('FUNCTION_REGION')
name = cloud_scheduler_client.job_path(project_id, location_id,
project_name + '-scheduler-' + tag)
cloud_scheduler_client.delete_job(name)
def delete_project(cloud_scheduler_client, project):
"""Delete the given project."""
logging.info('Deleting project %s', project.name)
for tag in (build_project.FUZZING_BUILD_TYPE,
build_and_run_coverage.COVERAGE_BUILD_TYPE,
build_and_run_coverage.INTROSPECTOR_BUILD_TYPE):
try:
delete_scheduler(cloud_scheduler_client, project.name, tag)
except exceptions.NotFound:
# Already deleted.
continue
except exceptions.GoogleAPICallError as error:
logging.error('Scheduler deletion for %s failed with %s', project.name,
error)
return
project.key.delete()
# pylint: disable=too-many-branches
def sync_projects(cloud_scheduler_client, projects):
"""Sync projects with cloud datastore."""
for project in Project.query():
if project.name not in projects:
delete_project(cloud_scheduler_client, project)
existing_projects = {project.name for project in Project.query()}
for project_name in projects:
try:
create_scheduler(cloud_scheduler_client, project_name,
projects[project_name].schedule,
build_project.FUZZING_BUILD_TYPE, FUZZING_BUILD_TOPIC)
create_scheduler(cloud_scheduler_client, project_name, COVERAGE_SCHEDULE,
build_and_run_coverage.COVERAGE_BUILD_TYPE,
COVERAGE_BUILD_TOPIC)
create_scheduler(cloud_scheduler_client, project_name,
INTROSPECTOR_SCHEDULE,
build_and_run_coverage.INTROSPECTOR_BUILD_TYPE,
INTROSPECTOR_BUILD_TOPIC)
except exceptions.GoogleAPICallError as error:
logging.error('Scheduler creation for %s failed with %s', project_name,
error)
continue
if project_name in existing_projects:
continue
project_metadata = projects[project_name]
Project(name=project_name,
schedule=project_metadata.schedule,
project_yaml_contents=project_metadata.project_yaml_contents,
dockerfile_contents=project_metadata.dockerfile_contents).put()
for project in Project.query():
if project.name not in projects:
continue
logging.info('Setting up project %s', project.name)
project_metadata = projects[project.name]
project_changed = False
if project.schedule != project_metadata.schedule:
try:
logging.info('Schedule changed.')
project.schedule = project_metadata.schedule
project_changed = True
except exceptions.GoogleAPICallError as error:
logging.error('Updating scheduler for %s failed with %s', project.name,
error)
if project.project_yaml_contents != project_metadata.project_yaml_contents:
project.project_yaml_contents = project_metadata.project_yaml_contents
project_changed = True
if project.dockerfile_contents != project_metadata.dockerfile_contents:
project.dockerfile_contents = project_metadata.dockerfile_contents
project_changed = True
if project_changed:
project.put()
def _has_docker_file(project_contents):
"""Checks if project has a Dockerfile."""
return any(
content_file.name == 'Dockerfile' for content_file in project_contents)
def get_project_metadata(project_contents):
"""Checks for schedule parameter in yaml file else uses DEFAULT_SCHEDULE."""
for content_file in project_contents:
if content_file.name == 'project.yaml':
project_yaml_contents = content_file.decoded_content.decode('utf-8')
if content_file.name == 'Dockerfile':
dockerfile_contents = content_file.decoded_content.decode('utf-8')
project_yaml = yaml.safe_load(project_yaml_contents)
builds_per_day = project_yaml.get('builds_per_day', DEFAULT_BUILDS_PER_DAY)
if not isinstance(builds_per_day, int) or builds_per_day not in range(
1, MAX_BUILDS_PER_DAY + 1):
    raise ProjectYamlError(
        "'builds_per_day' is not an integer in range [1-4]")
  # Starting at 6:00 am, subsequent builds are scheduled every 'interval'
  # hours. Example: builds_per_day = 2 gives interval = 12, so
  # hours = [6, 18] and schedule = '0 6,18 * * *'
interval = 24 // builds_per_day
hours = []
for hour in range(6, 30, interval):
hours.append(hour % 24)
schedule = '0 ' + ','.join(str(hour) for hour in hours) + ' * * *'
return ProjectMetadata(schedule, project_yaml_contents, dockerfile_contents)
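# Illustrative sketch of the schedule arithmetic in get_project_metadata
# (pure computation, no Cloud services involved; this helper is hypothetical
# and is not called anywhere in the sync flow).
def _example_schedule(builds_per_day):
  """E.g. 2 -> '0 6,18 * * *', 4 -> '0 6,12,18,0 * * *'."""
  interval = 24 // builds_per_day
  hours = [hour % 24 for hour in range(6, 30, interval)]
  return '0 ' + ','.join(str(hour) for hour in hours) + ' * * *'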
def get_projects(repo):
"""Get project list from git repository."""
projects = {}
contents = repo.get_contents('projects')
for content_file in contents:
if content_file.type != 'dir' or not VALID_PROJECT_NAME.match(
content_file.name):
continue
project_contents = repo.get_contents(content_file.path)
if not _has_docker_file(project_contents):
continue
try:
projects[content_file.name] = get_project_metadata(project_contents)
except ProjectYamlError as error:
logging.error(
'Incorrect format for project.yaml file of %s with error %s',
content_file.name, error)
return projects
def sync(event, context):
"""Sync projects with cloud datastore."""
del event, context # Unused.
with ndb.Client().context():
with tempfile.TemporaryDirectory() as temp_dir:
repo = OssFuzzRepo(temp_dir)
projects = get_projects(repo)
cloud_scheduler_client = scheduler_v1.CloudSchedulerClient()
sync_projects(cloud_scheduler_client, projects)
|
e817c730662d46dff23df4f3c5a820267f891df3
|
4cf1fea45f27bbd317c08e73130c9a13bbd13b90
|
/src/pydap/handlers/dap.py
|
41a1b896063354059e343df352cc2473fe369b1c
|
[
"MIT"
] |
permissive
|
pydap/pydap
|
35eacc2752f1ef0d2e5bbed6a672ce103cd053dd
|
a330c563c2396c98271fda699c07ab05b7a472b3
|
refs/heads/master
| 2023-07-25T17:25:21.935337
| 2023-06-05T17:15:38
| 2023-06-05T17:15:38
| 10,464,231
| 102
| 78
|
MIT
| 2023-09-13T08:59:56
| 2013-06-03T20:42:50
|
Python
|
UTF-8
|
Python
| false
| false
| 23,882
|
py
|
dap.py
|
"""A handler for remote datasets.
DAP handlers convert from different data formats (e.g., NetCDF) to the internal
pydap model. The pydap client is just a handler that converts from a remote
dataset to the internal model.
"""
import io
import gzip
import sys
import pprint
import copy
import re
from itertools import chain
from numpy.lib.arrayterator import Arrayterator
# handlers should be set by the application
# http://docs.python.org/2/howto/logging.html#configuring-logging-for-a-library
import logging
import numpy
import six.moves.urllib.parse
from six import text_type, string_types, BytesIO
import pydap.model
import pydap.net
from ..lib import (
encode, combine_slices, fix_slice, hyperslab,
START_OF_SEQUENCE, walk, StreamReader, BytesReader,
DEFAULT_TIMEOUT, DAP2_ARRAY_LENGTH_NUMPY_TYPE)
import pydap.handlers.lib
from ..parsers.dds import dds_to_dataset
from ..parsers.dmr import dmr_to_dataset
from ..parsers.das import parse_das, add_attributes
import pydap.parsers
from ..responses.dods import DAP2_response_dtypemap
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
BLOCKSIZE = 512
class DAPHandler(pydap.handlers.lib.BaseHandler):
"""Build a dataset from a DAP base URL."""
def __init__(self, url, application=None, session=None, output_grid=True,
timeout=DEFAULT_TIMEOUT, verify=True, user_charset='ascii', protocol=None):
self.application = application
self.session = session
self.output_grid = output_grid
self.timeout = timeout
self.verify = verify
self.user_charset = user_charset
self.url = url
scheme, netloc, path, query, fragment = six.moves.urllib.parse.urlsplit(self.url)
self.scheme = scheme
self.netloc = netloc
self.path = path
self.query = query
self.fragment = fragment
self.protocol = self.determine_protocol(protocol)
self.projection, self.selection = pydap.parsers.parse_ce(self.query)
arg = (self.scheme, self.netloc, self.path, '&'.join(self.selection), self.fragment)
self.base_url = six.moves.urllib.parse.urlunsplit(arg)
self.make_dataset()
self.add_proxies()
def determine_protocol(self, protocol):
if protocol == 'dap4':
self.scheme = 'http'
return protocol
elif protocol == 'dap2':
return protocol
elif self.scheme == 'dap4':
self.scheme = 'http'
return 'dap4'
else:
extension = self.path.split('.')[-1]
if extension in ['dmr', 'dap']:
return 'dap4'
elif extension in ['dds', 'dods']:
return 'dap2'
else:
return 'dap2'
    def make_dataset(self):
if self.protocol == 'dap4':
self.dataset_from_dap4()
else:
self.dataset_from_dap2()
self.attach_das()
def dataset_from_dap4(self):
dmr_url = six.moves.urllib.parse.urlunsplit((self.scheme, self.netloc, self.path + '.dmr', self.query, self.fragment))
r = pydap.net.GET(dmr_url, self.application, self.session, timeout=self.timeout, verify=self.verify)
pydap.net.raise_for_status(r)
dmr = safe_charset_text(r, self.user_charset)
self.dataset = dmr_to_dataset(dmr)
def dataset_from_dap2(self):
dds_url = six.moves.urllib.parse.urlunsplit((self.scheme, self.netloc, self.path + '.dds', self.query, self.fragment))
r = pydap.net.GET(dds_url, self.application, self.session, timeout=self.timeout, verify=self.verify)
pydap.net.raise_for_status(r)
dds = safe_charset_text(r, self.user_charset)
self.dataset = dds_to_dataset(dds)
def attach_das(self):
# Also pull the DAS and add additional attributes
das_url = six.moves.urllib.parse.urlunsplit((self.scheme, self.netloc, self.path + '.das', self.query, self.fragment))
r = pydap.net.GET(das_url, self.application, self.session, timeout=self.timeout, verify=self.verify)
pydap.net.raise_for_status(r)
das = safe_charset_text(r, self.user_charset)
add_attributes(self.dataset, parse_das(das))
def add_proxies(self):
if self.protocol == 'dap4':
self.add_dap4_proxies()
else:
self.add_dap2_proxies()
def add_dap4_proxies(self):
# remove any projection from the base_url, leaving selections
for var in walk(self.dataset, pydap.model.BaseType):
var.data = BaseProxyDap4(self.base_url, var.name, var.dtype, var.shape,
application=self.application, session=self.session,
timeout=self.timeout)
for var in walk(self.dataset, pydap.model.GridType):
var.set_output_grid(self.output_grid)
def add_dap2_proxies(self):
# now add data proxies
for var in walk(self.dataset, pydap.model.BaseType):
var.data = BaseProxyDap2(self.base_url, var.id, var.dtype, var.shape,
application=self.application, session=self.session,
timeout=self.timeout)
for var in walk(self.dataset, pydap.model.SequenceType):
template = copy.copy(var)
var.data = SequenceProxy(self.base_url,
template,
application=self.application,
session=self.session,
timeout=self.timeout)
# apply projections
for var in self.projection:
target = self.dataset
while var:
token, index = var.pop(0)
target = target[token]
if isinstance(target, pydap.model.BaseType):
target.data.slice = fix_slice(index, target.shape)
elif isinstance(target, pydap.model.GridType):
index = fix_slice(index, target.array.shape)
target.array.data.slice = index
for s, child in zip(index, target.maps):
target[child].data.slice = (s,)
elif isinstance(target, pydap.model.SequenceType):
target.data.slice = index
# retrieve only main variable for grid types:
for var in walk(self.dataset, pydap.model.GridType):
var.set_output_grid(self.output_grid)
def get_charset(r, user_charset):
charset = r.charset
if not charset:
charset = user_charset
return charset
def safe_charset_text(r, user_charset):
if r.content_encoding == 'gzip':
return gzip.GzipFile(fileobj=BytesIO(r.body)).read().decode(get_charset(r, user_charset))
else:
r.charset = get_charset(r, user_charset)
return r.text
def safe_dds_and_data(r, user_charset):
if r.content_encoding == 'gzip':
raw = gzip.GzipFile(fileobj=BytesIO(r.body)).read()
else:
raw = r.body
dds, data = raw.split(b'\nData:\n', 1)
return dds.decode(get_charset(r, user_charset)), data
def safe_dmr_and_data(r, user_charset):
if r.content_encoding == 'gzip':
raw = gzip.GzipFile(fileobj=BytesIO(r.body)).read()
else:
raw = r.body
logger.info("Saving and splitting dmr+")
try:
dmr, data = raw.split(b'</Dataset>', 1)
except ValueError:
logger.exception('Failed to split the following DMR+ \n %s' % raw)
        import codecs
        import pickle
        pickled_response = str(codecs.encode(pickle.dumps(r), "base64").decode())
        logger.exception('pickled response (base64): \n ----BEGIN PICKLE----- \n %s -----END PICKLE-----' % pickled_response)
dmr = dmr[4:] + b'</Dataset>'
dmr = dmr.decode(get_charset(r, user_charset))
data = data[3:]
return dmr, data
class BaseProxyDap2(object):
"""A proxy for remote base types.
This class behaves like a Numpy array, proxying the data from a base type
on a remote dataset.
"""
def __init__(self, baseurl, id, dtype, shape, slice_=None,
application=None, session=None, timeout=DEFAULT_TIMEOUT,
verify=True, user_charset='ascii'):
self.baseurl = baseurl
self.id = id
self.dtype = dtype
self.shape = shape
self.slice = slice_ or tuple(slice(None) for s in self.shape)
self.application = application
self.session = session
self.timeout = timeout
self.verify = verify
self.user_charset = user_charset
def __repr__(self):
return 'BaseProxy(%s)' % ', '.join(map(repr, [self.baseurl, self.id, self.dtype, self.shape, self.slice]))
def __getitem__(self, index):
# build download url
index = combine_slices(self.slice, fix_slice(index, self.shape))
scheme, netloc, path, query, fragment = six.moves.urllib.parse.urlsplit(self.baseurl)
url = six.moves.urllib.parse.urlunsplit((
scheme, netloc, path + '.dods',
six.moves.urllib.parse.quote(self.id) + hyperslab(index) + '&' + query,
fragment)).rstrip('&')
# download and unpack data
logger.info("Fetching URL: %s" % url)
r = pydap.net.GET(url, self.application, self.session, timeout=self.timeout, verify=self.verify)
pydap.net.raise_for_status(r)
dds, data = safe_dds_and_data(r, self.user_charset)
# Parse received dataset:
dataset = dds_to_dataset(dds)
dataset.data = unpack_dap2_data(BytesReader(data), dataset)
return dataset[self.id].data
def __len__(self):
return self.shape[0]
def __iter__(self):
return iter(self[:])
# Comparisons return a boolean array
def __eq__(self, other):
return self[:] == other
def __ne__(self, other):
return self[:] != other
def __ge__(self, other):
return self[:] >= other
def __le__(self, other):
return self[:] <= other
def __gt__(self, other):
return self[:] > other
def __lt__(self, other):
return self[:] < other
class BaseProxyDap4(BaseProxyDap2):
def __init__(self, baseurl, id, dtype, shape, slice_=None,
application=None, session=None, timeout=DEFAULT_TIMEOUT,
verify=True, user_charset='ascii'):
self.baseurl = baseurl
self.id = id
self.dtype = dtype
self.shape = shape
self.slice = slice_ or tuple(slice(None) for s in self.shape)
self.application = application
self.session = session
self.timeout = timeout
self.verify = verify
self.user_charset = user_charset
def __repr__(self):
return 'Dap4BaseProxy(%s)' % ', '.join(
map(repr, [self.baseurl, self.id, self.dtype, self.shape, self.slice]))
def __getitem__(self, index):
# build download url
index = combine_slices(self.slice, fix_slice(index, self.shape))
scheme, netloc, path, query, fragment = six.moves.urllib.parse.urlsplit(self.baseurl)
ce = 'dap4.ce=' + six.moves.urllib.parse.quote(self.id) + hyperslab(index) + query
url = six.moves.urllib.parse.urlunsplit((scheme, netloc, path + '.dap', ce, fragment)).rstrip('&')
# download and unpack data
logger.info("Fetching URL: %s" % url)
r = pydap.net.GET(url, self.application, self.session, timeout=self.timeout, verify=self.verify)
pydap.net.raise_for_status(r)
dmr, data = safe_dmr_and_data(r, self.user_charset)
# Parse received dataset:
dataset = dmr_to_dataset(dmr)
dataset = unpack_dap4_data(BytesReader(data), dataset)
self.checksum = dataset[self.id].attributes['checksum']
self.data = dataset[self.id].data
return self.data
class SequenceProxy(object):
"""A proxy for remote sequences.
This class behaves like a Numpy structured array, proxying the data from a
sequence on a remote dataset. The data is streamed from the dataset,
meaning it can be treated one record at a time before the whole data is
downloaded.
"""
shape = ()
def __init__(self, baseurl, template, selection=None, slice_=None,
application=None, session=None, timeout=DEFAULT_TIMEOUT,
verify=True):
self.baseurl = baseurl
self.template = template
self.selection = selection or []
self.slice = slice_ or (slice(None),)
self.application = application
self.session = session
self.timeout = timeout
self.verify = verify
# this variable is true when only a subset of the children are selected
self.sub_children = False
@property
def dtype(self):
return self.template.dtype
def __repr__(self):
return 'SequenceProxy(%s)' % ', '.join(
map(repr, [
self.baseurl, self.template, self.selection, self.slice]))
def __copy__(self):
"""Return a lightweight copy of the object."""
return self.__class__(self.baseurl, self.template, self.selection[:],
self.slice[:], self.application)
def __getitem__(self, key):
"""Return a new object representing a subset of the data."""
out = copy.copy(self)
# return the data for a children
if isinstance(key, string_types):
out.template = out.template[key]
# return a new object with requested columns
elif isinstance(key, list):
out.sub_children = True
out.template._visible_keys = key
# return a copy with the added constraints
elif isinstance(key, pydap.handlers.lib.ConstraintExpression):
out.selection.extend(str(key).split('&'))
# slice data
else:
if isinstance(key, int):
key = slice(key, key + 1)
out.slice = combine_slices(self.slice, (key,))
return out
@property
def url(self):
"""Return url from where data is fetched."""
scheme, netloc, path, query, fragment = six.moves.urllib.parse.urlsplit(self.baseurl)
url = six.moves.urllib.parse.urlunsplit((
scheme, netloc, path + '.dods',
self.id + hyperslab(self.slice) + '&' +
'&'.join(self.selection), fragment)).rstrip('&')
return url
@property
def id(self):
"""Return the id of this sequence."""
if self.sub_children:
id_ = ','.join(
six.moves.urllib.parse.quote(child.id) for child in self.template.children())
else:
id_ = six.moves.urllib.parse.quote(self.template.id)
return id_
def __iter__(self):
# download and unpack data
r = pydap.net.GET(self.url, self.application, self.session, timeout=self.timeout,
verify=self.verify)
pydap.net.raise_for_status(r)
i = r.app_iter
if not hasattr(i, '__next__'):
i = iter(i)
# Fast forward past the DDS header
# the pattern could span chunk boundaries though so make sure to check
pattern = b'Data:\n'
last_chunk = find_pattern_in_string_iter(pattern, i)
if last_chunk is None:
raise ValueError("Could not find data segment in response from {}"
.format(self.url))
# Then construct a stream consisting of everything from
# 'Data:\n' to the end of the chunk + the rest of the stream
def stream_start():
yield last_chunk
stream = StreamReader(chain(stream_start(), i))
return unpack_sequence(stream, self.template)
def __eq__(self, other):
return pydap.handlers.lib.ConstraintExpression('%s=%s' % (self.id, encode(other)))
def __ne__(self, other):
return pydap.handlers.lib.ConstraintExpression('%s!=%s' % (self.id, encode(other)))
def __ge__(self, other):
return pydap.handlers.lib.ConstraintExpression('%s>=%s' % (self.id, encode(other)))
def __le__(self, other):
return pydap.handlers.lib.ConstraintExpression('%s<=%s' % (self.id, encode(other)))
def __gt__(self, other):
return pydap.handlers.lib.ConstraintExpression('%s>%s' % (self.id, encode(other)))
def __lt__(self, other):
return pydap.handlers.lib.ConstraintExpression('%s<%s' % (self.id, encode(other)))
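# Illustrative usage (commented out; `dataset` is a hypothetical remote
# dataset containing a sequence named 'cast'). Iteration streams records one
# at a time, and comparisons build server-side constraint expressions:
#
#     seq = dataset['cast']
#     for rec in seq[['temperature', 'salinity']][seq['depth'] < 100]:
#         print(rec)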
def unpack_sequence(stream, template):
"""Unpack data from a sequence, yielding records."""
# is this a sequence or a base type?
sequence = isinstance(template, pydap.model.SequenceType)
# if there are no children, we use the template as the only column
cols = list(template.children()) or [template]
# if there are no strings and no nested sequences we can unpack record by
# record easily
simple = all(isinstance(c, pydap.model.BaseType) and c.dtype.char not in "SU" for c in cols)
if simple:
dtype = numpy.dtype([("", c.dtype, c.shape) for c in cols])
marker = stream.read(4)
while marker == START_OF_SEQUENCE:
rec = numpy.frombuffer(stream.read(dtype.itemsize), dtype=dtype)[0]
if not sequence:
rec = rec[0]
yield rec
marker = stream.read(4)
else:
marker = stream.read(4)
while marker == START_OF_SEQUENCE:
rec = unpack_children(stream, template)
if not sequence:
rec = rec[0]
else:
rec = tuple(rec)
yield rec
marker = stream.read(4)
def unpack_children(stream, template):
"""Unpack children from a structure, returning their data."""
cols = list(template.children()) or [template]
out = []
for col in cols:
# sequences and other structures
if isinstance(col, pydap.model.SequenceType):
out.append(pydap.handlers.lib.IterData(list(unpack_sequence(stream, col)), col))
elif isinstance(col, pydap.model.StructureType):
out.append(tuple(unpack_children(stream, col)))
# unpack arrays
else:
out.extend(convert_stream_to_list(stream, col.dtype, col.shape, col.id))
return out
def convert_stream_to_list(stream, parser_dtype, shape, id):
out = []
response_dtype = DAP2_response_dtypemap(parser_dtype)
if shape:
n = numpy.frombuffer(stream.read(4), DAP2_ARRAY_LENGTH_NUMPY_TYPE)[0]
count = response_dtype.itemsize * n
if response_dtype.char in 'S':
            # Check only 'S' and not 'SU' because
            # response_dtype.char should never be 'U'
data = []
for _ in range(n):
k = numpy.frombuffer(stream.read(4), DAP2_ARRAY_LENGTH_NUMPY_TYPE)[0]
data.append(stream.read(k))
stream.read(-k % 4)
out.append(numpy.array([text_type(x.decode('ascii')) for x in data], 'S').reshape(shape))
else:
stream.read(4) # read additional length
try:
out.append(numpy.frombuffer(stream.read(count), response_dtype).astype(parser_dtype).reshape(shape))
except ValueError as e:
if str(e) == 'total size of new array must be unchanged':
# server-side failure.
# it is expected that the user should be mindful of this:
raise RuntimeError(
('variable {0} could not be properly '
'retrieved. To avoid this '
'error consider using open_url(..., '
'output_grid=False).').format(six.moves.urllib.parse.quote(id)))
else:
raise
if response_dtype.char == "B":
# Unsigned Byte type is packed to multiples of 4 bytes:
stream.read(-n % 4)
# special types: strings and bytes
elif response_dtype.char in 'S':
        # Check only 'S' and not 'SU' because
        # response_dtype.char should never be 'U'
k = numpy.frombuffer(stream.read(4), DAP2_ARRAY_LENGTH_NUMPY_TYPE)[0]
out.append(text_type(stream.read(k).decode('ascii')))
stream.read(-k % 4)
# usual data
else:
out.append(
numpy.frombuffer(stream.read(response_dtype.itemsize), response_dtype)
.astype(parser_dtype)[0])
if response_dtype.char == "B":
# Unsigned Byte type is packed to multiples of 4 bytes:
stream.read(3)
return out
def unpack_dap2_data(xdr_stream, dataset):
"""Unpack a string of encoded data, returning data as lists."""
return unpack_children(xdr_stream, dataset)
def decode_chunktype(chunk_type):
encoding = '{0:03b}'.format(chunk_type)
if sys.byteorder == 'little':
# If our machine's byteorder is little, we need to swap since the chunk_type is always big endian
encoding = encoding[::-1]
last_chunk = bool(int(encoding[0]))
error = bool(int(encoding[1]))
endian = {'0': '>', '1': '<'}[encoding[2]]
return last_chunk, error, endian
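# Worked example of the flag decoding above. chunk_type is the top byte of a
# DAP4 chunk header: bit 0 marks the last chunk, bit 1 an error chunk, and
# bit 2 a little-endian payload. The check is guarded because the function
# reads the bit string relative to the host byte order.
if sys.byteorder == 'little':
    assert decode_chunktype(1) == (True, False, '>')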
def get_count(variable):
count = int(numpy.array(variable.shape).prod())
item_size = numpy.dtype(variable.dtype).itemsize
count = count * item_size
return count
def decode_variable(buffer, start, stop, variable, endian):
dtype = variable.dtype
dtype = dtype.newbyteorder(endian)
data = numpy.frombuffer(buffer[start:stop], dtype=dtype).astype(dtype)
data = data.reshape(variable.shape)
return data
def stream2bytearray(xdr_stream):
last = False
buffer = bytearray()
while not last:
chunk = numpy.frombuffer(xdr_stream.read(4), dtype='>u4')
chunk_size = (chunk & 0x00ffffff)[0]
chunk_type = ((chunk >> 24) & 0xff)[0]
last, error, endian = decode_chunktype(chunk_type)
buffer.extend(xdr_stream.read(chunk_size))
return buffer
def get_endianness(xdr_stream):
chunk_header = xdr_stream.peek(4)[0:4]
chunk_header = numpy.frombuffer(chunk_header, dtype='>u4')[0]
chunk_type = ((chunk_header >> 24) & 0xff)
last, error, endian = decode_chunktype(chunk_type)
return endian
def unpack_dap4_data(xdr_stream, dataset):
endian = get_endianness(xdr_stream)
checksum_dtype = numpy.dtype(endian + 'u4')
buffer = stream2bytearray(xdr_stream)
start = 0
for variable_name in dataset:
variable = dataset[variable_name]
count = get_count(variable)
stop = start + count
data = decode_variable(buffer, start=start, stop=stop, variable=variable, endian=endian)
checksum = numpy.frombuffer(buffer[stop:stop + 4], dtype=checksum_dtype).byteswap('=')
if isinstance(variable, pydap.model.BaseType):
variable._set_data(data)
elif isinstance(variable, pydap.model.GridType):
variable._set_data([data.data])
variable.attributes['checksum'] = checksum
# Jump over the 4 byte chunk_header
start = stop + 4
return dataset
def find_pattern_in_string_iter(pattern, i):
last_chunk = b''
length = len(pattern)
for this_chunk in i:
last_chunk += this_chunk
m = re.search(pattern, last_chunk)
if m:
return last_chunk[m.end():]
last_chunk = last_chunk[-length:]
def dump(): # pragma: no cover
"""Unpack dods response into lists.
Return pretty-printed data.
"""
dods = sys.stdin.read()
dds, xdrdata = dods.split(b'\nData:\n', 1)
dataset = dds_to_dataset(dds)
xdr_stream = io.BytesIO(xdrdata)
data = unpack_dap2_data(xdr_stream, dataset)
pprint.pprint(data)
|
7bab59d0e54810bd1a1c9f69c098b546bb197ef1
|
dbd37114c5e3b39d0ce4d6135144f5a7a3b6d4f5
|
/pytorch_tools/modules/fpn.py
|
e00f962de647644b0f0e6b388177da55db51f1bf
|
[
"MIT"
] |
permissive
|
bonlime/pytorch-tools
|
6633fe8969ef51450589670c2393838131963773
|
74cd3dcef5ce4f81302be4061414c1d1113dafb8
|
refs/heads/master
| 2023-09-03T20:18:16.718561
| 2023-08-29T12:36:35
| 2023-08-29T12:36:35
| 201,936,229
| 190
| 16
|
MIT
| 2023-08-04T22:42:27
| 2019-08-12T13:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
fpn.py
|
""" Implements Feature Piramid Pooling for Object Detection and Semantic Segmentation """
# code kindly borrowed from https://github.com/qubvel/segmentation_models.pytorch
import torch.nn as nn
import torch.nn.functional as F
from .residual import conv1x1, conv3x3
class MergeBlock(nn.Module):
def forward(self, x):
x, skip = x
x = F.interpolate(x, size=skip.shape[-2:], mode="nearest")
x += skip
return x
class FPN(nn.Module):
"""Feature Pyramid Network for enhancing high-resolution feature maps with semantic
meaning from low resolution maps
Ref: https://arxiv.org/abs/1612.03144
Args:
encoder_channels (List[int]): Number of channels for each input feature map
pyramid_channels (int): Number of channels in each feature map after FPN. Defaults to 256.
num_layers (int): Number of FPN layers.
Input:
features (List): this module expects list of feature maps of different resolution
"""
def __init__(
self,
encoder_channels,
pyramid_channels=256,
num_layers=1,
        **bn_args,  # for compatibility only. Not used
):
super().__init__()
        assert num_layers == 1, "More than 1 layer is not supported in FPN"
        # we DO use bias in these convs
self.lateral_convs = nn.ModuleList([conv1x1(in_ch, pyramid_channels, bias=True) for in_ch in encoder_channels])
self.smooth_convs = nn.ModuleList(
[conv3x3(pyramid_channels, pyramid_channels, bias=True) for in_ch in encoder_channels]
)
self.merge_block = MergeBlock()
def forward(self, features):
"""features (List[torch.Tensor]): features from coarsest to finest"""
# project features
pyramid_features = [l_conv(feature) for l_conv, feature in zip(self.lateral_convs, features)]
# merge features inplace
for idx in range(1, len(pyramid_features)):
pyramid_features[idx] = self.merge_block([pyramid_features[idx - 1], pyramid_features[idx]])
# smooth them after merging
pyramid_features = [s_conv(feature) for s_conv, feature in zip(self.smooth_convs, pyramid_features)]
return pyramid_features
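# Illustrative usage (commented out; assumes torch and pytorch_tools are
# installed, and the channel sizes below are invented for the example).
# Feature maps are passed from coarsest to finest, per forward()'s docstring:
#
#     import torch
#     from pytorch_tools.modules.fpn import FPN
#     fpn = FPN(encoder_channels=[512, 256, 64])
#     feats = [torch.randn(1, 512, 8, 8),
#              torch.randn(1, 256, 16, 16),
#              torch.randn(1, 64, 32, 32)]
#     outs = fpn(feats)  # three maps, each with pyramid_channels=256 channels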
|
abeae792e9ef100dba9151ba632e2c6e81134405
|
af101b467134e10270bb72d02f41f07daa7f57d8
|
/configs/positional_encoding_in_gans/singan_interp-pad_balloons.py
|
08f082fc74b704a073f4130b217e2addd45e0062
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmagic
|
4d864853417db300de4dfe7e83ce380fd1557a23
|
a382f143c0fd20d227e1e5524831ba26a568190d
|
refs/heads/main
| 2023-08-31T14:40:24.936423
| 2023-08-30T05:05:56
| 2023-08-30T05:05:56
| 203,999,962
| 1,370
| 192
|
Apache-2.0
| 2023-09-14T11:39:18
| 2019-08-23T13:04:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 419
|
py
|
singan_interp-pad_balloons.py
|
_base_ = ['../singan/singan_balloons.py']
# TODO: bugs here
# MODEL
# NOTE: added by the user, e.g.:
# test_pkl_data = './work_dirs/singan_pkl/singan_interp-pad_balloons_20210406_180014-96f51555.pkl' # noqa
test_pkl_data = None
model = dict(
type='PESinGAN',
generator=dict(
type='SinGANMSGeneratorPE', interp_pad=True, noise_with_pad=True),
fixed_noise_with_pad=True,
test_pkl_data=test_pkl_data)
|
4ef15272f1228cc8d57cc7bab5cf85aab5d8335a
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client_common/arena_component_system/epic_sector_warning_component.py
|
126ad87521707dc54af4311d4467d5fde19e16e1
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 20,318
|
py
|
epic_sector_warning_component.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client_common/arena_component_system/epic_sector_warning_component.py
import math
import weakref
from collections import defaultdict
from functools import partial
from math import copysign
import BigWorld
import Event
from arena_component_system.client_arena_component_system import ClientArenaComponent
from epic_constants import EPIC_BATTLE_TEAM_ID
from constants import SECTOR_STATE
from coordinate_system import AXIS_ALIGNED_DIRECTION as AAD
from debug_utils import LOG_WARNING
from gui.battle_control.avatar_getter import getArena, getPlayerTeam
from helpers.CallbackDelayer import CallbackDelayer
from Math import Vector2, Vector3
from PlayerEvents import g_playerEvents
from epic_constants import SECTOR_EDGE_STATE
from battleground.BorderVisual import BorderVisual
class MAPPED_SECTOR_STATE(object):
GOOD = 0
BAD = 1
class WARNING_TYPE(object):
NONE = 0
SAFE = 1
BOMBING = 2
PROTECTED = 3
SECTOR_STATE_TO_MAPPED_STATE = {EPIC_BATTLE_TEAM_ID.TEAM_ATTACKER: {SECTOR_STATE.CLOSED: MAPPED_SECTOR_STATE.BAD,
SECTOR_STATE.OPEN: MAPPED_SECTOR_STATE.GOOD,
SECTOR_STATE.TRANSITION: MAPPED_SECTOR_STATE.GOOD,
SECTOR_STATE.CAPTURED: MAPPED_SECTOR_STATE.GOOD,
SECTOR_STATE.BOMBING: MAPPED_SECTOR_STATE.GOOD},
EPIC_BATTLE_TEAM_ID.TEAM_DEFENDER: {SECTOR_STATE.CLOSED: MAPPED_SECTOR_STATE.GOOD,
SECTOR_STATE.OPEN: MAPPED_SECTOR_STATE.GOOD,
SECTOR_STATE.TRANSITION: MAPPED_SECTOR_STATE.BAD,
SECTOR_STATE.CAPTURED: MAPPED_SECTOR_STATE.GOOD,
SECTOR_STATE.BOMBING: MAPPED_SECTOR_STATE.BAD}}
ADJACENT_MAPPED_STATES_TO_EDGE_STATE = {(MAPPED_SECTOR_STATE.GOOD, MAPPED_SECTOR_STATE.GOOD): SECTOR_EDGE_STATE.NONE,
(MAPPED_SECTOR_STATE.GOOD, MAPPED_SECTOR_STATE.BAD): SECTOR_EDGE_STATE.DANGER,
(MAPPED_SECTOR_STATE.BAD, MAPPED_SECTOR_STATE.GOOD): SECTOR_EDGE_STATE.SAFE,
(MAPPED_SECTOR_STATE.BAD, MAPPED_SECTOR_STATE.BAD): SECTOR_EDGE_STATE.DANGER}
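# Reading the table above: the key is (state of the group the player is in,
# state of the group across the edge), so leaving BAD territory for a GOOD
# group yields a SAFE edge, while any edge into or between BAD groups yields
# DANGER.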
MAX_NUM_NODES = 10
def makeEdgeId(a, b):
if a is None or b is None:
return
else:
return MAX_NUM_NODES * a + b if a < b else MAX_NUM_NODES * b + a
def decomposeEdgeId(edgeId):
return (int(edgeId / MAX_NUM_NODES), edgeId % MAX_NUM_NODES)
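# Worked example for the two helpers above: an edge id packs an unordered
# node pair into one integer, with the smaller node id in the tens place
# (MAX_NUM_NODES is 10, so node ids must stay below 10).
assert makeEdgeId(2, 7) == 27 and makeEdgeId(7, 2) == 27
assert decomposeEdgeId(27) == (2, 7)
assert makeEdgeId(None, 3) is None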
class _SectorGroupNode(object):
def __init__(self):
self.neighbours = []
self.mappedState = None
return
class _SectorGroupEdge(object):
def __init__(self):
self.state = None
self.start = None
self.end = None
return
def getEdgePoints(self):
return (self.start, self.end)
class _ProtectionZoneSetting(object):
def __init__(self):
self.mappedState = None
self.geometry = None
self.edgeState = None
self.isActive = False
self.team = 0
return
class _SectorWarning(object):
def __init__(self, warningType, targetNode):
self.type = warningType
self.targetSectorGroup = targetNode
class EpicSectorWarningComponent(ClientArenaComponent, CallbackDelayer):
edges = property(lambda self: self.__edges)
protectionZones = property(lambda self: self.__protectionZones)
warnings = property(lambda self: self.__activeWarnings)
def __init__(self, componentSystem):
ClientArenaComponent.__init__(self, componentSystem)
CallbackDelayer.__init__(self)
self.__sectorComponent = None
self.__playerDataComponent = None
self.__protectionZoneComponent = None
self.__teamId = None
self.__nodes = None
self.__edges = None
self.__protectionZones = None
self.__activeWarnings = None
self.__transitionEndTimes = None
self.__visual = None
self.onShowSectorWarning = Event.Event(self._eventManager)
self.onTransitionTimerUpdated = Event.Event(self._eventManager)
return
def activate(self):
super(EpicSectorWarningComponent, self).activate()
g_playerEvents.onAvatarReady += self.__onAvatarReady
def deactivate(self):
super(EpicSectorWarningComponent, self).deactivate()
g_playerEvents.onAvatarReady -= self.__onAvatarReady
if self.__sectorComponent:
self.__sectorComponent.onSectorAdded -= self.__onSectorAdded
self.__sectorComponent.onSectorGroupUpdated -= self.__onSectorGroupUpdated
self.__sectorComponent.onSectorGroupTransitionTimeChanged -= self.__onSectorGroupTransitionTimeChanged
self.__sectorComponent.onPlayerSectorGroupChanged -= self.__onPlayerSectorGroupChanged
if self.__protectionZoneComponent:
self.__protectionZoneComponent.onProtectionZoneAdded -= self.__onProtectionZoneAdded
self.__protectionZoneComponent.onProtectionZoneActive -= self.__onProtectionZoneActive
self.__protectionZoneComponent.onPlayerInProtectedZoneAction -= self.__onPlayerInProtectionZone
if self.__visual is not None:
self.__visual.destroy()
return
def destroy(self):
self.__teamId = None
self.__nodes = None
self.__edges = None
self.__protectionZones = None
self.__activeWarnings = None
ClientArenaComponent.destroy(self)
CallbackDelayer.destroy(self)
return
def getEdgeByID(self, edgeID):
return self.__edges.get(edgeID, None)
def getEdgeIdsByNodeId(self, nodeId):
return map(partial(makeEdgeId, nodeId), self.__nodes[nodeId].neighbours)
def __onAvatarReady(self):
if self._componentSystem() is None:
return
else:
self.__sectorComponent = weakref.proxy(self._componentSystem().sectorComponent)
self.__playerDataComponent = weakref.proxy(self._componentSystem().playerDataComponent)
self.__protectionZoneComponent = weakref.proxy(self._componentSystem().protectionZoneComponent)
self.__teamId = getPlayerTeam()
self.__transitionEndTimes = {}
self.__nodes = defaultdict(_SectorGroupNode)
self.__edges = defaultdict(_SectorGroupEdge)
self.__protectionZones = defaultdict(_ProtectionZoneSetting)
self.__activeWarnings = {}
self.__visual = SectorBorderVisualisation(self)
self.__sectorComponent.onSectorAdded += self.__onSectorAdded
for sector in self.__sectorComponent.sectors:
self.__onSectorAdded(sector, self.__sectorComponent.getSectorGroupById(sector.groupID))
self.__sectorComponent.onSectorGroupUpdated += self.__onSectorGroupUpdated
sectorGroups = self.__sectorComponent.sectorGroups
for groupId in sectorGroups:
group = sectorGroups[groupId]
self.__onSectorGroupUpdated(group.id, group.state, group.center, group.getBound())
self.__sectorComponent.onSectorGroupTransitionTimeChanged += self.__onSectorGroupTransitionTimeChanged
self.__protectionZoneComponent.onProtectionZoneAdded += self.__onProtectionZoneAdded
self.__protectionZoneComponent.onProtectionZoneActive += self.__onProtectionZoneActive
for _, zone in self.__protectionZoneComponent.protectionZones.items():
self.__onProtectionZoneAdded(zone.zoneID, zone.position, zone.bound)
if zone.isActive:
self.__onProtectionZoneActive(zone.zoneID, zone.isActive)
self.__sectorComponent.onPlayerSectorGroupChanged += self.__onPlayerSectorGroupChanged
groupId = self.__playerDataComponent.physicalSectorGroup
if groupId is not None:
self.__onPlayerSectorGroupChanged(groupId, None, None, None)
self.__protectionZoneComponent.onPlayerInProtectedZoneAction += self.__onPlayerInProtectionZone
        for zone in [ x for _, x in self.__protectionZoneComponent.protectionZones.items() if self.__protectionZoneComponent.isPlayerInProtectedZone(x.zoneID) ]:
self.__onPlayerInProtectionZone(zone.zoneID, True)
return
def __onSectorAdded(self, sector, sectorGroup):
groupId = sectorGroup.id
node = self.__nodes[groupId]
node.mappedState = SECTOR_STATE_TO_MAPPED_STATE[self.__teamId][sectorGroup.state]
neighbours = [ neighbour for neighbour in (self.__sectorComponent.getSectorById(sectorId) for sectorId in self.__sectorComponent.getNeighbouringSectorIdsByOwnSectorId(sector.sectorID)) if neighbour.groupID != groupId ]
for neighbour in neighbours:
neighbourGrpId = neighbour.groupID
if neighbourGrpId not in node.neighbours:
node.neighbours.append(neighbourGrpId)
neighboursOfNeighbour = self.__nodes[neighbourGrpId].neighbours
if groupId not in neighboursOfNeighbour:
neighboursOfNeighbour.append(groupId)
newEdgeId = makeEdgeId(groupId, neighbourGrpId)
edge = self.__edges[newEdgeId]
edge.start, edge.end = self.__calcEdgeLine(groupId, neighbourGrpId)
if edge.state is None:
edge.state = SECTOR_EDGE_STATE.NONE
if self.__sectorComponent.currentPlayerSectorId is not None:
continue
self.__showEdgeState(newEdgeId, edge, edge.state, forceUpdate=True)
return
def __onProtectionZoneAdded(self, zoneId, position, bound):
zone = self.__protectionZones[zoneId]
protectionZone = self.__protectionZoneComponent.getProtectionZoneById(zoneId)
zone.team = protectionZone.team
zone.geometry = (Vector2(position.x, position.z),
bound[0],
bound[1],
bound[1] - bound[0])
zone.mappedState = MAPPED_SECTOR_STATE.GOOD if self.__teamId == protectionZone.team else MAPPED_SECTOR_STATE.BAD
def __onProtectionZoneActive(self, zoneId, isActive):
isInZone = self.__protectionZoneComponent.isPlayerInProtectedZone(zoneId)
self.__updateProtectionZone(zoneId, isInZone, isActive)
def __onPlayerInProtectionZone(self, zoneId, isInZone):
self.__updateProtectionZone(zoneId, isInZone, self.__protectionZoneComponent.isProtectionZoneActive(zoneId))
def __updateProtectionZone(self, zoneId, isInZone, isZoneActive):
zone = self.__protectionZones.get(zoneId, None)
if zone is None:
return
else:
if not isZoneActive:
state = SECTOR_EDGE_STATE.NONE
elif isInZone:
state = ADJACENT_MAPPED_STATES_TO_EDGE_STATE[zone.mappedState, MAPPED_SECTOR_STATE.GOOD]
else:
state = ADJACENT_MAPPED_STATES_TO_EDGE_STATE[MAPPED_SECTOR_STATE.GOOD, zone.mappedState]
zone.edgeState = state
zone.isActive = isZoneActive
self.__visual.showProtectionZone(zoneId, state, zone.team, self.protectionZones)
return
def __onPlayerSectorGroupChanged(self, newNodeID, *args):
if newNodeID is None:
return
else:
adjacentEdges = self.getEdgeIdsByNodeId(newNodeID)
currentActiveWarnings = self.__activeWarnings.keys()
for activeEdgeId in currentActiveWarnings:
if activeEdgeId not in adjacentEdges:
fromId, toId = decomposeEdgeId(activeEdgeId)
self.__showWarning(fromId, toId, WARNING_TYPE.NONE)
localState = self.__nodes[newNodeID].mappedState
for edgeId, edge in self.__edges.iteritems():
adjacentNodes = decomposeEdgeId(edgeId)
adjacentStates = [ self.__nodes[nodeId].mappedState for nodeId in adjacentNodes ]
if newNodeID in adjacentNodes:
toState = adjacentStates[1] if adjacentNodes[0] == newNodeID else adjacentStates[0]
self.__showEdgeState(edgeId, edge, ADJACENT_MAPPED_STATES_TO_EDGE_STATE[localState, toState])
elif MAPPED_SECTOR_STATE.BAD in adjacentStates:
    self.__showEdgeState(edgeId, edge, ADJACENT_MAPPED_STATES_TO_EDGE_STATE[MAPPED_SECTOR_STATE.GOOD, MAPPED_SECTOR_STATE.BAD])
else:
    self.__showEdgeState(edgeId, edge, ADJACENT_MAPPED_STATES_TO_EDGE_STATE[MAPPED_SECTOR_STATE.GOOD, MAPPED_SECTOR_STATE.GOOD])
for neighbourId in self.__nodes[newNodeID].neighbours:
edgeId = makeEdgeId(newNodeID, neighbourId)
edge = self.__edges.get(edgeId, None)
if edge and edge.state == SECTOR_EDGE_STATE.DANGER:
if neighbourId != 0:
adjacentSectorState = self.__sectorComponent.getSectorGroupById(self.__sectorComponent.getSectorById(neighbourId).groupID).state
else:
adjacentSectorState = -1
if adjacentSectorState in (SECTOR_STATE.BOMBING, SECTOR_STATE.TRANSITION):
self.__showWarning(newNodeID, neighbourId, WARNING_TYPE.BOMBING)
else:
self.__showWarning(newNodeID, neighbourId, WARNING_TYPE.PROTECTED)
if edgeId in self.__activeWarnings:
self.__showWarning(newNodeID, neighbourId, WARNING_TYPE.NONE)
return
def __showEdgeState(self, edgeId, edge, state, forceUpdate=False):
if forceUpdate:
edge.state = state
self.__visual.showSectorBorder(edgeId, edge.state, self.edges)
return
previousState = edge.state
edge.state = state
if previousState != edge.state:
self.__visual.showSectorBorder(edgeId, edge.state, self.edges)
def __showWarning(self, fromNodeId, toNodeId, warningType):
edgeId = makeEdgeId(fromNodeId, toNodeId)
previousWarning = self.__activeWarnings.get(edgeId, _SectorWarning(None, -1))
if previousWarning.type != warningType:
if warningType in (WARNING_TYPE.NONE, WARNING_TYPE.SAFE):
self.__activeWarnings.pop(edgeId, None)
else:
self.__activeWarnings[edgeId] = _SectorWarning(warningType, toNodeId)
self.onShowSectorWarning(edgeId, warningType, toNodeId)
return
def __onSectorGroupUpdated(self, groupId, state, center, bounds):
node = self.__nodes[groupId]
if node.mappedState is not None:
node.mappedState = SECTOR_STATE_TO_MAPPED_STATE[self.__teamId][state]
playerSectorId = self.__sectorComponent.currentPlayerSectorId
if playerSectorId is not None:
self.__onPlayerSectorGroupChanged(self.__sectorComponent.getSectorById(playerSectorId).groupID)
else:
LOG_WARNING('[EpicSectorWarningComponent] A Sector has been updated but not yet initialized.')
return
def __calcEdgeLine(self, nodeIdA, nodeIdB):
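# Finds the shared border segment between two adjacent sector groups: locate
# one pair of neighbouring sectors across the two groups, use the grid's main
# direction to decide whether the border runs horizontally or vertically,
# then span the narrower group's extent along the facing edge of the wider
# group's bounding box.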
if nodeIdA == nodeIdB:
    return (None, None)
centerA, _, _, dimensionsA = self.__getNodeGeometry(nodeIdA)
centerB, _, _, dimensionsB = self.__getNodeGeometry(nodeIdB)
sectorA = sectorB = None
sectorIdsA = [ sector.sectorID for sector in self.__sectorComponent.getSectorGroupById(nodeIdA).sectors ]
for sectorOfB in self.__sectorComponent.getSectorGroupById(nodeIdB).sectors:
for neighbourOfBId in self.__sectorComponent.getNeighbouringSectorIdsByOwnSectorId(sectorOfB.sectorID):
if neighbourOfBId in sectorIdsA:
sectorA = self.__sectorComponent.getSectorById(neighbourOfBId)
sectorB = sectorOfB
break
if None in (sectorA, sectorB):
return (None, None)
else:
if getArena().arenaType.epicSectorGrid.mainDirection in (AAD.PLUS_Z, AAD.MINUS_Z):
isHorizontal = sectorA.playerGroup == sectorB.playerGroup
else:
isHorizontal = sectorA.IDInPlayerGroup == sectorB.IDInPlayerGroup
comparisonFunc = (lambda a, b: a.x <= b.x) if isHorizontal else (lambda a, b: a.z <= b.z)
shortWidth, shortHeight, longWidth, longHeight, shortCenter, longCenter = (dimensionsA.x,
dimensionsA.z,
dimensionsB.x,
dimensionsB.z,
centerA,
centerB) if comparisonFunc(dimensionsA, dimensionsB) else (dimensionsB.x,
dimensionsB.z,
dimensionsA.x,
dimensionsA.z,
centerB,
centerA)
if isHorizontal:
z = longCenter.z + copysign(longHeight * 0.5, shortCenter.z - longCenter.z)
return (Vector3(shortCenter.x - shortWidth * 0.5, 0, z), Vector3(shortCenter.x + shortWidth * 0.5, 0, z))
x = longCenter.x + copysign(longWidth * 0.5, shortCenter.x - longCenter.x)
return (Vector3(x, 0, shortCenter.z - shortHeight * 0.5), Vector3(x, 0, shortCenter.z + shortHeight * 0.5))
def __getNodeGeometry(self, nodeId):
sectorGroup = self.__sectorComponent.getSectorGroupById(nodeId)
center = sectorGroup.center
minBound, maxBound = sectorGroup.getBound()
return (center,
minBound,
maxBound,
maxBound - minBound)
def __onSectorGroupTransitionTimeChanged(self, sectorGroupId, oldTime, newTime):
self.__transitionEndTimes[sectorGroupId] = newTime
self.__startCountdownTimer(sectorGroupId)
def __startCountdownTimer(self, sectorGroupId):
diffTime = math.ceil(self.__transitionEndTimes[sectorGroupId] - BigWorld.serverTime())
if diffTime >= 0:
self.onTransitionTimerUpdated(sectorGroupId, diffTime)
self.delayCallback(1, self.__tick)
def __tick(self):
transitionTimesToDel = []
for sectorGroupId, endTime in self.__transitionEndTimes.iteritems():
diffTime = math.ceil(endTime - BigWorld.serverTime())
if diffTime >= 0:
    self.onTransitionTimerUpdated(sectorGroupId, diffTime)
else:
    self.onTransitionTimerUpdated(sectorGroupId, -1)
    transitionTimesToDel.append(sectorGroupId)
for groupId in transitionTimesToDel:
del self.__transitionEndTimes[groupId]
return 1.0 if self.__transitionEndTimes else None
class SectorBorderVisualisation(object):
_TEAM_BORDER_DIRS = {EPIC_BATTLE_TEAM_ID.TEAM_ATTACKER: (AAD.PLUS_Z,),
EPIC_BATTLE_TEAM_ID.TEAM_DEFENDER: (AAD.MINUS_Z,)}
def __init__(self, sectorWarningComponent):
self.__sectorVisuals = {}
self.__protectionVisuals = {}
def destroy(self):
for borderVisual in self.__sectorVisuals.values():
borderVisual.destroy()
for borderVisuals in self.__protectionVisuals.values():
for borderVisual in borderVisuals:
borderVisual.destroy()
self.__sectorVisuals = None
self.__protectionVisuals = None
return
def showSectorBorder(self, edgeId, edgeState, edges):
border = self.__sectorVisuals.get(edgeId, None)
if border is None:
self.__sectorVisuals[edgeId] = border = BorderVisual()
fromPos, toPos = edges[edgeId].getEdgePoints()
border.create(fromPos, toPos, edgeState)
border.showState(edgeState)
return
def showProtectionZone(self, zoneId, edgeState, owningTeam, protectionZones):
borders = self.__protectionVisuals.get(zoneId, None)
if borders is None:
borders = []
for borderDir in self._TEAM_BORDER_DIRS[owningTeam]:
visual = BorderVisual()
borders.append(visual)
bounds = protectionZones[zoneId].geometry[1:3]
fromPos, toPos = AAD.getBoundingBoxSegmentByDirection3D(bounds, borderDir)
visual.create(fromPos, toPos, edgeState)
self.__protectionVisuals[zoneId] = borders
for border in borders:
border.showState(edgeState)
return
|
154f65b0e0730e7f5e87eaad10da5ebad3c5c4e5
|
5dc28132a07ec23e7ee925e0117dd56b96825b37
|
/tests/test_unit.py
|
0e149bc0ffc9e6b6cce0eed1d851671b17e57b23
|
[
"Apache-2.0"
] |
permissive
|
clld/wals3
|
70e538bea77c90415467f656fc6265595cc5a22a
|
a9d1a68c5899e13e73893eef3fcccc8fbc076749
|
refs/heads/master
| 2023-06-08T01:58:11.366314
| 2023-06-03T06:08:24
| 2023-06-03T06:08:24
| 7,600,736
| 113
| 16
|
Apache-2.0
| 2021-12-07T16:25:39
| 2013-01-14T08:22:36
|
CSS
|
UTF-8
|
Python
| false
| false
| 479
|
py
|
test_unit.py
|
from clld.db.models.common import Language
def test_Matrix(env, tmp_path):
from wals3.adapters import Matrix
class TestMatrix(Matrix):
def abspath(self, req):
return tmp_path / 'test'
def query(self, req):
return Matrix.query(self, req).filter(Language.pk < 100)
m = TestMatrix(Language, 'wals3', description="Feature values CSV")
m.create(env['request'], verbose=False)
assert tmp_path.joinpath('test').exists()
|
eee273f854704c0801df38c1951c5582a624312f
|
84ae399f0133a32223386ef598ac9ca5fcd1b32e
|
/examples/table/utils/udfs.py
|
f6f8ab9d62f02e3e61bda7c75c91065bddeecad0
|
[
"Apache-2.0"
] |
permissive
|
pyflink/playgrounds
|
49b4601dd9645513d6ebd87fed7f1f8845407bf1
|
cacfa58a8ae4bfe5880f08ca3bdc6b2a75585f72
|
refs/heads/master
| 2023-08-21T15:27:32.892403
| 2023-08-07T06:26:13
| 2023-08-07T06:26:13
| 255,244,417
| 171
| 82
| null | 2023-05-22T16:19:29
| 2020-04-13T06:01:35
|
Shell
|
UTF-8
|
Python
| false
| false
| 142
|
py
|
udfs.py
|
from pyflink.table import DataTypes
from pyflink.table.udf import udf
@udf(result_type=DataTypes.BIGINT())
def add_one(i):
return i + 1
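# Minimal usage sketch (assumes a TableEnvironment named `t_env` created
# elsewhere; it is not part of this file):
#
# t_env.create_temporary_function("add_one", add_one)
# t_env.sql_query("SELECT add_one(id) FROM my_source")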
|
d519d78ac267fa0f22e9b847fc35ac532d30800a
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-cc/huaweicloudsdkcc/v3/model/create_inter_region_bandwidth.py
|
12a495cd55efb50a3a68880cb46a4518edcc50e7
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 6,145
|
py
|
create_inter_region_bandwidth.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateInterRegionBandwidth:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'cloud_connection_id': 'str',
'bandwidth_package_id': 'str',
'bandwidth': 'int',
'inter_region_ids': 'list[str]'
}
attribute_map = {
'cloud_connection_id': 'cloud_connection_id',
'bandwidth_package_id': 'bandwidth_package_id',
'bandwidth': 'bandwidth',
'inter_region_ids': 'inter_region_ids'
}
def __init__(self, cloud_connection_id=None, bandwidth_package_id=None, bandwidth=None, inter_region_ids=None):
"""CreateInterRegionBandwidth
The model defined in huaweicloud sdk
:param cloud_connection_id: Cloud connection instance ID.
:type cloud_connection_id: str
:param bandwidth_package_id: Bandwidth package instance ID.
:type bandwidth_package_id: str
:param bandwidth: Inter-region bandwidth value.
:type bandwidth: int
:param inter_region_ids: Inter-region region IDs.
:type inter_region_ids: list[str]
"""
self._cloud_connection_id = None
self._bandwidth_package_id = None
self._bandwidth = None
self._inter_region_ids = None
self.discriminator = None
self.cloud_connection_id = cloud_connection_id
self.bandwidth_package_id = bandwidth_package_id
self.bandwidth = bandwidth
self.inter_region_ids = inter_region_ids
@property
def cloud_connection_id(self):
"""Gets the cloud_connection_id of this CreateInterRegionBandwidth.
Cloud connection instance ID.
:return: The cloud_connection_id of this CreateInterRegionBandwidth.
:rtype: str
"""
return self._cloud_connection_id
@cloud_connection_id.setter
def cloud_connection_id(self, cloud_connection_id):
"""Sets the cloud_connection_id of this CreateInterRegionBandwidth.
Cloud connection instance ID.
:param cloud_connection_id: The cloud_connection_id of this CreateInterRegionBandwidth.
:type cloud_connection_id: str
"""
self._cloud_connection_id = cloud_connection_id
@property
def bandwidth_package_id(self):
"""Gets the bandwidth_package_id of this CreateInterRegionBandwidth.
Bandwidth package instance ID.
:return: The bandwidth_package_id of this CreateInterRegionBandwidth.
:rtype: str
"""
return self._bandwidth_package_id
@bandwidth_package_id.setter
def bandwidth_package_id(self, bandwidth_package_id):
"""Sets the bandwidth_package_id of this CreateInterRegionBandwidth.
Bandwidth package instance ID.
:param bandwidth_package_id: The bandwidth_package_id of this CreateInterRegionBandwidth.
:type bandwidth_package_id: str
"""
self._bandwidth_package_id = bandwidth_package_id
@property
def bandwidth(self):
"""Gets the bandwidth of this CreateInterRegionBandwidth.
Inter-region bandwidth value.
:return: The bandwidth of this CreateInterRegionBandwidth.
:rtype: int
"""
return self._bandwidth
@bandwidth.setter
def bandwidth(self, bandwidth):
"""Sets the bandwidth of this CreateInterRegionBandwidth.
Inter-region bandwidth value.
:param bandwidth: The bandwidth of this CreateInterRegionBandwidth.
:type bandwidth: int
"""
self._bandwidth = bandwidth
@property
def inter_region_ids(self):
"""Gets the inter_region_ids of this CreateInterRegionBandwidth.
Inter-region region IDs.
:return: The inter_region_ids of this CreateInterRegionBandwidth.
:rtype: list[str]
"""
return self._inter_region_ids
@inter_region_ids.setter
def inter_region_ids(self, inter_region_ids):
"""Sets the inter_region_ids of this CreateInterRegionBandwidth.
Inter-region region IDs.
:param inter_region_ids: The inter_region_ids of this CreateInterRegionBandwidth.
:type inter_region_ids: list[str]
"""
self._inter_region_ids = inter_region_ids
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateInterRegionBandwidth):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
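# Minimal usage sketch (hypothetical IDs, for illustration only):
#
# body = CreateInterRegionBandwidth(
#     cloud_connection_id="cc-example",
#     bandwidth_package_id="pkg-example",
#     bandwidth=10,
#     inter_region_ids=["region-a", "region-b"])
# body.to_dict()  # -> plain dict, ready for JSON serialization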
|
9029911f4832a30c189bdc9ed6f533bf9ae48e65
|
25e99a0af5751865bce1702ee85cc5c080b0715c
|
/design_pattern/src/大話設計模式/design-patterns-py/proxy.py
|
abc79425859cd19b5e95c449749b7db5bb4db0cd
|
[] |
no_license
|
jasonblog/note
|
215837f6a08d07abe3e3d2be2e1f183e14aa4a30
|
4471f95736c60969a718d854cab929f06726280a
|
refs/heads/master
| 2023-05-31T13:02:27.451743
| 2022-04-04T11:28:06
| 2022-04-04T11:28:06
| 35,311,001
| 130
| 67
| null | 2023-02-10T21:26:36
| 2015-05-09T02:04:40
|
C
|
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
proxy.py
|
import abc
class GiveGift(abc.ABC):
@abc.abstractmethod
def __init__(self, sg):
"""init"""
@abc.abstractmethod
def give_dolls(self):
"""give dolls"""
@abc.abstractmethod
def give_flowers(self):
"""give flowers"""
class SchoolGirl:
def __init__(self, n):
self.name = n
class Pursuit(GiveGift):
def __init__(self, sg):
GiveGift.__init__(self, sg)
self.school_girl = sg
def give_dolls(self):
print "give " + self.school_girl.name + " dolls"
def give_flowers(self):
print "give " + self.school_girl.name + " flowers"
class Proxy(GiveGift):
def __init__(self, sg):
GiveGift.__init__(self, sg)
self.pursuit = Pursuit(sg)
def give_dolls(self):
self.pursuit.give_dolls()
def give_flowers(self):
self.pursuit.give_flowers()
if __name__ == "__main__":
school_girl = SchoolGirl("Alice")
proxy = Proxy(school_girl)
proxy.give_flowers()
proxy.give_dolls()
|
abbd8851dfcaaeb92b0d7de723716a64fc1a1adf
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-Cocoa/PyObjCTest/test_foundationerrors.py
|
fd6d97fea3fada3f9d1eeb92962aee581fa85f47
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 6,646
|
py
|
test_foundationerrors.py
|
import Foundation
from PyObjCTools.TestSupport import TestCase, min_os_level
class FoundationErrorsTest(TestCase):
def testConstants(self):
self.assertEqual(Foundation.NSFileNoSuchFileError, 4)
self.assertEqual(Foundation.NSFileLockingError, 255)
self.assertEqual(Foundation.NSFileReadUnknownError, 256)
self.assertEqual(Foundation.NSFileReadNoPermissionError, 257)
self.assertEqual(Foundation.NSFileReadInvalidFileNameError, 258)
self.assertEqual(Foundation.NSFileReadCorruptFileError, 259)
self.assertEqual(Foundation.NSFileReadInapplicableStringEncodingError, 261)
self.assertEqual(Foundation.NSFileReadUnsupportedSchemeError, 262)
self.assertEqual(Foundation.NSFileReadTooLargeError, 263)
self.assertEqual(Foundation.NSFileReadUnknownStringEncodingError, 264)
self.assertEqual(Foundation.NSFileWriteUnknownError, 512)
self.assertEqual(Foundation.NSFileWriteNoPermissionError, 513)
self.assertEqual(Foundation.NSFileWriteInvalidFileNameError, 514)
self.assertEqual(Foundation.NSFileWriteInapplicableStringEncodingError, 517)
self.assertEqual(Foundation.NSFileWriteUnsupportedSchemeError, 518)
self.assertEqual(Foundation.NSFileWriteOutOfSpaceError, 640)
self.assertEqual(Foundation.NSKeyValueValidationError, 1024)
self.assertEqual(Foundation.NSUserCancelledError, 3072)
self.assertEqual(Foundation.NSExecutableNotLoadableError, 3584)
self.assertEqual(Foundation.NSExecutableArchitectureMismatchError, 3585)
self.assertEqual(Foundation.NSExecutableRuntimeMismatchError, 3586)
self.assertEqual(Foundation.NSExecutableLoadError, 3587)
self.assertEqual(Foundation.NSExecutableLinkError, 3588)
self.assertEqual(Foundation.NSFileErrorMinimum, 0)
self.assertEqual(Foundation.NSFileErrorMaximum, 1023)
self.assertEqual(Foundation.NSValidationErrorMinimum, 1024)
self.assertEqual(Foundation.NSValidationErrorMaximum, 2047)
self.assertEqual(Foundation.NSExecutableErrorMinimum, 3584)
self.assertEqual(Foundation.NSExecutableErrorMaximum, 3839)
self.assertEqual(Foundation.NSFormattingErrorMinimum, 2048)
self.assertEqual(Foundation.NSFormattingErrorMaximum, 2559)
self.assertEqual(Foundation.NSFormattingError, 2048)
self.assertEqual(Foundation.NSCoderInvalidValueError, 4866)
self.assertEqual(Foundation.NSBundleOnDemandResourceOutOfSpaceError, 4992)
self.assertEqual(
Foundation.NSBundleOnDemandResourceExceededMaximumSizeError, 4993
)
self.assertEqual(Foundation.NSBundleOnDemandResourceInvalidTagError, 4994)
@min_os_level("10.6")
def testConstants10_6(self):
self.assertEqual(Foundation.NSFileWriteVolumeReadOnlyError, 642)
self.assertEqual(Foundation.NSPropertyListReadCorruptError, 3840)
self.assertEqual(Foundation.NSPropertyListReadUnknownVersionError, 3841)
self.assertEqual(Foundation.NSPropertyListReadStreamError, 3842)
self.assertEqual(Foundation.NSPropertyListWriteStreamError, 3851)
self.assertEqual(Foundation.NSPropertyListErrorMinimum, 3840)
self.assertEqual(Foundation.NSPropertyListErrorMaximum, 4095)
@min_os_level("10.7")
def testConstants10_7(self):
self.assertEqual(Foundation.NSFileWriteFileExistsError, 516)
@min_os_level("10.8")
def testConstants10_8(self):
self.assertEqual(Foundation.NSFeatureUnsupportedError, 3328)
self.assertEqual(Foundation.NSXPCConnectionInterrupted, 4097)
self.assertEqual(Foundation.NSXPCConnectionInvalid, 4099)
self.assertEqual(Foundation.NSXPCConnectionReplyInvalid, 4101)
self.assertEqual(Foundation.NSXPCConnectionCodeSigningRequirementFailure, 4102)
self.assertEqual(Foundation.NSXPCConnectionErrorMinimum, 4096)
self.assertEqual(Foundation.NSXPCConnectionErrorMaximum, 4224)
@min_os_level("10.9")
def testConstants10_9(self):
self.assertEqual(Foundation.NSUbiquitousFileUnavailableError, 4353)
self.assertEqual(Foundation.NSUbiquitousFileNotUploadedDueToQuotaError, 4354)
self.assertEqual(Foundation.NSUbiquitousFileUbiquityServerNotAvailable, 4355)
self.assertEqual(Foundation.NSUbiquitousFileErrorMinimum, 4352)
self.assertEqual(Foundation.NSUbiquitousFileErrorMaximum, 4607)
@min_os_level("10.10")
def testConstants10_10(self):
self.assertEqual(Foundation.NSUserActivityHandoffFailedError, 4608)
self.assertEqual(Foundation.NSUserActivityConnectionUnavailableError, 4609)
self.assertEqual(Foundation.NSUserActivityRemoteApplicationTimedOutError, 4610)
self.assertEqual(Foundation.NSUserActivityHandoffUserInfoTooLargeError, 4611)
self.assertEqual(Foundation.NSUserActivityErrorMinimum, 4608)
self.assertEqual(Foundation.NSUserActivityErrorMaximum, 4863)
self.assertEqual(Foundation.NSPropertyListWriteInvalidError, 3852)
@min_os_level("10.11")
def testConstants10_11(self):
self.assertEqual(Foundation.NSFileManagerUnmountUnknownError, 768)
self.assertEqual(Foundation.NSFileManagerUnmountBusyError, 769)
self.assertEqual(Foundation.NSCoderReadCorruptError, 4864)
self.assertEqual(Foundation.NSCoderValueNotFoundError, 4865)
self.assertEqual(Foundation.NSCoderErrorMinimum, 4864)
self.assertEqual(Foundation.NSCoderErrorMaximum, 4991)
self.assertEqual(Foundation.NSBundleErrorMinimum, 4992)
self.assertEqual(Foundation.NSBundleErrorMaximum, 5119)
@min_os_level("10.12")
def testConstants10_12(self):
self.assertEqual(Foundation.NSCloudSharingNetworkFailureError, 5120)
self.assertEqual(Foundation.NSCloudSharingQuotaExceededError, 5121)
self.assertEqual(Foundation.NSCloudSharingTooManyParticipantsError, 5122)
self.assertEqual(Foundation.NSCloudSharingConflictError, 5123)
self.assertEqual(Foundation.NSCloudSharingNoPermissionError, 5124)
self.assertEqual(Foundation.NSCloudSharingOtherError, 5375)
self.assertEqual(Foundation.NSCloudSharingErrorMinimum, 5120)
self.assertEqual(Foundation.NSCloudSharingErrorMaximum, 5375)
@min_os_level("10.15")
def testConstants10_15(self):
self.assertEqual(Foundation.NSCompressionFailedError, 5376)
self.assertEqual(Foundation.NSDecompressionFailedError, 5377)
self.assertEqual(Foundation.NSCompressionErrorMinimum, 5376)
self.assertEqual(Foundation.NSCompressionErrorMaximum, 5503)
|
6a6d03e942edd30010f4b48a977498f52d513510
|
c5311176cd07f267fb1ca4f9cd71b308ed0778c5
|
/dygie/training/event_metrics.py
|
5845a5681049b7845574a2210e5789142c21d9c2
|
[
"MIT"
] |
permissive
|
dwadden/dygiepp
|
1a71885b0588bb5f0997dec13b27ebfd30169e7c
|
ab764cd0d48b7c430a78a1edddf5acaeec13c109
|
refs/heads/master
| 2023-07-27T19:30:00.399646
| 2023-07-19T20:52:06
| 2023-07-19T20:52:06
| 171,385,430
| 534
| 129
|
MIT
| 2023-07-19T20:52:08
| 2019-02-19T01:48:41
|
Python
|
UTF-8
|
Python
| false
| false
| 6,731
|
py
|
event_metrics.py
|
from overrides import overrides
from collections import Counter
from allennlp.training.metrics.metric import Metric
from dygie.training.f1 import compute_f1
def _invert_arguments(arguments, triggers):
"""
For scoring the argument, we don't need the trigger spans to match exactly. We just need the
trigger label corresponding to the predicted trigger span to be correct.
"""
# Can't use a dict because multiple triggers could share the same argument.
inverted = set()
for k, v in arguments.items():
if k[0] in triggers: # If it's not, the trigger this arg points to is null. TODO(dwadden) check.
trigger_label = triggers[k[0]]
to_append = (k[1], trigger_label, v)
inverted.add(to_append)
return inverted
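# Illustrative example (hypothetical values): with triggers {3: "Attack"} and
# arguments {(3, (5, 6)): "Victim"}, the result is {((5, 6), "Attack", "Victim")}.
# Replacing the trigger token index with its label is what lets argument
# scoring ignore exact trigger-span matches.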
# TODO(dwadden) Clean this up.
class EventMetrics(Metric):
"""
Computes precision, recall, and micro-averaged F1 for triggers and arguments.
"""
def __init__(self):
self.reset()
@overrides
def __call__(self, predicted_events_list, metadata_list):
for predicted_events, metadata in zip(predicted_events_list, metadata_list):
# Trigger scoring.
predicted_triggers = predicted_events["trigger_dict"]
gold_triggers = metadata.events.trigger_dict
self._score_triggers(predicted_triggers, gold_triggers)
# Argument scoring.
predicted_arguments = predicted_events["argument_dict"]
gold_arguments = metadata.events.argument_dict
self._score_arguments(
predicted_triggers, gold_triggers, predicted_arguments, gold_arguments)
def _score_triggers(self, predicted_triggers, gold_triggers):
self._gold_triggers += len(gold_triggers)
self._predicted_triggers += len(predicted_triggers)
for token_ix, pred in predicted_triggers.items():
label = pred[0]
# Check whether the offsets match, and whether the labels match.
if token_ix in gold_triggers:
self._matched_trigger_ids += 1
if gold_triggers[token_ix] == label:
self._matched_trigger_classes += 1
def _score_arguments(self, predicted_triggers, gold_triggers, predicted_arguments, gold_arguments):
# Note that the index of the trigger doesn't actually need to be correct to get full credit;
# the event type and event role need to be correct (see Sec. 3 of paper).
def format(arg_dict, trigger_dict, prediction=False):
# Make it a list of [index, event_type, arg_label].
res = []
for (trigger_ix, arg_ix), label in arg_dict.items():
# If it doesn't match a trigger, don't predict it (enforced in decoding).
if trigger_ix not in trigger_dict:
continue
event_type = trigger_dict[trigger_ix]
# TODO(dwadden) This is clunky; it's because predictions have confidence scores.
if prediction:
event_type = event_type[0]
label = label[0]
res.append((arg_ix, event_type, label))
return res
formatted_gold_arguments = format(gold_arguments, gold_triggers, prediction=False)
formatted_predicted_arguments = format(predicted_arguments, predicted_triggers, prediction=True)
self._gold_arguments += len(formatted_gold_arguments)
self._predicted_arguments += len(formatted_predicted_arguments)
# Go through each predicted arg and look for a match.
for entry in formatted_predicted_arguments:
# No credit if not associated with a predicted trigger.
class_match = int(any([entry == gold for gold in formatted_gold_arguments]))
id_match = int(any([entry[:2] == gold[:2] for gold in formatted_gold_arguments]))
self._matched_argument_classes += class_match
self._matched_argument_ids += id_match
@overrides
def get_metric(self, reset=False):
res = {}
# Triggers
res["trig_id_precision"], res["trig_id_recall"], res["trig_id_f1"] = compute_f1(
self._predicted_triggers, self._gold_triggers, self._matched_trigger_ids)
res["trig_class_precision"], res["trig_class_recall"], res["trig_class_f1"] = compute_f1(
self._predicted_triggers, self._gold_triggers, self._matched_trigger_classes)
# Arguments
res["arg_id_precision"], res["arg_id_recall"], res["arg_id_f1"] = compute_f1(
self._predicted_arguments, self._gold_arguments, self._matched_argument_ids)
res["arg_class_precision"], res["arg_class_recall"], res["arg_class_f1"] = compute_f1(
self._predicted_arguments, self._gold_arguments, self._matched_argument_classes)
# Reset counts if at end of epoch.
if reset:
self.reset()
return res
@overrides
def reset(self):
self._gold_triggers = 0
self._predicted_triggers = 0
self._matched_trigger_ids = 0
self._matched_trigger_classes = 0
self._gold_arguments = 0
self._predicted_arguments = 0
self._matched_argument_ids = 0
self._matched_argument_classes = 0
class ArgumentStats(Metric):
"""
Compute the fraction of predicted event arguments that are associated with multiple triggers.
"""
def __init__(self):
self.reset()
@overrides
def __call__(self, predicted_events_list):
for predicted_events in predicted_events_list:
predicted_arguments = _invert_arguments(predicted_events["argument_dict"],
predicted_events["trigger_dict"])
# Count how many times each span appears as an argument.
span_counts = Counter()
for prediction in predicted_arguments:
span_counts[prediction[0]] += 1
# Count how many spans appear more than once.
repeated = {k: v for k, v in span_counts.items() if v > 1}
self._total_arguments += len(span_counts)
self._repeated_arguments += len(repeated)
@overrides
def get_metric(self, reset=False):
# Fraction of event arguments associated with multiple triggers.
args_multiple = (self._repeated_arguments / self._total_arguments
if self._total_arguments
else 0)
if reset:
self.reset()
res = dict(args_multiple=args_multiple)
return res
@overrides
def reset(self):
self._total_arguments = 0
self._repeated_arguments = 0
|
09ff9754ca49e3597bd507cd886b3d0e3d4814cf
|
d0f1472c167339f1e5267f404a5b73e0c180b754
|
/cpca/exceptions.py
|
221bc6c76ce836317975bf6d23800a81fe35d237
|
[
"MIT"
] |
permissive
|
DQinYuan/chinese_province_city_area_mapper
|
a756670401ac648b4b82991ee77651262790aca6
|
9bdfd6e8694b44ca3de0730bd9122c6df5916133
|
refs/heads/master
| 2023-09-01T13:48:16.396552
| 2021-12-12T09:20:04
| 2021-12-12T09:20:04
| 123,792,414
| 1,558
| 366
|
MIT
| 2022-12-08T11:46:07
| 2018-03-04T13:43:06
|
Python
|
UTF-8
|
Python
| false
| false
| 418
|
py
|
exceptions.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 28 18:55:58 2018
Some custom exception classes
@author: 燃烧杯
"""
class CPCAException(Exception):
pass
class PlaceTypeNotExistException(CPCAException):
pass
class InputTypeNotSuportException(CPCAException):
input_type = \
"""
The input should be a pandas.core.frame.DataFrame in the format:
|province         |city          |district          |
|Jiangsu Province |Yangzhou City |Hanjiang District |
"""
pass
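# Usage sketch (illustrative; the raising code lives elsewhere in cpca):
# raise InputTypeNotSuportException(InputTypeNotSuportException.input_type)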
|
70071d746006bad93d8a6fd8b3368c8bae0cd8e3
|
e0ed4496e94263643cedea56bfcdec1140ced8d6
|
/examples/competitive/reduce_iris_sample_size_lvq.py
|
de4f9612ea3115e90d7bb41f768f099942b34c3b
|
[
"MIT"
] |
permissive
|
itdxer/neupy
|
6307666271807bd9028e3e60dd2536a544ed8421
|
317ed4204b5239e8be2b94a95fe3157c5f9edc65
|
refs/heads/master
| 2023-06-13T23:09:36.487633
| 2023-01-03T21:24:56
| 2023-01-03T21:24:56
| 41,323,480
| 840
| 206
|
MIT
| 2022-12-16T16:32:10
| 2015-08-24T19:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 944
|
py
|
reduce_iris_sample_size_lvq.py
|
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets
from neupy import algorithms, utils
utils.reproducible()
def plot_scattermatrix(data, target):
df = pd.DataFrame(data)
df['target'] = target
return sns.pairplot(df, hue='target', diag_kind='hist')
if __name__ == '__main__':
dataset = datasets.load_iris()
data, target = dataset.data, dataset.target
lvqnet = algorithms.LVQ3(
# number of features
n_inputs=4,
# number of data points that we want
# to have at the end
n_subclasses=30,
# number of classes
n_classes=3,
verbose=True,
show_epoch=20,
step=0.001,
n_updates_to_stepdrop=150 * 100,
)
lvqnet.train(data, target, epochs=100)
plot_scattermatrix(data, target)
plot_scattermatrix(data=lvqnet.weight, target=lvqnet.subclass_to_class)
plt.show()
|
7441c5885489b30e8d75bc36943a524c5a9585e4
|
24c8bf613f848419f6f96a09e3093e997e14aad5
|
/linty_fresh/linters/buck_unittest.py
|
2d0467513fb7ca0225bb1e41e75d7eef4e48b133
|
[
"Apache-2.0"
] |
permissive
|
lyft/linty_fresh
|
32743f40cecdaaf99097a56ba01827d69b6e5eb3
|
7fc46ecd91f64c89bd45b0d136d037dbd7cc2be4
|
refs/heads/master
| 2023-08-27T20:55:48.189059
| 2022-12-13T04:09:02
| 2022-12-13T04:09:02
| 48,265,704
| 207
| 32
|
Apache-2.0
| 2023-06-24T01:09:01
| 2015-12-19T02:37:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
buck_unittest.py
|
from typing import Set
from xml.etree import ElementTree
from linty_fresh.problem import TestProblem
def parse(contents: str, **kwargs) -> Set[TestProblem]:
result = set()
try:
root = ElementTree.fromstring(contents)
except ElementTree.ParseError:
return result
for test in root.findall('test'):
if test.get('status') == 'FAIL':
test_group = test.get('name')
for tr in test.findall('testresult'):
message = None
stack_trace = None
for m in tr.findall('message'):
if m.text:
message = m.text
for st in tr.findall('stacktrace'):
if st.text:
stack_trace = st.text
if stack_trace and message:
test_name = tr.get('name')
result.add(TestProblem(
test_group,
test_name,
message,
stack_trace
))
return result
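# Sketch of the test-result XML shape this parser handles, reconstructed from
# the lookups above (the root element name is never checked, so it is shown
# here as a placeholder):
#
# <testruns>
#   <test name="com.example.FooTest" status="FAIL">
#     <testresult name="testBar">
#       <message>expected X but got Y</message>
#       <stacktrace>...</stacktrace>
#     </testresult>
#   </test>
# </testruns>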
|
eee9415256945967263793f84472eed86da345ff
|
d7afe289dcd2b5865d836d9fdfe82d1303e0ea66
|
/plugins/modules/panos_tag_object.py
|
6ebe31a56289e62069e2d1b89d6f098d1035a3d3
|
[
"Apache-2.0"
] |
permissive
|
PaloAltoNetworks/pan-os-ansible
|
e221fefce26e938969f5affa5eae644af69b7b0f
|
ff65f0fda622f654d9b837bd8b1a289eb3b38fad
|
refs/heads/develop
| 2023-09-05T11:10:08.357707
| 2023-08-15T19:16:50
| 2023-08-15T19:16:50
| 238,510,689
| 171
| 93
|
Apache-2.0
| 2023-09-14T09:48:48
| 2020-02-05T17:39:28
|
Python
|
UTF-8
|
Python
| false
| false
| 6,859
|
py
|
panos_tag_object.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2018 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: panos_tag_object
short_description: Manage tag objects on PAN-OS devices.
description:
- Manage tag objects on PAN-OS devices.
author: "Michael Richardson (@mrichardson03)"
version_added: '1.0.0'
requirements:
- pan-python can be obtained from PyPI U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPI U(https://pypi.python.org/pypi/pandevice)
notes:
- Panorama is supported.
- Check mode is supported.
extends_documentation_fragment:
- paloaltonetworks.panos.fragments.transitional_provider
- paloaltonetworks.panos.fragments.vsys
- paloaltonetworks.panos.fragments.device_group
- paloaltonetworks.panos.fragments.network_resource_module_state
- paloaltonetworks.panos.fragments.deprecated_commit
- paloaltonetworks.panos.fragments.gathered_filter
options:
name:
description:
- Name of the tag.
type: str
color_value:
description:
- The XML value of the color for this tag.
- Mutually exclusive with I(color).
type: str
color:
description:
- Color for the tag.
- Mutually exclusive with I(color_value).
- NOTE that this param is not available for I(gathered_filter) as it is a meta-param.
type: str
choices:
- red
- green
- blue
- yellow
- copper
- orange
- purple
- gray
- light green
- cyan
- light gray
- blue gray
- lime
- black
- gold
- brown
- olive
- maroon
- red-orange
- yellow-orange
- forest green
- turquoise blue
- azure blue
- cerulean blue
- midnight blue
- medium blue
- cobalt blue
- violet blue
- blue violet
- medium violet
- medium rose
- lavender
- orchid
- thistle
- peach
- salmon
- magenta
- red violet
- mahogany
- burnt sienna
- chestnut
comments:
description:
- Comments for the tag.
type: str
"""
EXAMPLES = """
- name: Create tag object 'Prod'
panos_tag_object:
provider: '{{ provider }}'
name: 'Prod'
color: 'red'
comments: 'Prod Environment'
- name: Remove tag object 'Prod'
panos_tag_object:
provider: '{{ provider }}'
name: 'Prod'
state: 'absent'
"""
RETURN = """
# Default return values
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.paloaltonetworks.panos.plugins.module_utils.panos import (
ConnectionHelper,
get_connection,
)
COLOR_NAMES = [
"",
"red",
"green",
"blue",
"yellow",
"copper",
"orange",
"purple",
"gray",
"light green",
"cyan",
"light gray",
"blue gray",
"lime",
"black",
"gold",
"brown",
"olive",
"",
"maroon",
"red-orange",
"yellow-orange",
"forest green",
"turquoise blue",
"azure blue",
"cerulean blue",
"midnight blue",
"medium blue",
"cobalt blue",
"violet blue",
"blue violet",
"medium violet",
"medium rose",
"lavender",
"orchid",
"thistle",
"peach",
"salmon",
"magenta",
"red violet",
"mahogany",
"burnt sienna",
"chestnut",
]
def to_color(color_value):
"""Returns the color for the given color_value."""
if color_value is None or not color_value.startswith("color"):
return None
cv = int(color_value[5:])
return COLOR_NAMES[cv]
class Helper(ConnectionHelper):
def initial_handling(self, module):
if module.params["color"] and module.params["color_value"]:
module.fail_json(msg="Specify either 'color' or 'color_value', not both")
def spec_handling(self, spec, module):
if (
module.params["state"] not in ("present", "replaced")
or not module.params["color"]
):
return
for num, x in enumerate(COLOR_NAMES):
if module.params["color"] == x:
spec["color"] = "color{0}".format(num)
break
else:
module.fail_json(
msg="Unable to find color_value for color: {0}".format(
module.params["color"]
)
)
def post_state_handling(self, obj, result, module):
if "before" in result and result["before"] is not None:
result["before"]["color"] = to_color(result["before"]["color_value"])
if "after" in result and result["after"] is not None:
result["after"]["color"] = to_color(result["after"]["color_value"])
if "gathered" in result:
if isinstance(result["gathered"], dict):
result["gathered"]["color"] = to_color(
result["gathered"]["color_value"]
)
elif isinstance(result["gathered"], list):
for x in result["gathered"]:
x["color"] = to_color(x["color_value"])
def main():
helper = get_connection(
helper_cls=Helper,
vsys=True,
device_group=True,
with_network_resource_module_state=True,
with_gathered_filter=True,
with_classic_provider_spec=True,
with_commit=True,
sdk_cls=("objects", "Tag"),
sdk_params=dict(
name=dict(required=True),
color_value=dict(sdk_param="color"),
comments=dict(),
),
extra_params=dict(
color=dict(choices=[x for x in COLOR_NAMES if x]),
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
required_one_of=helper.required_one_of,
supports_check_mode=True,
)
helper.process(module)
if __name__ == "__main__":
main()
|
fcfb2545fbde05f342442b1d85620b3dc21aa324
|
f6277ff7677db4568c1c65df70f1ea5e6bb94b2a
|
/soccerapi/api/kambi.py
|
1b79402137fde4ac80b4ac83aec549db0e924744
|
[
"MIT"
] |
permissive
|
S1M0N38/soccerapi
|
0823b5234761a1caaf73be210bf161b9c6876366
|
88c6733cf65f55145d7d07910756241f7710786f
|
refs/heads/master
| 2022-12-13T07:39:46.505665
| 2022-02-16T13:49:41
| 2022-02-16T13:49:41
| 232,615,041
| 158
| 41
|
MIT
| 2022-12-08T08:48:46
| 2020-01-08T17:07:31
|
Python
|
UTF-8
|
Python
| false
| false
| 4,753
|
py
|
kambi.py
|
from typing import Dict, List
class ParserKambi:
"""888sport, unibet and other use the same CDN (eu-offering.kambicdn)
so the requetsting and parsing process is exaclty the same.
This class implements parsers for variuos category for Kambi."""
def full_time_result(self, data: Dict) -> List:
""" Parse the raw json requests for full_time_result """
odds = []
for event in data['events']:
if event['event'].get('state') == 'STARTED':
continue
try:
full_time_result = {
'1': event['betOffers'][0]['outcomes'][0].get('odds'),
'X': event['betOffers'][0]['outcomes'][1].get('odds'),
'2': event['betOffers'][0]['outcomes'][2].get('odds'),
}
except IndexError:
full_time_result = None
odds.append(
{
'time': event['event'].get('start'),
'home_team': event['event'].get('homeName'),
'away_team': event['event'].get('awayName'),
'odds': full_time_result,
}
)
return odds
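# Each parser returns a list of dicts shaped like (illustrative values):
# {'time': '2022-01-01T15:00:00Z', 'home_team': 'Home', 'away_team': 'Away',
#  'odds': {'1': 2100, 'X': 3400, '2': 3750}}
# The integers are the raw Kambi 'odds' values (decimal odds scaled by 1000,
# so 2100 means 2.10 -- an assumption about the upstream API, not enforced
# here); 'odds' is None when the expected bet offer is missing.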
def under_over(self, data: Dict) -> List:
""" Parse the raw json requests for under_over """
odds = []
for event in data['events']:
if event['event'].get('state') == 'STARTED':
continue
try:
under_over = {
'O2.5': event['betOffers'][0]['outcomes'][0].get('odds'),
'U2.5': event['betOffers'][0]['outcomes'][1].get('odds'),
}
except IndexError:
under_over = None
odds.append(
{
'time': event['event'].get('start'),
'home_team': event['event'].get('homeName'),
'away_team': event['event'].get('awayName'),
'odds': under_over,
}
)
return odds
def both_teams_to_score(self, data: Dict) -> List:
""" Parse the raw json requests for both_teams_to_score """
odds = []
for event in data['events']:
if event['event'].get('state') == 'STARTED':
continue
try:
both_teams_to_score = {
'yes': event['betOffers'][0]['outcomes'][0].get('odds'),
'no': event['betOffers'][0]['outcomes'][1].get('odds'),
}
except IndexError:
both_teams_to_score = None
odds.append(
{
'time': event['event'].get('start'),
'home_team': event['event'].get('homeName'),
'away_team': event['event'].get('awayName'),
'odds': both_teams_to_score,
}
)
return odds
def double_chance(self, data: Dict) -> List:
""" Parse the raw json requests for double chance """
odds = []
for event in data['events']:
if event['event'].get('state') == 'STARTED':
continue
try:
double_chance = {
'1X': event['betOffers'][0]['outcomes'][0].get('odds'),
'12': event['betOffers'][0]['outcomes'][1].get('odds'),
'2X': event['betOffers'][0]['outcomes'][2].get('odds'),
}
except IndexError:
double_chance = None
odds.append(
{
'time': event['event'].get('start'),
'home_team': event['event'].get('homeName'),
'away_team': event['event'].get('awayName'),
'odds': double_chance,
}
)
return odds
# Auxiliary methods
def _parse_competitions(self, base_url: str, data: Dict) -> Dict:
""" Parse the raw json request for competitions """
table = {}
for sport in data['group']['groups']:
if sport['termKey'] == 'football':
football = sport['groups']
break
for country in football:
if country['name'] not in table:
table[country['name']] = {}
if 'groups' in country:
for league in country['groups']:
link = f'{base_url}{country["termKey"]}/{league["termKey"]}/'
table[country['name']][league['name']] = link
else:
link = f'{base_url}{country["termKey"]}/'
table[country['name']][country['name']] = link
return table
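# The returned mapping has the shape {country: {league: url}} (illustrative):
# {'Italy': {'Serie A': base_url + 'italy/serie_a/'}}
# Countries without sub-groups map to a single entry keyed by the country
# name itself.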
|
c7a0f2156bd95e6341e8d6a6083ffa129376e276
|
99199db3f78a344e72b281c71c690518ae07375a
|
/octavia/tests/unit/common/test_data_models.py
|
2ceac252044c36feb6a4646778833b6e3c5571b4
|
[
"Apache-2.0"
] |
permissive
|
openstack/octavia
|
3faf2afe2ade5bd3978bb3a0558d2eeefc648ba2
|
0426285a41464a5015494584f109eed35a0d44db
|
refs/heads/master
| 2023-09-01T20:12:48.272344
| 2023-08-31T17:24:04
| 2023-08-31T17:24:04
| 21,018,188
| 147
| 180
|
Apache-2.0
| 2021-03-30T12:34:30
| 2014-06-19T22:47:19
|
Python
|
UTF-8
|
Python
| false
| false
| 18,583
|
py
|
test_data_models.py
|
# Copyright 2018 Rackspace US Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import json
import random
from oslo_utils import uuidutils
from octavia.common import constants
from octavia.common import data_models
import octavia.tests.unit.base as base
class TestDataModels(base.TestCase):
def setUp(self):
self.LB_ID = uuidutils.generate_uuid()
self.LISTENER_ID = uuidutils.generate_uuid()
self.PROJECT_ID = uuidutils.generate_uuid()
self.SERVER_GROUP_ID = uuidutils.generate_uuid()
self.CREATED_AT = datetime.datetime.now()
self.UPDATED_AT = datetime.datetime.utcnow()
self.VIP_IP = '192.0.2.10'
self.VIP_SUBNET_ID = uuidutils.generate_uuid()
self.VIP_NETWORK_ID = uuidutils.generate_uuid()
self.VIP_PORT_ID = uuidutils.generate_uuid()
self.VIP_QOS_ID = uuidutils.generate_uuid()
self.POOL_ID = uuidutils.generate_uuid()
self.AMP_ID = uuidutils.generate_uuid()
self.COMPUTE_ID = uuidutils.generate_uuid()
self.IMAGE_ID = uuidutils.generate_uuid()
self.COMPUTE_FLAVOR = uuidutils.generate_uuid()
self.TLS_CONTAINER_ID = uuidutils.generate_uuid()
self.LB_obj = data_models.LoadBalancer(
id=self.LB_ID,
project_id=self.PROJECT_ID,
name='test-lb',
description='test-lb-description',
provisioning_status='great',
operating_status='even-better',
enabled=True,
vip=None,
vrrp_group=1,
topology='infinite',
listeners=[],
amphorae=[],
pools=[],
server_group_id=self.SERVER_GROUP_ID,
created_at=self.CREATED_AT,
updated_at=self.UPDATED_AT)
self.VIP_obj = data_models.Vip(
load_balancer_id=self.LB_ID,
ip_address=self.VIP_IP,
subnet_id=self.VIP_SUBNET_ID,
network_id=self.VIP_NETWORK_ID,
port_id=self.VIP_PORT_ID,
qos_policy_id=self.VIP_QOS_ID)
self.POOL_obj = data_models.Pool(
id=self.POOL_ID,
project_id=self.PROJECT_ID,
name='test-pool',
description='test-pool-description',
load_balancer_id=self.LB_ID,
load_balancer=None,
protocol='avian',
lb_algorithm='UseAllofThem',
enabled=True,
provisioning_status='great',
operating_status='even-better',
members=[],
health_monitor=None,
session_persistence=None,
listeners=[],
l7policies=[],
created_at=self.CREATED_AT,
updated_at=self.UPDATED_AT)
self.SP_obj = data_models.SessionPersistence(
pool_id=self.POOL_ID,
type='adhesive',
cookie_name='chocolate',
pool=None)
self.AMP_obj = data_models.Amphora(
id=self.AMP_ID,
load_balancer_id=self.LB_ID,
compute_id=self.COMPUTE_ID,
status=constants.ACTIVE,
lb_network_ip=None,
vrrp_ip=None,
ha_ip=None,
vrrp_port_id=None,
ha_port_id=self.VIP_PORT_ID,
load_balancer=self.LB_obj,
role=constants.ROLE_MASTER,
cert_expiration=None,
cert_busy=False,
vrrp_interface=None,
vrrp_id=None,
vrrp_priority=constants.ROLE_MASTER_PRIORITY,
cached_zone=None,
created_at=self.CREATED_AT,
updated_at=self.UPDATED_AT,
image_id=self.IMAGE_ID,
compute_flavor=self.COMPUTE_FLAVOR
)
self.QUOTA_obj = data_models.Quotas(
project_id=self.PROJECT_ID,
load_balancer=None,
listener=None,
pool=None,
health_monitor=None,
member=None,
l7policy=None,
l7rule=None,
in_use_health_monitor=None,
in_use_listener=None,
in_use_load_balancer=None,
in_use_member=None,
in_use_pool=None,
in_use_l7policy=None,
in_use_l7rule=None
)
super().setUp()
def test_LoadBalancer_update(self):
new_id = uuidutils.generate_uuid()
new_project_id = uuidutils.generate_uuid()
new_server_group_id = uuidutils.generate_uuid()
new_created_at = self.CREATED_AT + datetime.timedelta(minutes=5)
new_updated_at = self.UPDATED_AT + datetime.timedelta(minutes=10)
new_name = 'new-test-lb'
new_description = 'new-test-lb-description'
new_provisioning_status = 'new-great'
new_operating_status = 'new-even-better'
new_enabled = False
new_vrrp_group = 2
new_topology = 'new-infinite'
reference_LB_obj = data_models.LoadBalancer(
id=new_id,
project_id=new_project_id,
name=new_name,
description=new_description,
provisioning_status=new_provisioning_status,
operating_status=new_operating_status,
enabled=new_enabled,
vip=None,
vrrp_group=new_vrrp_group,
topology=new_topology,
listeners=[],
amphorae=[],
pools=[],
server_group_id=new_server_group_id,
created_at=new_created_at,
updated_at=new_updated_at)
update_dict = {
'id': new_id,
'project_id': new_project_id,
'name': new_name,
'description': new_description,
'provisioning_status': new_provisioning_status,
'operating_status': new_operating_status,
'enabled': new_enabled,
'vrrp_group': new_vrrp_group,
'topology': new_topology,
'server_group_id': new_server_group_id,
'created_at': new_created_at,
'updated_at': new_updated_at
}
test_LB_obj = copy.deepcopy(self.LB_obj)
test_LB_obj.update(update_dict)
self.assertEqual(reference_LB_obj, test_LB_obj)
def test_LoadBalancer_update_add_vip(self):
new_ip = '192.0.2.44'
new_subnet_id = uuidutils.generate_uuid()
new_network_id = uuidutils.generate_uuid()
new_port_id = uuidutils.generate_uuid()
new_qos_id = uuidutils.generate_uuid()
reference_VIP_obj = data_models.Vip(
load_balancer_id=self.LB_ID,
ip_address=new_ip,
subnet_id=new_subnet_id,
network_id=new_network_id,
port_id=new_port_id,
load_balancer=None,
qos_policy_id=new_qos_id
)
update_dict = {
'vip': {
'ip_address': new_ip,
'subnet_id': new_subnet_id,
'network_id': new_network_id,
'port_id': new_port_id,
'load_balancer': None,
'qos_policy_id': new_qos_id
}
}
test_LB_obj = copy.deepcopy(self.LB_obj)
test_LB_obj.update(update_dict)
self.assertEqual(reference_VIP_obj, test_LB_obj.vip)
def test_LoadBalancer_update_vip_update(self):
new_id = uuidutils.generate_uuid()
new_ip = '192.0.2.44'
new_subnet_id = uuidutils.generate_uuid()
new_network_id = uuidutils.generate_uuid()
new_port_id = uuidutils.generate_uuid()
new_qos_id = uuidutils.generate_uuid()
reference_VIP_obj = data_models.Vip(
load_balancer_id=new_id,
ip_address=new_ip,
subnet_id=new_subnet_id,
network_id=new_network_id,
port_id=new_port_id,
qos_policy_id=new_qos_id
)
update_dict = {
'vip': {
'load_balancer_id': new_id,
'ip_address': new_ip,
'subnet_id': new_subnet_id,
'network_id': new_network_id,
'port_id': new_port_id,
'qos_policy_id': new_qos_id
}
}
test_LB_obj = copy.deepcopy(self.LB_obj)
test_LB_obj.vip = copy.deepcopy(self.VIP_obj)
test_LB_obj.update(update_dict)
self.assertEqual(reference_VIP_obj, test_LB_obj.vip)
def test_Pool_update(self):
new_id = uuidutils.generate_uuid()
new_project_id = uuidutils.generate_uuid()
new_name = 'new-test-pool'
new_description = 'new-test-pool-description'
new_lb_id = uuidutils.generate_uuid()
new_protocol = 'sneaker'
new_lb_algorithm = 'JustOne'
new_enabled = False
new_provisioning_status = 'new-great'
new_operating_status = 'new-even-better'
new_created_at = self.CREATED_AT + datetime.timedelta(minutes=5)
new_updated_at = self.UPDATED_AT + datetime.timedelta(minutes=10)
reference_Pool_obj = data_models.Pool(
id=new_id,
project_id=new_project_id,
name=new_name,
description=new_description,
load_balancer_id=new_lb_id,
protocol=new_protocol,
lb_algorithm=new_lb_algorithm,
enabled=new_enabled,
provisioning_status=new_provisioning_status,
operating_status=new_operating_status,
members=[],
health_monitor=None,
session_persistence=None,
listeners=[],
l7policies=[],
created_at=new_created_at,
updated_at=new_updated_at)
update_dict = {
'id': new_id,
'project_id': new_project_id,
'name': new_name,
'description': new_description,
'load_balancer_id': new_lb_id,
'protocol': new_protocol,
'lb_algorithm': new_lb_algorithm,
'enabled': new_enabled,
'provisioning_status': new_provisioning_status,
'operating_status': new_operating_status,
'created_at': new_created_at,
'updated_at': new_updated_at}
test_Pool_obj = copy.deepcopy(self.POOL_obj)
test_Pool_obj.update(update_dict)
self.assertEqual(reference_Pool_obj, test_Pool_obj)
def test_Pool_update_add_SP(self):
new_type = 'glue'
new_cookie_name = 'chip'
reference_SP_obj = data_models.SessionPersistence(
pool_id=self.POOL_ID,
type=new_type,
cookie_name=new_cookie_name,
pool=None)
update_dict = {
'session_persistence': {
'type': new_type,
'cookie_name': new_cookie_name
}
}
test_Pool_obj = copy.deepcopy(self.POOL_obj)
test_Pool_obj.update(update_dict)
self.assertEqual(reference_SP_obj, test_Pool_obj.session_persistence)
def test_Pool_update_delete_SP(self):
update_dict = {'session_persistence': {}}
test_Pool_obj = copy.deepcopy(self.POOL_obj)
test_Pool_obj.session_persistence = copy.deepcopy(self.SP_obj)
test_Pool_obj.session_persistence.pool = test_Pool_obj
test_Pool_obj.update(update_dict)
self.assertIsNone(test_Pool_obj.session_persistence)
def test_Pool_update_SP_update(self):
new_type = 'glue'
new_cookie_name = 'chip'
update_dict = {
'session_persistence': {
'type': new_type,
'cookie_name': new_cookie_name
}
}
test_Pool_obj = copy.deepcopy(self.POOL_obj)
reference_SP_obj = data_models.SessionPersistence(
pool_id=self.POOL_ID,
type=new_type,
cookie_name=new_cookie_name,
pool=test_Pool_obj)
test_Pool_obj.session_persistence = copy.deepcopy(self.SP_obj)
test_Pool_obj.session_persistence.pool = test_Pool_obj
test_Pool_obj.update(update_dict)
self.assertEqual(reference_SP_obj, test_Pool_obj.session_persistence)
def test_Amphora_update(self):
new_id = uuidutils.generate_uuid()
new_status = constants.ERROR
new_role = constants.ROLE_BACKUP
new_vrrp_priority = constants.ROLE_BACKUP_PRIORITY
new_created_at = self.CREATED_AT + datetime.timedelta(minutes=5)
new_updated_at = self.UPDATED_AT + datetime.timedelta(minutes=10)
new_image_id = uuidutils.generate_uuid()
new_compute_flavor = uuidutils.generate_uuid()
update_dict = {
'id': new_id,
'status': new_status,
'role': new_role,
'vrrp_priority': new_vrrp_priority,
'created_at': new_created_at,
'updated_at': new_updated_at,
'image_id': new_image_id,
'compute_flavor': new_compute_flavor
}
test_Amp_obj = copy.deepcopy(self.AMP_obj)
reference_Amp_obj = data_models.Amphora(
id=new_id,
load_balancer_id=self.LB_ID,
compute_id=self.COMPUTE_ID,
status=new_status,
lb_network_ip=None,
vrrp_ip=None,
ha_ip=None,
vrrp_port_id=None,
ha_port_id=self.VIP_PORT_ID,
load_balancer=self.LB_obj,
role=new_role,
cert_expiration=None,
cert_busy=False,
vrrp_interface=None,
vrrp_id=None,
vrrp_priority=constants.ROLE_BACKUP_PRIORITY,
cached_zone=None,
created_at=new_created_at,
updated_at=new_updated_at,
image_id=new_image_id,
compute_flavor=new_compute_flavor
)
test_Amp_obj.update(update_dict)
self.assertEqual(reference_Amp_obj, test_Amp_obj)
def test_Quota_update(self):
new_loadbalancer_quota = 10
new_listener_quota = 11
new_pool_quota = 12
new_healthmonitor_quota = 13
new_member_quota = 14
new_l7policy_quota = 15
new_l7rule_quota = 16
update_dict = {
'load_balancer': new_loadbalancer_quota,
'listener': new_listener_quota,
'pool': new_pool_quota,
'health_monitor': new_healthmonitor_quota,
'member': new_member_quota,
'l7policy': new_l7policy_quota,
'l7rule': new_l7rule_quota
}
test_Quota_obj = copy.deepcopy(self.QUOTA_obj)
reference_Quota_obj = data_models.Quotas(
project_id=self.PROJECT_ID,
load_balancer=new_loadbalancer_quota,
listener=new_listener_quota,
pool=new_pool_quota,
health_monitor=new_healthmonitor_quota,
member=new_member_quota,
l7policy=new_l7policy_quota,
l7rule=new_l7rule_quota,
in_use_health_monitor=None,
in_use_listener=None,
in_use_load_balancer=None,
in_use_member=None,
in_use_pool=None,
in_use_l7policy=None,
in_use_l7rule=None
)
test_Quota_obj.update(update_dict)
self.assertEqual(reference_Quota_obj, test_Quota_obj)
def test_ListenerStatistics_iadd(self):
# test incrementing add function
bytes_in1 = random.randrange(1000000000)
bytes_out1 = random.randrange(1000000000)
active_conns1 = random.randrange(1000000000)
total_conns1 = random.randrange(1000000000)
request_errors1 = random.randrange(1000000000)
stats_1 = data_models.ListenerStatistics(
listener_id=self.LISTENER_ID,
amphora_id=self.AMP_ID,
bytes_in=bytes_in1,
bytes_out=bytes_out1,
active_connections=active_conns1,
total_connections=total_conns1,
request_errors=request_errors1
)
bytes_in2 = random.randrange(1000000000)
bytes_out2 = random.randrange(1000000000)
active_conns2 = random.randrange(1000000000)
total_conns2 = random.randrange(1000000000)
request_errors2 = random.randrange(1000000000)
stats_2 = data_models.ListenerStatistics(
listener_id="listener 2",
amphora_id="amphora 2",
bytes_in=bytes_in2,
bytes_out=bytes_out2,
active_connections=active_conns2,
total_connections=total_conns2,
request_errors=request_errors2
)
# test successful +=
stats_1 += stats_2
# not a delta, so it won't be incremented
self.assertEqual(stats_1.active_connections, active_conns1)
self.assertEqual(stats_1.listener_id, self.LISTENER_ID)
self.assertEqual(stats_1.amphora_id, self.AMP_ID)
# deltas will be incremented
self.assertEqual(stats_1.bytes_in, bytes_in1 + bytes_in2)
self.assertEqual(stats_1.bytes_out, bytes_out1 + bytes_out2)
self.assertEqual(stats_1.total_connections,
total_conns1 + total_conns2)
self.assertEqual(stats_1.request_errors,
request_errors1 + request_errors2)
# test incrementing an incompatible object
self.assertRaises(TypeError, stats_1.__iadd__, "boom")
def test_TLSContainer_serialization(self):
tls_container = data_models.TLSContainer(
id=self.TLS_CONTAINER_ID,
primary_cn='fake_cn',
certificate=b'certificate_buffer1',
private_key=b'private_key1',
passphrase=b'passphrase1',
intermediates=[
b'intermediate_buffer1',
b'intermediate_buffer2',
]
)
tls_container_dict = tls_container.to_dict(recurse=True)
json_buffer = json.dumps(tls_container_dict)
json_doc = json.loads(json_buffer)
self.assertEqual(tls_container_dict, json_doc)
|
ba1a45bc98aada86f54df0e664acf7ae0b05e614
|
226727e281e6ce17450fac3ea78d1a3c4a3999fc
|
/tests/test_math/test_linalg.py
|
7c8aa137d037945796fb4702c23c7eed165adb50
|
[
"MIT"
] |
permissive
|
GazzolaLab/PyElastica
|
20df23e97560d05ef50e60f2aeefb420968fb01d
|
49017d456aa10032e0ba1af23d5afd92cecedfa5
|
refs/heads/master
| 2023-08-31T14:28:48.056038
| 2023-08-18T16:54:51
| 2023-08-18T16:54:51
| 254,172,891
| 159
| 94
|
MIT
| 2023-09-09T04:11:01
| 2020-04-08T18:47:47
|
Python
|
UTF-8
|
Python
| false
| false
| 6,516
|
py
|
test_linalg.py
|
#!/usr/bin/env python3
__doc__ = (
""" Test scripts for linear algebra helpers in Elastica Numba implementation"""
)
# System imports
import numpy as np
import pytest
from numpy.testing import assert_allclose
from elastica._linalg import (
_batch_matvec,
_batch_matmul,
_batch_cross,
_batch_vec_oneD_vec_cross,
_batch_dot,
_batch_norm,
_batch_product_i_k_to_ik,
_batch_product_i_ik_to_k,
_batch_product_k_ik_to_ik,
_batch_vector_sum,
_batch_matrix_transpose,
)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_matvec(blocksize):
input_matrix_collection = np.random.randn(3, 3, blocksize)
input_vector_collection = np.random.randn(3, blocksize)
test_vector_collection = _batch_matvec(
input_matrix_collection, input_vector_collection
)
correct_vector_collection = [
np.dot(input_matrix_collection[..., i], input_vector_collection[..., i])
for i in range(blocksize)
]
correct_vector_collection = np.array(correct_vector_collection).T
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_matmul(blocksize):
input_first_matrix_collection = np.random.randn(3, 3, blocksize)
input_second_matrix_collection = np.random.randn(3, 3, blocksize)
test_matrix_collection = _batch_matmul(
input_first_matrix_collection, input_second_matrix_collection
)
correct_matrix_collection = np.empty((3, 3, blocksize))
for i in range(blocksize):
correct_matrix_collection[..., i] = np.dot(
input_first_matrix_collection[..., i],
input_second_matrix_collection[..., i],
)
assert_allclose(test_matrix_collection, correct_matrix_collection)
# TODO : Generalize to two dimensions
@pytest.mark.parametrize("dim", [3])
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_cross(dim, blocksize):
input_first_vector_collection = np.random.randn(dim, blocksize)
input_second_vector_collection = np.random.randn(dim, blocksize)
test_vector_collection = _batch_cross(
input_first_vector_collection, input_second_vector_collection
)
correct_vector_collection = np.cross(
input_first_vector_collection, input_second_vector_collection, axisa=0, axisb=0
).T
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_vec_oneD_vec_cross(blocksize):
input_first_vector_collection = np.random.randn(3, blocksize)
input_second_vector = np.random.randn(3)
test_vector_collection = _batch_vec_oneD_vec_cross(
input_first_vector_collection, input_second_vector
)
correct_vector_collection = np.cross(
input_first_vector_collection, input_second_vector, axisa=0, axisb=0
).T
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_dot(blocksize):
input_first_vector_collection = np.random.randn(3, blocksize)
input_second_vector_collection = np.random.randn(3, blocksize)
test_vector_collection = _batch_dot(
input_first_vector_collection, input_second_vector_collection
)
correct_vector_collection = np.einsum(
"ij,ij->j", input_first_vector_collection, input_second_vector_collection
)
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_norm(blocksize):
input_first_vector_collection = np.random.randn(3, blocksize)
test_vector_collection = _batch_norm(input_first_vector_collection)
correct_vector_collection = np.sqrt(
np.einsum(
"ij,ij->j", input_first_vector_collection, input_first_vector_collection
)
)
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_product_i_k_to_ik(blocksize):
input_first_vector_collection = np.random.randn(3)
input_second_vector_collection = np.random.randn(blocksize)
test_vector_collection = _batch_product_i_k_to_ik(
input_first_vector_collection, input_second_vector_collection
)
correct_vector_collection = np.einsum(
"i,j->ij", input_first_vector_collection, input_second_vector_collection
)
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_product_i_ik_to_k(blocksize):
input_first_vector_collection = np.random.randn(3)
input_second_vector_collection = np.random.randn(3, blocksize)
test_vector_collection = _batch_product_i_ik_to_k(
input_first_vector_collection, input_second_vector_collection
)
correct_vector_collection = np.einsum(
"i,ij->j", input_first_vector_collection, input_second_vector_collection
)
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_product_k_ik_to_ik(blocksize):
input_first_vector_collection = np.random.randn(blocksize)
input_second_vector_collection = np.random.randn(3, blocksize)
test_vector_collection = _batch_product_k_ik_to_ik(
input_first_vector_collection, input_second_vector_collection
)
correct_vector_collection = np.einsum(
"j,ij->ij", input_first_vector_collection, input_second_vector_collection
)
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_vector_sum(blocksize):
input_first_vector_collection = np.random.randn(3, blocksize)
input_second_vector_collection = np.random.randn(3, blocksize)
test_vector_collection = _batch_vector_sum(
input_first_vector_collection, input_second_vector_collection
)
correct_vector_collection = (
input_first_vector_collection + input_second_vector_collection
)
assert_allclose(test_vector_collection, correct_vector_collection)
@pytest.mark.parametrize("blocksize", [8, 32])
def test_batch_matrix_transpose(blocksize):
input_matrix_collection = np.random.randn(3, 3, blocksize)
test_matrix_collection = _batch_matrix_transpose(input_matrix_collection)
correct_matrix_collection = np.einsum("ijk->jik", input_matrix_collection)
assert_allclose(test_matrix_collection, correct_matrix_collection)
|
afc48ea76235f6f3c96968c74289422a4b222309
|
a0aa383f62648abac2bac7076cff66406351938b
|
/docs/src/guid2.py
|
1d2811cf5032b1bcb1741ec6d2682023ff8aee84
|
[
"MIT"
] |
permissive
|
dmontagu/fastapi-utils
|
0ecb1be3778733ad92d2bf4dea8dc28f6144fbcd
|
3ef27a6f67ac10fae6a8b4816549c0c44567a451
|
refs/heads/master
| 2023-07-20T00:55:42.737982
| 2023-03-19T18:34:49
| 2023-03-19T18:34:49
| 234,978,122
| 1,548
| 153
|
MIT
| 2023-09-11T14:52:24
| 2020-01-19T22:34:25
|
Python
|
UTF-8
|
Python
| false
| false
| 212
|
py
|
guid2.py
|
import sqlalchemy as sa
from fastapi_utils.guid_type import setup_guids_postgresql
database_uri = "postgresql://user:password@db:5432/app"
engine = sa.create_engine(database_uri)
setup_guids_postgresql(engine)
|
56ebdc08e33e3c9fa11f27d67e5b64920a720b2a
|
9468849850c7c2b2040835eb9496bfb716a98c21
|
/cea/resources/radiation/main.py
|
ef318171c66fdd3dc970536be5bf6bba9b78fcd8
|
[
"MIT"
] |
permissive
|
architecture-building-systems/CityEnergyAnalyst
|
e6532c0c794538dbb665366ccf6d783e0d9d1345
|
b84bcefdfdfc2bc0e009b5284b74391a957995ac
|
refs/heads/master
| 2023-08-30T19:57:47.445797
| 2023-08-25T13:30:28
| 2023-08-25T13:30:28
| 49,491,341
| 166
| 60
|
MIT
| 2023-09-11T11:10:00
| 2016-01-12T10:02:17
|
Python
|
UTF-8
|
Python
| false
| false
| 8,915
|
py
|
main.py
|
"""
Radiation engine and geometry handler for CEA
"""
import os
import shutil
import time
from itertools import repeat
import pandas as pd
import geopandas as gpd
from osgeo import gdal
import cea.config
import cea.inputlocator
from cea.datamanagement.databases_verification import verify_input_geometry_zone, verify_input_geometry_surroundings
from cea.resources.radiation import daysim, geometry_generator
from cea.resources.radiation.daysim import GridSize
from cea.resources.radiation.radiance import CEADaySim
from cea.utilities import epwreader
from cea.utilities.parallel import vectorize
__author__ = "Paul Neitzel, Kian Wee Chen"
__copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Paul Neitzel", "Kian Wee Chen", "Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def read_surface_properties(locator) -> pd.DataFrame:
"""
This function returns a dataframe with the emissivity values of walls, roof, and windows
of every building in the scene
:param cea.inputlocator.InputLocator locator: CEA InputLocator
:returns pd.DataFrame: Dataframe with the emissivity values
"""
# local variables
architectural_properties = gpd.GeoDataFrame.from_file(locator.get_building_architecture())
surface_database_windows = pd.read_excel(locator.get_database_envelope_systems(), "WINDOW").set_index("code")
surface_database_roof = pd.read_excel(locator.get_database_envelope_systems(), "ROOF").set_index("code")
surface_database_walls = pd.read_excel(locator.get_database_envelope_systems(), "WALL").set_index("code")
def match_code(property_code_column: str, code_value_df: pd.DataFrame) -> pd.DataFrame:
"""
Matches envelope code in building properties with code in the database and retrieves its values
"""
df = pd.merge(architectural_properties[[property_code_column]], code_value_df,
left_on=property_code_column, right_on="code", how="left")
return df
# query data
building_names = architectural_properties['Name']
df1 = match_code('type_win', surface_database_windows[['G_win']])
df2 = match_code('type_roof', surface_database_roof[['r_roof']])
df3 = match_code('type_wall', surface_database_walls[['r_wall']])
surface_properties = pd.concat([building_names, df1, df2, df3], axis=1)
return surface_properties.set_index('Name').round(decimals=2)
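# Illustrative note (not part of the original file): the returned frame is
# indexed by building Name and, besides the envelope type codes, carries the
# matched values G_win, r_roof and r_wall, rounded to two decimals.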
def run_daysim_simulation(cea_daysim: CEADaySim, zone_building_names, locator, settings, geometry_pickle_dir, num_processes):
weather_path = locator.get_weather_file()
    # check for inconsistencies and replace them with the max value of the weather file
weatherfile = epwreader.epw_reader(weather_path)
max_global = weatherfile['glohorrad_Whm2'].max()
list_of_building_names = [building_name for building_name in settings.buildings
if building_name in zone_building_names]
# get chunks of buildings to iterate
chunks = [list_of_building_names[i:i + settings.n_buildings_in_chunk] for i in
range(0, len(list_of_building_names),
settings.n_buildings_in_chunk)]
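    # Illustrative example (not in the original file): with
    # n_buildings_in_chunk=2, ['B1', 'B2', 'B3'] -> [['B1', 'B2'], ['B3']]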
write_sensor_data = settings.write_sensor_data
radiance_parameters = {"rad_ab": settings.rad_ab, "rad_ad": settings.rad_ad, "rad_as": settings.rad_as,
"rad_ar": settings.rad_ar, "rad_aa": settings.rad_aa,
"rad_lr": settings.rad_lr, "rad_st": settings.rad_st, "rad_sj": settings.rad_sj,
"rad_lw": settings.rad_lw, "rad_dj": settings.rad_dj,
"rad_ds": settings.rad_ds, "rad_dr": settings.rad_dr, "rad_dp": settings.rad_dp}
grid_size = GridSize(walls=settings.walls_grid, roof=settings.roof_grid)
num_chunks = len(chunks)
if num_chunks == 1:
daysim.isolation_daysim(
0, cea_daysim, chunks[0], locator, radiance_parameters, write_sensor_data, grid_size,
max_global, weatherfile, geometry_pickle_dir)
else:
vectorize(daysim.isolation_daysim, num_processes)(
range(0, num_chunks),
repeat(cea_daysim, num_chunks),
chunks,
repeat(locator, num_chunks),
repeat(radiance_parameters, num_chunks),
repeat(write_sensor_data, num_chunks),
repeat(grid_size, num_chunks),
repeat(max_global, num_chunks),
repeat(weatherfile, num_chunks),
repeat(geometry_pickle_dir, num_chunks)
)
def main(config):
"""
    This function calculates the solar insolation at X sensor points for every building in the zone
    of interest. The number of sensor points depends on the grid size selected in the config file;
    the points are generated automatically.
    :param config: Configuration object with the settings (general and radiation)
:type config: cea.config.Configuration
:return:
"""
    # reference case needs to be provided here
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
    # the selected buildings are the ones for which the individual radiation script is run
    # this is only used when run_all_buildings is set to 'False' in default.config
daysim_bin_path, daysim_lib_path = daysim.check_daysim_bin_directory(config.radiation.daysim_bin_directory,
config.radiation.use_latest_daysim_binaries)
print(f'Using Daysim binaries from path: {daysim_bin_path}')
print(f'Using Daysim data from path: {daysim_lib_path}')
print("verifying geometry files")
zone_path = locator.get_zone_geometry()
surroundings_path = locator.get_surroundings_geometry()
print(f"zone: {zone_path}")
print(f"surroundings: {surroundings_path}")
zone_df = gpd.GeoDataFrame.from_file(zone_path)
surroundings_df = gpd.GeoDataFrame.from_file(surroundings_path)
verify_input_geometry_zone(zone_df)
verify_input_geometry_surroundings(surroundings_df)
# import material properties of buildings
print("Getting geometry materials")
building_surface_properties = read_surface_properties(locator)
building_surface_properties.to_csv(locator.get_radiation_materials())
geometry_staging_location = os.path.join(locator.get_solar_radiation_folder(), "radiance_geometry_pickle")
print("Creating 3D geometry and surfaces")
print(f"Saving geometry pickle files in: {geometry_staging_location}")
# create geometrical faces of terrain and buildings
terrain_raster = gdal.Open(locator.get_terrain())
architecture_wwr_df = gpd.GeoDataFrame.from_file(locator.get_building_architecture()).set_index('Name')
geometry_terrain, zone_building_names, surroundings_building_names = geometry_generator.geometry_main(config,
zone_df,
surroundings_df,
terrain_raster,
architecture_wwr_df,
geometry_staging_location)
daysim_staging_location = os.path.join(locator.get_temporary_folder(), 'cea_radiation')
cea_daysim = CEADaySim(daysim_staging_location, daysim_bin_path, daysim_lib_path)
# create radiance input files
print("Creating radiance material file")
cea_daysim.create_radiance_material(building_surface_properties)
print("Creating radiance geometry file")
cea_daysim.create_radiance_geometry(geometry_terrain, building_surface_properties, zone_building_names,
surroundings_building_names, geometry_staging_location)
print("Converting files for DAYSIM")
weather_file = locator.get_weather_file()
print('Transforming weather files to daysim format')
cea_daysim.execute_epw2wea(weather_file)
print('Transforming radiance files to daysim format')
cea_daysim.execute_radfiles2daysim()
time1 = time.time()
run_daysim_simulation(cea_daysim, zone_building_names, locator, config.radiation, geometry_staging_location,
num_processes=config.get_number_of_processes())
# Remove staging location after everything is successful
shutil.rmtree(daysim_staging_location)
print("Daysim simulation finished in %.2f mins" % ((time.time() - time1) / 60.0))
if __name__ == '__main__':
main(cea.config.Configuration())
|
1e79b57e6660d83b5fbcb8cc92dece627fec9482
|
2a382e3e17fce63433baa0ea46e945d5306f29dc
|
/droidconfig.py
|
8ac5c6c471c06ebc4ab89676368386a8e1f97cd0
|
[
"MIT"
] |
permissive
|
cryptax/droidlysis
|
8a8cdeb0b71d3f412a41c47f5b1a39f6a3228450
|
29ce79f776e5e943ef5a540b02884d54662a2e31
|
refs/heads/master
| 2023-08-09T00:09:52.539269
| 2023-07-24T11:21:05
| 2023-07-24T11:21:05
| 189,957,586
| 189
| 34
|
MIT
| 2023-02-21T10:26:16
| 2019-06-03T07:38:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,829
|
py
|
droidconfig.py
|
import os
import errno
import configparser
import logging
import shutil
from platformdirs import *
logging.basicConfig(format='%(levelname)s:%(filename)s:%(message)s',
level=logging.INFO)
# ------------------------- Reading *.conf configuration files -----------
class generalconfig:
def __init__(self, filename='./conf/general.conf', verbose=False):
if not os.path.exists(filename):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), filename)
self.config = configparser.ConfigParser()
self.config.read(filename)
# get config
self.APKTOOL_JAR = os.path.expanduser(self.config['tools']['apktool'])
self.BAKSMALI_JAR = os.path.expanduser(self.config['tools']['baksmali'])
self.DEX2JAR_CMD = os.path.expanduser(self.config['tools']['dex2jar'])
self.PROCYON_JAR = os.path.expanduser(self.config['tools']['procyon'])
self.KEYTOOL = os.path.expanduser(self.config['tools']['keytool'])
self.SMALI_CONFIGFILE = os.path.join(os.path.dirname(filename),
self.config['general']['smali_config'])
self.WIDE_CONFIGFILE = os.path.join(os.path.dirname(filename),
self.config['general']['wide_config'])
self.ARM_CONFIGFILE = os.path.join(os.path.dirname(filename),
self.config['general']['arm_config'])
self.DISTRIB_KIT_CONFIGFILE = os.path.join(os.path.dirname(filename),
self.config['general']['kit_config'])
# duplicate kit configuration for edition
cache_dir = user_cache_dir('droidlysis', 'cryptax')
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.KIT_CONFIGFILE = os.path.join(cache_dir,
self.config['general']['kit_config'])
if not os.path.exists(self.KIT_CONFIGFILE):
logging.debug(f'Copying {self.DISTRIB_KIT_CONFIGFILE}'
f' to {self.KIT_CONFIGFILE}')
shutil.copyfile(self.DISTRIB_KIT_CONFIGFILE, self.KIT_CONFIGFILE)
self.SQLALCHEMY = f'sqlite:///{self.config["general"]["db_file"]}'
# check files are accessible
for f in [self.APKTOOL_JAR, self.BAKSMALI_JAR,
self.DEX2JAR_CMD, self.PROCYON_JAR,
self.SMALI_CONFIGFILE, self.WIDE_CONFIGFILE,
self.ARM_CONFIGFILE, self.KIT_CONFIGFILE]:
if not os.access(f, os.R_OK):
logging.warning(f'Cannot access {f} - check your configuration file {filename}')
if not os.access(self.KEYTOOL, os.X_OK):
logging.warning(f'Cannot access keytool at {self.KEYTOOL} - check your configuration file {filename}')
class droidconfig:
def __init__(self, filename, verbose=False):
assert filename is not None, "Filename is invalid"
assert os.access(filename, os.R_OK) is not False, "File {0} is not readable".format(filename)
self.filename = filename
self.configparser = configparser.RawConfigParser()
if verbose:
logging.getLogger().setLevel(logging.DEBUG)
logging.debug("Reading configuration file: '%s'" % (filename))
self.configparser.read(filename)
def get_sections(self):
return self.configparser.sections()
def get_pattern(self, section):
return self.configparser.get(section, 'pattern')
def get_description(self, section):
try:
return self.configparser.get(section, 'description')
except (configparser.NoSectionError, configparser.NoOptionError):
pass
return None
def is_pattern_present(self, pattern):
for section in self.get_sections():
section_patterns = self.get_pattern(section).split('|')
if pattern in section_patterns:
return True
for p in section_patterns:
if p in pattern:
# our pattern is more generic
return True
return False
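    # Illustrative example (not part of the original file), assuming a section
    # whose pattern is 'send_sms|sms_manager': is_pattern_present('send_sms')
    # is True (exact match) and is_pattern_present('send_sms_now') is also
    # True, because the section pattern 'send_sms' is a substring of it.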
def get_all_regexp(self):
# reads the config file and returns a list
# of all patterns for all sections
# the patterns are concatenated with a |
# throws NoSectionError, NoOptionError
allpatterns = ''
for section in self.configparser.sections():
if allpatterns == '':
allpatterns = self.configparser.get(section, 'pattern')
else:
allpatterns = self.configparser.get(section, 'pattern') + '|' + allpatterns
return bytes(allpatterns, 'utf-8')
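    # Illustrative sketch (not part of the original file), assuming two
    # sections with patterns 'send_sms|sms_manager' and 'abort_broadcast':
    # get_all_regexp() returns b'abort_broadcast|send_sms|sms_manager', since
    # each later section is prepended to the accumulated pattern.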
def match_properties(self, match, properties):
"""
Call this when the recursive search has been done to analyze the results
and understand which properties have been spotted.
@param match: returned by droidutil.recursive_search. This is a dictionary
of matching lines ordered by matching keyword (pattern)
@param properties: dictionary of properties where the key is the property name
and the value will be False/True if set or not
        throws NoSectionError, NoOptionError
"""
for section in self.configparser.sections():
pattern_list = self.configparser.get(section, 'pattern').split('|')
properties[section] = False
for pattern in pattern_list:
# beware when pattern has blah\$binz, the matching key is blah$binz
if match[pattern.replace('\\', '')]:
logging.debug("Setting properties[%s] = True (matches %s)" % (section, pattern))
properties[section] = True
break
|
ccf3b3021a6f8d2ec42afa663343b3e0e63d2269
|
753aafa747871f556600b28dbb867298132b1e6b
|
/supervisely/metric/common.py
|
1603e13518c697aadc3869960f1e43457ea9f053
|
[
"Apache-2.0"
] |
permissive
|
supervisely/supervisely
|
85dd63e5ccb590b2861271ef7bd5401aa2a99038
|
f0df756b8fb89364202fde54e6ef5fe89fca089d
|
refs/heads/master
| 2023-08-27T07:29:57.682377
| 2023-08-24T13:17:31
| 2023-08-24T13:17:31
| 140,302,908
| 447
| 91
|
Apache-2.0
| 2023-09-13T11:11:09
| 2018-07-09T15:09:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,526
|
py
|
common.py
|
# coding: utf-8
from supervisely.project.project import Project
from supervisely.annotation.tag_meta_collection import TagMetaCollection
from supervisely.annotation.tag_meta import TagValueType
from supervisely.sly_logger import logger
CLASSES_MAPPING = 'classes_mapping'
TAGS_MAPPING = 'tags_mapping'
CONFIDENCE_THRESHOLD = 'confidence_threshold'
TRUE_POSITIVE = 'true-positive'
TRUE_NEGATIVE = 'true-negative'
FALSE_POSITIVE = 'false-positive'
FALSE_NEGATIVE = 'false-negative'
ACCURACY = 'accuracy'
PRECISION = 'precision'
RECALL = 'recall'
F1_MEASURE = 'F1-measure'
TOTAL = 'total'
TOTAL_GROUND_TRUTH = 'total-ground-truth'
TOTAL_PREDICTIONS = 'total-predictions'
CONFUSION_MATRIX = 'confusion-matrix'
UNMATCHED_GT = 'unmatched-gt'
UNMATCHED_PREDICTIONS = 'unmatched-predictions'
def check_class_mapping(first_project: Project, second_project: Project, classes_mapping: dict) -> None:
for k, v in classes_mapping.items():
if first_project.meta.obj_classes.get(k) is None:
raise RuntimeError('Class {} does not exist in input project "{}".'.format(k, first_project.name))
if second_project.meta.obj_classes.get(v) is None:
raise RuntimeError('Class {} does not exist in input project "{}".'.format(v, second_project.name))
def check_tag_mapping(first_project: Project, second_project: Project, tags_mapping: dict) -> None:
for k, v in tags_mapping.items():
if not first_project.meta.tag_metas.has_key(k):
raise RuntimeError('Tag {} does not exist in input project "{}".'.format(k, first_project.name))
if not second_project.meta.tag_metas.has_key(v):
raise RuntimeError('Tag {} does not exist in input project "{}".'.format(v, second_project.name))
def render_labels_for_classes(labels, class_colors, canvas, missing_classes_color):
for label in labels:
color = class_colors.get(label.obj_class.name, missing_classes_color)
label.geometry.draw(canvas, color)
def render_labels_for_class_name(labels, class_name, canvas):
return render_labels_for_classes(labels, {class_name: True}, canvas, missing_classes_color=False)
def safe_ratio(num, denom):
return (num / denom) if denom != 0 else 0
def sum_counters(elementwise_counters, counter_names):
return {counter_name: sum(c.get(counter_name, 0) for c in elementwise_counters) for counter_name in counter_names}
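# Illustrative example (not part of the original file):
# sum_counters([{'tp': 1}, {'tp': 2, 'fp': 1}], ['tp', 'fp'])
# -> {'tp': 3, 'fp': 1}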
def log_line(length=80, c=' '):
logger.info(c * length)
def log_head(string):
logger.info(string.center(80, '*'))
|
5745784fcbfe586c21ee705775f696910a34d1f9
|
ae6c2a6fa37613ac31b2bd3537b3276c9b333632
|
/server/migrations/0090_auto_20190305_1023.py
|
04ce5df390673878d2db492df300d18a198e13fd
|
[
"Apache-2.0"
] |
permissive
|
salopensource/sal
|
435a31904eb83048c02c9fbff02bbf832835d1b4
|
0895106c6729d5465da5e21a810e967a73ed6e24
|
refs/heads/main
| 2023-08-03T06:53:40.142752
| 2023-07-28T15:51:08
| 2023-07-28T15:51:08
| 35,883,375
| 227
| 94
|
Apache-2.0
| 2023-07-28T15:51:10
| 2015-05-19T13:21:57
|
Python
|
UTF-8
|
Python
| false
| false
| 563
|
py
|
0090_auto_20190305_1023.py
|
# Generated by Django 2.1.4 on 2019-03-05 15:23
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('server', '0089_remove_machine_report'),
]
operations = [
migrations.RemoveField(
model_name='machine',
name='last_puppet_run',
),
migrations.RemoveField(
model_name='machine',
name='puppet_errors',
),
migrations.RemoveField(
model_name='machine',
name='puppet_version',
),
]
|
6120d907ae31a31069aa0961b5959437c1127456
|
4a59e1f44693e424cae8bee19167ff401ac0eba7
|
/spark/train_model.py
|
d31c14c60967f05580f4f285852e6542384c9d11
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
yahoo/lopq
|
2bc10c9ff92771b42eac76fec02c5015ef41496f
|
0f17655b901e6dfabe5c2aa62b4c8e492f34b05a
|
refs/heads/master
| 2023-08-31T00:33:42.456245
| 2017-12-16T00:29:23
| 2017-12-16T00:29:23
| 48,455,253
| 595
| 142
|
Apache-2.0
| 2019-04-10T19:50:27
| 2015-12-22T21:40:24
|
Python
|
UTF-8
|
Python
| false
| false
| 15,428
|
py
|
train_model.py
|
# Copyright 2015, Yahoo Inc.
# Licensed under the terms of the Apache License, Version 2.0. See the LICENSE file associated with the project for terms.
from pyspark.context import SparkContext
import numpy as np
import cPickle as pkl
import base64
import os
import subprocess
import sys
from tempfile import NamedTemporaryFile
from operator import add
from pyspark.mllib.clustering import KMeans, KMeansModel
from lopq.model import LOPQModel, compute_rotations_from_accumulators
STEP_COARSE = 0
STEP_ROTATION = 1
STEP_SUBQUANT = 2
def default_data_loading(sc, data_path, sampling_ratio, seed):
"""
This function loads training data from a text file, sampling it by the provided
ratio and random seed, and interprets each line as a tab-separated (id, data) pair
where 'data' is assumed to be a base64-encoded pickled numpy array. The ids are discarded.
The data is returned as an RDD of numpy arrays.
"""
# Compute the number of cores in our cluster - used below to heuristically set the number of partitions
total_cores = int(sc._conf.get('spark.executor.instances')) * int(sc._conf.get('spark.executor.cores'))
# Load and sample down the dataset
d = sc.textFile(data_path, total_cores * 3).sample(False, sampling_ratio, seed)
# The data is (id, vector) tab-delimited pairs where each vector is
# a base64-encoded pickled numpy array
deserialize_vec = lambda s: pkl.loads(base64.decodestring(s.split('\t')[1]))
vecs = d.map(deserialize_vec)
return vecs
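# Illustrative sketch (not part of the original module): one way to produce a
# record in the format default_data_loading expects -- a tab-separated
# (id, base64-pickled numpy array) pair on a single line. The function name is
# hypothetical.
def _example_serialize_record(record_id, vec):
    # strip the newlines base64 inserts so the record stays on one line
    return record_id + '\t' + base64.encodestring(pkl.dumps(vec, -1)).replace('\n', '')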
def load_data(sc, args, data_load_fn=default_data_loading):
"""
Load training data as an RDD.
"""
# Load data
vecs = data_load_fn(sc, args.data, args.sampling_ratio, args.seed)
# Split the vectors
split_vecs = vecs.map(lambda x: np.split(x, 2))
return split_vecs
def train_coarse(sc, split_vecs, V, seed=None):
"""
Perform KMeans on each split of the data with V clusters each.
"""
# Cluster first split
first = split_vecs.map(lambda x: x[0])
first.cache()
print 'Total training set size: %d' % first.count()
print 'Starting training coarse quantizer...'
C0 = KMeans.train(first, V, initializationMode='random', maxIterations=10, seed=seed)
print '... done training coarse quantizer.'
first.unpersist()
# Cluster second split
second = split_vecs.map(lambda x: x[1])
second.cache()
print 'Starting training coarse quantizer...'
C1 = KMeans.train(second, V, initializationMode='random', maxIterations=10, seed=seed)
print '... done training coarse quantizer.'
second.unpersist()
return np.vstack(C0.clusterCenters), np.vstack(C1.clusterCenters)
def train_rotations(sc, split_vecs, M, Cs):
"""
    Compute rotations for each split of the data using the given coarse quantizers.
"""
Rs = []
mus = []
counts = []
for split in xrange(2):
print 'Starting rotation fitting for split %d' % split
# Get the data for this split
data = split_vecs.map(lambda x: x[split])
# Get kmeans model
model = KMeansModel(Cs[split])
R, mu, count = compute_local_rotations(sc, data, model, M / 2)
Rs.append(R)
mus.append(mu)
counts.append(count)
return Rs, mus, counts
def accumulate_covariance_estimators(sc, data, model):
"""
    Analogous to the function of the same name in lopq.model.
:param SparkContext sc:
a SparkContext
:param RDD data:
an RDD of numpy arrays
:param KMeansModel model:
a KMeansModel instance for which to fit local rotations
"""
def get_residual(x):
cluster = model.predict(x)
centroid = model.clusterCenters[cluster]
residual = x - centroid
return (cluster, residual)
def seq_op(acc, x):
acc += np.outer(x, x)
return acc
# Compute (assignment, residual) k/v pairs
residuals = data.map(get_residual)
residuals.cache()
# Collect counts and mean residuals
count = residuals.countByKey()
mu = residuals.reduceByKey(add).collectAsMap()
# Extract the dimension of the data
D = len(mu.values()[0])
# Collect accumulated outer products
A = residuals.aggregateByKey(np.zeros((D, D)), seq_op, add).collectAsMap()
residuals.unpersist()
return A, mu, count
def dict_to_ndarray(d, N):
"""
Helper for collating a dict with int keys into an ndarray. The value for a key
becomes the value at the corresponding index in the ndarray and indices missing
from the dict become zero ndarrays of the same dimension.
:param dict d:
a dict of (int, ndarray) or (int, number) key/values
:param int N:
the size of the first dimension of the new ndarray (the rest of the dimensions
are determined by the shape of elements in d)
"""
el = d.values()[0]
if type(el) == np.ndarray:
value_shape = el.shape
arr = np.zeros((N,) + value_shape)
else:
arr = np.zeros(N)
for i in d:
arr[i] = d[i]
return arr
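# Illustrative example (not part of the original module):
# dict_to_ndarray({0: 2.0, 2: 5.0}, 4) -> array([ 2., 0., 5., 0.])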
def compute_local_rotations(sc, data, model, num_buckets):
"""
Analogous to the function of the same name in lopq.model.
:param SparkContext sc:
a SparkContext
:param RDD data:
an RDD of numpy arrays
:param KMeansModel model:
a KMeansModel instance for which to fit local rotations
:param int num_buckets:
the number of subvectors over which to balance residual variance
"""
# Get estimators
A, mu, count = accumulate_covariance_estimators(sc, data, model)
# Format as ndarrays
V = len(model.centers)
A = dict_to_ndarray(A, V)
mu = dict_to_ndarray(mu, V)
count = dict_to_ndarray(count, V)
# Compute params
R, mu = compute_rotations_from_accumulators(A, mu, count, num_buckets)
return R, mu, count
def train_subquantizers(sc, split_vecs, M, subquantizer_clusters, model, seed=None):
"""
    Project each data point into its local space and compute subquantizers by clustering
each fine split of the locally projected data.
"""
b = sc.broadcast(model)
def project_local(x):
x = np.concatenate(x)
coarse = b.value.predict_coarse(x)
return b.value.project(x, coarse)
projected = split_vecs.map(project_local)
# Split the vectors into the subvectors
split_vecs = projected.map(lambda x: np.split(x, M))
split_vecs.cache()
subquantizers = []
for split in xrange(M):
data = split_vecs.map(lambda x: x[split])
data.cache()
sub = KMeans.train(data, subquantizer_clusters, initializationMode='random', maxIterations=10, seed=seed)
data.unpersist()
subquantizers.append(np.vstack(sub.clusterCenters))
return (subquantizers[:len(subquantizers) / 2], subquantizers[len(subquantizers) / 2:])
def save_hdfs_pickle(m, pkl_path):
"""
Given a python object and a path on hdfs, save the object as a pickle file locally and copy the file
to the hdfs path.
"""
print 'Saving pickle to temp file...'
f = NamedTemporaryFile(delete=False)
pkl.dump(m, f, -1)
f.close()
print 'Copying pickle file to hdfs...'
copy_to_hdfs(f, pkl_path)
os.remove(f.name)
def save_hdfs_proto(m, proto_path):
"""
Given an LOPQModel object and a path on hdfs, save the model parameters as a protobuf file locally and
copy the file to the hdfs path.
"""
print 'Saving protobuf to temp file...'
f = NamedTemporaryFile(delete=False)
m.export_proto(f)
f.close()
print 'Copying proto file to hdfs...'
copy_to_hdfs(f, proto_path)
os.remove(f.name)
def copy_to_hdfs(f, hdfs_path):
subprocess.call(['hadoop', 'fs', '-copyFromLocal', f.name, hdfs_path])
def validate_arguments(args, model):
"""
Check provided command line arguments to ensure they are coherent. Provide feedback for potential errors.
"""
# Parse steps
args.steps = set(map(int, args.steps.split(',')))
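    # Illustrative note (not in the original file): the default '0,1,2'
    # parses to {0, 1, 2}, i.e. STEP_COARSE, STEP_ROTATION and STEP_SUBQUANT.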
# Check that the steps make sense
if STEP_ROTATION not in args.steps and len(args.steps) == 2:
print 'Training steps invalid'
sys.exit(1)
# Find parameters and warn of possibly unintentional discrepancies
if args.V is None:
if model is not None:
args.V = model.V
print 'Parameter V not specified: using V=%d from provided model.' % model.V
else:
print 'Parameter V not specified and no existing model provided. Exiting.'
sys.exit(1)
else:
if model is not None and model.V != args.V:
if STEP_COARSE in args.steps:
print 'Parameter V differs between command line argument and provided model: ' + \
'coarse quantizers will be trained with V=%d' % args.V
else:
print 'Parameter V differs between command line argument and provided model: ' + \
'coarse quantizers must be retrained or this discrepancy corrected. Exiting.'
sys.exit(1)
if STEP_ROTATION in args.steps or STEP_SUBQUANT in args.steps:
if args.M is None:
if model is not None:
args.M = model.M
print 'Parameter M not specified: using M=%d from provided model.' % model.M
else:
print 'Parameter M not specified and no existing model provided. Exiting.'
sys.exit(1)
else:
if model is not None and model.M != args.M:
if STEP_ROTATION in args.steps:
print 'Parameter M differs between command line argument and provided model: ' + \
'model will be trained with M=%d' % args.M
else:
print 'Parameter M differs between command line argument and provided model: ' + \
'rotations must be retrained or this discrepancy corrected. Exiting.'
sys.exit(1)
if STEP_ROTATION in args.steps:
if STEP_COARSE not in args.steps and (model is None or model.Cs is None):
print 'Cannot train rotations without coarse quantizers. Either train coarse quantizers or provide an existing model. Exiting.'
sys.exit(1)
if STEP_SUBQUANT in args.steps:
if STEP_COARSE not in args.steps and (model is None or model.Cs is None):
print 'Cannot train subquantizers without coarse quantizers. Either train coarse quantizers or provide an existing model. Exiting.'
sys.exit(1)
if STEP_ROTATION not in args.steps and (model is None or model.Rs is None or model.mus is None):
print 'Cannot train subquantizers without rotations. Either train rotations or provide an existing model. Exiting.'
sys.exit(1)
return args
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
# Data handling parameters
parser.add_argument('--data', dest='data', type=str, required=True, help='hdfs path to input data')
parser.add_argument('--data_udf', dest='data_udf', type=str, default=None, help='module name from which to load a data loading UDF')
parser.add_argument('--seed', dest='seed', type=int, default=None, help='optional random seed')
parser.add_argument('--sampling_ratio', dest='sampling_ratio', type=float, default=1.0, help='proportion of data to sample for training')
parser.add_argument('--subquantizer_sampling_ratio', dest='subquantizer_sampling_ratio', type=float, default=1.0,
help='proportion of data to subsample for subquantizer training')
# Model parameters
existing_model_group = parser.add_mutually_exclusive_group()
existing_model_group.add_argument('--existing_model_pkl', dest='existing_model_pkl', type=str, default=None,
help='a pickled LOPQModel from which to extract existing parameters')
existing_model_group.add_argument('--existing_model_proto', dest='existing_model_proto', type=str, default=None,
help='a protobuf of existing model parameters')
# Model hyperparameters
parser.add_argument('--V', dest='V', type=int, default=None, help='number of coarse clusters')
parser.add_argument('--M', dest='M', type=int, default=None, help='total number of subquantizers')
parser.add_argument('--subquantizer_clusters', dest='subquantizer_clusters', type=int, default=256, help='number of subquantizer clusters')
# Training and output directives
parser.add_argument('--steps', dest='steps', type=str, default='0,1,2',
help='comma-separated list of integers indicating which steps of training to perform')
parser.add_argument('--model_pkl', dest='model_pkl', type=str, default=None, help='hdfs path to save pickle file of resulting LOPQModel')
parser.add_argument('--model_proto', dest='model_proto', type=str, default=None, help='hdfs path to save protobuf file of resulting model parameters')
args = parser.parse_args()
# Check that some output format was provided
if args.model_pkl is None and args.model_proto is None:
parser.error('at least one of --model_pkl and --model_proto is required')
# Load existing model if provided
model = None
if args.existing_model_pkl:
model = pkl.load(open(args.existing_model_pkl))
elif args.existing_model_proto:
model = LOPQModel.load_proto(args.existing_model_proto)
args = validate_arguments(args, model)
# Build descriptive app name
get_step_name = lambda x: {STEP_COARSE: 'coarse', STEP_ROTATION: 'rotations', STEP_SUBQUANT: 'subquantizers'}.get(x, None)
steps_str = ', '.join(filter(lambda x: x is not None, map(get_step_name, sorted(args.steps))))
APP_NAME = 'LOPQ{V=%d,M=%d}; training %s' % (args.V, args.M, steps_str)
sc = SparkContext(appName=APP_NAME)
# Load UDF module if provided and load training data RDD
if args.data_udf:
udf_module = __import__(args.data_udf, fromlist=['udf'])
load_udf = udf_module.udf
data = load_data(sc, args, data_load_fn=load_udf)
else:
data = load_data(sc, args)
# Initialize parameters
Cs = Rs = mus = subs = None
# Get coarse quantizers
if STEP_COARSE in args.steps:
Cs = train_coarse(sc, data, args.V, seed=args.seed)
else:
Cs = model.Cs
# Get rotations
if STEP_ROTATION in args.steps:
Rs, mus, counts = train_rotations(sc, data, args.M, Cs)
else:
Rs = model.Rs
mus = model.mus
# Get subquantizers
if STEP_SUBQUANT in args.steps:
model = LOPQModel(V=args.V, M=args.M, subquantizer_clusters=args.subquantizer_clusters, parameters=(Cs, Rs, mus, None))
if args.subquantizer_sampling_ratio != 1.0:
data = data.sample(False, args.subquantizer_sampling_ratio, args.seed)
subs = train_subquantizers(sc, data, args.M, args.subquantizer_clusters, model, seed=args.seed)
# Final output model
model = LOPQModel(V=args.V, M=args.M, subquantizer_clusters=args.subquantizer_clusters, parameters=(Cs, Rs, mus, subs))
if args.model_pkl:
save_hdfs_pickle(model, args.model_pkl)
if args.model_proto:
save_hdfs_proto(model, args.model_proto)
sc.stop()
|
8a9a49ee2cd49dbfbc2954347656bd44d31331f0
|
3f0948e07aef06f734fa6db3945b192f71ab435f
|
/tests/test_atom.py
|
84052216b0d9ba3d7c7ca27e63b9b5f4187b0414
|
[
"BSD-3-Clause"
] |
permissive
|
nucleic/atom
|
c15c932c3a1386469685580867105df7be5c0ba3
|
761a52821d8c77b5718216256963682d11599c1e
|
refs/heads/main
| 2023-08-25T07:55:24.697711
| 2023-05-05T07:59:30
| 2023-05-05T07:59:30
| 8,594,952
| 251
| 49
|
NOASSERTION
| 2023-09-11T17:00:00
| 2013-03-06T03:20:53
|
Python
|
UTF-8
|
Python
| false
| false
| 6,792
|
py
|
test_atom.py
|
# --------------------------------------------------------------------------------------
# Copyright (c) 2013-2023, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# --------------------------------------------------------------------------------------
"""Test the working of the Atom class and metaclass
The handling of specially named method is not tested here as it is exercised
in the test dedicated to the associated behaviors.
The methods related to member observation are tested in test_observe.py
"""
import gc
import pickle
from textwrap import dedent
import pytest
from atom.api import (
Atom,
Int,
MissingMemberWarning,
Str,
Value,
add_member,
atomref,
clone_if_needed,
observe,
set_default,
)
def test_init():
"""Test init."""
class A(Atom):
val = Int()
a = A(val=2)
assert a.val == 2
with pytest.raises(TypeError):
A(None)
# Simply check it does not crash
a.__sizeof__()
def test_set_default():
"""Test changing the default value of a member."""
class Default1(Atom):
i = Int()
i2 = Int()
sd = set_default(1)
class Default2(Default1):
i = sd
i2 = sd
# By setting the same default twice we should get a clone
assert Default1.i is not Default2.i
assert Default1().i == 0
assert Default2().i == 1
with pytest.raises(TypeError):
class Check(Atom):
a = set_default(1)
def test_multi_inheritance():
"""Test that multiple inheritance does not break the memory layout."""
class Multi1(Atom):
i1 = Int()
i2 = Int()
class Multi2(Atom):
i3 = Int()
i4 = Int()
# This ensures the conflict will occur (dict lack of ordering can cause
# unexpected mismatch)
assert Multi1.i1.index == Multi2.i3.index or Multi1.i1.index == Multi2.i4.index
assert Multi1.i2.index == Multi2.i3.index or Multi1.i2.index == Multi2.i4.index
class Multi(Multi1, Multi2):
i4 = Int(12) # Test that conflicts do not mess up overridden members
assert Multi().i4 == 12
members = Multi().members()
for m in members.values():
for m2 in members.values():
if m is m2:
continue
assert m.index != m2.index
class Mixin(Atom):
i5 = Int()
i6 = Int()
i7 = Int()
class MultiNext(Multi, Mixin):
i1 = Int()
class MultiNext2(MultiNext):
i1 = Int()
assert sorted(m.index for m in MultiNext2.__atom_members__.values()) == list(
range(7)
)
def test_member_mro_in_multiple_inheritance():
"""Test that we respect the MRO for members."""
class A(Atom):
a = Str("a")
class B(Atom):
b = Str("b")
class AB(A, B):
pass
class A2(A):
a = Str("a2")
class C(AB, A2):
pass
# C mro AB -> A2 -> A -> B -> Atom
# a is not defined or altered on AB so we expect to get A2 behavior
assert C().a == "a2"
# When AB alters a we expect to see it
class BB(A, B):
a = set_default("b")
class D(BB, A2):
pass
assert D().a == "b"
def test_add_member():
class A(Atom):
pass
add_member(A, "a", Int())
class B(A):
pass
assert "a" in B().members()
def test_cloning_members():
"""Test cloning a member when appropriate.
    Here we test assigning the same member to two names. Other cases in which
    cloning is required, such as modifying a mode, are tested in the tests
    dedicated to those behaviors.
"""
class CloneTest(Atom):
a = b = Int()
assert CloneTest.a is not CloneTest.b
def test_listing_members():
"""Test listing the members from an Atom instance."""
class MembersTest(Atom):
a = b = c = d = e = Int()
assert sorted(MembersTest().members().keys()) == ["a", "b", "c", "d", "e"]
def test_getting_members():
"""Test accessing members directly."""
class A(Atom):
val = Int()
assert A().get_member("val") is A.val
assert A().get_member("") is None
with pytest.raises(TypeError):
A().get_member(1)
class PicklingTest(Atom):
__slots__ = ("d",)
a = b = c = Int()
# See also test_get_set_state
def test_pickling():
"""Test pickling an Atom instance."""
pt = PicklingTest()
pt.a = 2
pt.b = 3
pt.c = 4
pt.d = 5
pick = pickle.dumps(pt)
loaded = pickle.loads(pick)
assert isinstance(loaded, PicklingTest)
assert loaded.a == 2
assert loaded.b == 3
assert loaded.c == 4
assert loaded.d == 5
def test_freezing():
"""Test freezing an Atom instance."""
class FreezingTest(Atom):
a = Int()
ft = FreezingTest()
ft.a = 25
ft.freeze()
assert ft.a == 25
with pytest.raises(AttributeError):
ft.a = 1
with pytest.raises(AttributeError):
del ft.a
def test_traverse_atom():
"""Test that we can break reference cycles involving Atom object."""
class MyAtom(Atom):
val = Value()
a = MyAtom()
l1 = list()
a.val = l1
a.val.append(a)
ref = atomref(a)
del a, l1
gc.collect()
assert not ref()
@pytest.mark.parametrize(
"method_name",
[
"_default_x",
"_validate_x",
"_post_validate_x",
"_post_getattr_",
"_post_setattr_",
"_getstate_",
"_observe_",
],
)
def test_warn_on_missing(method_name):
src = dedent(
f"""
from atom.api import Atom
class A(Atom):
def {method_name}(self):
pass
"""
)
with pytest.warns(MissingMemberWarning):
exec(src)
def test_warn_on_missing_observe():
src = dedent(
"""
class A(Atom):
@observe("x")
def f(self, change):
pass
"""
)
with pytest.warns(MissingMemberWarning):
exec(src, globals(), {"Atom": Atom, "observe": observe})
def test_enable_weakref():
class WA(Atom, enable_weakrefs=True):
pass
assert "__weakref__" in WA.__slots__
def test_init_subclass():
members = {}
class A(Atom):
def __init_subclass__(cls) -> None:
super().__init_subclass__()
nonlocal members
members = cls.members()
class B(A):
a = Int()
assert members == {"a": B.a}
def test_clone_if_needed():
class A(Atom):
def __init_subclass__(cls) -> None:
for m in cls.members().values():
clone_if_needed(cls, m)
class B(A):
a = Int()
i = Int()
class C(B):
b = i
assert C.a is not B.a
assert C.b is i
|
f45d4a82c7d4ed084a67373a0fd4627593efce65
|
4daab5ba90185bae65169ebb8183c635385ab3f8
|
/examples/tutorials/s_logging.py
|
b85354f3eafef282b3c81820c29389ef03923d72
|
[
"MIT"
] |
permissive
|
duartegroup/autodE
|
bcf69440bd04411f97d39df0df0ae1f2bf6feb8c
|
4d6667592f083dfcf38de6b75c4222c0a0e7b60b
|
refs/heads/master
| 2023-09-01T15:08:16.028378
| 2023-07-25T08:09:05
| 2023-07-25T08:09:05
| 196,085,570
| 132
| 42
|
MIT
| 2023-09-12T15:20:54
| 2019-07-09T21:20:27
|
Python
|
UTF-8
|
Python
| false
| false
| 543
|
py
|
s_logging.py
|
import autode as ade
# For more informative logs installing coloredlogs is recommended:
# conda install coloredlogs
# autodE writes logging information at the 'ERROR' level by default. To
# turn on logging export the AUTODE_LOG_LEVEL environment variable to
# one of: INFO, WARNING, ERROR
# Will not print any log
_ = ade.Molecule(smiles="N")
# To set the level to info in bash:
# export AUTODE_LOG_LEVEL=INFO
# then run this script again.
# To write the log to a file, pipe the output to a file, e.g.
# python s_logging.py 2> ade.log
|
a2f4172967019ee01ddf73033655127c63e5047e
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/dla/vendored_sdks/azure_mgmt_datalake_analytics/job/models/job_relationship_properties.py
|
df704d923be28363f5df5021ac4b88060e01acff
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,500
|
py
|
job_relationship_properties.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobRelationshipProperties(Model):
"""Job relationship information properties including pipeline information,
correlation information, etc.
:param pipeline_id: the job relationship pipeline identifier (a GUID).
:type pipeline_id: str
:param pipeline_name: the friendly name of the job relationship pipeline,
which does not need to be unique.
:type pipeline_name: str
:param pipeline_uri: the pipeline uri, unique, links to the originating
service for this pipeline.
:type pipeline_uri: str
:param run_id: the run identifier (a GUID), unique identifier of the
iteration of this pipeline.
:type run_id: str
:param recurrence_id: the recurrence identifier (a GUID), unique per
activity/script, regardless of iterations. This is something to link
different occurrences of the same job together.
:type recurrence_id: str
:param recurrence_name: the recurrence name, user friendly name for the
correlation between jobs.
:type recurrence_name: str
"""
_validation = {
'pipeline_name': {'max_length': 260},
'recurrence_id': {'required': True},
'recurrence_name': {'max_length': 260},
}
_attribute_map = {
'pipeline_id': {'key': 'pipelineId', 'type': 'str'},
'pipeline_name': {'key': 'pipelineName', 'type': 'str'},
'pipeline_uri': {'key': 'pipelineUri', 'type': 'str'},
'run_id': {'key': 'runId', 'type': 'str'},
'recurrence_id': {'key': 'recurrenceId', 'type': 'str'},
'recurrence_name': {'key': 'recurrenceName', 'type': 'str'},
}
def __init__(self, recurrence_id, pipeline_id=None, pipeline_name=None, pipeline_uri=None, run_id=None, recurrence_name=None):
self.pipeline_id = pipeline_id
self.pipeline_name = pipeline_name
self.pipeline_uri = pipeline_uri
self.run_id = run_id
self.recurrence_id = recurrence_id
self.recurrence_name = recurrence_name
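# Illustrative usage sketch (not part of the generated file); the values are
# hypothetical. recurrence_id is the only required constructor argument:
# props = JobRelationshipProperties(recurrence_id='9700a102-...',
#                                   pipeline_name='nightly-etl')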
|
8687a11d24f55a7c5f009fca1a50b1ca22e5b937
|
e4f9f1e6e71a23e167047e5a900ec66a13e1578d
|
/tests/test_eval.py
|
7ac4cdf1f60e164de77a1f610893dcc67aea9a63
|
[
"MIT"
] |
permissive
|
onecommons/unfurl
|
546b9255e3551aa2f4badd8d58ea7958dd362881
|
25a3c81a64ee661813662615d6a0651ff3749219
|
refs/heads/main
| 2023-08-23T22:07:56.949658
| 2023-08-22T19:59:23
| 2023-08-22T19:59:23
| 132,939,353
| 133
| 11
|
MIT
| 2023-09-12T00:09:57
| 2018-05-10T18:27:46
|
Python
|
UTF-8
|
Python
| false
| false
| 30,856
|
py
|
test_eval.py
|
import logging
import unittest
import os
import json
import pickle
import io
from unfurl.result import ResultsList, ResultsMap, serialize_value, ChangeRecord, Result
from unfurl.eval import Ref, UnfurlEvalError, map_value, RefContext, set_eval_func, ExternalValue, SafeRefContext
from unfurl.support import apply_template, TopologyMap, _sandboxed_template
from unfurl.util import UnfurlError, sensitive_str, substitute_env, sensitive_list
from unfurl.runtime import NodeInstance
from ruamel.yaml.comments import CommentedMap
class EvalTest(unittest.TestCase):
longMessage = True
def test_CommentedMap(self):
cm = CommentedMap()
# check bug in ruamel.yaml is fixed: raises TypeError: source has undefined order
self.assertEqual(cm, cm.copy())
def test_CommentedMapEquality(self):
cm = CommentedMap((("b", 2),))
cm.insert(1, "a", 1, comment="a comment")
self.assertEqual(cm, {"a": 1, "b": 2})
def _getTestResource(self, more=None, parent=None):
resourceDef = {
"name": "test",
"a": {"ref": "name"},
"b": [1, 2, 3],
"d": {"a": "va", "b": "vb"},
"n": {"n": {"n": "n"}},
"s": {"ref": "."},
"x": [
{
"a": [
{"c": 1},
{"c": 2},
{"b": "exists"},
{"l": ["l1"]},
{"l": ["l2"]},
]
},
[{"c": 5}],
{"a": [{"c": 3}, {"c": 4}, {"l": ["l3"]}, {"l": ["l4"]}]},
[{"c": 6}],
],
"e": {"a1": {"b1": "v1"}, "a2": {"b2": "v2"}},
"f": {"a": 1, "b": {"ref": ".::f::a"}},
"empty_list": []
}
if more:
resourceDef.update(more)
resource = NodeInstance("test", resourceDef, parent)
assert resource.attributes["x"] == resourceDef["x"]
assert resource.attributes["a"] == "test"
assert resource.attributes["s"] is resource
return resource
def test_refs(self):
assert Ref.is_ref({"ref": "::name"})
assert not Ref.is_ref({"ref": "::name", "somethingUnexpected": 1})
assert Ref.is_ref({"ref": "::name", "vars": {"a": None}})
def test_refPaths(self):
resource = self._getTestResource()
for (exp, expected) in [
["x?::a[c=4]", [[{"c": 3}, {"c": 4}, {"l": ["l3"]}, {"l": ["l4"]}]]],
[
"x::a[c]?",
[[{"c": 1}, {"c": 2}, {"b": "exists"}, {"l": ["l1"]}, {"l": ["l2"]}]],
],
["x::a::[c]", [{"c": 1}, {"c": 2}, {"c": 3}, {"c": 4}]],
["x::a?::[c]", [{"c": 1}, {"c": 2}]],
["a", ["test"]],
["b", [[1, 2, 3]]],
["b::0", [1]],
["b?::2", [3]],
["[b::2]::b::2", [3]],
["b::1", [2]],
["s::b", [[1, 2, 3]]],
["s::b::1", [2]],
["s::s::b::1", [2]],
["n::n::n", ["n"]],
["d[a=va]", [{"a": "va", "b": "vb"}]],
["d[a=vb]", []],
["b[1=2]", [[1, 2, 3]]],
["b[1=1]", []],
["a[=test]", ["test"]],
["a[!=test]", []],
["a[key]", []],
["d[a=va][b=vb]", [{"a": "va", "b": "vb"}]],
["d[a=va][a=vb]", []],
["d[a=va][a!=vb]", [{"a": "va", "b": "vb"}]],
["d[a=va]::b", ["vb"]],
["x::a::c", [1, 2, 3, 4]],
["x::c", [5, 6]],
["x::[c]", [{"c": 5}, {"c": 6}]],
["x::[c=5]", [{"c": 5}]],
["x::a[b]::c", [1, 2]],
["x::a[!b]::c", [3, 4]],
["x::a::l", [["l1"], ["l2"], ["l3"], ["l4"]]],
[{"ref": "a[=$yes]", "vars": {"yes": "test"}}, ["test"]],
[{"ref": "a[=$no]", "vars": {"no": None}}, []],
["[a]", [resource]],
["[=blah]", []],
["[blah]", []],
["[!blah]", [resource]],
[".[!=blah]", [resource]],
["[!a]", []],
["::test", [resource]],
["d::*", set(["va", "vb"])],
["e::*::b2", ["v2"]],
["*", []],
["f", [{"a": 1, "b": 1}]],
["::*", [resource]],
["::*::.template::type", ["tosca.nodes.Root"]],
# [{"q": "{{ foo }}"}, ["{{ foo }}"]]
# XXX test nested ['.[k[d=3]=4]']
]:
ref = Ref(exp)
# print ('eval', ref.source, ref)
result = ref.resolve(RefContext(resource, trace=0))
assert all(not isinstance(i, Result) for i in result)
if isinstance(expected, set):
# for results where order isn't guaranteed in python2.7
self.assertEqual(
set(result),
expected,
"expr was: " + ref.source,
)
else:
self.assertEqual(
result,
expected,
"expr was: " + ref.source,
)
def test_last_resource(self):
parent = NodeInstance("parent")
self._getTestResource(parent=parent)
NodeInstance("another_child", parent=parent)
ref = Ref(".::.instances::x::c")
ctx = RefContext(parent, trace=0)
# index = ctx.referenced.start()
result = ref.resolve(ctx)
        assert ctx._lastResource.name == "parent"
assert result == [5, 6]
def test_funcs(self):
resource = self._getTestResource()
test1 = {"ref": ".name", "vars": {"a": None}}
test2 = {"ref": "$b", "vars": {"b": 1}}
test3 = {
"ref": {
"if": {"not": "$a"},
"then": {"q": "expected"},
"else": {"q": "unexpected"},
},
"vars": {"a": None},
}
result1 = Ref(test1).resolve_one(RefContext(resource))
self.assertEqual("test", result1)
result2 = Ref(test2).resolve_one(RefContext(resource))
self.assertEqual(1, result2)
result3 = Ref(test3).resolve_one(RefContext(resource))
self.assertEqual("expected", result3)
result4 = Ref(test3).resolve(RefContext(resource))
self.assertEqual(["expected"], result4)
test5 = {"ref": {"or": ["$a", "b"]}, "vars": {"a": None}}
result5 = Ref(test5).resolve_one(RefContext(resource))
assert all(not isinstance(i, Result) for i in result5)
self.assertEqual(
resource.attributes["b"], result5
) # this doesn't seem obvious!
def test_circular_refs(self):
more = {}
more["circular_a"] = dict(eval=".::circular_b")
more["circular_b"] = dict(eval=".::circular_a")
more["circular_c"] = dict(eval={"or": [".::circular_d", 1]})
more["circular_d"] = dict(eval={"or": [".::circular_c", 1]})
resource = self._getTestResource(more)
assert resource.attributes["circular_a"] == None, resource.attributes["circular_a"]
assert resource.attributes["circular_b"] == None, resource.attributes["circular_b"]
assert resource.attributes["circular_c"] == 1, resource.attributes["circular_c"]
assert resource.attributes["circular_d"] == 1, resource.attributes["circular_d"]
def test_forEach(self):
resource = self._getTestResource()
test1 = {"ref": ".", "select": {"value": {"content": {"ref": "b"}}}}
expected0 = {"content": [1, 2, 3]}
result0 = Ref(test1).resolve_one(RefContext(resource, trace=0))
self.assertEqual(expected0, result0)
# resolve has same result as resolveOne
self.assertEqual([expected0], Ref(test1).resolve(RefContext(resource)))
# add 'key' to make result a dict
# test that template strings work
# XXX fragile: key is base64 of __str__ of NodeInstance
test1["select"]["key"] = "{{ item | ref | b64encode}}"
result1 = Ref(test1).resolve_one(RefContext(resource))
expected = {"Tm9kZUluc3RhbmNlKCd0ZXN0Jyk=": expected0}
self.assertEqual(expected, result1, result1)
result2 = Ref(test1).resolve(RefContext(resource))
self.assertEqual([expected], result2)
test2 = {
"eval": ".::b",
"foreach": "{{ item * 2 }}"
}
result3 = Ref(test2).resolve_one(RefContext(resource, trace=0))
assert result3 == [2, 4, 6]
test3 = {
"eval": "a",
"foreach": "$item"
}
result4 = Ref(test3).resolve_one(RefContext(resource, trace=0))
assert result4 == ["test"]
test4 = {
"eval": "empty_list",
"foreach": "$item"
}
result5 = Ref(test4).resolve_one(RefContext(resource, trace=0))
assert result5 == []
test5 = {
"eval": {"portspec": "80:81"},
"select": "source"
}
result6 = Ref(test5).resolve_one(RefContext(resource, trace=0))
assert result6 == 80
test6 = {
"eval": {"portspec": "80:81"},
"select": "target"
}
result7 = Ref(test6).resolve_one(RefContext(resource, trace=0))
assert result7 == 81
from toscaparser.elements.portspectype import PortSpec
test7 = {
"eval": "$p",
"foreach": {
"eval": {"portspec": {"eval": "$item"}}
}
}
result7 = Ref(test7).resolve_one(RefContext(resource, vars=dict(p="80:81"), trace=0))
assert result7 == [PortSpec.make("80:81")]
def test_serializeValues(self):
resource = self._getTestResource()
src = {"a": ["b", resource]}
serialized = serialize_value(src)
self.assertEqual(serialized, {"a": ["b", {"ref": "::test"}]})
self.assertEqual(src, map_value(serialized, resource))
serialized = serialize_value(dict(foo=sensitive_str("sensitive")), redact=True)
self.assertEqual(json.dumps(serialized), '{"foo": "<<REDACTED>>"}')
def test_map_value(self):
resource = self._getTestResource()
result = {'outputs': {'ec2_instance':
{'ami': 'ami-0077f1602df963b17',
'arn': 'arn:aws:ec2:eu-central-1',
'id': 'i-087439ef0d1105c1c' }
}
}
ctx = RefContext(resource)
resultTemplate = ResultsMap(dict(attributes=dict(
id = "{{ outputs.ec2_instance.id }}",
arn = "{{ outputs.ec2_instance.arn }}")), ctx)
        # if we don't get _attributes here, this fails because it resolves against ctx's vars:
results = map_value(resultTemplate._attributes, ctx.copy(vars=result))
assert results == {'attributes': {'id': 'i-087439ef0d1105c1c', 'arn': 'arn:aws:ec2:eu-central-1'}}
def test_jinjaTemplate(self):
resource = NodeInstance("test", attributes=dict(a1="hello"))
ctx = RefContext(resource, {"foo": "hello"})
self.assertEqual(apply_template(" {{ foo }} ", ctx), "hello")
# test jinja2 native types
self.assertEqual(apply_template(" {{[foo]}} ", ctx), ["hello"])
self.assertEqual(apply_template(' {{ "::test::a1" | ref }} ', ctx), u"hello")
self.assertEqual(
apply_template(' {{ lookup("unfurl", "::test::a1") }} ', ctx), u"hello"
)
# ansible query() always returns a list
self.assertEqual(
apply_template('{{ query("unfurl", "::test::a1") }}', ctx), [u"hello"]
)
os.environ[
"TEST_ENV"
] = "testEnv" # note: tox doesn't pass on environment variables so we need to set one now
self.assertEqual(
map_value("{{ lookup('env', 'TEST_ENV') }}", resource), "testEnv"
)
self.assertEqual(
map_value("{{ lookup('env', 'MISSING') }}", resource), ""
)
        # test that ref vars can be used as template string vars
exp = {"a": "{{ aVar }} world"}
vars = {"aVar": "hello"}
self.assertEqual(
map_value(exp, RefContext(resource, vars)), {"a": "hello world"}
)
vars = {"foo": {"bar": sensitive_str("sensitive")}}
val = apply_template("{{ foo.bar }}", RefContext(resource, vars, trace=0))
assert isinstance(val, sensitive_str), type(val)
val = map_value(
"{{ {'key' : zone['n']} }}", # n returns a ResultsMap
RefContext(
self._getTestResource(),
{"zone": {"n": ResultsMap({"a": "b"}, RefContext(resource))}},
trace=0,
),
)
assert val == {"key": {"a": "b"}}
# actually {'key': Results({'a': Result('b', None, ())})}
assert type(apply_template(" {{ {} }} ", RefContext(resource, vars))) == dict
assert apply_template('{{ {} }}{{"\n "}}', RefContext(resource, vars)) == '{}\n '
val = apply_template("{{ 'foo' | sensitive }}", RefContext(resource, trace=0))
assert isinstance(val, sensitive_str), type(val)
val = apply_template("{{ ['a', 'b' | sensitive] }}", RefContext(resource, trace=0))
assert isinstance(val[1], sensitive_str), type(val)
val = apply_template("{{ ['a', 'b'] | sensitive }}", RefContext(resource, trace=0))
assert isinstance(val, sensitive_list), type(val)
# test treating expression functions as RefContext methods
val = map_value(
"{{ __unfurl.to_label('a','b', sep='.') }}",
ctx
)
assert val == "a.b"
def test_templateFunc(self):
query = {
"eval": {"template": "{%if testVar %}{{success}}{%else%}failed{%endif%}"},
"vars": {
"testVar": True,
"success": dict(eval={"if": "$true", "then": ".name"}),
},
}
resource = self._getTestResource({"aTemplate": query})
self.assertEqual(map_value(query, resource), "test")
self.assertEqual(resource.attributes["aTemplate"], "test")
template = """\
#jinja2: variable_start_string: '<%', variable_end_string: '%>'
{% filter from_yaml %}
a_dict:
key1: "{{ extras.quoted }}" # shouldn't be evaluated
key2: "<% extras.quoted %>" # will be evaluated to "{{ quoted }} "
key3: "original"
<<: <% key4 | mapValue | to_json %> # will merge into a_dict
{% endfilter %}
"""
vars = {
"extras": dict(key3="hello", quoted={"q": "{{ quoted }} "}),
"key4": dict(extra1={"a": 1}, extra2=[1, 3], key3="overwritten"),
"key2": {"q": "{{ quoted2 }} "},
}
query2 = {"eval": {"template": template}}
expected = {
"a_dict": {
"key1": "{{ extras.quoted }}",
"key2": "{{ quoted }} ",
"key3": "original",
"extra1": {"a": 1},
"extra2": [1, 3],
}
}
ctx = RefContext(resource, vars)
result = apply_template(template, ctx)
self.assertEqual(result, expected)
result = map_value(query2, ctx)
self.assertEqual(result, expected)
def test_templateNodes(self):
resource = self._getTestResource()
NODES = TopologyMap(resource)
assert resource.attributes is NODES["test"]
ctx = RefContext(resource, dict(NODES=NODES))
self.assertEqual("va", apply_template("{{ NODES.test.d.a }}", ctx))
def test_sandbox(self):
resource = self._getTestResource()
ctx = SafeRefContext(resource, vars=dict(subdomain="foo.com"))
expr = "{{ {subdomain.split('.')[0] : 1} }}"
result = _sandboxed_template(expr, ctx, ctx.vars, None)
assert result == {'foo': 1}
assert type(result) == dict
with self.assertRaises(UnfurlEvalError) as err:
map_value(dict(eval={"get_env": "HOME"}), ctx)
assert 'function unsafe or missing in ' in str(err.exception)
assert not map_value(dict(eval={"is_function_defined": "get_env"}), ctx)
ctx2 = RefContext(resource, vars=dict(subdomain="foo.com"))
assert map_value(dict(eval={"is_function_defined": "get_env"}), ctx2)
ctx3 = SafeRefContext(resource, strict=False)
assert map_value(dict(eval="{{ foo | to_json }}"), ctx3) == "<<Error rendering template: No filter named 'to_json'.>>"
assert map_value(dict(eval="{{ foo | abspath }}"), ctx3) == "<<Error rendering template: No filter named 'abspath'.>>"
def test_innerReferences(self):
resourceDef = {
"a": dict(b={"ref": "a::c"}, c={"e": 1}, d=["2", {"ref": "a::d::0"}])
}
resource = NodeInstance("test", attributes=resourceDef)
        assert resource.attributes
self.assertEqual(len(resource.attributes), 1)
expectedA = {"c": {"e": 1}, "b": {"e": 1}, "d": ["2", "2"]}
self.assertEqual(resource.attributes["a"]["b"], expectedA["b"])
self.assertEqual(resource.attributes["a"], expectedA)
self.assertEqual(Ref("a").resolve(RefContext(resource)), [expectedA])
self.assertEqual(Ref("a").resolve_one(RefContext(resource)), expectedA)
expected = ["2"]
self.assertEqual(Ref("a::d::0").resolve(RefContext(resource)), expected)
self.assertEqual(Ref("a::d::1").resolve(RefContext(resource)), expected)
# print('test_references', resource.attributes,
# 'AAAA', resource.attributes['a'],
# 'BBB', resource.attributes['a']['b'],
# )
self.assertEqual(resource.attributes["a"], expectedA)
self.assertEqual(resource.attributes["a"]["d"][0], "2")
self.assertEqual(resource.attributes["a"]["d"][1], "2")
self.assertEqual(resource.attributes["a"]["b"]["e"], 1)
self.assertEqual(Ref("a::b::e").resolve(RefContext(resource)), [1])
# test again to make sure it still resolves correctly
self.assertEqual(Ref("a::d::0").resolve(RefContext(resource)), expected)
self.assertEqual(Ref("a::d::1").resolve(RefContext(resource)), expected)
def test_vars(self):
# test dereferencing vars
resource = self._getTestResource()
query = {
"eval": "$aDict",
"vars": {"aDict": {"aRef": {"eval": "::test"}, "aTemplate": "{{ true }}"}},
}
result = Ref(query).resolve_one(RefContext(resource))
self.assertEqual(result, {"aRef": resource, "aTemplate": True})
query = {"eval": "$aRef", "vars": {"aRef": {"eval": "::test"}}}
assert Ref.is_ref(query["vars"]["aRef"])
result = Ref(query).resolve_one(RefContext(resource))
self.assertEqual(result, resource)
def test_nodeTraversal1(self):
root = NodeInstance(
"r2", {"a": [dict(ref="::r1::a"), dict(ref="b")], "b": "r2"} #'r1' #'r2'
)
child = NodeInstance("r1", {"a": dict(ref="b"), "b": "r1"}, root)
ctx = RefContext(root)
x = [{"a": [{"c": 1}, {"c": 2}]}]
r1 = ResultsList(x, ctx)
assert x == r1
self.assertEqual(Ref("b").resolve(RefContext(child)), ["r1"])
self.assertEqual(Ref("a").resolve(RefContext(child)), ["r1"])
self.assertEqual(Ref("a").resolve(RefContext(root)), [["r1", "r2"]])
assert not r1._haskey(1)
r1.append("added")
assert r1._haskey(1)
r1[0]["a"][1] = "not c"
assert r1[0]["a"][1] == "not c"
def test_nodeTraversal2(self):
root = NodeInstance("root", {"a": [{"ref": "::child"}, {"b": 2}]})
child = NodeInstance("child", {"b": 1}, root)
self.assertEqual(Ref(".ancestors").resolve(RefContext(child)), [[child, root]])
# self.assertEqual(Ref('a::b').resolve(RefContext(root)), [1])
self.assertEqual(Ref("a").resolve(RefContext(child)), [[child, {"b": 2}]])
# a resolves to [child, dict] so a::b resolves to [child[b], [b]2]
self.assertEqual(Ref("a::b").resolve(RefContext(child)), [1, 2])
def test_lookup(self):
resource = self._getTestResource()
os.environ[
"TEST_ENV"
] = "testEnv" # note: tox doesn't pass on environment variables so we need to set one now
query = {"eval": {"lookup": {"env": "TEST_ENV"}}}
self.assertEqual(map_value(query, resource), "testEnv")
def test_tempfile(self):
resource = self._getTestResource()
value = {"a": 1}
template = dict(
eval={"template": "{{ valuesfile }}"},
vars={"valuesfile": {"eval": {"tempfile": value}}},
)
result = map_value(template, resource)
# result is a path to the tempfile
with open(result) as tp:
self.assertEqual(tp.read(), json.dumps(value, indent=2))
def test_template_path(self):
resource = self._getTestResource()
template_contents = """\
{% if str(image) -%}
foo
{% endif %}
"""
# write template_contents to a temp file and have the template function read that file
template = dict(
eval={"template": dict(path={"eval": {"tempfile": template_contents}})},
vars={"image": "foo/bar"},
)
ctx = RefContext(resource, strict=False, trace=0)
class mock_task:
_errors = []
logger = logging.getLogger("test")
ctx.task = mock_task()
assert not ctx.strict
assert not ctx.task._errors
result = map_value(template, ctx)
# templating failed so it returned the original template
self.assertEqual(result, template_contents.strip(), len(result))
assert ctx.task._errors
def test_changerecord(self):
assert ChangeRecord.is_change_id("A01110000005")
assert not ChangeRecord.is_change_id("A0111000000"), "too short"
assert not ChangeRecord.is_change_id(None), "not a string"
assert not ChangeRecord.is_change_id(True), "not a string"
def test_binaryvault(self):
from unfurl.support import AttributeManager
from unfurl.yamlloader import make_yaml, sensitive_bytes, make_vault_lib
# load a binary file then write it out as a temporary vault file
fixture = os.path.join(
os.path.dirname(__file__), "fixtures/helmrepo/mysql-1.6.4.tgz"
)
src = (
"""
eval:
tempfile:
eval:
file: %s
select: contents
encoding: vault
"""
% fixture
)
vault = make_vault_lib("password")
yaml = make_yaml(vault)
expr = yaml.load(io.StringIO(src))
resource = self._getTestResource()
resource.attributeManager = AttributeManager(yaml)
resource._templar._loader.set_vault_secrets(vault.secrets)
pickled = pickle.dumps(resource, -1)
assert pickle.loads(pickled)
filePath = map_value(expr, resource)
with open(filePath, "rb") as vf:
vaultContents = vf.read()
assert vaultContents.startswith(b"$ANSIBLE_VAULT;")
# decrypt the vault file, make sure it's a sensitive_bytes string that matches the original contents
src = """
eval:
file:
eval: $tempfile
encoding: binary
select: contents
"""
expr = yaml.load(io.StringIO(src))
contents = map_value(expr, RefContext(resource, vars=dict(tempfile=filePath)))
assert isinstance(contents, sensitive_bytes), type(contents)
with open(fixture, "rb") as tp:
self.assertEqual(tp.read(), contents)
def test_external(self):
class ExternalTest(ExternalValue):
def __init__(self):
super(ExternalTest, self).__init__("ExternalTest", "test")
singleton = ExternalTest()
set_eval_func("externaltest", lambda arg, ctx: singleton)
ctx = RefContext(self._getTestResource())
expr = Ref({"eval": {"externaltest": None}})
result = expr.resolve(ctx)
self.assertEqual(result[0], "test")
result = expr.resolve_one(ctx)
self.assertEqual(result, "test")
result = expr.resolve(ctx, wantList="result")
self.assertIs(result.external, singleton)
asTemplate = '{{ {"externaltest": none } | eval }}'
result = map_value(asTemplate, ctx)
assert isinstance(result, str)
self.assertEqual(result, "test")
ctx2 = ctx.copy(wantList="result")
result = map_value(asTemplate, ctx2)
self.assertIs(result.external, singleton)
result = map_value("transformed " + asTemplate, ctx2)
self.assertEqual(result, "transformed test")
def test_to_env(self):
from unfurl.yamlloader import make_yaml
src = """
eval:
to_env:
FOO: 1 # get converted to string
BAR: false # get converted to empty string
BAZ: true # get converted to string
NUL: null # key gets excluded
QUU: # redacted when serialized into yaml
eval:
sensitive: "passw0rd"
SUB: "${FOO}"
"""
yaml = make_yaml()
expr = yaml.load(io.StringIO(src))
ctx = RefContext(self._getTestResource())
env = map_value(expr, ctx)
assert env == {'FOO': '1', 'BAR': '', 'BAZ': 'true', 'QUU': 'passw0rd', "SUB": "1"}
out=io.StringIO()
yaml.dump(serialize_value(env), out)
assert out.getvalue() == '''\
FOO: '1'
BAR: ''
BAZ: 'true'
QUU: <<REDACTED>>
SUB: '1'
'''
def test_to_env_set_environ(self):
from unfurl.yamlloader import make_yaml
src = """
eval:
to_env:
^PATH: /foo/bin
update_os_environ: true
"""
yaml = make_yaml()
expr = yaml.load(io.StringIO(src))
ctx = RefContext(self._getTestResource())
path = os.environ['PATH']
env = map_value(expr, ctx)
new_path = "/foo/bin:"+path
assert os.environ['PATH'] == new_path
assert env == {'PATH': new_path}
def test_labels(self):
import io
from unfurl.yamlloader import make_yaml
src = """
eval:
to_googlecloud_label:
Url: https://foo-bar.com
Missing: null
digest: d80912dc
"""
yaml = make_yaml()
expr = yaml.load(io.StringIO(src))
ctx = RefContext(self._getTestResource())
labels = map_value(expr, ctx)
assert labels == {'url': "https______foo-bar__comvit"}
src = """
eval:
to_label: "1 convert me"
replace: _
max: 10
case: upper
start_prepend: _
digestlen: 0 # disable digest on truncation
"""
yaml = make_yaml()
expr = yaml.load(io.StringIO(src))
ctx = SafeRefContext(self._getTestResource())
label = map_value(expr, ctx)
assert label == "_1CON_RTME"
src = """
eval:
to_label: "1 Convert Me"
replace: _
max: "{{ 5 + 5 }}"
case: lower
start_prepend: _
"""
yaml = make_yaml()
expr = yaml.load(io.StringIO(src))
ctx = SafeRefContext(self._getTestResource())
label = map_value(expr, ctx)
assert label == "_1converrd"
src = """
eval:
to_label:
- longprefix
- name
- suffix
sep: .
max: 10
"""
yaml = make_yaml()
expr = yaml.load(io.StringIO(src))
ctx = SafeRefContext(self._getTestResource())
label = map_value(expr, ctx)
assert label == "lo.na.s.RC"
src = """
eval:
to_label:
- longprefix
- name
- suffix
sep: .
replace: "-"
max: 20
"""
yaml = make_yaml()
expr = yaml.load(io.StringIO(src))
ctx = SafeRefContext(self._getTestResource())
label = map_value(expr, ctx)
assert label == "longpr.name.suffi.RC"
src = """
eval:
to_label:
- reallyreallylongprefix
- short
sep: .
replace: "-"
max: 20
"""
yaml = make_yaml()
expr = yaml.load(io.StringIO(src))
ctx = SafeRefContext(self._getTestResource())
label = map_value(expr, ctx)
assert len(label) == 20
assert label == "reall-refix.short.yi"
src = """
eval:
to_label:
- longprefix
- name
- suffix
sep: .
max: 1
"""
yaml = make_yaml()
expr = yaml.load(io.StringIO(src))
ctx = SafeRefContext(self._getTestResource())
label = map_value(expr, ctx)
assert label == "R"
def test_urljoin(self):
resource = self._getTestResource()
# default port is omitted
test1 = dict(eval={"urljoin": ["http", "localhost", 80]})
result1 = Ref(test1).resolve_one(RefContext(resource))
self.assertEqual("http://localhost", result1)
# no port
test2 = dict(eval={"urljoin": ["http", "localhost", "", "path"]})
result2 = Ref(test2).resolve_one(RefContext(resource))
self.assertEqual("http://localhost/path", result2)
# port (string ok)
test2 = dict(eval={"urljoin": ["http", "localhost", "8080", "path"]})
result2 = Ref(test2).resolve_one(RefContext(resource))
self.assertEqual("http://localhost:8080/path", result2)
        # a list of at least length 2 is required, but evaluates to None if its elements are empty
test3 = dict(eval={"urljoin": [None, None]})
result3 = Ref(test3).resolve_one(RefContext(resource))
assert result3 is None
def pairs(iterable):
i = iter(iterable)
try:
while True:
yield next(i), next(i)
except StopIteration:
pass
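# e.g. list(pairs([1, 2, 3, 4])) -> [(1, 2), (3, 4)]; a trailing odd item is dropped.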
def test_env_sub():
env = dict(baz="env", alt="env2", bad=None)
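    # substitute_env syntax exercised below: ${var} substitutes, ${var:default}
    # falls back to a default, ${a|b} tries alternatives in order, and a
    # backslash escapes the whole substitution.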
tests = [
"${baz} ${bar:default value}", "env default value",
"foo${baz|missing}${bar:default value}", "fooenvdefault value",
r"foo\${baz}${bar:default value}", "foo${baz}default value",
r"foo\\${baz}${bar:default value}", r"foo\${baz}default value",
"${missing|baz} ${missing|missing2:default value}", "env default value",
"${bad}", ""
]
for test, expected in pairs(tests):
assert expected == substitute_env(test, env), test
# ==== chimera-linux/cports :: /main/orc/template.py (BSD-2-Clause) ====
pkgname = "orc"
pkgver = "0.4.34"
pkgrel = 0
build_style = "meson"
configure_args = [
"-Dexamples=disabled",
]
hostmakedepends = [
"meson",
"pkgconf",
"gtk-doc-tools",
]
makedepends = ["linux-headers"]
pkgdesc = "Optimized Inner Loop Runtime Compiler"
maintainer = "q66 <q66@chimera-linux.org>"
license = "BSD-2-Clause"
url = "https://gstreamer.freedesktop.org"
source = f"{url}/src/{pkgname}/{pkgname}-{pkgver}.tar.xz"
sha256 = "8f47abb3f097171e44eb807adcdabd860fba2effd37d8d3c4fbd5f341cadd41f"
def post_install(self):
self.install_license("COPYING")
@subpackage("orc-devel")
def _devel(self):
return self.default_devel(extra=["usr/share/gtk-doc"])
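# A cports template is plain Python: the module-level variables above drive the
# build, post_install() runs after the install phase, and @subpackage("orc-devel")
# registers a split package (default_devel() presumably collects the usual
# development files plus the extra gtk-doc path listed here).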
# ==== ccxt/ccxt :: /python/ccxt/pro/binanceusdm.py (MIT) ====
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.pro.binance import binance
from ccxt.base.errors import InvalidOrder
class binanceusdm(binance):
def describe(self):
return self.deep_extend(super(binanceusdm, self).describe(), {
'id': 'binanceusdm',
'name': 'Binance USDⓈ-M',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/117738721-668c8d80-b205-11eb-8c49-3fad84c4a07f.jpg',
},
'options': {
'fetchMarkets': ['linear'],
'defaultSubType': 'linear',
},
# https://binance-docs.github.io/apidocs/futures/en/#error-codes
'exceptions': {
'exact': {
'-5021': InvalidOrder, # {"code":-5021,"msg":"Due to the order could not be filled immediately, the FOK order has been rejected."}
'-5022': InvalidOrder, # {"code":-5022,"msg":"Due to the order could not be executed, the Post Only order will be rejected."}
'-5028': InvalidOrder, # {"code":-5028,"msg":"Timestamp for self request is outside of the ME recvWindow."}
},
},
})
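# Note: this subclass only overrides metadata. deep_extend() recursively merges
# the dict above over the parent binance.describe() result, so all request and
# parsing logic is inherited from ccxt.pro.binance.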
# ==== plotly/plotly.py :: /packages/python/plotly/plotly/graph_objs/layout/template/data/_contourcarpet.py (MIT) ====
from plotly.graph_objs import Contourcarpet
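# (Generated re-export: each module under layout.template.data exposes one trace
# class so that figure templates can declare per-trace-type defaults.)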
# ==== viewflow/django-material :: /material/frontend/models.py (BSD-3-Clause) ====
from django.db import models
from django.core.cache import cache
from django.dispatch import receiver
from django.db.models.signals import post_save
from ..compat import _
try:
from django.utils.encoding import python_2_unicode_compatible
except ImportError:
# django 3.0+
python_2_unicode_compatible = lambda cls: cls # NOQA
class ModuleManager(models.Manager):
"""Base module state manager."""
def installed(self, module):
"""By default, all modules considered installed."""
installed_cache = cache.get('material.modules.installed')
if not installed_cache:
installed_cache = [mod.label for mod in self.get_queryset().filter(installed=True)]
cache.set('material.modules.installed', installed_cache, 60 * 60 * 24)
return module in installed_cache
@python_2_unicode_compatible
class Module(models.Model):
"""Keep module installed state in the database."""
label = models.SlugField(_('label'))
installed = models.BooleanField(_('installed'), default=True)
objects = ModuleManager()
class Meta:
verbose_name = _('module')
verbose_name_plural = _('modules')
def __str__(self):
return self.label
@receiver(post_save, sender=Module)
def _clean_installed_cache(sender, **kwargs):
cache.delete('material.modules.installed')
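# Rough usage sketch (names as above): Module.objects.installed("frontend")
# consults a 24-hour cache of installed labels; saving any Module row fires the
# post_save receiver, which drops the cache so the next check re-queries the DB.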
# ==== ray-project/ray :: /rllib/policy/sample_batch.py (Apache-2.0) ====
import collections
from functools import partial
import itertools
import sys
from numbers import Number
from typing import Dict, Iterator, Set, Union
from typing import List, Optional
import numpy as np
import tree # pip install dm_tree
from ray.rllib.utils.annotations import DeveloperAPI, ExperimentalAPI, PublicAPI
from ray.rllib.utils.compression import pack, unpack, is_compressed
from ray.rllib.utils.deprecation import Deprecated, deprecation_warning
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.torch_utils import convert_to_torch_tensor
from ray.rllib.utils.typing import (
PolicyID,
TensorType,
SampleBatchType,
ViewRequirementsDict,
)
from ray.util import log_once
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
# Default policy id for single agent environments
DEFAULT_POLICY_ID = "default_policy"
@DeveloperAPI
def attempt_count_timesteps(tensor_dict: dict):
"""Attempt to count timesteps based on dimensions of individual elements.
Returns the first successfully counted number of timesteps.
We do not attempt to count on INFOS or any state_in_* and state_out_* keys. The
number of timesteps we count in cases where we are unable to count is zero.
Args:
tensor_dict: A SampleBatch or another dict.
Returns:
count: The inferred number of timesteps >= 0.
"""
# Try to infer the "length" of the SampleBatch by finding the first
# value that is actually a ndarray/tensor.
# Skip manual counting routine if we can directly infer count from sequence lengths
if (
tensor_dict.get(SampleBatch.SEQ_LENS) is not None
and not (tf and tf.is_tensor(tensor_dict[SampleBatch.SEQ_LENS]))
and len(tensor_dict[SampleBatch.SEQ_LENS]) > 0
):
if torch and torch.is_tensor(tensor_dict[SampleBatch.SEQ_LENS]):
return tensor_dict[SampleBatch.SEQ_LENS].sum().item()
else:
return sum(tensor_dict[SampleBatch.SEQ_LENS])
for k, v in tensor_dict.items():
if k == SampleBatch.SEQ_LENS:
continue
assert isinstance(k, str), tensor_dict
if (
k == SampleBatch.INFOS
or k.startswith("state_in_")
or k.startswith("state_out_")
):
# Don't attempt to count on infos since we make no assumptions
# about its content
# Don't attempt to count on state since nesting can potentially mess
# things up
continue
# If this is a nested dict (for example a nested observation),
# try to flatten it, assert that all elements have the same length (batch
# dimension)
v_list = tree.flatten(v) if isinstance(v, (dict, tuple)) else [v]
# TODO: Drop support for lists and Numbers as values.
# If v_list contains lists or Numbers, convert them to arrays, too.
v_list = [
np.array(_v) if isinstance(_v, (Number, list)) else _v for _v in v_list
]
try:
# Add one of the elements' length, since they are all the same
_len = len(v_list[0])
if _len:
return _len
except Exception:
pass
# Return zero if we are unable to count
return 0
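# Illustration (hypothetical inputs): given {"obs": np.zeros((5, 3))} the count
# is 5 (length of the first countable column); given {"seq_lens": np.array([2, 3])}
# it is 5 (sum of sequence lengths); a dict containing only INFOS yields 0.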
@PublicAPI
class SampleBatch(dict):
"""Wrapper around a dictionary with string keys and array-like values.
For example, {"obs": [1, 2, 3], "reward": [0, -1, 1]} is a batch of three
samples, each with an "obs" and "reward" attribute.
"""
# On rows in SampleBatch:
# Each comment signifies how values relate to each other within a given row.
# A row generally signifies one timestep. Most importantly, at t=0, SampleBatch.OBS
# will usually be the reset-observation, while SampleBatch.ACTIONS will be the
# action based on the reset-observation and so on. This scheme is derived from
# RLlib's sampling logic.
# Outputs from interacting with the environment:
# Observation that we compute SampleBatch.ACTIONS from.
OBS = "obs"
# Observation returned after stepping with SampleBatch.ACTIONS.
NEXT_OBS = "new_obs"
# Action based on SampleBatch.OBS.
ACTIONS = "actions"
# Reward returned after stepping with SampleBatch.ACTIONS.
REWARDS = "rewards"
# Action chosen before SampleBatch.ACTIONS.
PREV_ACTIONS = "prev_actions"
# Reward received before SampleBatch.REWARDS.
PREV_REWARDS = "prev_rewards"
# Is the episode finished after stepping via SampleBatch.ACTIONS?
TERMINATEDS = "terminateds"
# Is the episode truncated (e.g. time limit) after stepping via SampleBatch.ACTIONS?
TRUNCATEDS = "truncateds"
# Infos returned after stepping with SampleBatch.ACTIONS
INFOS = "infos"
# Additional keys filled by RLlib to manage the data above:
SEQ_LENS = "seq_lens" # Groups rows into sequences by defining their length.
T = "t" # Timestep counter
EPS_ID = "eps_id" # Uniquely identifies an episode
ENV_ID = "env_id" # An env ID (e.g. the index for a vectorized sub-env).
AGENT_INDEX = "agent_index" # Uniquely identifies an agent within an episode.
# Uniquely identifies a sample batch. This is important to distinguish RNN
# sequences from the same episode when multiple sample batches are
# concatenated (fusing sequences across batches can be unsafe).
UNROLL_ID = "unroll_id"
# Algorithm-specific keys:
# Extra action fetches keys.
ACTION_DIST_INPUTS = "action_dist_inputs"
ACTION_PROB = "action_prob"
ACTION_LOGP = "action_logp"
ACTION_DIST = "action_dist"
# Value function predictions emitted by the behaviour policy.
VF_PREDS = "vf_preds"
# Values one ts beyond the last ts taken. These are usually calculated via the value
# function network using the final observation (and in case of an RNN: the last
# returned internal state).
VALUES_BOOTSTRAPPED = "values_bootstrapped"
# RE 3
# This is only computed and used when RE3 exploration strategy is enabled.
OBS_EMBEDS = "obs_embeds"
# Decision Transformer
RETURNS_TO_GO = "returns_to_go"
ATTENTION_MASKS = "attention_masks"
# Deprecated keys:
# Do not set this key directly. Instead, the values under this key are
# auto-computed via the values of the TERMINATEDS and TRUNCATEDS keys.
DONES = "dones"
# Use SampleBatch.OBS instead.
CUR_OBS = "obs"
@PublicAPI
def __init__(self, *args, **kwargs):
"""Constructs a sample batch (same params as dict constructor).
Note: All args and those kwargs not listed below will be passed
as-is to the parent dict constructor.
Args:
_time_major: Whether data in this sample batch
is time-major. This is False by default and only relevant
if the data contains sequences.
_max_seq_len: The max sequence chunk length
if the data contains sequences.
_zero_padded: Whether the data in this batch
contains sequences AND these sequences are right-zero-padded
according to the `_max_seq_len` setting.
_is_training: Whether this batch is used for
training. If False, batch may be used for e.g. action
computations (inference).
"""
if SampleBatch.DONES in kwargs:
raise KeyError(
"SampleBatch cannot be constructed anymore with a `DONES` key! "
"Instead, set the new TERMINATEDS and TRUNCATEDS keys. The values under"
" DONES will then be automatically computed using terminated|truncated."
)
# Possible seq_lens (TxB or BxT) setup.
self.time_major = kwargs.pop("_time_major", None)
# Maximum seq len value.
self.max_seq_len = kwargs.pop("_max_seq_len", None)
        # Is it already right-zero-padded?
self.zero_padded = kwargs.pop("_zero_padded", False)
# Whether this batch is used for training (vs inference).
self._is_training = kwargs.pop("_is_training", None)
# Weighted average number of grad updates that have been performed on the
# policy/ies that were used to collect this batch.
# E.g.: Two rollout workers collect samples of 50ts each
# (rollout_fragment_length=50). One of them has a policy that has undergone
# 2 updates thus far, the other worker uses a policy that has undergone 3
# updates thus far. The train batch size is 100, so we concatenate these 2
# batches to a new one that's 100ts long. This new 100ts batch will have its
        # `num_grad_updates` property set to 2.5 as it's the weighted average
# (both original batches contribute 50%).
self.num_grad_updates: Optional[float] = kwargs.pop("_num_grad_updates", None)
# Call super constructor. This will make the actual data accessible
# by column name (str) via e.g. self["some-col"].
dict.__init__(self, *args, **kwargs)
        # Indicates whether, for this batch, sequence lengths should be sliced
        # by their index in the batch or by their index as a sequence.
        # This is useful if a batch contains tensors of shape (B, T, ...), where each
        # index of B indicates one sequence. In this case, when slicing the batch,
        # we want one sequence to be sliced out per index in B
        # (`self._slice_seq_lens_in_B=True`). However, if the padded batch
        # contains tensors of shape (B*T, ...), where each index of B*T indicates
        # one timestep, we want one sequence to be sliced per T steps in B*T
        # (`self._slice_seq_lens_in_B=False`).
        # `._slice_seq_lens_in_B = True` is only meant to be used for batches that
        # we feed into Learner._update(); all other places in RLlib are not
        # expected to need this.
self._slice_seq_lens_in_B = False
self.accessed_keys = set()
self.added_keys = set()
self.deleted_keys = set()
self.intercepted_values = {}
self.get_interceptor = None
# Clear out None seq-lens.
seq_lens_ = self.get(SampleBatch.SEQ_LENS)
if seq_lens_ is None or (isinstance(seq_lens_, list) and len(seq_lens_) == 0):
self.pop(SampleBatch.SEQ_LENS, None)
# Numpyfy seq_lens if list.
elif isinstance(seq_lens_, list):
self[SampleBatch.SEQ_LENS] = seq_lens_ = np.array(seq_lens_, dtype=np.int32)
elif (torch and torch.is_tensor(seq_lens_)) or (tf and tf.is_tensor(seq_lens_)):
self[SampleBatch.SEQ_LENS] = seq_lens_
if (
self.max_seq_len is None
and seq_lens_ is not None
and not (tf and tf.is_tensor(seq_lens_))
and len(seq_lens_) > 0
):
if torch and torch.is_tensor(seq_lens_):
self.max_seq_len = seq_lens_.max().item()
else:
self.max_seq_len = max(seq_lens_)
if self._is_training is None:
self._is_training = self.pop("is_training", False)
for k, v in self.items():
# TODO: Drop support for lists and Numbers as values.
# Convert lists of int|float into numpy arrays make sure all data
# has same length.
if isinstance(v, (Number, list)) and not k == SampleBatch.INFOS:
self[k] = np.array(v)
self.count = attempt_count_timesteps(self)
# A convenience map for slicing this batch into sub-batches along
# the time axis. This helps reduce repeated iterations through the
# batch's seq_lens array to find good slicing points. Built lazily
# when needed.
self._slice_map = []
@PublicAPI
def __len__(self) -> int:
"""Returns the amount of samples in the sample batch."""
return self.count
@PublicAPI
def agent_steps(self) -> int:
"""Returns the same as len(self) (number of steps in this batch).
To make this compatible with `MultiAgentBatch.agent_steps()`.
"""
return len(self)
@PublicAPI
def env_steps(self) -> int:
"""Returns the same as len(self) (number of steps in this batch).
To make this compatible with `MultiAgentBatch.env_steps()`.
"""
return len(self)
@DeveloperAPI
def enable_slicing_by_batch_id(self):
self._slice_seq_lens_in_B = True
@DeveloperAPI
def disable_slicing_by_batch_id(self):
self._slice_seq_lens_in_B = False
@ExperimentalAPI
def is_terminated_or_truncated(self) -> bool:
"""Returns True if `self` is either terminated or truncated at idx -1."""
return self[SampleBatch.TERMINATEDS][-1] or (
SampleBatch.TRUNCATEDS in self and self[SampleBatch.TRUNCATEDS][-1]
)
@ExperimentalAPI
def is_single_trajectory(self) -> bool:
"""Returns True if this SampleBatch only contains one trajectory.
This is determined by checking all timesteps (except for the last) for being
not terminated AND (if applicable) not truncated.
"""
return not any(self[SampleBatch.TERMINATEDS][:-1]) and (
SampleBatch.TRUNCATEDS not in self
or not any(self[SampleBatch.TRUNCATEDS][:-1])
)
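    # e.g. terminateds=[False, False, True] -> True (one trajectory, ending at -1);
    #      terminateds=[False, True, False] -> False (episode boundary mid-batch).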
@staticmethod
@PublicAPI
@Deprecated(new="concat_samples() from rllib.policy.sample_batch", error=True)
def concat_samples(samples):
pass
@PublicAPI
def concat(self, other: "SampleBatch") -> "SampleBatch":
"""Concatenates `other` to this one and returns a new SampleBatch.
Args:
other: The other SampleBatch object to concat to this one.
Returns:
The new SampleBatch, resulting from concating `other` to `self`.
Examples:
>>> import numpy as np
>>> from ray.rllib.policy.sample_batch import SampleBatch
>>> b1 = SampleBatch({"a": np.array([1, 2])}) # doctest: +SKIP
>>> b2 = SampleBatch({"a": np.array([3, 4, 5])}) # doctest: +SKIP
>>> print(b1.concat(b2)) # doctest: +SKIP
{"a": np.array([1, 2, 3, 4, 5])}
"""
return concat_samples([self, other])
@PublicAPI
def copy(self, shallow: bool = False) -> "SampleBatch":
"""Creates a deep or shallow copy of this SampleBatch and returns it.
Args:
shallow: Whether the copying should be done shallowly.
Returns:
A deep or shallow copy of this SampleBatch object.
"""
copy_ = {k: v for k, v in self.items()}
data = tree.map_structure(
lambda v: (
np.array(v, copy=not shallow) if isinstance(v, np.ndarray) else v
),
copy_,
)
copy_ = SampleBatch(
data,
_time_major=self.time_major,
_zero_padded=self.zero_padded,
_max_seq_len=self.max_seq_len,
_num_grad_updates=self.num_grad_updates,
)
copy_.set_get_interceptor(self.get_interceptor)
copy_.added_keys = self.added_keys
copy_.deleted_keys = self.deleted_keys
copy_.accessed_keys = self.accessed_keys
return copy_
@PublicAPI
def rows(self) -> Iterator[Dict[str, TensorType]]:
"""Returns an iterator over data rows, i.e. dicts with column values.
Note that if `seq_lens` is set in self, we set it to 1 in the rows.
Yields:
The column values of the row in this iteration.
Examples:
>>> from ray.rllib.policy.sample_batch import SampleBatch
>>> batch = SampleBatch({ # doctest: +SKIP
... "a": [1, 2, 3],
... "b": [4, 5, 6],
... "seq_lens": [1, 2]
... })
>>> for row in batch.rows(): # doctest: +SKIP
... print(row) # doctest: +SKIP
{"a": 1, "b": 4, "seq_lens": 1}
{"a": 2, "b": 5, "seq_lens": 1}
{"a": 3, "b": 6, "seq_lens": 1}
"""
seq_lens = None if self.get(SampleBatch.SEQ_LENS, 1) is None else 1
self_as_dict = {k: v for k, v in self.items()}
for i in range(self.count):
yield tree.map_structure_with_path(
lambda p, v: v[i] if p[0] != self.SEQ_LENS else seq_lens,
self_as_dict,
)
@PublicAPI
def columns(self, keys: List[str]) -> List[any]:
"""Returns a list of the batch-data in the specified columns.
Args:
            keys: List of column names for which to return the data.
Returns:
The list of data items ordered by the order of column
names in `keys`.
Examples:
>>> from ray.rllib.policy.sample_batch import SampleBatch
>>> batch = SampleBatch({"a": [1], "b": [2], "c": [3]}) # doctest: +SKIP
>>> print(batch.columns(["a", "b"])) # doctest: +SKIP
[[1], [2]]
"""
# TODO: (sven) Make this work for nested data as well.
out = []
for k in keys:
out.append(self[k])
return out
@PublicAPI
def shuffle(self) -> "SampleBatch":
"""Shuffles the rows of this batch in-place.
Returns:
This very (now shuffled) SampleBatch.
Raises:
ValueError: If self[SampleBatch.SEQ_LENS] is defined.
Examples:
>>> from ray.rllib.policy.sample_batch import SampleBatch
>>> batch = SampleBatch({"a": [1, 2, 3, 4]}) # doctest: +SKIP
>>> print(batch.shuffle()) # doctest: +SKIP
{"a": [4, 1, 3, 2]}
"""
# Shuffling the data when we have `seq_lens` defined is probably
# a bad idea!
if self.get(SampleBatch.SEQ_LENS) is not None:
raise ValueError(
"SampleBatch.shuffle not possible when your data has "
"`seq_lens` defined!"
)
# Get a permutation over the single items once and use the same
# permutation for all the data (otherwise, data would become
# meaningless).
permutation = np.random.permutation(self.count)
self_as_dict = {k: v for k, v in self.items()}
shuffled = tree.map_structure(lambda v: v[permutation], self_as_dict)
self.update(shuffled)
# Flush cache such that intercepted values are recalculated after the
# shuffling.
self.intercepted_values = {}
return self
@PublicAPI
def split_by_episode(self, key: Optional[str] = None) -> List["SampleBatch"]:
"""Splits by `eps_id` column and returns list of new batches.
If `eps_id` is not present, splits by `dones` instead.
Args:
key: If specified, overwrite default and use key to split.
Returns:
List of batches, one per distinct episode.
Raises:
KeyError: If the `eps_id` AND `dones` columns are not present.
Examples:
>>> from ray.rllib.policy.sample_batch import SampleBatch
>>> # "eps_id" is present
>>> batch = SampleBatch( # doctest: +SKIP
... {"a": [1, 2, 3], "eps_id": [0, 0, 1]})
>>> print(batch.split_by_episode()) # doctest: +SKIP
[{"a": [1, 2], "eps_id": [0, 0]}, {"a": [3], "eps_id": [1]}]
>>>
>>> # "eps_id" not present, split by "dones" instead
>>> batch = SampleBatch( # doctest: +SKIP
... {"a": [1, 2, 3, 4, 5], "dones": [0, 0, 1, 0, 1]})
>>> print(batch.split_by_episode()) # doctest: +SKIP
[{"a": [1, 2, 3], "dones": [0, 0, 1]}, {"a": [4, 5], "dones": [0, 1]}]
>>>
>>> # The last episode is appended even if it does not end with done
>>> batch = SampleBatch( # doctest: +SKIP
... {"a": [1, 2, 3, 4, 5], "dones": [0, 0, 1, 0, 0]})
>>> print(batch.split_by_episode()) # doctest: +SKIP
[{"a": [1, 2, 3], "dones": [0, 0, 1]}, {"a": [4, 5], "dones": [0, 0]}]
>>> batch = SampleBatch( # doctest: +SKIP
... {"a": [1, 2, 3, 4, 5], "dones": [0, 0, 0, 0, 0]})
>>> print(batch.split_by_episode()) # doctest: +SKIP
[{"a": [1, 2, 3, 4, 5], "dones": [0, 0, 0, 0, 0]}]
"""
assert key is None or key in [SampleBatch.EPS_ID, SampleBatch.DONES], (
f"`SampleBatch.split_by_episode(key={key})` invalid! "
f"Must be [None|'dones'|'eps_id']."
)
def slice_by_eps_id():
slices = []
# Produce a new slice whenever we find a new episode ID.
cur_eps_id = self[SampleBatch.EPS_ID][0]
offset = 0
for i in range(self.count):
next_eps_id = self[SampleBatch.EPS_ID][i]
if next_eps_id != cur_eps_id:
slices.append(self[offset:i])
offset = i
cur_eps_id = next_eps_id
# Add final slice.
slices.append(self[offset : self.count])
return slices
def slice_by_terminateds_or_truncateds():
slices = []
offset = 0
for i in range(self.count):
if self[SampleBatch.TERMINATEDS][i] or (
SampleBatch.TRUNCATEDS in self and self[SampleBatch.TRUNCATEDS][i]
):
# Since self[i] is the last timestep of the episode,
# append it to the batch, then set offset to the start
# of the next batch
slices.append(self[offset : i + 1])
offset = i + 1
# Add final slice.
if offset != self.count:
slices.append(self[offset:])
return slices
key_to_method = {
SampleBatch.EPS_ID: slice_by_eps_id,
SampleBatch.DONES: slice_by_terminateds_or_truncateds,
}
# If key not specified, default to this order.
key_resolve_order = [SampleBatch.EPS_ID, SampleBatch.DONES]
slices = None
if key is not None:
# If key specified, directly use it.
if key == SampleBatch.EPS_ID and key not in self:
raise KeyError(f"{self} does not have key `{key}`!")
slices = key_to_method[key]()
else:
# If key not specified, go in order.
for key in key_resolve_order:
if key == SampleBatch.DONES or key in self:
slices = key_to_method[key]()
break
if slices is None:
raise KeyError(f"{self} does not have keys {key_resolve_order}!")
        assert sum(s.count for s in slices) == self.count, (
            f"Calling split_by_episode on {self} returned {slices}, "
            f"which should in total have {self.count} timesteps!"
        )
return slices
def slice(
self, start: int, end: int, state_start=None, state_end=None
) -> "SampleBatch":
"""Returns a slice of the row data of this batch (w/o copying).
Args:
start: Starting index. If < 0, will left-zero-pad.
            end: Ending index.
            state_start: Optional start index into the per-sequence
                `state_in_x` rows (and `seq_lens`) when sequence data is present.
            state_end: Optional stop index into the per-sequence `state_in_x` rows.
Returns:
A new SampleBatch, which has a slice of this batch's data.
"""
if (
self.get(SampleBatch.SEQ_LENS) is not None
and len(self[SampleBatch.SEQ_LENS]) > 0
):
if start < 0:
data = {
k: np.concatenate(
[
np.zeros(shape=(-start,) + v.shape[1:], dtype=v.dtype),
v[0:end],
]
)
for k, v in self.items()
if k != SampleBatch.SEQ_LENS and not k.startswith("state_in_")
}
else:
data = {
k: tree.map_structure(lambda s: s[start:end], v)
for k, v in self.items()
if k != SampleBatch.SEQ_LENS and not k.startswith("state_in_")
}
if state_start is not None:
assert state_end is not None
state_idx = 0
state_key = "state_in_{}".format(state_idx)
while state_key in self:
data[state_key] = self[state_key][state_start:state_end]
state_idx += 1
state_key = "state_in_{}".format(state_idx)
seq_lens = list(self[SampleBatch.SEQ_LENS][state_start:state_end])
# Adjust seq_lens if necessary.
data_len = len(data[next(iter(data))])
if sum(seq_lens) != data_len:
assert sum(seq_lens) > data_len
seq_lens[-1] = data_len - sum(seq_lens[:-1])
else:
# Fix state_in_x data.
count = 0
state_start = None
seq_lens = None
for i, seq_len in enumerate(self[SampleBatch.SEQ_LENS]):
count += seq_len
if count >= end:
state_idx = 0
state_key = "state_in_{}".format(state_idx)
if state_start is None:
state_start = i
while state_key in self:
data[state_key] = self[state_key][state_start : i + 1]
state_idx += 1
state_key = "state_in_{}".format(state_idx)
seq_lens = list(self[SampleBatch.SEQ_LENS][state_start:i]) + [
seq_len - (count - end)
]
if start < 0:
seq_lens[0] += -start
diff = sum(seq_lens) - (end - start)
if diff > 0:
seq_lens[0] -= diff
assert sum(seq_lens) == (end - start)
break
elif state_start is None and count > start:
state_start = i
return SampleBatch(
data,
seq_lens=seq_lens,
_is_training=self.is_training,
_time_major=self.time_major,
_num_grad_updates=self.num_grad_updates,
)
else:
return SampleBatch(
tree.map_structure(lambda value: value[start:end], self),
_is_training=self.is_training,
_time_major=self.time_major,
_num_grad_updates=self.num_grad_updates,
)
def _batch_slice(self, slice_: slice) -> "SampleBatch":
"""Helper method to handle SampleBatch slicing using a slice object.
The returned SampleBatch uses the same underlying data object as
`self`, so changing the slice will also change `self`.
Note that only zero or positive bounds are allowed for both start
and stop values. The slice step must be 1 (or None, which is the
same).
Args:
slice_: The python slice object to slice by.
Returns:
            A new SampleBatch that "links" into the same (sliced) data as
            `self` rather than copying it.
"""
start = slice_.start or 0
stop = slice_.stop or len(self[SampleBatch.SEQ_LENS])
# If stop goes beyond the length of this batch -> Make it go till the
# end only (including last item).
# Analogous to `l = [0, 1, 2]; l[:100] -> [0, 1, 2];`.
if stop > len(self):
stop = len(self)
assert start >= 0 and stop >= 0 and slice_.step in [1, None]
data = tree.map_structure(lambda value: value[start:stop], self)
return SampleBatch(
data,
_is_training=self.is_training,
_time_major=self.time_major,
_num_grad_updates=self.num_grad_updates,
)
@PublicAPI
def timeslices(
self,
size: Optional[int] = None,
num_slices: Optional[int] = None,
k: Optional[int] = None,
) -> List["SampleBatch"]:
"""Returns SampleBatches, each one representing a k-slice of this one.
Will start from timestep 0 and produce slices of size=k.
Args:
size: The size (in timesteps) of each returned SampleBatch.
num_slices: The number of slices to produce.
k: Deprecated: Use size or num_slices instead. The size
(in timesteps) of each returned SampleBatch.
        Returns:
            A list of new SampleBatches: either `num_slices` roughly equal-sized
            slices, or as many slices of size `size` as fit into this batch.
"""
if size is None and num_slices is None:
deprecation_warning("k", "size or num_slices")
assert k is not None
size = k
if size is None:
assert isinstance(num_slices, int)
slices = []
left = len(self)
start = 0
while left:
len_ = left // (num_slices - len(slices))
stop = start + len_
slices.append(self[start:stop])
left -= len_
start = stop
return slices
else:
assert isinstance(size, int)
slices = []
left = len(self)
start = 0
while left:
stop = start + size
slices.append(self[start:stop])
left -= size
start = stop
return slices
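    # e.g. for a 100-timestep batch: timeslices(size=25) yields 4 batches of 25
    # timesteps each, while timeslices(num_slices=3) yields sizes [33, 33, 34].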
@Deprecated(new="SampleBatch.right_zero_pad", error=True)
def zero_pad(self, max_seq_len, exclude_states=True):
pass
def right_zero_pad(self, max_seq_len: int, exclude_states: bool = True):
"""Right (adding zeros at end) zero-pads this SampleBatch in-place.
This will set the `self.zero_padded` flag to True and
`self.max_seq_len` to the given `max_seq_len` value.
Args:
max_seq_len: The max (total) length to zero pad to.
exclude_states: If False, also right-zero-pad all
`state_in_x` data. If True, leave `state_in_x` keys
as-is.
Returns:
This very (now right-zero-padded) SampleBatch.
Raises:
ValueError: If self[SampleBatch.SEQ_LENS] is None (not defined).
Examples:
>>> from ray.rllib.policy.sample_batch import SampleBatch
>>> batch = SampleBatch( # doctest: +SKIP
... {"a": [1, 2, 3], "seq_lens": [1, 2]})
>>> print(batch.right_zero_pad(max_seq_len=4)) # doctest: +SKIP
{"a": [1, 0, 0, 0, 2, 3, 0, 0], "seq_lens": [1, 2]}
>>> batch = SampleBatch({"a": [1, 2, 3], # doctest: +SKIP
... "state_in_0": [1.0, 3.0],
... "seq_lens": [1, 2]})
>>> print(batch.right_zero_pad(max_seq_len=5)) # doctest: +SKIP
{"a": [1, 0, 0, 0, 0, 2, 3, 0, 0, 0],
"state_in_0": [1.0, 3.0], # <- all state-ins remain as-is
"seq_lens": [1, 2]}
"""
seq_lens = self.get(SampleBatch.SEQ_LENS)
if seq_lens is None:
raise ValueError(
"Cannot right-zero-pad SampleBatch if no `seq_lens` field "
f"present! SampleBatch={self}"
)
length = len(seq_lens) * max_seq_len
def _zero_pad_in_place(path, value):
# Skip "state_in_..." columns and "seq_lens".
if (exclude_states is True and path[0].startswith("state_in_")) or path[
0
] == SampleBatch.SEQ_LENS:
return
# Generate zero-filled primer of len=max_seq_len.
if value.dtype == object or value.dtype.type is np.str_:
f_pad = [None] * length
else:
# Make sure type doesn't change.
f_pad = np.zeros((length,) + np.shape(value)[1:], dtype=value.dtype)
# Fill primer with data.
f_pad_base = f_base = 0
for len_ in self[SampleBatch.SEQ_LENS]:
f_pad[f_pad_base : f_pad_base + len_] = value[f_base : f_base + len_]
f_pad_base += max_seq_len
f_base += len_
assert f_base == len(value), value
# Update our data in-place.
curr = self
for i, p in enumerate(path):
if i == len(path) - 1:
curr[p] = f_pad
curr = curr[p]
self_as_dict = {k: v for k, v in self.items()}
tree.map_structure_with_path(_zero_pad_in_place, self_as_dict)
        # Set flags to indicate that we are now zero-padded (and to what extent).
self.zero_padded = True
self.max_seq_len = max_seq_len
return self
@ExperimentalAPI
def to_device(self, device, framework="torch"):
"""TODO: transfer batch to given device as framework tensor."""
if framework == "torch":
assert torch is not None
for k, v in self.items():
self[k] = convert_to_torch_tensor(v, device)
else:
raise NotImplementedError
return self
@PublicAPI
def size_bytes(self) -> int:
"""Returns sum over number of bytes of all data buffers.
For numpy arrays, we use ``.nbytes``. For all other value types, we use
sys.getsizeof(...).
Returns:
The overall size in bytes of the data buffer (all columns).
"""
return sum(
v.nbytes if isinstance(v, np.ndarray) else sys.getsizeof(v)
for v in tree.flatten(self)
)
def get(self, key, default=None):
"""Returns one column (by key) from the data or a default value."""
try:
return self.__getitem__(key)
except KeyError:
return default
@PublicAPI
def as_multi_agent(self) -> "MultiAgentBatch":
"""Returns the respective MultiAgentBatch using DEFAULT_POLICY_ID.
Returns:
The MultiAgentBatch (using DEFAULT_POLICY_ID) corresponding
to this SampleBatch.
"""
return MultiAgentBatch({DEFAULT_POLICY_ID: self}, self.count)
@PublicAPI
def __getitem__(self, key: Union[str, slice]) -> TensorType:
"""Returns one column (by key) from the data or a sliced new batch.
Args:
key: The key (column name) to return or
a slice object for slicing this SampleBatch.
Returns:
The data under the given key or a sliced version of this batch.
"""
if isinstance(key, slice):
return self._slice(key)
        # Special key DONES -> return TERMINATEDS for backward compatibility
        # with the old meaning of DONES (TRUNCATEDS is tracked separately).
if key == SampleBatch.DONES:
return self[SampleBatch.TERMINATEDS]
# Backward compatibility for when "input-dicts" were used.
elif key == "is_training":
if log_once("SampleBatch['is_training']"):
deprecation_warning(
old="SampleBatch['is_training']",
new="SampleBatch.is_training",
error=False,
)
return self.is_training
if not hasattr(self, key) and key in self:
self.accessed_keys.add(key)
value = dict.__getitem__(self, key)
if self.get_interceptor is not None:
if key not in self.intercepted_values:
self.intercepted_values[key] = self.get_interceptor(value)
value = self.intercepted_values[key]
return value
@PublicAPI
def __setitem__(self, key, item) -> None:
"""Inserts (overrides) an entire column (by key) in the data buffer.
Args:
key: The column name to set a value for.
item: The data to insert.
"""
# Disallow setting DONES key directly.
if key == SampleBatch.DONES:
raise KeyError(
"Cannot set `DONES` anymore in a SampleBatch! "
"Instead, set the new TERMINATEDS and TRUNCATEDS keys. The values under"
" DONES will then be automatically computed using terminated|truncated."
)
# Defend against creating SampleBatch via pickle (no property
# `added_keys` and first item is already set).
elif not hasattr(self, "added_keys"):
dict.__setitem__(self, key, item)
return
# Backward compatibility for when "input-dicts" were used.
if key == "is_training":
if log_once("SampleBatch['is_training']"):
deprecation_warning(
old="SampleBatch['is_training']",
new="SampleBatch.is_training",
error=False,
)
self._is_training = item
return
if key not in self:
self.added_keys.add(key)
dict.__setitem__(self, key, item)
if key in self.intercepted_values:
self.intercepted_values[key] = item
@property
def is_training(self):
if self.get_interceptor is not None and isinstance(self._is_training, bool):
if "_is_training" not in self.intercepted_values:
self.intercepted_values["_is_training"] = self.get_interceptor(
self._is_training
)
return self.intercepted_values["_is_training"]
return self._is_training
def set_training(self, training: Union[bool, "tf1.placeholder"] = True):
"""Sets the `is_training` flag for this SampleBatch."""
self._is_training = training
self.intercepted_values.pop("_is_training", None)
@PublicAPI
def __delitem__(self, key):
self.deleted_keys.add(key)
dict.__delitem__(self, key)
@DeveloperAPI
def compress(
self, bulk: bool = False, columns: Set[str] = frozenset(["obs", "new_obs"])
) -> "SampleBatch":
"""Compresses the data buffers (by column) in place.
Args:
bulk: Whether to compress across the batch dimension (0)
as well. If False will compress n separate list items, where n
is the batch size.
columns: The columns to compress. Default: Only
compress the obs and new_obs columns.
Returns:
This very (now compressed) SampleBatch.
"""
def _compress_in_place(path, value):
if path[0] not in columns:
return
curr = self
for i, p in enumerate(path):
if i == len(path) - 1:
if bulk:
curr[p] = pack(value)
else:
curr[p] = np.array([pack(o) for o in value])
curr = curr[p]
tree.map_structure_with_path(_compress_in_place, self)
return self
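    # e.g. batch.compress(columns={"obs"}) packs each observation via pack();
    # a later decompress_if_needed() detects compressed buffers and unpacks them,
    # so downstream code never needs to know whether compression happened.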
@DeveloperAPI
def decompress_if_needed(
self, columns: Set[str] = frozenset(["obs", "new_obs"])
) -> "SampleBatch":
"""Decompresses data buffers (per column if not compressed) in place.
Args:
columns: The columns to decompress. Default: Only
decompress the obs and new_obs columns.
Returns:
This very (now uncompressed) SampleBatch.
"""
def _decompress_in_place(path, value):
if path[0] not in columns:
return
curr = self
for p in path[:-1]:
curr = curr[p]
# Bulk compressed.
if is_compressed(value):
curr[path[-1]] = unpack(value)
# Non bulk compressed.
elif len(value) > 0 and is_compressed(value[0]):
curr[path[-1]] = np.array([unpack(o) for o in value])
tree.map_structure_with_path(_decompress_in_place, self)
return self
@DeveloperAPI
def set_get_interceptor(self, fn):
"""Sets a function to be called on every getitem."""
# If get-interceptor changes, must erase old intercepted values.
if fn is not self.get_interceptor:
self.intercepted_values = {}
self.get_interceptor = fn
def __repr__(self):
keys = list(self.keys())
if self.get(SampleBatch.SEQ_LENS) is None:
return f"SampleBatch({self.count}: {keys})"
else:
keys.remove(SampleBatch.SEQ_LENS)
return (
f"SampleBatch({self.count} " f"(seqs={len(self['seq_lens'])}): {keys})"
)
def _slice(self, slice_: slice) -> "SampleBatch":
"""Helper method to handle SampleBatch slicing using a slice object.
The returned SampleBatch uses the same underlying data object as
`self`, so changing the slice will also change `self`.
Note that only zero or positive bounds are allowed for both start
and stop values. The slice step must be 1 (or None, which is the
same).
Args:
slice_: The python slice object to slice by.
Returns:
            A new SampleBatch that "links" into the same (sliced) data as
            `self` rather than copying it.
"""
if self._slice_seq_lens_in_B:
return self._batch_slice(slice_)
start = slice_.start or 0
stop = slice_.stop or len(self)
# If stop goes beyond the length of this batch -> Make it go till the
# end only (including last item).
# Analogous to `l = [0, 1, 2]; l[:100] -> [0, 1, 2];`.
if stop > len(self):
stop = len(self)
if (
self.get(SampleBatch.SEQ_LENS) is not None
and len(self[SampleBatch.SEQ_LENS]) > 0
):
# Build our slice-map, if not done already.
if not self._slice_map:
sum_ = 0
for i, l in enumerate(map(int, self[SampleBatch.SEQ_LENS])):
self._slice_map.extend([(i, sum_)] * l)
sum_ = sum_ + l
# In case `stop` points to the very end (lengths of this
# batch), return the last sequence (the -1 here makes sure we
# never go beyond it; would result in an index error below).
self._slice_map.append((len(self[SampleBatch.SEQ_LENS]), sum_))
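                # Illustration: with seq_lens=[2, 3], the slice map becomes
                # [(0, 0), (0, 0), (1, 2), (1, 2), (1, 2), (2, 5)] -- each timestep
                # index maps to (its sequence index, that sequence's start offset).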
start_seq_len, start_unpadded = self._slice_map[start]
stop_seq_len, stop_unpadded = self._slice_map[stop]
start_padded = start_unpadded
stop_padded = stop_unpadded
if self.zero_padded:
start_padded = start_seq_len * self.max_seq_len
stop_padded = stop_seq_len * self.max_seq_len
def map_(path, value):
if path[0] != SampleBatch.SEQ_LENS and not path[0].startswith(
"state_in_"
):
if path[0] != SampleBatch.INFOS:
return value[start_padded:stop_padded]
else:
if (
(isinstance(value, np.ndarray) and value.size > 0)
or (
torch
and torch.is_tensor(value)
and len(list(value.shape)) > 0
)
or (tf and tf.is_tensor(value) and tf.size(value) > 0)
):
return value[start_unpadded:stop_unpadded]
else:
# Since infos should be stored as lists and not arrays,
# we return the values here and slice them separately
# TODO(Artur): Clean this hack up.
return value
else:
return value[start_seq_len:stop_seq_len]
data = tree.map_structure_with_path(map_, self)
# Since we don't slice in the above map_ function, we do it here.
if isinstance(data.get(SampleBatch.INFOS), list):
data[SampleBatch.INFOS] = data[SampleBatch.INFOS][
start_unpadded:stop_unpadded
]
return SampleBatch(
data,
_is_training=self.is_training,
_time_major=self.time_major,
_zero_padded=self.zero_padded,
_max_seq_len=self.max_seq_len if self.zero_padded else None,
_num_grad_updates=self.num_grad_updates,
)
else:
def map_(value):
if (
isinstance(value, np.ndarray)
or (torch and torch.is_tensor(value))
or (tf and tf.is_tensor(value))
):
return value[start:stop]
else:
# Since infos should be stored as lists and not arrays,
# we return the values here and slice them separately
# TODO(Artur): Clean this hack up.
return value
data = tree.map_structure(map_, self)
return SampleBatch(
data,
_is_training=self.is_training,
_time_major=self.time_major,
_num_grad_updates=self.num_grad_updates,
)
@Deprecated(error=False)
def _get_slice_indices(self, slice_size):
data_slices = []
data_slices_states = []
if (
self.get(SampleBatch.SEQ_LENS) is not None
and len(self[SampleBatch.SEQ_LENS]) > 0
):
assert np.all(self[SampleBatch.SEQ_LENS] < slice_size), (
"ERROR: `slice_size` must be larger than the max. seq-len "
"in the batch!"
)
            start_pos = 0
            current_slice_size = 0
            actual_slice_idx = 0
            start_idx = 0
            idx = 0
            while idx < len(self[SampleBatch.SEQ_LENS]):
                seq_len = self[SampleBatch.SEQ_LENS][idx]
                current_slice_size += seq_len
                actual_slice_idx += (
                    seq_len if not self.zero_padded else self.max_seq_len
                )
                # Complete minibatch -> Append to data_slices.
                if current_slice_size >= slice_size:
                    end_idx = idx + 1
                    # We are not zero-padded yet; all sequences are
                    # back-to-back.
                    if not self.zero_padded:
                        data_slices.append((start_pos, start_pos + slice_size))
                        start_pos += slice_size
                        if current_slice_size > slice_size:
                            overhead = current_slice_size - slice_size
                            start_pos -= seq_len - overhead
                            idx -= 1
                    # We are already zero-padded: Cut in chunks of max_seq_len.
                    else:
                        data_slices.append((start_pos, actual_slice_idx))
                        start_pos = actual_slice_idx
                    data_slices_states.append((start_idx, end_idx))
                    current_slice_size = 0
                    start_idx = idx + 1
                idx += 1
else:
i = 0
while i < self.count:
data_slices.append((i, i + slice_size))
i += slice_size
return data_slices, data_slices_states
@ExperimentalAPI
def get_single_step_input_dict(
self,
view_requirements: ViewRequirementsDict,
index: Union[str, int] = "last",
) -> "SampleBatch":
"""Creates single ts SampleBatch at given index from `self`.
For usage as input-dict for model (action or value function) calls.
Args:
view_requirements: A view requirements dict from the model for
which to produce the input_dict.
index: An integer index value indicating the
position in the trajectory for which to generate the
compute_actions input dict. Set to "last" to generate the dict
at the very end of the trajectory (e.g. for value estimation).
Note that "last" is different from -1, as "last" will use the
final NEXT_OBS as observation input.
Returns:
The (single-timestep) input dict for ModelV2 calls.
"""
last_mappings = {
SampleBatch.OBS: SampleBatch.NEXT_OBS,
SampleBatch.PREV_ACTIONS: SampleBatch.ACTIONS,
SampleBatch.PREV_REWARDS: SampleBatch.REWARDS,
}
input_dict = {}
for view_col, view_req in view_requirements.items():
if view_req.used_for_compute_actions is False:
continue
# Create batches of size 1 (single-agent input-dict).
data_col = view_req.data_col or view_col
if index == "last":
data_col = last_mappings.get(data_col, data_col)
# Range needed.
if view_req.shift_from is not None:
# Batch repeat value > 1: We have single frames in the
# batch at each timestep (for the `data_col`).
data = self[view_col][-1]
traj_len = len(self[data_col])
missing_at_end = traj_len % view_req.batch_repeat_value
# Index into the observations column must be shifted by
# -1 b/c index=0 for observations means the current (last
# seen) observation (after having taken an action).
obs_shift = (
-1 if data_col in [SampleBatch.OBS, SampleBatch.NEXT_OBS] else 0
)
from_ = view_req.shift_from + obs_shift
to_ = view_req.shift_to + obs_shift + 1
if to_ == 0:
to_ = None
input_dict[view_col] = np.array(
[
np.concatenate([data, self[data_col][-missing_at_end:]])[
from_:to_
]
]
)
# Single index.
else:
input_dict[view_col] = tree.map_structure(
lambda v: v[-1:], # keep as array (w/ 1 element)
self[data_col],
)
# Single index somewhere inside the trajectory (non-last).
else:
input_dict[view_col] = self[data_col][
index : index + 1 if index != -1 else None
]
return SampleBatch(input_dict, seq_lens=np.array([1], dtype=np.int32))
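# A minimal illustrative sketch of the slicing semantics described in
# `_slice` above; it assumes `__getitem__` routes slice objects to `_slice`,
# as in recent RLlib versions, and uses made-up column names.
def _example_sample_batch_slicing():
    import numpy as np

    batch = SampleBatch({"obs": np.arange(5), "rewards": np.arange(5) * 0.1})
    # The returned SampleBatch links into the same underlying data; an
    # out-of-range stop is clipped, like plain python lists.
    view = batch[1:100]
    assert view.count == 4
    return view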
@PublicAPI
class MultiAgentBatch:
"""A batch of experiences from multiple agents in the environment.
Attributes:
policy_batches (Dict[PolicyID, SampleBatch]): Mapping from policy
ids to SampleBatches of experiences.
count: The number of env steps in this batch.
"""
@PublicAPI
def __init__(self, policy_batches: Dict[PolicyID, SampleBatch], env_steps: int):
"""Initialize a MultiAgentBatch instance.
Args:
policy_batches: Mapping from policy
ids to SampleBatches of experiences.
env_steps: The number of environment steps in the environment
                this batch contains. This will be less than or equal to the
                number of transitions this batch contains across all policies
                in total.
"""
for v in policy_batches.values():
assert isinstance(v, SampleBatch)
self.policy_batches = policy_batches
# Called "count" for uniformity with SampleBatch.
# Prefer to access this via the `env_steps()` method when possible
# for clarity.
self.count = env_steps
@PublicAPI
def env_steps(self) -> int:
"""The number of env steps (there are >= 1 agent steps per env step).
Returns:
The number of environment steps contained in this batch.
"""
return self.count
@PublicAPI
def __len__(self) -> int:
"""Same as `self.env_steps()`."""
return self.count
@PublicAPI
def agent_steps(self) -> int:
"""The number of agent steps (there are >= 1 agent steps per env step).
Returns:
The number of agent steps total in this batch.
"""
ct = 0
for batch in self.policy_batches.values():
ct += batch.count
return ct
@PublicAPI
def timeslices(self, k: int) -> List["MultiAgentBatch"]:
"""Returns k-step batches holding data for each agent at those steps.
        For example, suppose we have agent1 observations [a1t1, a1t2, a1t3],
for agent2, [a2t1, a2t3], and for agent3, [a3t3] only.
Calling timeslices(1) would return three MultiAgentBatches containing
[a1t1, a2t1], [a1t2], and [a1t3, a2t3, a3t3].
Calling timeslices(2) would return two MultiAgentBatches containing
[a1t1, a1t2, a2t1], and [a1t3, a2t3, a3t3].
This method is used to implement "lockstep" replay mode. Note that this
method does not guarantee each batch contains only data from a single
unroll. Batches might contain data from multiple different envs.
"""
from ray.rllib.evaluation.sample_batch_builder import SampleBatchBuilder
# Build a sorted set of (eps_id, t, policy_id, data...)
steps = []
for policy_id, batch in self.policy_batches.items():
for row in batch.rows():
steps.append(
(
row[SampleBatch.EPS_ID],
row[SampleBatch.T],
row[SampleBatch.AGENT_INDEX],
policy_id,
row,
)
)
steps.sort()
finished_slices = []
cur_slice = collections.defaultdict(SampleBatchBuilder)
cur_slice_size = 0
def finish_slice():
nonlocal cur_slice_size
assert cur_slice_size > 0
batch = MultiAgentBatch(
{k: v.build_and_reset() for k, v in cur_slice.items()}, cur_slice_size
)
cur_slice_size = 0
cur_slice.clear()
finished_slices.append(batch)
# For each unique env timestep.
for _, group in itertools.groupby(steps, lambda x: x[:2]):
# Accumulate into the current slice.
for _, _, _, policy_id, row in group:
cur_slice[policy_id].add_values(**row)
cur_slice_size += 1
# Slice has reached target number of env steps.
if cur_slice_size >= k:
finish_slice()
assert cur_slice_size == 0
if cur_slice_size > 0:
finish_slice()
assert len(finished_slices) > 0, finished_slices
return finished_slices
@staticmethod
@PublicAPI
def wrap_as_needed(
policy_batches: Dict[PolicyID, SampleBatch], env_steps: int
) -> Union[SampleBatch, "MultiAgentBatch"]:
"""Returns SampleBatch or MultiAgentBatch, depending on given policies.
If policy_batches is empty (i.e. {}) it returns an empty MultiAgentBatch.
Args:
policy_batches: Mapping from policy ids to SampleBatch.
env_steps: Number of env steps in the batch.
Returns:
The single default policy's SampleBatch or a MultiAgentBatch
(more than one policy).
"""
if len(policy_batches) == 1 and DEFAULT_POLICY_ID in policy_batches:
return policy_batches[DEFAULT_POLICY_ID]
return MultiAgentBatch(policy_batches=policy_batches, env_steps=env_steps)
@staticmethod
@PublicAPI
@Deprecated(new="concat_samples() from rllib.policy.sample_batch", error=True)
def concat_samples(samples: List["MultiAgentBatch"]) -> "MultiAgentBatch":
return concat_samples_into_ma_batch(samples)
@PublicAPI
def copy(self) -> "MultiAgentBatch":
"""Deep-copies self into a new MultiAgentBatch.
Returns:
The copy of self with deep-copied data.
"""
return MultiAgentBatch(
{k: v.copy() for (k, v) in self.policy_batches.items()}, self.count
)
@PublicAPI
def size_bytes(self) -> int:
"""
Returns:
The overall size in bytes of all policy batches (all columns).
"""
return sum(b.size_bytes() for b in self.policy_batches.values())
@DeveloperAPI
def compress(
self, bulk: bool = False, columns: Set[str] = frozenset(["obs", "new_obs"])
) -> None:
"""Compresses each policy batch (per column) in place.
Args:
bulk: Whether to compress across the batch dimension (0)
as well. If False will compress n separate list items, where n
is the batch size.
columns: Set of column names to compress.
"""
for batch in self.policy_batches.values():
batch.compress(bulk=bulk, columns=columns)
@DeveloperAPI
def decompress_if_needed(
self, columns: Set[str] = frozenset(["obs", "new_obs"])
) -> "MultiAgentBatch":
"""Decompresses each policy batch (per column), if already compressed.
Args:
columns: Set of column names to decompress.
Returns:
Self.
"""
for batch in self.policy_batches.values():
batch.decompress_if_needed(columns)
return self
@DeveloperAPI
def as_multi_agent(self) -> "MultiAgentBatch":
"""Simply returns `self` (already a MultiAgentBatch).
Returns:
This very instance of MultiAgentBatch.
"""
return self
def __getitem__(self, key: str) -> SampleBatch:
"""Returns the SampleBatch for the given policy id."""
return self.policy_batches[key]
def __str__(self):
return "MultiAgentBatch({}, env_steps={})".format(
str(self.policy_batches), self.count
)
def __repr__(self):
return "MultiAgentBatch({}, env_steps={})".format(
str(self.policy_batches), self.count
)
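# A minimal illustrative sketch: `env_steps` counts environment steps, while
# `agent_steps` sums the per-policy SampleBatch counts, so with several
# policies stepping on each env step, agent_steps >= env_steps. Policy ids
# below are arbitrary examples.
def _example_multi_agent_counts():
    import numpy as np

    ma_batch = MultiAgentBatch(
        {
            "policy_a": SampleBatch({"obs": np.zeros(3)}),
            "policy_b": SampleBatch({"obs": np.zeros(3)}),
        },
        env_steps=3,
    )
    assert ma_batch.env_steps() == 3
    assert ma_batch.agent_steps() == 6  # 3 steps from each of the 2 policies
    return ma_batch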
@PublicAPI
def concat_samples(samples: List[SampleBatchType]) -> SampleBatchType:
"""Concatenates a list of SampleBatches or MultiAgentBatches.
    If all items in the list are of SampleBatch type, the output will be a
    SampleBatch type. Otherwise, the output will be a MultiAgentBatch type.
    If the input is a mixture of SampleBatch and MultiAgentBatch types, the
    SampleBatch objects are treated as MultiAgentBatch objects with a
    'default_policy' key and concatenated with the rest of the
    MultiAgentBatch objects.
Empty samples are simply ignored.
Args:
samples: List of SampleBatches or MultiAgentBatches to be
concatenated.
Returns:
A new (concatenated) SampleBatch or MultiAgentBatch.
Examples:
>>> import numpy as np
>>> from ray.rllib.policy.sample_batch import SampleBatch
>>> b1 = SampleBatch({"a": np.array([1, 2]), # doctest: +SKIP
... "b": np.array([10, 11])})
>>> b2 = SampleBatch({"a": np.array([3]), # doctest: +SKIP
... "b": np.array([12])})
>>> print(concat_samples([b1, b2])) # doctest: +SKIP
{"a": np.array([1, 2, 3]), "b": np.array([10, 11, 12])}
        >>> c1 = MultiAgentBatch({'default_policy': {  # doctest: +SKIP
        ...     "a": np.array([1, 2]),
        ...     "b": np.array([10, 11])
        ... }}, env_steps=2)
        >>> c2 = SampleBatch({"a": np.array([3]),  # doctest: +SKIP
        ...                   "b": np.array([12])})
        >>> print(concat_samples([c1, c2]))  # doctest: +SKIP
        MultiAgentBatch = {'default_policy': {"a": np.array([1, 2, 3]),
                                              "b": np.array([10, 11, 12])}}
"""
if any(isinstance(s, MultiAgentBatch) for s in samples):
return concat_samples_into_ma_batch(samples)
# the output is a SampleBatch type
concatd_seq_lens = []
concatd_num_grad_updates = [0, 0.0] # [0]=count; [1]=weighted sum values
concated_samples = []
# Make sure these settings are consistent amongst all batches.
zero_padded = max_seq_len = time_major = None
for s in samples:
if s.count > 0:
if max_seq_len is None:
zero_padded = s.zero_padded
max_seq_len = s.max_seq_len
time_major = s.time_major
# Make sure these settings are consistent amongst all batches.
if s.zero_padded != zero_padded or s.time_major != time_major:
raise ValueError(
"All SampleBatches' `zero_padded` and `time_major` settings "
"must be consistent!"
)
if (
s.max_seq_len is None or max_seq_len is None
) and s.max_seq_len != max_seq_len:
raise ValueError(
"Samples must consistently either provide or omit " "`max_seq_len`!"
)
elif zero_padded and s.max_seq_len != max_seq_len:
raise ValueError(
"For `zero_padded` SampleBatches, the values of `max_seq_len` "
"must be consistent!"
)
if max_seq_len is not None:
max_seq_len = max(max_seq_len, s.max_seq_len)
if s.get(SampleBatch.SEQ_LENS) is not None:
concatd_seq_lens.extend(s[SampleBatch.SEQ_LENS])
if s.num_grad_updates is not None:
concatd_num_grad_updates[0] += s.count
concatd_num_grad_updates[1] += s.num_grad_updates * s.count
concated_samples.append(s)
# If we don't have any samples (0 or only empty SampleBatches),
# return an empty SampleBatch here.
if len(concated_samples) == 0:
return SampleBatch()
# Collect the concat'd data.
concatd_data = {}
for k in concated_samples[0].keys():
try:
if k == "infos":
concatd_data[k] = _concat_values(
*[s[k] for s in concated_samples],
time_major=time_major,
)
else:
values_to_concat = [c[k] for c in concated_samples]
_concat_values_w_time = partial(_concat_values, time_major=time_major)
concatd_data[k] = tree.map_structure(
_concat_values_w_time, *values_to_concat
)
except RuntimeError as e:
# This should catch torch errors that occur when concatenating
# tensors from different devices.
raise e
except Exception as e:
# Other errors are likely due to mismatching sub-structures.
raise ValueError(
f"Cannot concat data under key '{k}', b/c "
"sub-structures under that key don't match. "
f"`samples`={samples}\n Original error: \n {e}"
)
if concatd_seq_lens != [] and torch and torch.is_tensor(concatd_seq_lens[0]):
concatd_seq_lens = torch.Tensor(concatd_seq_lens)
elif concatd_seq_lens != [] and tf and tf.is_tensor(concatd_seq_lens[0]):
concatd_seq_lens = tf.convert_to_tensor(concatd_seq_lens)
# Return a new (concat'd) SampleBatch.
return SampleBatch(
concatd_data,
seq_lens=concatd_seq_lens,
_time_major=time_major,
_zero_padded=zero_padded,
_max_seq_len=max_seq_len,
# Compute weighted average of the num_grad_updates for the batches
# (assuming they all come from the same policy).
_num_grad_updates=(
concatd_num_grad_updates[1] / (concatd_num_grad_updates[0] or 1.0)
),
)
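# A small worked example of the `_num_grad_updates` weighted average computed
# above: batches with counts 2 and 3 and num_grad_updates 10 and 20 merge to
# (2 * 10 + 3 * 20) / (2 + 3) = 16. This assumes the SampleBatch constructor
# accepts the `_num_grad_updates` kwarg, as the return statement above implies.
def _example_concat_grad_updates():
    import numpy as np

    b1 = SampleBatch({"a": np.array([1, 2])}, _num_grad_updates=10)
    b2 = SampleBatch({"a": np.array([3, 4, 5])}, _num_grad_updates=20)
    merged = concat_samples([b1, b2])
    assert merged.num_grad_updates == 16.0
    return merged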
@PublicAPI
def concat_samples_into_ma_batch(samples: List[SampleBatchType]) -> "MultiAgentBatch":
"""Concatenates a list of SampleBatchTypes to a single MultiAgentBatch type.
    This function, as opposed to concat_samples(), forces the output to always
    be a MultiAgentBatch, which is more generic than a SampleBatch.
Args:
samples: List of SampleBatches or MultiAgentBatches to be
concatenated.
Returns:
A new (concatenated) MultiAgentBatch.
Examples:
>>> import numpy as np
>>> from ray.rllib.policy.sample_batch import SampleBatch
>>> b1 = MultiAgentBatch({'default_policy': { # doctest: +SKIP
... "a": np.array([1, 2]),
... "b": np.array([10, 11])
... }}, env_steps=2)
>>> b2 = SampleBatch({"a": np.array([3]), # doctest: +SKIP
... "b": np.array([12])})
        >>> print(concat_samples_into_ma_batch([b1, b2]))  # doctest: +SKIP
MultiAgentBatch = {'default_policy': {"a": np.array([1, 2, 3]),
"b": np.array([10, 11, 12])}}
"""
policy_batches = collections.defaultdict(list)
env_steps = 0
for s in samples:
# Some batches in `samples` may be SampleBatch.
if isinstance(s, SampleBatch):
# If empty SampleBatch: ok (just ignore).
if len(s) <= 0:
continue
else:
# if non-empty: just convert to MA-batch and move forward
s = s.as_multi_agent()
elif not isinstance(s, MultiAgentBatch):
# Otherwise: Error.
raise ValueError(
"`concat_samples_into_ma_batch` can only concat "
"SampleBatch|MultiAgentBatch objects, not {}!".format(type(s).__name__)
)
for key, batch in s.policy_batches.items():
policy_batches[key].append(batch)
env_steps += s.env_steps()
out = {}
for key, batches in policy_batches.items():
out[key] = concat_samples(batches)
return MultiAgentBatch(out, env_steps)
def _concat_values(*values, time_major=None) -> TensorType:
"""Concatenates a list of values.
Args:
values: The values to concatenate.
time_major: Whether to concatenate along the first axis
(time_major=False) or the second axis (time_major=True).
"""
if torch and torch.is_tensor(values[0]):
return torch.cat(values, dim=1 if time_major else 0)
elif isinstance(values[0], np.ndarray):
return np.concatenate(values, axis=1 if time_major else 0)
elif tf and tf.is_tensor(values[0]):
return tf.concat(values, axis=1 if time_major else 0)
elif isinstance(values[0], list):
concatenated_list = []
for sublist in values:
concatenated_list.extend(sublist)
return concatenated_list
else:
raise ValueError(
f"Unsupported type for concatenation: {type(values[0])} "
f"first element: {values[0]}"
)
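# Quick numpy illustration of the `time_major` switch in `_concat_values`:
# time-major data is laid out [T, B, ...], so concatenation happens along
# axis 1 (the batch axis) instead of axis 0. Shapes below are arbitrary.
def _example_concat_axis():
    a = np.zeros((4, 2))  # [T=4, B=2], time-major
    b = np.zeros((4, 3))  # [T=4, B=3], time-major
    out = _concat_values(a, b, time_major=True)
    assert out.shape == (4, 5)
    return out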
@DeveloperAPI
def convert_ma_batch_to_sample_batch(batch: SampleBatchType) -> SampleBatch:
"""Converts a MultiAgentBatch to a SampleBatch if neccessary.
Args:
batch: The SampleBatchType to convert.
    Returns:
        The converted SampleBatch.

    Raises:
        ValueError: If the MultiAgentBatch has more than one policy_id,
            or if the policy_id is not `DEFAULT_POLICY_ID`.
    """
"""
if isinstance(batch, MultiAgentBatch):
policy_keys = batch.policy_batches.keys()
if len(policy_keys) == 1 and DEFAULT_POLICY_ID in policy_keys:
batch = batch.policy_batches[DEFAULT_POLICY_ID]
else:
            raise ValueError(
                "RLlib tried to convert a multi-agent batch with data from more "
                "than one policy to a single-agent batch. This is not supported "
                "and may be due to a number of issues. Here are two possible "
                "ones: "
                "1) Off-Policy Estimation is not implemented for multi-agent "
                "batches. You can set `off_policy_estimation_methods: {}` to "
                "resolve this. "
                "2) Loading multi-agent data for offline training is not "
                "implemented. Load single-agent data instead to resolve this."
            )
return batch
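# Usage sketch: a single-policy MultiAgentBatch keyed by DEFAULT_POLICY_ID
# unwraps back to the very SampleBatch it was built from (via
# `as_multi_agent`, defined on SampleBatch above in this module).
def _example_convert_ma_batch():
    sb = SampleBatch({"obs": np.arange(4)})
    ma = sb.as_multi_agent()  # wraps `sb` under DEFAULT_POLICY_ID
    assert convert_ma_batch_to_sample_batch(ma) is sb
    return sb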
| e19831e01db7395bb920847a5c1c723a440931e8 | ee87c715e5d937b0380ddb87d56e9ebc4877a02b | /sklearn/covariance/tests/test_graphical_lasso.py | 317bf2aa8512473d258d0c0ed193e5f3c6b81d0b | ["BSD-3-Clause"] | permissive | scikit-learn/scikit-learn | 27a2196f3173e0f32f7a5c5d652b70a6c57c7644 | 061f8777b48e5491b0c57bb8e0bc7067c103079d | refs/heads/main | 2023-08-18T15:32:59.764468 | 2023-08-18T14:39:08 | 2023-08-18T14:39:08 | 843,222 | 58,456 | 29,777 | BSD-3-Clause | 2023-09-14T19:08:34 | 2010-08-17T09:43:38 | Python | UTF-8 | Python | false | false | 9,995 | py | test_graphical_lasso.py |
""" Test the graphical_lasso module.
"""
import sys
from io import StringIO
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy import linalg
from sklearn import datasets
from sklearn.covariance import (
GraphicalLasso,
GraphicalLassoCV,
empirical_covariance,
graphical_lasso,
)
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.utils import check_random_state
from sklearn.utils._testing import (
_convert_container,
assert_array_almost_equal,
assert_array_less,
)
def test_graphical_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0.0, 0.1, 0.25):
covs = dict()
icovs = dict()
for method in ("cd", "lars"):
cov_, icov_, costs = graphical_lasso(
emp_cov, return_costs=True, alpha=alpha, mode=method
)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs["cd"], covs["lars"], decimal=4)
assert_array_almost_equal(icovs["cd"], icovs["lars"], decimal=4)
# Smoke test the estimator
model = GraphicalLasso(alpha=0.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs["cd"], decimal=4)
assert_array_almost_equal(model.covariance_, covs["lars"], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphicalLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graphical_lasso_when_alpha_equals_0():
"""Test graphical_lasso's early return condition when alpha=0."""
X = np.random.randn(100, 10)
emp_cov = empirical_covariance(X, assume_centered=True)
model = GraphicalLasso(alpha=0, covariance="precomputed").fit(emp_cov)
assert_allclose(model.precision_, np.linalg.inv(emp_cov))
_, precision = graphical_lasso(emp_cov, alpha=0)
assert_allclose(precision, np.linalg.inv(emp_cov))
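# Illustrative helper (the leading underscore keeps pytest from collecting
# it): with alpha=0 the l1 penalty vanishes, so graphical_lasso reduces to
# the unpenalized MLE and the precision equals inv(empirical covariance),
# mirroring the assertion in the test above.
def _demo_alpha_zero_is_mle():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 4)
    emp_cov = empirical_covariance(X)
    _, precision = graphical_lasso(emp_cov, alpha=0)
    assert_allclose(precision, np.linalg.inv(emp_cov))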
@pytest.mark.parametrize("mode", ["cd", "lars"])
def test_graphical_lasso_n_iter(mode):
X, _ = datasets.make_classification(n_samples=5_000, n_features=20, random_state=0)
emp_cov = empirical_covariance(X)
_, _, n_iter = graphical_lasso(
emp_cov, 0.2, mode=mode, max_iter=2, return_n_iter=True
)
assert n_iter == 2
def test_graphical_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# (need to set penalize.diagonal to FALSE)
cov_R = np.array(
[
[0.68112222, 0.0000000, 0.265820, 0.02464314],
[0.00000000, 0.1887129, 0.000000, 0.00000000],
[0.26582000, 0.0000000, 3.095503, 0.28697200],
[0.02464314, 0.0000000, 0.286972, 0.57713289],
]
)
icov_R = np.array(
[
[1.5190747, 0.000000, -0.1304475, 0.0000000],
[0.0000000, 5.299055, 0.0000000, 0.0000000],
[-0.1304475, 0.000000, 0.3498624, -0.1683946],
[0.0000000, 0.000000, -0.1683946, 1.8164353],
]
)
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ("cd", "lars"):
cov, icov = graphical_lasso(emp_cov, alpha=1.0, return_costs=False, mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_2D():
# Hard-coded solution from Python skggm package
# obtained by calling `quic(emp_cov, lam=.1, tol=1e-8)`
cov_skggm = np.array([[3.09550269, 1.186972], [1.186972, 0.57713289]])
icov_skggm = np.array([[1.52836773, -3.14334831], [-3.14334831, 8.19753385]])
X = datasets.load_iris().data[:, 2:]
emp_cov = empirical_covariance(X)
for method in ("cd", "lars"):
cov, icov = graphical_lasso(emp_cov, alpha=0.1, return_costs=False, mode=method)
assert_array_almost_equal(cov, cov_skggm)
assert_array_almost_equal(icov, icov_skggm)
def test_graphical_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array(
[
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222],
]
)
icov_R = np.array(
[
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5],
]
)
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ("cd", "lars"):
cov, icov = graphical_lasso(
emp_cov, alpha=0.01, return_costs=False, mode=method
)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graphical_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=0.96, random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
@pytest.mark.parametrize("alphas_container_type", ["list", "tuple", "array"])
def test_graphical_lasso_cv_alphas_iterable(alphas_container_type):
"""Check that we can pass an array-like to `alphas`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/22489
"""
true_cov = np.array(
[
[0.8, 0.0, 0.2, 0.0],
[0.0, 0.4, 0.0, 0.0],
[0.2, 0.0, 0.3, 0.1],
[0.0, 0.0, 0.1, 0.7],
]
)
rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
alphas = _convert_container([0.02, 0.03], alphas_container_type)
GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
@pytest.mark.parametrize(
"alphas,err_type,err_msg",
[
([-0.02, 0.03], ValueError, "must be > 0"),
([0, 0.03], ValueError, "must be > 0"),
(["not_number", 0.03], TypeError, "must be an instance of float"),
],
)
def test_graphical_lasso_cv_alphas_invalid_array(alphas, err_type, err_msg):
"""Check that if an array-like containing a value
outside of (0, inf] is passed to `alphas`, a ValueError is raised.
Check if a string is passed, a TypeError is raised.
"""
true_cov = np.array(
[
[0.8, 0.0, 0.2, 0.0],
[0.0, 0.4, 0.0, 0.0],
[0.2, 0.0, 0.3, 0.1],
[0.0, 0.0, 0.1, 0.7],
]
)
rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
with pytest.raises(err_type, match=err_msg):
GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
def test_graphical_lasso_cv_scores():
splits = 4
n_alphas = 5
n_refinements = 3
true_cov = np.array(
[
[0.8, 0.0, 0.2, 0.0],
[0.0, 0.4, 0.0, 0.0],
[0.2, 0.0, 0.3, 0.1],
[0.0, 0.0, 0.1, 0.7],
]
)
rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
cov = GraphicalLassoCV(cv=splits, alphas=n_alphas, n_refinements=n_refinements).fit(
X
)
cv_results = cov.cv_results_
    # Total number of alphas evaluated: `n_alphas` per refinement pass, plus
    # one extra alpha at the end.
    total_alphas = n_refinements * n_alphas + 1
keys = ["alphas"]
split_keys = [f"split{i}_test_score" for i in range(splits)]
for key in keys + split_keys:
assert key in cv_results
assert len(cv_results[key]) == total_alphas
cv_scores = np.asarray([cov.cv_results_[key] for key in split_keys])
expected_mean = cv_scores.mean(axis=0)
expected_std = cv_scores.std(axis=0)
assert_allclose(cov.cv_results_["mean_test_score"], expected_mean)
assert_allclose(cov.cv_results_["std_test_score"], expected_std)
# TODO(1.5): remove in 1.5
def test_graphical_lasso_cov_init_deprecation():
"""Check that we raise a deprecation warning if providing `cov_init` in
`graphical_lasso`."""
rng, dim, n_samples = np.random.RandomState(0), 20, 100
prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=0)
cov = linalg.inv(prec)
X = rng.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
with pytest.warns(FutureWarning, match="cov_init parameter is deprecated"):
graphical_lasso(emp_cov, alpha=0.1, cov_init=emp_cov)
| 80b845362dab04fd2ab38a0b3d8e1a5363e343c3 | 279f415dd1e06c594c6c87deda57e201c73c4542 | /test/espnet2/torch_utils/test_initialize.py | ae77d23fe65fd5773f3ab8346854eca70fb15c87 | ["Apache-2.0"] | permissive | espnet/espnet | f7ba47271c1a6b1ed606dbbfb04a7f14220bb585 | bcd20948db7846ee523443ef9fd78c7a1248c95e | refs/heads/master | 2023-08-28T23:43:34.238336 | 2023-08-23T02:51:39 | 2023-08-23T02:51:39 | 114,054,873 | 7,242 | 2,244 | Apache-2.0 | 2023-09-14T08:01:11 | 2017-12-13T00:45:11 | Python | UTF-8 | Python | false | false | 1,083 | py | test_initialize.py |
import pytest
import torch
from espnet2.torch_utils.initialize import initialize
initialize_types = {}
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 3)
self.l1 = torch.nn.Linear(2, 2)
self.rnn_cell = torch.nn.LSTMCell(2, 2)
self.rnn = torch.nn.LSTM(2, 2)
self.emb = torch.nn.Embedding(1, 1)
self.norm = torch.nn.LayerNorm(1)
class Model2(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv3d(2, 2, 3)
@pytest.mark.parametrize(
"init",
[
"chainer",
"xavier_uniform",
"xavier_normal",
"kaiming_normal",
"kaiming_uniform",
"dummy",
],
)
def test_initialize(init):
model = Model()
if init == "dummy":
with pytest.raises(ValueError):
initialize(model, init)
else:
initialize(model, init)
def test_5dim():
model = Model2()
with pytest.raises(NotImplementedError):
initialize(model, "chainer")
| 5157a584bde37dae410548095adf72a657158946 | 1bc67a91d85a7106106ca31307ef9ee93f1d1a20 | /src/py/flwr_tool/protoc_test.py | 07e7dd833e9d4a65386c1bc452f107acf862f925 | ["Apache-2.0"] | permissive | adap/flower | 4915d143c674eb675504d585e1e90ed06833812f | 55be690535e5f3feb33c888c3e4a586b7bdbf489 | refs/heads/main | 2023-08-17T01:18:12.168723 | 2023-08-16T17:17:48 | 2023-08-16T17:17:48 | 241,095,326 | 2,999 | 658 | Apache-2.0 | 2023-09-14T15:43:22 | 2020-02-17T11:51:29 | Python | UTF-8 | Python | false | false | 1,106 | py | protoc_test.py |
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module contains functions related to proto compilation."""
from os import path
from .protoc import IN_PATH, OUT_PATH, PROTO_FILES
def test_directories() -> None:
"""Test if all directories exist."""
assert path.isdir(IN_PATH)
assert path.isdir(OUT_PATH)
def test_proto_file_count() -> None:
"""Test if the correct number of proto files were captured by the glob."""
assert len(PROTO_FILES) == 5
| aa604bd8a706b703519f9a4d68e01acd9de7027b | 29bd0e9b5bdef7e2b4ff79edda3be258d5bde63c | /xonsh/webconfig/tags.py | 5b1c454fdc988d8f01995a15a5c97e75ec98db83 | ["BSD-2-Clause"] | permissive | xonsh/xonsh | 4dec5e4c14a4a82f81277a89d8ab6091869fc29e | 60f0145ed893cb73bbfcf336c448238981010d41 | refs/heads/main | 2023-08-31T03:37:57.786839 | 2023-08-23T15:30:20 | 2023-08-23T15:30:20 | 29,620,400 | 6,374 | 684 | NOASSERTION | 2023-09-11T02:52:37 | 2015-01-21T22:05:27 | Python | UTF-8 | Python | false | false | 3,760 | py | tags.py |
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Iterable
import xml.etree.ElementTree as etree
from functools import partial
class Elem(etree.Element):
def __init__(
self,
tag: "str",
*cls: str,
**kwargs: "str",
):
super().__init__(tag)
self.set_attrib(*cls, **kwargs)
def __getitem__(self, item: "int|str|Elem|Iterable[Elem]"): # type: ignore
"""nice sub-tree"""
if isinstance(item, int):
return super().__getitem__(item)
if isinstance(item, str):
self.text = (self.text or "") + item
elif isinstance(item, etree.Element):
self.append(item)
else:
for ele in item:
try:
_ = self[ele] # recursive call
except Exception as ex:
logging.error(
f"Failed to append to node list. {ex!r} : {item!r}>{ele!r} : {self.to_str()!r}"
)
break
return self
def set_attrib(self, *cls: str, **kwargs: str):
klass = " ".join(cls)
classes = [klass, self.attrib.pop("class", "")]
cls_str = " ".join(filter(None, classes))
if cls_str:
self.attrib["class"] = cls_str
self.attrib.update(kwargs)
def __call__(self, *cls: str, **kwargs: str):
self.set_attrib(*cls, **kwargs)
return self
def to_str(self) -> bytes:
return etree.tostring(self)
div = partial(Elem, "div")
row = partial(div, "row")
col = partial(div, "col")
col_sm = partial(div, "col-sm")
col_md = partial(div, "col-md")
alert = partial(div, "alert", role="alert")
br = partial(Elem, "br")
h3 = partial(Elem, "h3")
h4 = partial(Elem, "h4")
h5 = partial(Elem, "h5")
card_title = partial(h5, "card-title")
li = partial(Elem, "li")
nav_item = partial(li, "nav-item")
p = partial(Elem, "p")
small = partial(Elem, "small")
pre = partial(Elem, "pre")
code = partial(Elem, "code")
a = partial(Elem, "a")
nav_link = partial(a, "nav-link")
form = partial(Elem, "form")
inline_form = partial(form, "d-inline")
card = partial(div, "card")
card_header = partial(div, "card-header")
card_body = partial(div, "card-body")
card_text = partial(div, "card-text")
card_footer = partial(div, "card-footer")
textarea = partial(Elem, "textarea")
table = partial(Elem, "table")
tbl = partial(table, "table") # bootstrap table
tr = partial(Elem, "tr")
th = partial(Elem, "th")
td = partial(Elem, "td")
btn = partial(Elem, "button", "btn", type="button")
btn_primary = partial(btn, "btn-primary")
btn_primary_sm = partial(btn_primary, "btn-sm")
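# Illustrative sketch of how the partials above compose into nested trees via
# `Elem.__getitem__` (the card contents here are arbitrary examples; `to_str`
# is defined just below):
def _example_card():
    return to_str(
        card()[
            card_header()["Settings"],
            card_body()[card_title()["Prompt"], card_text()["Pick a theme."]],
        ]
    )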
def to_pretty(txt: str):
import xml.dom.minidom
dom = xml.dom.minidom.parseString(txt)
txt = dom.toprettyxml()
return "".join(txt.splitlines(keepends=True)[1:])
def to_str(elems: "Iterable[Elem]|Elem", debug=False) -> str:
def _to_str():
if isinstance(elems, etree.Element):
yield etree.tostring(elems)
else:
for idx, el in enumerate(elems):
try:
yield etree.tostring(el)
except Exception:
logging.error(
f"Failed to serialize {el!r}. ({elems!r}.{idx!r})",
exc_info=True,
)
txt = b"".join(_to_str()).decode()
if debug:
txt = to_pretty(txt)
return txt
if __name__ == "__main__":
nav = nav_item()[nav_link(href="/")["Colors"],]
gen = to_str(nav, debug=True)
print(gen)
assert gen.splitlines() == [
'<li class="nav-item">',
'\t<a class="nav-link" href="/">Colors</a>',
"</li>",
]
| 36e9f9e3846a72e30a50302d910e7a71c52696a9 | 184b40438287d124117dcd48cf9abdab71e116c7 | /tests/unittests/test_mutate_and_test.py | 9422bb35390bb4ff9b8507f9bca214f3ada0a2e8 | ["MIT"] | permissive | sixty-north/cosmic-ray | 45bea97513eb75d00c514e0df2c5c3d156268f0f | aa63d36ef84659d941c22da9d4d39ae0408d488e | refs/heads/master | 2023-08-26T07:57:57.939516 | 2023-03-14T13:49:49 | 2023-03-14T13:49:49 | 34,157,278 | 569 | 77 | MIT | 2023-01-02T07:58:54 | 2015-04-18T07:44:21 | Python | UTF-8 | Python | false | false | 1,013 | py | test_mutate_and_test.py |
"Tests for worker."
import asyncio
from pathlib import Path
from cosmic_ray.work_item import MutationSpec, WorkerOutcome, WorkResult
from cosmic_ray.mutating import mutate_and_test
def test_no_test_return_value(path_utils, data_dir):
with path_utils.excursion(data_dir):
result = asyncio.get_event_loop().run_until_complete(
mutate_and_test(
[
MutationSpec(
Path("a/b.py"),
"core/ReplaceTrueWithFalse",
100,
# TODO: As in other places, these are placeholder position values. How can we not have to provide them?
(0, 0),
(0, 1),
)
],
"python -m unittest tests",
1000,
)
)
expected = WorkResult(output=None, test_outcome=None, diff=None, worker_outcome=WorkerOutcome.NO_TEST)
assert result == expected
| eaac62a7daa1986bca3454b2ab0ca4ace99a79e2 | 73a0f661f1423d63e86489d4b2673f0103698aab | /python/oneflow/test/exceptions/test_sparse_softmax_cross_entropy_op.py | f890d28b82a185bae7b5a5af2bfade6c92885625 | ["Apache-2.0"] | permissive | Oneflow-Inc/oneflow | 4fc3e081e45db0242a465c4330d8bcc8b21ee924 | 0aab78ea24d4b1c784c30c57d33ec69fe5605e4a | refs/heads/master | 2023-08-25T16:58:30.576596 | 2023-08-22T14:15:46 | 2023-08-22T14:15:46 | 81,634,683 | 5,495 | 786 | Apache-2.0 | 2023-09-14T09:44:31 | 2017-02-11T06:09:53 | C++ | UTF-8 | Python | false | false | 2,580 | py | test_sparse_softmax_cross_entropy_op.py |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
import oneflow.unittest
class TestSparseSoftmaxCrossEntropyError(flow.unittest.TestCase):
def test_sparse_softmax_cross_entropy_prediction_numaxes_err(test_case):
with test_case.assertRaises(RuntimeError) as context:
prediction = flow.randn(10)
label = flow.randint(0, 10, (10, 10), dtype=flow.int64)
flow._C.sparse_softmax_cross_entropy(prediction, label)
test_case.assertTrue(
"The dimension of prediction must be greater than or equal to 2, but found"
in str(context.exception)
)
def test_sparse_softmax_cross_entropy_label_numaxes_err(test_case):
with test_case.assertRaises(RuntimeError) as context:
prediction = flow.randn(10, 10, 5)
label = flow.randint(0, 10, (10, 10, 5), dtype=flow.int64)
flow._C.sparse_softmax_cross_entropy(prediction, label)
test_case.assertTrue(
"The dimension of label is expected to be less than that of prediction by 1"
in str(context.exception)
)
def test_sparse_softmax_cross_entropy_prediction_i_shape_err(test_case):
with test_case.assertRaises(RuntimeError) as context:
prediction = flow.randn(10, 10, 5)
label = flow.randint(0, 10, (10, 9), dtype=flow.int64)
flow._C.sparse_softmax_cross_entropy(prediction, label)
test_case.assertTrue("must match the size of label" in str(context.exception))
def test_sparse_softmax_cross_entropy_label_dtype_err(test_case):
with test_case.assertRaises(TypeError) as context:
prediction = flow.randn(10, 10, 5)
label = flow.randn(10, 10, dtype=flow.float32)
flow._C.sparse_softmax_cross_entropy(prediction, label)
test_case.assertTrue(
"The dtype of label must be integer, but found " in str(context.exception)
)
if __name__ == "__main__":
unittest.main()
| 11b540635ee3a930d61b11b62c48d31e8ac7cd42 | e4c5238c86c8a114d49b7ba3ecc5ef9d5157e152 | /tests/test_state_props/test_is_separable.py | ad6289457912a1729612fdb09167f2468f5fd8ba | ["MIT"] | permissive | vprusso/toqito | 64a9963c02b73127836b76d886543a0642b93664 | 7e6869d783f98cb241579ea89e0f9ff61eff9d9b | refs/heads/master | 2023-07-22T17:08:18.392204 | 2023-07-19T07:27:37 | 2023-07-19T07:27:37 | 235,493,396 | 116 | 53 | MIT | 2023-09-12T13:35:38 | 2020-01-22T03:47:16 | Python | UTF-8 | Python | false | false | 3,502 | py | test_is_separable.py |
"""Test is_separable."""
import numpy as np
from toqito.state_props.is_separable import is_separable
from toqito.channels import partial_trace
from toqito.matrix_props import is_density
from toqito.states import basis, bell, isotropic, tile
from toqito.random import random_density_matrix
def test_non_positive_semidefinite_matrix():
"""Ensure separability of non-positive semidefinite matrix is invalid."""
with np.testing.assert_raises(ValueError):
state = np.array([[-1, -1], [-1, -1]])
is_separable(state)
def test_psd_matrix_local_dim_one():
"""Every positive semidefinite matrix is separable when one of the local dimensions is 1."""
np.testing.assert_equal(is_separable(np.identity(2)), True)
def test_invalid_dim_parameter():
"""The dimension of the state must evenly divide the length of the state."""
with np.testing.assert_raises(ValueError):
dim = 3
rho = isotropic(dim, 1 / (dim + 1))
is_separable(rho, dim + 1)
def test_entangled_ppt_criterion():
"""Determined to be entangled via the PPT criterion."""
rho = bell(0) * bell(0).conj().T
np.testing.assert_equal(is_separable(rho), False)
def test_ppt_small_dimensions():
"""Determined to be separable via sufficiency of the PPT criterion in small dimensions."""
e_0, e_1, e_2 = basis(3, 0), basis(3, 1), basis(3, 2)
psi = 1 / np.sqrt(3) * e_0 + 1 / np.sqrt(3) * e_1 + 1 / np.sqrt(3) * e_2
e_0, e_1 = basis(2, 0), basis(2, 1)
phi = np.kron((1 / np.sqrt(2) * e_0 + 1 / np.sqrt(2) * e_1), psi)
sigma = phi * phi.conj().T
np.testing.assert_equal(is_separable(sigma), True)
def test_ppt_low_rank():
"""Determined to be separable via the operational criterion for low-rank operators."""
m = 6
n = m
rho = random_density_matrix(m)
u, s, v_h = np.linalg.svd(rho)
rho_cut = u[:, : m - 1] @ np.diag(s[: m - 1]) @ v_h[: m - 1]
rho_cut = rho_cut / np.trace(rho_cut)
pt_state_alice = partial_trace(rho_cut, [1], [3, 2])
np.testing.assert_equal(is_density(rho_cut), True)
np.testing.assert_equal(is_density(np.array(pt_state_alice)), True)
np.testing.assert_equal(
np.linalg.matrix_rank(rho_cut) + np.linalg.matrix_rank(pt_state_alice)
<= 2 * m * n - m - n + 2,
True,
)
# TODO
# np.testing.assert_equal(is_separable(rho), True)
def test_entangled_realignment_criterion():
"""Determined to be entangled via the realignment criterion."""
# Construct bound entangled state:
# :math:`\rho = \frac{1}{4} \mathbb{I}_3 \otimes \mathbb{I}_3 - \sum_{i=0}^4 | \psi_i \rangle \langle \psi_i |`
rho = np.identity(9)
for i in range(5):
rho = rho - tile(i) * tile(i).conj().T
rho = rho / 4
np.testing.assert_equal(is_density(rho), True)
np.testing.assert_equal(is_separable(rho), False)
def test_entangled_cross_norm_realignment_criterion():
"""Determined to be entangled by using Theorem 1 and Remark 1 of Quantum Inf. Comput., 3:193-202, 2003."""
p_var, a_var, b_var = 0.4, 0.8, 0.64
rho = np.array(
[
[p_var * a_var**2, 0, 0, p_var * a_var * b_var],
[0, (1 - p_var) * a_var**2, (1 - p_var) * a_var * b_var, 0],
[0, (1 - p_var) * a_var * b_var, (1 - p_var) * a_var**2, 0],
[p_var * a_var * b_var, 0, 0, p_var * a_var**2],
]
)
np.testing.assert_equal(is_separable(rho), False)
if __name__ == "__main__":
np.testing.run_module_suite()
| 35765824a8a5c67b805f3441020658fdc1d194c2 | 93316c24c36ddd7886dd13fac75e50b115ddbdde | /supar/structs/dist.py | 6ab594d4e4c72374122e66672529394f656d788e | ["Python-2.0", "MIT"] | permissive | yzhangcs/parser | 6d5f44af454ac7de705344fd1242023b5eade13e | 831df043073d0fe919601feaba6a9a569a742c7f | refs/heads/main | 2023-08-31T08:40:13.716513 | 2023-08-31T06:26:52 | 2023-08-31T06:26:52 | 152,071,156 | 683 | 133 | MIT | 2022-10-01T03:58:12 | 2018-10-08T11:49:19 | Python | UTF-8 | Python | false | false | 4,332 | py | dist.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import Iterable, Union
import torch
import torch.autograd as autograd
from supar.structs.semiring import (CrossEntropySemiring, EntropySemiring,
KLDivergenceSemiring, KMaxSemiring,
LogSemiring, MaxSemiring, SampledSemiring,
Semiring)
from torch.distributions.distribution import Distribution
from torch.distributions.utils import lazy_property
class StructuredDistribution(Distribution):
r"""
Base class for structured distribution :math:`p(y)` :cite:`eisner-2016-inside,goodman-1999-semiring,li-eisner-2009-first`.
Args:
scores (torch.Tensor):
Log potentials, also for high-order cases.
"""
def __init__(self, scores: torch.Tensor, **kwargs) -> StructuredDistribution:
self.scores = scores.requires_grad_() if isinstance(scores, torch.Tensor) else [s.requires_grad_() for s in scores]
self.kwargs = kwargs
def __repr__(self):
return f"{self.__class__.__name__}()"
def __add__(self, other: 'StructuredDistribution') -> StructuredDistribution:
return self.__class__(torch.stack((self.scores, other.scores), -1), lens=self.lens)
@lazy_property
def log_partition(self):
r"""
Computes the log partition function of the distribution :math:`p(y)`.
"""
return self.forward(LogSemiring)
@lazy_property
def marginals(self):
r"""
Computes marginal probabilities of the distribution :math:`p(y)`.
"""
return self.backward(self.log_partition.sum())
@lazy_property
def max(self):
r"""
Computes the max score of the distribution :math:`p(y)`.
"""
return self.forward(MaxSemiring)
@lazy_property
def argmax(self):
r"""
Computes :math:`\arg\max_y p(y)` of the distribution :math:`p(y)`.
"""
return self.backward(self.max.sum())
@lazy_property
def mode(self):
return self.argmax
def kmax(self, k: int) -> torch.Tensor:
r"""
Computes the k-max of the distribution :math:`p(y)`.
"""
return self.forward(KMaxSemiring(k))
def topk(self, k: int) -> Union[torch.Tensor, Iterable]:
r"""
Computes the k-argmax of the distribution :math:`p(y)`.
"""
raise NotImplementedError
def sample(self):
r"""
Obtains a structured sample from the distribution :math:`y \sim p(y)`.
TODO: multi-sampling.
"""
return self.backward(self.forward(SampledSemiring).sum()).detach()
@lazy_property
def entropy(self):
r"""
Computes entropy :math:`H[p]` of the distribution :math:`p(y)`.
"""
return self.forward(EntropySemiring)
def cross_entropy(self, other: 'StructuredDistribution') -> torch.Tensor:
r"""
Computes cross-entropy :math:`H[p,q]` of self and another distribution.
Args:
other (~supar.structs.dist.StructuredDistribution): Comparison distribution.
"""
return (self + other).forward(CrossEntropySemiring)
def kl(self, other: 'StructuredDistribution') -> torch.Tensor:
r"""
Computes KL-divergence :math:`KL[p \parallel q]=H[p,q]-H[p]` of self and another distribution.
Args:
other (~supar.structs.dist.StructuredDistribution): Comparison distribution.
"""
return (self + other).forward(KLDivergenceSemiring)
def log_prob(self, value: torch.LongTensor, *args, **kwargs) -> torch.Tensor:
"""
Computes log probability over values :math:`p(y)`.
"""
return self.score(value, *args, **kwargs) - self.log_partition
def score(self, value: torch.LongTensor, *args, **kwargs) -> torch.Tensor:
raise NotImplementedError
@torch.enable_grad()
def forward(self, semiring: Semiring) -> torch.Tensor:
raise NotImplementedError
def backward(self, log_partition: torch.Tensor) -> Union[torch.Tensor, Iterable[torch.Tensor]]:
grads = autograd.grad(log_partition, self.scores, create_graph=True)
return grads[0] if isinstance(self.scores, torch.Tensor) else grads
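# Toy sketch, independent of the semiring machinery above: for a flat
# categorical, the log-partition is logsumexp(scores), and its gradient
# w.r.t. the scores recovers the marginals -- the identity that
# `StructuredDistribution.marginals` exploits through `backward`.
def _example_marginals_via_autograd():
    scores = torch.randn(5, requires_grad=True)
    log_z = scores.logsumexp(-1)
    marginals, = autograd.grad(log_z, scores)
    assert torch.allclose(marginals, scores.softmax(-1))
    return marginals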
| 1faa92efd2659c7bb35a2fd40c4ccbf185ef2ae7 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayCloudCloudbaseResourcepackageAlterQueryResponse.py | 1fd5cf482378a340eee3d724e37a127200b2dc1f | ["Apache-2.0"] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,516 | py | AlipayCloudCloudbaseResourcepackageAlterQueryResponse.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.ResourcePackageInfo import ResourcePackageInfo
class AlipayCloudCloudbaseResourcepackageAlterQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCloudCloudbaseResourcepackageAlterQueryResponse, self).__init__()
self._current_spec_code = None
self._resource_pkgs = None
@property
def current_spec_code(self):
return self._current_spec_code
@current_spec_code.setter
def current_spec_code(self, value):
self._current_spec_code = value
@property
def resource_pkgs(self):
return self._resource_pkgs
@resource_pkgs.setter
def resource_pkgs(self, value):
if isinstance(value, list):
self._resource_pkgs = list()
for i in value:
if isinstance(i, ResourcePackageInfo):
self._resource_pkgs.append(i)
else:
self._resource_pkgs.append(ResourcePackageInfo.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayCloudCloudbaseResourcepackageAlterQueryResponse, self).parse_response_content(response_content)
if 'current_spec_code' in response:
self.current_spec_code = response['current_spec_code']
if 'resource_pkgs' in response:
self.resource_pkgs = response['resource_pkgs']
| 4bc6c246057b83736428218f6445940b26f5f0c1 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Sklearn_scipy_numpy/source/sklearn/datasets/tests/test_covtype.py | f32511d7c9aa85fd623213bd9c3553b1ed7f9d4d | ["MIT"] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 857 | py | test_covtype.py |
"""Test the covtype loader.
Skipped if covtype is not already downloaded to data_home.
"""
import errno
from sklearn.datasets import fetch_covtype
from sklearn.utils.testing import assert_equal, SkipTest
def fetch(*args, **kwargs):
return fetch_covtype(*args, download_if_missing=False, **kwargs)
def test_fetch():
try:
data1 = fetch(shuffle=True, random_state=42)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Covertype dataset can not be loaded.")
data2 = fetch(shuffle=True, random_state=37)
X1, X2 = data1['data'], data2['data']
assert_equal((581012, 54), X1.shape)
assert_equal(X1.shape, X2.shape)
assert_equal(X1.sum(), X2.sum())
y1, y2 = data1['target'], data2['target']
assert_equal((X1.shape[0],), y1.shape)
assert_equal((X1.shape[0],), y2.shape)
| 8709fc4de695a5fc11b1f337bb91040da6eafcd2 | 32809f6f425bf5665fc19de2bc929bacc3eeb469 | /src/1420-Build-Array-Where-You-Can-Find-The-Maximum-Exactly-K-Comparisons/1420.py | ff9495cca6c41e8139a833a58ee32b1433055c7a | [] | no_license | luliyucoordinate/Leetcode | 9f6bf01f79aa680e2dff11e73e4d10993467f113 | bcc04d49969654cb44f79218a7ef2fd5c1e5449a | refs/heads/master | 2023-05-25T04:58:45.046772 | 2023-05-24T11:57:20 | 2023-05-24T11:57:20 | 132,753,892 | 1,575 | 569 | null | 2023-05-24T11:57:22 | 2018-05-09T12:30:59 | C++ | UTF-8 | Python | false | false | 549 | py | 1420.py |
class Solution:
    def numOfArrays(self, n: int, m: int, k: int) -> int:
        mod = 10**9 + 7
        # dp[a][b][c]: arrays of length a with maximum <= c and exactly b
        # "new maximum" comparisons (search cost b).
        dp = [[[0] * (m + 1) for _ in range(k + 1)] for _ in range(n + 1)]
        # Length-1 arrays cost exactly 1 comparison; c choices with max <= c.
        for c in range(m + 1):
            dp[1][1][c] = c
        for a in range(2, n + 1):
            for b in range(1, k + 1):
                for c in range(b, m + 1):
                    # Sum of: arrays with max <= c - 1; arrays ending in a
                    # new maximum c; and arrays with max exactly c extended
                    # by any of the c values <= c (no new comparison).
                    dp[a][b][c] = (dp[a][b][c - 1] + dp[a - 1][b - 1][c - 1] + \
                        (dp[a - 1][b][c] - dp[a - 1][b][c - 1] + mod) * c) % mod
        return dp[-1][-1][-1]
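# Quick sanity check: with n=2, m=3, k=1 the maximum must be the first
# element, so the answer is 1 + 2 + 3 = 6.
if __name__ == "__main__":
    assert Solution().numOfArrays(2, 3, 1) == 6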
| 07f970a112af1fc3d0b42a554f208ba797e58cd4 | 5db0fab37c2b8a618d85d3b60fab9f806c416474 | /src/python/pants/backend/codegen/protobuf/python/register.py | 3211453c3909baa320c495d24e5de301a8414c8a | ["Apache-2.0"] | permissive | pantsbuild/pants | 4988d1ac5474ec95f94ce2218aeb759401e4b011 | 98cbda8545f0d58c586ed2daa76fefd729d5e0d5 | refs/heads/main | 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 | Apache-2.0 | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | Python | UTF-8 | Python | false | false | 1,405 | py | register.py |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Generate Python sources from Protocol Buffers (Protobufs).
See https://www.pantsbuild.org/docs/protobuf.
"""
from pants.backend.codegen.protobuf import protobuf_dependency_inference
from pants.backend.codegen.protobuf import tailor as protobuf_tailor
from pants.backend.codegen.protobuf.python import (
additional_fields,
python_protobuf_module_mapper,
python_protobuf_subsystem,
)
from pants.backend.codegen.protobuf.python.rules import rules as python_rules
from pants.backend.codegen.protobuf.target_types import (
ProtobufSourcesGeneratorTarget,
ProtobufSourceTarget,
)
from pants.backend.codegen.protobuf.target_types import rules as protobuf_target_rules
from pants.backend.python.dependency_inference import module_mapper
from pants.core.util_rules import stripped_source_files
def rules():
return [
*additional_fields.rules(),
*python_protobuf_subsystem.rules(),
*python_rules(),
*python_protobuf_module_mapper.rules(),
*protobuf_dependency_inference.rules(),
*protobuf_tailor.rules(),
*protobuf_target_rules(),
*module_mapper.rules(),
*stripped_source_files.rules(),
]
def target_types():
return [ProtobufSourcesGeneratorTarget, ProtobufSourceTarget]
| 2c5aa631628f6656f5f74d69700380010870f203 | 22531d5431acb0630c600be31ae3e8a9954b252c | /dpkt/http.py | 19f7bc30427e31ad1f08d8aca56ee636188f7710 | ["LicenseRef-scancode-warranty-disclaimer", "BSD-2-Clause"] | permissive | kbandla/dpkt | f33cae70d587637fe46303ca9cd467ac1f2f0990 | 440447ae17afc83af9e8d0167e8f13a907d9b83b | refs/heads/master | 2023-08-18T08:58:41.459495 | 2023-01-27T16:44:52 | 2023-01-27T16:44:52 | 4,576,441 | 1,071 | 350 | NOASSERTION | 2023-09-09T15:21:08 | 2012-06-06T18:39:31 | Python | UTF-8 | Python | false | false | 22,114 | py | http.py |
# $Id: http.py 86 2013-03-05 19:25:19Z andrewflnr@gmail.com $
# -*- coding: utf-8 -*-
"""Hypertext Transfer Protocol."""
from __future__ import print_function
from __future__ import absolute_import
from collections import OrderedDict
from . import dpkt
from .compat import BytesIO, iteritems
def parse_headers(f):
"""Return dict of HTTP headers parsed from a file object."""
d = OrderedDict()
while 1:
        # The following logic covers two kinds of loop exit criteria.
        # 1) If the header is valid, f.readline() returns '\r\n' when we
        #    reach the end of the header block; after strip() this becomes
        #    an empty string, so we break the loop.
        # 2) If this is a malformed header that does not end with '\r\n',
        #    f.readline() returns ''; after strip() we still get an empty
        #    string and also break the loop.
line = f.readline().strip().decode("ascii", "ignore")
if not line:
break
l_ = line.split(':', 1)
if len(l_[0].split()) != 1:
raise dpkt.UnpackError('invalid header: %r' % line)
k = l_[0].lower()
v = len(l_) != 1 and l_[1].lstrip() or ''
if k in d:
if not type(d[k]) is list:
d[k] = [d[k]]
d[k].append(v)
else:
d[k] = v
return d
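# Brief usage sketch (the header names and values are arbitrary examples):
# keys are lower-cased and duplicate header names are collected into a list.
def _example_parse_headers():
    f = BytesIO(b'Host: example.com\r\nSet-Cookie: a=1\r\nSet-Cookie: b=2\r\n\r\n')
    d = parse_headers(f)
    assert d['host'] == 'example.com'
    assert d['set-cookie'] == ['a=1', 'b=2']
    return d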
def parse_body(f, headers):
"""Return HTTP body parsed from a file object, given HTTP header dict."""
if headers.get('transfer-encoding', '').lower() == 'chunked':
l_ = []
found_end = False
while 1:
try:
sz = f.readline().split(None, 1)[0]
except IndexError:
raise dpkt.UnpackError('missing chunk size')
try:
n = int(sz, 16)
except ValueError:
raise dpkt.UnpackError('invalid chunk size')
if n == 0:
found_end = True
buf = f.read(n)
if f.readline().strip():
break
if n and len(buf) == n:
l_.append(buf)
else:
# only possible when len(buf) < n, which will happen if the
# file object ends before reading a complete file chunk
break
if not found_end:
raise dpkt.NeedData('premature end of chunked body')
body = b''.join(l_)
elif 'content-length' in headers:
n = int(headers['content-length'])
body = f.read(n)
if len(body) != n:
raise dpkt.NeedData('short body (missing %d bytes)' % (n - len(body)))
elif 'content-type' in headers:
body = f.read()
else:
# XXX - need to handle HTTP/0.9
body = b''
return body
class Message(dpkt.Packet):
"""Hypertext Transfer Protocol headers + body.
HTTP messages are how data is exchanged between a server and a client. There are two types of messages: requests
sent by the client to trigger an action on the server, and responses, the answer from the server. HTTP messages are
composed of textual information encoded in ASCII, and span over multiple lines.
Attributes:
__hdr__: Header fields of HTTP.
The start-line and HTTP headers of the HTTP message are collectively known as the head of the requests,
whereas its payload is known as the body.
"""
__metaclass__ = type
__hdr_defaults__ = {}
headers = None
body = None
def __init__(self, *args, **kwargs):
if args:
self.unpack(args[0])
else:
self.headers = OrderedDict()
self.body = b''
self.data = b''
# NOTE: changing this to iteritems breaks py3 compatibility
for k, v in self.__hdr_defaults__.items():
setattr(self, k, v)
for k, v in iteritems(kwargs):
setattr(self, k, v)
def unpack(self, buf, is_body_allowed=True):
f = BytesIO(buf)
# Parse headers
self.headers = parse_headers(f)
# Parse body
if is_body_allowed:
self.body = parse_body(f, self.headers)
else:
self.body = b''
# Save the rest
self.data = f.read()
def pack_hdr(self):
return ''.join(['%s: %s\r\n' % t for t in iteritems(self.headers)])
def __len__(self):
return len(str(self))
def __str__(self):
return '%s\r\n%s' % (self.pack_hdr(), self.body.decode("utf8", "ignore"))
def __bytes__(self):
return self.pack_hdr().encode("ascii", "ignore") + b'\r\n' + (self.body or b'')
class Request(Message):
"""Hypertext Transfer Protocol Request.
    HTTP requests are messages sent by the client to initiate an action on the
    server. Their start-line contains three elements: an HTTP method, a verb
    (like GET, PUT or POST) or a noun (like HEAD or OPTIONS); the request
    target, usually a URL or the absolute path (the protocol, port, and domain
    are usually characterized by the request context); and the HTTP version,
    which defines the structure of the remaining message, acting as an
    indicator of the expected version to use for the response.
Attributes:
__hdr__: Header fields of HTTP request.
Many headers can appear in requests. They can be divided in several groups:
General headers, like Via, apply to the message as a whole.
Request headers, like User-Agent or Accept, modify the request by specifying it further (like Accept-
Language), by giving context (like Referer), or by conditionally restricting it (like If-None).
Representation headers like Content-Type that describe the original format of the message data and
any encoding applied (only present if the message has a body).
"""
__hdr_defaults__ = {
'method': 'GET',
'uri': '/',
'version': '1.0',
}
__methods = dict.fromkeys((
'GET', 'PUT', 'ICY',
'COPY', 'HEAD', 'LOCK', 'MOVE', 'POLL', 'POST',
'BCOPY', 'BMOVE', 'MKCOL', 'TRACE', 'LABEL', 'MERGE',
'DELETE', 'SEARCH', 'UNLOCK', 'REPORT', 'UPDATE', 'NOTIFY',
'BDELETE', 'CONNECT', 'OPTIONS', 'CHECKIN',
'PROPFIND', 'CHECKOUT', 'CCM_POST',
'SUBSCRIBE', 'PROPPATCH', 'BPROPFIND',
'BPROPPATCH', 'UNCHECKOUT', 'MKACTIVITY',
'MKWORKSPACE', 'UNSUBSCRIBE', 'RPC_CONNECT',
'VERSION-CONTROL',
'BASELINE-CONTROL'
))
__proto = 'HTTP'
def unpack(self, buf):
f = BytesIO(buf)
line = f.readline().decode("ascii", "ignore")
l_ = line.strip().split()
if len(l_) < 2:
raise dpkt.UnpackError('invalid request: %r' % line)
if l_[0] not in self.__methods:
raise dpkt.UnpackError('invalid http method: %r' % l_[0])
if len(l_) == 2:
# HTTP/0.9 does not specify a version in the request line
self.version = '0.9'
else:
if not l_[2].startswith(self.__proto):
raise dpkt.UnpackError('invalid http version: %r' % l_[2])
self.version = l_[2][len(self.__proto) + 1:]
self.method = l_[0]
self.uri = l_[1]
Message.unpack(self, f.read())
def __str__(self):
return '%s %s %s/%s\r\n' % (self.method, self.uri, self.__proto,
self.version) + Message.__str__(self)
def __bytes__(self):
str_out = '%s %s %s/%s\r\n' % (self.method, self.uri, self.__proto,
self.version)
return str_out.encode("ascii", "ignore") + Message.__bytes__(self)
class Response(Message):
"""Hypertext Transfer Protocol Response.
    The start line of an HTTP response, called the status line, contains the following information: the protocol
    version, usually HTTP/1.1; a status code indicating success or failure of the request (common status codes are
    200, 404, or 302); and a status text, a brief, purely informational, textual description of the status code to
    help a human understand the HTTP message. A typical status line looks like: HTTP/1.1 404 Not Found.
Attributes:
__hdr__: Header fields of HTTP Response.
Many headers can appear in responses. These can be divided into several groups:
General headers, like Via, apply to the whole message.
Response headers, like Vary and Accept-Ranges, give additional information about the server which
doesn't fit in the status line.
Representation headers like Content-Type that describe the original format of the message data and any
encoding applied (only present if the message has a body).
"""
__hdr_defaults__ = {
'version': '1.0',
'status': '200',
'reason': 'OK'
}
__proto = 'HTTP'
def unpack(self, buf):
f = BytesIO(buf)
line = f.readline()
l_ = line.strip().decode("ascii", "ignore").split(None, 2)
if len(l_) < 2 or not l_[0].startswith(self.__proto) or not l_[1].isdigit():
raise dpkt.UnpackError('invalid response: %r' % line)
self.version = l_[0][len(self.__proto) + 1:]
self.status = l_[1]
self.reason = l_[2] if len(l_) > 2 else ''
# RFC Sec 4.3.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3.
# For response messages, whether or not a message-body is included with
# a message is dependent on both the request method and the response
# status code (section 6.1.1). All responses to the HEAD request method
# MUST NOT include a message-body, even though the presence of entity-
# header fields might lead one to believe they do. All 1xx
# (informational), 204 (no content), and 304 (not modified) responses
# MUST NOT include a message-body. All other responses do include a
# message-body, although it MAY be of zero length.
        status_code = int(self.status)
        is_body_allowed = status_code >= 200 and status_code not in (204, 304)
Message.unpack(self, f.read(), is_body_allowed)
def __str__(self):
return '%s/%s %s %s\r\n' % (self.__proto, self.version, self.status,
self.reason) + Message.__str__(self)
def __bytes__(self):
str_out = '%s/%s %s %s\r\n' % (self.__proto, self.version, self.status,
self.reason)
return str_out.encode("ascii", "ignore") + Message.__bytes__(self)
def test_parse_request():
s = (b"""POST /main/redirect/ab/1,295,,00.html HTTP/1.0\r\nReferer: http://www.email.com/login/snap/login.jhtml\r\n"""
b"""Connection: Keep-Alive\r\nUser-Agent: Mozilla/4.75 [en] (X11; U; OpenBSD 2.8 i386; Nav)\r\n"""
b"""Host: ltd.snap.com\r\nAccept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, image/png, */*\r\n"""
b"""Accept-Encoding: gzip\r\nAccept-Language: en\r\nAccept-Charset: iso-8859-1,*,utf-8\r\n"""
b"""Content-type: application/x-www-form-urlencoded\r\nContent-length: 61\r\n\r\n"""
b"""sn=em&mn=dtest4&pw=this+is+atest&fr=true&login=Sign+in&od=www""")
r = Request(s)
assert r.method == 'POST'
assert r.uri == '/main/redirect/ab/1,295,,00.html'
assert r.body == b'sn=em&mn=dtest4&pw=this+is+atest&fr=true&login=Sign+in&od=www'
assert r.headers['content-type'] == 'application/x-www-form-urlencoded'
Request(s[:60])
def test_format_request():
r = Request()
assert str(r) == 'GET / HTTP/1.0\r\n\r\n'
r.method = 'POST'
r.uri = '/foo/bar/baz.html'
r.headers['content-type'] = 'text/plain'
r.headers['content-length'] = '5'
r.body = b'hello'
s = str(r)
assert s.startswith('POST /foo/bar/baz.html HTTP/1.0\r\n')
assert s.endswith('\r\n\r\nhello')
assert '\r\ncontent-length: 5\r\n' in s
assert '\r\ncontent-type: text/plain\r\n' in s
s = bytes(r)
assert s.startswith(b'POST /foo/bar/baz.html HTTP/1.0\r\n')
assert s.endswith(b'\r\n\r\nhello')
assert b'\r\ncontent-length: 5\r\n' in s
assert b'\r\ncontent-type: text/plain\r\n' in s
r = Request(bytes(r))
assert bytes(r) == s
def test_chunked_response():
from binascii import unhexlify
header = (
b"HTTP/1.1 200 OK\r\n"
b"Cache-control: no-cache\r\n"
b"Pragma: no-cache\r\n"
b"Content-Type: text/javascript; charset=utf-8\r\n"
b"Content-Encoding: gzip\r\n"
b"Transfer-Encoding: chunked\r\n"
b"Set-Cookie: S=gmail=agg:gmail_yj=v2s:gmproxy=JkU; Domain=.google.com; Path=/\r\n"
b"Server: GFE/1.3\r\n"
b"Date: Mon, 12 Dec 2005 22:33:23 GMT\r\n"
b"\r\n"
)
body = unhexlify(
'610d0a1f8b08000000000000000d0a3135320d0a6d914d4fc4201086effe0a82c99e58'
'4a4be9b6eec1e81e369e34f1e061358652da12596880bafaef85ee1a2ff231990cef30'
'3cc381a0c301e610c13ca765595435a1a4ace1db153aa49d0cfa354b00f62eaaeb86d5'
'79cd485995348ebc2a688c8e214c3759e627eb82575acf3e381e6487853158d863e6bc'
'175a898fac208465de0a215d961769b5027b7bc27a301e0f23379c77337699329dfcc2'
'6338ea5b2f4550d6bcce84d0ceabf760271fac53d2c7d2fb94024edc040feeba195803'
'547457d7b4d9920abc58a73bb09b2710243f46fdf3437a50748a55efb8c88b2d18edec'
'3ce083850821f8225bb0d36a826893b8cfd89bbadad09214a4610d630d654dfd873d58'
'3b68d96a3be0646217c202bdb046c2696e23fb3ab6c47815d69f8aafcf290b5ebce769'
'11808b004401d82f8278f6d8f74a28ae2f11701f2bc470093afefddfa359faae347f00'
'c5a595a1e20100000d0a300d0a0d0a'
)
buf = header + body
r = Response(buf)
assert r.version == '1.1'
assert r.status == '200'
assert r.reason == 'OK'
def test_multicookie_response():
s = (b"""HTTP/1.x 200 OK\r\nSet-Cookie: first_cookie=cookie1; path=/; domain=.example.com\r\n"""
b"""Set-Cookie: second_cookie=cookie2; path=/; domain=.example.com\r\nContent-Length: 0\r\n\r\n""")
r = Response(s)
assert type(r.headers['set-cookie']) is list
assert len(r.headers['set-cookie']) == 2
def test_noreason_response():
s = b"""HTTP/1.1 200 \r\n\r\n"""
r = Response(s)
assert r.reason == ''
assert bytes(r) == s
def test_response_with_body():
r = Response()
r.body = b'foo'
assert str(r) == 'HTTP/1.0 200 OK\r\n\r\nfoo'
assert bytes(r) == b'HTTP/1.0 200 OK\r\n\r\nfoo'
repr(r)
def test_body_forbidden_response():
s = b'HTTP/1.1 304 Not Modified\r\n'\
b'Content-Type: text/css\r\n'\
b'Last-Modified: Wed, 14 Jan 2009 16:42:11 GMT\r\n'\
b'ETag: "3a7-496e15e3"\r\n'\
b'Cache-Control: private, max-age=414295\r\n'\
b'Date: Wed, 22 Sep 2010 17:55:54 GMT\r\n'\
b'Connection: keep-alive\r\n'\
b'Vary: Accept-Encoding\r\n\r\n'\
b'HTTP/1.1 200 OK\r\n'\
b'Server: Sun-ONE-Web-Server/6.1\r\n'\
b'ntCoent-length: 257\r\n'\
b'Content-Type: application/x-javascript\r\n'\
b'Last-Modified: Wed, 06 Jan 2010 19:34:06 GMT\r\n'\
b'ETag: "101-4b44e5ae"\r\n'\
b'Accept-Ranges: bytes\r\n'\
b'Content-Encoding: gzip\r\n'\
b'Cache-Control: private, max-age=439726\r\n'\
b'Date: Wed, 22 Sep 2010 17:55:54 GMT\r\n'\
b'Connection: keep-alive\r\n'\
b'Vary: Accept-Encoding\r\n'
result = []
while s:
msg = Response(s)
s = msg.data
result.append(msg)
    # the second HTTP response should be a standalone message
assert len(result) == 2
def test_request_version():
s = b"""GET / HTTP/1.0\r\n\r\n"""
r = Request(s)
assert r.method == 'GET'
assert r.uri == '/'
assert r.version == '1.0'
s = b"""GET /\r\n\r\n"""
r = Request(s)
assert r.method == 'GET'
assert r.uri == '/'
assert r.version == '0.9'
import pytest
s = b"""GET / CHEESE/1.0\r\n\r\n"""
with pytest.raises(dpkt.UnpackError, match="invalid http version: u?'CHEESE/1.0'"):
Request(s)
def test_valid_header():
# valid header.
s = b'POST /main/redirect/ab/1,295,,00.html HTTP/1.0\r\n' \
b'Referer: http://www.email.com/login/snap/login.jhtml\r\n' \
b'Connection: Keep-Alive\r\n' \
b'User-Agent: Mozilla/4.75 [en] (X11; U; OpenBSD 2.8 i386; Nav)\r\n' \
b'Host: ltd.snap.com\r\n' \
b'Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, image/png, */*\r\n' \
b'Accept-Encoding: gzip\r\n' \
b'Accept-Language: en\r\n' \
b'Accept-Charset: iso-8859-1,*,utf-8\r\n' \
b'Content-type: application/x-www-form-urlencoded\r\n' \
b'Content-length: 61\r\n\r\n' \
b'sn=em&mn=dtest4&pw=this+is+atest&fr=true&login=Sign+in&od=www'
r = Request(s)
assert r.method == 'POST'
assert r.uri == '/main/redirect/ab/1,295,,00.html'
assert r.body == b'sn=em&mn=dtest4&pw=this+is+atest&fr=true&login=Sign+in&od=www'
assert r.headers['content-type'] == 'application/x-www-form-urlencoded'
def test_weird_end_header():
s_weird_end = b'POST /main/redirect/ab/1,295,,00.html HTTP/1.0\r\n' \
b'Referer: http://www.email.com/login/snap/login.jhtml\r\n' \
b'Connection: Keep-Alive\r\n' \
b'User-Agent: Mozilla/4.75 [en] (X11; U; OpenBSD 2.8 i386; Nav)\r\n' \
b'Host: ltd.snap.com\r\n' \
b'Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, image/png, */*\r\n' \
b'Accept-Encoding: gzip\r\n' \
b'Accept-Language: en\r\n' \
b'Accept-Charset: iso-8859-1,*,utf-8\r\n' \
b'Content-type: application/x-www-form-urlencoded\r\n' \
b'Cookie: TrackID=1PWdcr3MO_C611BGW'
r = Request(s_weird_end)
assert r.method == 'POST'
assert r.uri == '/main/redirect/ab/1,295,,00.html'
assert r.headers['content-type'] == 'application/x-www-form-urlencoded'
def test_gzip_response():
import zlib
# valid response, compressed using gzip
s = b'HTTP/1.0 200 OK\r\n' \
b'Server: SimpleHTTP/0.6 Python/2.7.12\r\n' \
b'Date: Fri, 10 Mar 2017 20:43:08 GMT\r\n' \
b'Content-type: text/plain\r\n' \
b'Content-Encoding: gzip\r\n' \
b'Content-Length: 68\r\n' \
b'Last-Modified: Fri, 10 Mar 2017 20:40:43 GMT\r\n\r\n' \
b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\x03\x0b\xc9\xc8,V\x00\xa2D' \
b'\x85\xb2\xd4\xa2J\x85\xe2\xdc\xc4\x9c\x1c\x85\xb4\xcc\x9cT\x85\x92' \
b'|\x85\x92\xd4\xe2\x12\x85\xf4\xaa\xcc\x02\x85\xa2\xd4\xe2\x82\xfc' \
b'\xbc\xe2\xd4b=.\x00\x01(m\xad2\x00\x00\x00'
r = Response(s)
assert r.version == '1.0'
assert r.status == '200'
assert r.reason == 'OK'
# Make a zlib compressor with the appropriate gzip options
decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
body = decompressor.decompress(r.body)
assert body.startswith(b'This is a very small file')
def test_message():
# s = b'Date: Fri, 10 Mar 2017 20:43:08 GMT\r\n' # FIXME - unused
r = Message(content_length=68)
assert r.content_length == 68
assert len(r) == 2
def test_invalid():
import pytest
s = b'INVALID / HTTP/1.0\r\n'
with pytest.raises(dpkt.UnpackError, match="invalid http method: u?'INVALID'"):
Request(s)
s = b'A'
with pytest.raises(dpkt.UnpackError, match="invalid response: b?'A'"):
Response(s)
s = b'HTTT 200 OK'
with pytest.raises(dpkt.UnpackError, match="invalid response: b?'HTTT 200 OK'"):
Response(s)
s = b'HTTP TWO OK'
with pytest.raises(dpkt.UnpackError, match="invalid response: b?'HTTP TWO OK'"):
Response(s)
s = (
b'HTTP/1.0 200 OK\r\n'
b'Invalid Header: invalid\r\n'
)
with pytest.raises(dpkt.UnpackError, match="invalid header: "):
Response(s)
s = (
b"HTTP/1.1 200 OK\r\n"
b"Transfer-Encoding: chunked\r\n"
b"\r\n"
b"\r\n"
)
with pytest.raises(dpkt.UnpackError, match="missing chunk size"):
Response(s)
s = (
b"HTTP/1.1 200 OK\r\n"
b"Transfer-Encoding: chunked\r\n"
b"\r\n"
b"\x01\r\na"
)
with pytest.raises(dpkt.UnpackError, match="invalid chunk size"):
Response(s)
s = (
b"HTTP/1.1 200 OK\r\n"
b"Transfer-Encoding: chunked\r\n"
b"\r\n"
b"2\r\n"
b"abcd"
)
with pytest.raises(dpkt.NeedData, match="premature end of chunked body"):
Response(s)
s = (
b"HTTP/1.1 200 OK\r\n"
b"Content-Length: 68\r\n"
b"\r\n"
b"a\r\n"
)
with pytest.raises(dpkt.NeedData, match=r"short body \(missing 65 bytes\)"):
Response(s)
# messy header.
s_messy_header = b'aaaaaaaaa\r\nbbbbbbbbb'
with pytest.raises(dpkt.UnpackError, match="invalid request: u?'aaaaaaaa"):
Request(s_messy_header)
def test_response_str():
s = (
b'HTTP/1.0 200 OK\r\n'
b'Server: SimpleHTTP/0.6 Python/2.7.12\r\n'
b'Date: Fri, 10 Mar 2017 20:43:08 GMT\r\n'
b'Content-type: text/plain\r\n'
)
# the headers are processed to lowercase keys
resp = [
'HTTP/1.0 200 OK',
'server: SimpleHTTP/0.6 Python/2.7.12',
'date: Fri, 10 Mar 2017 20:43:08 GMT',
'content-type: text/plain',
'',
'',
]
r_str = str(Response(s))
    expected_arr = sorted(resp)
    actual_arr = sorted(r_str.split('\r\n'))
    for expected_line, actual_line in zip(expected_arr, actual_arr):
        assert expected_line == actual_line
def test_request_str():
s = b'GET / HTTP/1.0\r\n'
r = Request(s)
req = 'GET / HTTP/1.0\r\n\r\n'
assert req == str(r)
def test_parse_body():
import pytest
from .compat import BytesIO
buf = BytesIO(
b'05\r\n' # size
b'ERR' # longer than size
)
buf.seek(0)
headers = {
'transfer-encoding': 'chunked',
}
with pytest.raises(dpkt.NeedData, match="premature end of chunked body"):
parse_body(buf, headers)
|
1fa49b2371abd1020665a705f97a7313f00a77d9
|
5039cfbbee5cbf54c201ae13d191b299aeda08bc
|
/nanopore-human-transcriptome/scripts/mod.py
|
da1e5d3ccfb7fd97e4ffe2b47ba818d8fc58f864
|
[
"CC-BY-4.0"
] |
permissive
|
nanopore-wgs-consortium/NA12878
|
f5a15241fcbdd2e9fa2ee190dd56e10ee3eb12c3
|
f4c6bd99be07468f9de590dedc1db68d3ea17de7
|
refs/heads/master
| 2022-11-28T14:41:59.145117
| 2021-09-06T17:07:14
| 2021-09-06T17:07:14
| 75,269,921
| 379
| 104
|
NOASSERTION
| 2022-11-22T03:14:06
| 2016-12-01T08:06:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,341
|
py
|
mod.py
|
"""
By: Roham Razaghi (rrazagh1@jhu.edu)
"""
import numpy as np
from pybiomart import Dataset
import pandas as pd
# input files
isoform_file = open('/dilithium/Data/Nanopore/rna/isoforms/nvrna.180828.180927.read.isoform.map.nodup.txt')
evalign_file = open('/dilithium/Data/Nanopore/rna/evalign/180716_dRNAall.mq5_GGACU.evalign.txt')
# output files
evalign_df = "/dilithium/Data/Nanopore/rna/analysis/out_to_R/mod/nvrna_evalign_GGACU_dataframe.txt"
evalign_stat = "/dilithium/Data/Nanopore/rna/analysis/out_to_R/mod/nvrna_evalign_GGACU_stats.txt"
read_to_isogene = {}
data_dict = {}
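# read_to_isogene maps read_id -> "<gene>_<isoform>" (Ensembl ids with their
# version suffixes stripped); data_dict collects, per "<gene>_<isoform>_<pos>"
# key, the list of event means observed for GGACU k-mers.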
for line in isoform_file:
line_arr = line.strip().split('\t')
read_id = line_arr[0]
isoform_id = line_arr[1].split('_')[0].split('.')[0]
gene_id = line_arr[1].split('_')[1].split('.')[0]
iso_gene_id = gene_id + '_' + isoform_id
read_to_isogene[read_id] = iso_gene_id
outF2 = open(evalign_df, "w")
print("Position\tIsoform\tGene_ID\tEvent_mean", end="\n", file=outF2)
for line in evalign_file:
read_id = line.split('\t')[3]
event_mean = line.split('\t')[6]
model_kmer = line.split('\t')[9]
pos = line.split('\t')[1]
    if read_id in read_to_isogene and model_kmer == 'GGACU':
key = read_to_isogene[read_id] + '_' + pos
print(pos + '\t' + key.split('_')[1] + '\t' + key.split('_')[0] + '\t' + event_mean, end="\n", file=outF2)
data_dict.setdefault(key, []).append(float(event_mean))
outF = open(evalign_stat, "w")
print("Position\tIsoform\tGene_ID\tReads\tEvent_mean\tEvent_median\tSD\tDistance", end="\n", file=outF)
# Write one stats row per "<gene>_<isoform>_<pos>" key; the constant 123.83 is
# subtracted from the mean to produce the Distance column.
for k, v in data_dict.items():
    gene_id, isoform_id, pos = k.split('_')
    print(pos + '\t' + isoform_id + '\t' + gene_id + '\t' + str(len(v)) + '\t' +
          str(np.mean(v)) + '\t' + str(np.median(v)) + '\t' + str(np.std(v)) + '\t' +
          str(np.mean(v) - 123.83), end="\n", file=outF)
evalign_file.close()
isoform_file.close()
outF.close()
outF2.close()
dataset = Dataset(name='hsapiens_gene_ensembl', host='http://www.ensembl.org')
conversion = dataset.query(attributes=['ensembl_gene_id', 'external_gene_name'])
conversion.columns = ['Gene_ID', 'Gene_symbol']
for file in [evalign_stat, evalign_df]:
df = pd.read_csv(file, sep='\t')
df_merge = pd.merge(df, conversion, how='inner', on=['Gene_ID'])
df_merge.to_csv(file, header=True, index=False, sep='\t')
|
ab0f4dca8b46039943f31f23f8ad4c3fbff36edd
|
54292bb222c6525217458e92ddacfc4e2635b83e
|
/tools/python/allcheck.py
|
e0a5b43c0890b7a9a73c358cba4882ae7a10eb62
|
[
"Apache-2.0"
] |
permissive
|
daviddrysdale/python-phonenumbers
|
0d69b48033d1464c0a6c358274062f1db2ee8c4a
|
2f06ef6db2ca83f3856fbb8019a0c665f5971b13
|
refs/heads/dev
| 2023-08-31T09:37:20.570690
| 2023-08-22T05:18:22
| 2023-08-22T05:18:22
| 1,643,611
| 2,944
| 406
|
Apache-2.0
| 2023-08-08T06:49:07
| 2011-04-21T03:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,933
|
py
|
allcheck.py
|
#!/usr/bin/env python
import sys
import re
import glob
# Use the local code in preference to any pre-installed version
sys.path.insert(0, '../../python')
import phonenumbers
import phonenumbers.geocoder
import phonenumbers.carrier
import phonenumbers.timezone
from phonenumbers.util import prnt
# Manually grep for top-level identifiers
INTERNAL_FILES = ['../../python/phonenumbers/util.py',
'../../python/phonenumbers/re_util.py',
'../../python/phonenumbers/unicode_util.py']
CLASS_RE = re.compile(r"^class +([A-Za-z][_A-Za-z0-9]+)[ \(:]")
FUNCTION_RE = re.compile(r"^def +([A-Za-z][_A-Za-z0-9]+)[ \(]")
CONSTANT_RE = re.compile(r"^([A-Z][_A-Z0-9]+) *= *")
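# Editor's sketch (hypothetical input lines): what these greps match.
#   CLASS_RE.match("class PhoneNumber(object):").group(1)         # -> "PhoneNumber"
#   FUNCTION_RE.match("def parse(number, region=None):").group(1)  # -> "parse"
#   CONSTANT_RE.match("MAX_LENGTH = 17").group(1)                  # -> "MAX_LENGTH"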
grepped_all = set()
for filename in glob.glob('../../python/phonenumbers/*.py'):
if filename in INTERNAL_FILES:
continue
with open(filename, "r") as infile:
for line in infile:
m = CLASS_RE.match(line)
if m:
grepped_all.add(m.group(1))
m = FUNCTION_RE.match(line)
if m:
grepped_all.add(m.group(1))
m = CONSTANT_RE.match(line)
if m:
grepped_all.add(m.group(1))
# Pull in the declared identifiers
code_all = (set(phonenumbers.__all__) |
set(phonenumbers.geocoder.__all__) |
set(phonenumbers.carrier.__all__) |
set(phonenumbers.timezone.__all__))
# Compare
code_not_grepped = (code_all - grepped_all)
grepped_not_code = (grepped_all - code_all)
if len(code_not_grepped) > 0:
prnt("Found the following in __all__ but not in grepped code:", file=sys.stderr)
for identifier in code_not_grepped:
prnt(" %s" % identifier, file=sys.stderr)
if len(grepped_not_code) > 0:
prnt("Found the following in grepped code but not in __all__:", file=sys.stderr)
for identifier in grepped_not_code:
prnt(" %s" % identifier, file=sys.stderr)
|
92a2dd073ca282268bfad5217aa9b9d19b4a1279
|
b04cc98a746d1df457183bc14908094a8be00ba1
|
/tests/test_client_connect.py
|
abf637ddf26925f15e7abc6df494f84530618037
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleSlim
|
a3bcaef0c92016b7f6946d58787f87c7db8ff3f8
|
bb02b103a89a09635941bc0bbbd38506d7412468
|
refs/heads/develop
| 2023-08-31T01:47:27.824722
| 2023-08-25T08:06:08
| 2023-08-25T08:06:08
| 228,290,594
| 1,534
| 402
|
Apache-2.0
| 2023-08-29T09:37:55
| 2019-12-16T02:56:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,735
|
py
|
test_client_connect.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle
sys.path.append("../")
import os
import time
import signal
import unittest
from static_case import StaticCase
from paddleslim.nas import SANAS
from paddleslim.common.controller_client import ControllerClient
import numpy as np
from multiprocessing import Process
import socket
def start_client(configs, addr, port):
client_sanas = SANAS(
configs=configs,
server_addr=(addr, port),
save_checkpoint=None,
is_server=False)
for _ in range(2):
arch = client_sanas.next_archs()[0]
time.sleep(1)
client_sanas.reward(0.1)
def start_server(configs, port):
server_sanas = SANAS(
configs=configs, server_addr=("", port), save_checkpoint=None)
server_sanas.next_archs()[0]
return server_sanas
class TestClientConnect(StaticCase):
def setUp(self):
self.configs = [('MobileNetV2BlockSpace', {'block_mask': [0]})]
self.port = np.random.randint(8337, 8773)
self.addr = socket.gethostbyname(socket.gethostname())
def test_client_start_first(self):
p = Process(
target=start_client, args=(self.configs, self.addr, self.port))
p.start()
start_server(self.configs, self.port)
class TestClientConnectCase1(StaticCase):
def setUp(self):
self.configs = [('MobileNetV2BlockSpace', {'block_mask': [0]})]
self.port = np.random.randint(8337, 8773)
self.addr = socket.gethostbyname(socket.gethostname())
def test_client_start_first(self):
p = Process(
target=start_client, args=(self.configs, self.addr, self.port))
p.start()
time.sleep(60)
server_sanas = start_server(self.configs, self.port)
os.kill(os.getpid(), 0)
class TestClientConnectCase2(StaticCase):
def setUp(self):
self.port = np.random.randint(8337, 8773)
self.addr = socket.gethostbyname(socket.gethostname())
def test_request_current_info(self):
client = ControllerClient(self.addr, self.port)
client.request_current_info()
if __name__ == '__main__':
unittest.main()
|
25a26af1154199180e550832123e68f39496977b
|
d6c306567c2293c9e093edb1823e731db005bb04
|
/deslib/dcs/lca.py
|
5e0d90f3562a35b97d4b7e9d78ba69f8a1f26eda
|
[
"BSD-3-Clause"
] |
permissive
|
scikit-learn-contrib/DESlib
|
96036c36842d0f2194a0e4a9dad321ae2d4cf856
|
90c2eadc33333469b4826ec8ac4118f3e3b713f6
|
refs/heads/master
| 2023-08-10T20:54:48.013884
| 2023-06-18T21:56:30
| 2023-06-18T21:56:30
| 113,624,260
| 370
| 68
|
BSD-3-Clause
| 2023-06-18T21:56:31
| 2017-12-08T22:49:49
|
Python
|
UTF-8
|
Python
| false
| false
| 8,422
|
py
|
lca.py
|
# coding=utf-8
# Author: Rafael Menelau Oliveira e Cruz <rafaelmenelau@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from deslib.dcs.base import BaseDCS
class LCA(BaseDCS):
"""Local Class Accuracy (LCA).
Evaluates the competence level of each individual classifiers and
select the most competent one to predict the label of each test sample.
The competence of each base classifier is calculated based on its local
accuracy with respect to some output class. Consider a classifier
:math:`c_{i}` that assigns a test sample to class :math:`w_{l}`. The
    competence level of :math:`c_{i}` is estimated as the percentage of the
    local training samples assigned to class :math:`w_{l}` for which it
    predicts the correct class label.
The LCA method selects the base classifier presenting the highest
competence level. In a case where more than one base classifier achieves
the same competence level, the one that was evaluated first is selected.
The selection methodology can be modified by changing the hyper-parameter
selection_method.
Parameters
----------
pool_classifiers : list of classifiers (Default = None)
The generated_pool of classifiers trained for the corresponding
classification problem. Each base classifiers should support the method
"predict". If None, then the pool of classifiers is a bagging
classifier.
k : int (Default = 7)
Number of neighbors used to estimate the competence of the base
classifiers.
DFP : Boolean (Default = False)
Determines if the dynamic frienemy pruning is applied.
with_IH : Boolean (Default = False)
Whether the hardness level of the region of competence is used to
decide between using the DS algorithm or the KNN for classification of
a given query sample.
safe_k : int (default = None)
The size of the indecision region.
IH_rate : float (default = 0.3)
Hardness threshold. If the hardness level of the competence region is
lower than the IH_rate the KNN classifier is used. Otherwise, the DS
algorithm is used for classification.
selection_method : String (Default = "best")
Determines which method is used to select the base classifier after
the competences are estimated.
diff_thresh : float (Default = 0.1)
Threshold to measure the difference between the competence level of the
base classifiers for the random and diff selection schemes. If the
        difference is lower than the threshold, their performance is
        considered equivalent.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
knn_classifier : {'knn', 'faiss', None} (Default = 'knn')
The algorithm used to estimate the region of competence:
        - 'knn' : will use :class:`KNeighborsClassifier` from sklearn
          (or :class:`KNNE` when ``knne=True``).
- 'faiss' : will use Facebook's Faiss similarity search through the
class :class:`FaissKNNClassifier`
- `None` : will use sklearn :class:`KNeighborsClassifier`.
knn_metric : {'minkowski', 'cosine', 'mahalanobis'} (Default = 'minkowski')
The metric used by the k-NN classifier to estimate distances.
- 'minkowski' will use minkowski distance.
- 'cosine' will use the cosine distance.
        - 'mahalanobis' will use the mahalanobis distance.
DSEL_perc : float (Default = 0.5)
Percentage of the input data used to fit DSEL.
Note: This parameter is only used if the pool of classifier is None or
unfitted.
n_jobs : int, default=-1
The number of parallel jobs to run. None means 1 unless in
a joblib.parallel_backend context. -1 means using all processors.
Doesn’t affect fit method.
References
----------
Woods, Kevin, W. Philip Kegelmeyer, and Kevin Bowyer. "Combination of
multiple classifiers using local accuracy estimates." IEEE transactions on
pattern analysis and machine intelligence 19.4 (1997): 405-410.
Britto, Alceu S., Robert Sabourin, and Luiz ES Oliveira. "Dynamic selection
of classifiers—a comprehensive review."
Pattern Recognition 47.11 (2014): 3665-3680.
R. M. O. Cruz, R. Sabourin, and G. D. Cavalcanti, “Dynamic classifier
selection: Recent advances and perspectives,”
Information Fusion, vol. 41, pp. 195 – 216, 2018.
"""
def __init__(self, pool_classifiers=None, k=7, DFP=False, with_IH=False,
safe_k=None, IH_rate=0.30, selection_method='best',
diff_thresh=0.1, random_state=None, knn_classifier='knn',
knn_metric='minkowski', DSEL_perc=0.5,
knne=False, n_jobs=-1):
super(LCA, self).__init__(pool_classifiers=pool_classifiers, k=k,
DFP=DFP, with_IH=with_IH, safe_k=safe_k,
IH_rate=IH_rate,
selection_method=selection_method,
diff_thresh=diff_thresh,
random_state=random_state,
knn_classifier=knn_classifier,
knn_metric=knn_metric,
DSEL_perc=DSEL_perc,
knne=knne,
n_jobs=n_jobs)
def estimate_competence(self, competence_region, distances=None,
predictions=None):
"""estimate the competence of each base classifier :math:`c_{i}` for
the classification of the query sample using the local class accuracy
method.
In this algorithm the k-Nearest Neighbors of the test sample are
estimated. Then, the local accuracy of the base classifiers is
estimated by its classification accuracy taking into account only the
samples from the class :math:`w_{l}` in this neighborhood. In this
case, :math:`w_{l}` is the class predicted by the base classifier
:math:`c_{i}`, for the query sample. The competence level estimate is
represented by the following equation:
.. math:: \\delta_{i,j} = \\frac{\\sum_{\\mathbf{x}_{k} \\in
\\omega_{l}}P(\\omega_{l} \\mid \\mathbf{x}_{k},
c_{i} )}{\\sum_{k = 1}^{K}P(\\omega_{l} \\mid
\\mathbf{x}_{k}, c_{i} )}
where :math:`\\delta_{i,j}` represents the competence level of
:math:`c_{i}` for the classification of query.
Parameters
----------
competence_region : array of shape (n_samples, n_neighbors)
Indices of the k nearest neighbors.
distances : array of shape (n_samples, n_neighbors)
Distances from the k nearest neighbors to the query.
predictions : array of shape (n_samples, n_classifiers)
Predictions of the base classifiers for the test examples.
Returns
-------
competences : array of shape (n_samples, n_classifiers)
Competence level estimated for each base classifier and test
example.
"""
predictions = np.atleast_2d(predictions)
# Expanding the dimensions of the predictions and target arrays in
# order to compare both.
predictions_3d = np.expand_dims(predictions, axis=1)
target_3d = np.expand_dims(self.DSEL_target_[competence_region],
axis=2)
# Create a mask to remove the neighbors belonging to a different class
# than the predicted by the base classifier
mask = (predictions_3d != target_3d)
masked_preprocessed = np.ma.MaskedArray(
self.DSEL_processed_[competence_region, :], mask=mask)
competences_masked = np.mean(masked_preprocessed, axis=1)
# Fill 0 to the masked values in the resulting array (when no neighbors
# belongs to the class predicted by
# the corresponding base classifier)
competences = np.ma.filled(competences_masked, 0)
return competences
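# Editor's sketch of typical usage (assumes scikit-learn; follows the general
# DESlib pattern and is illustrative only):
#   from sklearn.datasets import make_classification
#   from sklearn.ensemble import BaggingClassifier
#   from sklearn.model_selection import train_test_split
#   X, y = make_classification(n_samples=500, random_state=0)
#   X_train, X_dsel, y_train, y_dsel = train_test_split(X, y, random_state=0)
#   pool = BaggingClassifier(n_estimators=10, random_state=0).fit(X_train, y_train)
#   lca = LCA(pool_classifiers=pool).fit(X_dsel, y_dsel)
#   y_pred = lca.predict(X_dsel)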
|
95c39343605d27fce025316b71b874fe166656d3
|
a9fdace9236af6c73133fd8dddb80843697efc7d
|
/catalyst/callbacks/mixup.py
|
94e084bac4963e4d3a2f0e56926f254180f29a47
|
[
"Apache-2.0"
] |
permissive
|
catalyst-team/catalyst
|
026c38f26dad471cd77347adbc13423b156a5d8b
|
e99f90655d0efcf22559a46e928f0f98c9807ebf
|
refs/heads/master
| 2023-08-26T23:12:49.277005
| 2022-04-29T04:19:24
| 2022-04-29T04:19:24
| 145,385,156
| 3,038
| 487
|
Apache-2.0
| 2023-08-12T03:40:14
| 2018-08-20T07:56:13
|
Python
|
UTF-8
|
Python
| false
| false
| 5,899
|
py
|
mixup.py
|
from typing import List, Union
from catalyst.core.callback import Callback, CallbackOrder
from catalyst.core.runner import IRunner
from catalyst.utils.torch import mixup_batch
class MixupCallback(Callback):
"""
    Callback to do mixup augmentation. More details about mixup can be found in the paper
`mixup: Beyond Empirical Risk Minimization`: https://arxiv.org/abs/1710.09412 .
Args:
keys: batch keys to which you want to apply augmentation
alpha: beta distribution a=b parameters. Must be >=0.
The more alpha closer to zero the less effect of the mixup.
mode: mode determines the method of use. Must be in ["replace", "add"].
If "replace" then replaces the batch with a mixed one,
while the batch size is not changed.
If "add", concatenates mixed examples to the current ones,
the batch size increases by 2 times.
on_train_only: apply to train only.
            As mixup uses proxy inputs, the targets are also proxies, and we are
            not interested in them during validation. So, if ``on_train_only``
            is ``True``, a standard output/metric is used for validation.
Examples:
.. code-block:: python
from typing import Any, Dict
import os
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.callbacks import MixupCallback
from catalyst.contrib.datasets import MNIST
class SimpleNet(nn.Module):
def __init__(self, in_channels, in_hw, out_features):
super().__init__()
self.encoder = nn.Sequential(nn.Conv2d(in_channels,
in_channels, 3, 1, 1), nn.Tanh())
self.clf = nn.Linear(in_channels * in_hw * in_hw, out_features)
def forward(self, x):
features = self.encoder(x)
features = features.view(features.size(0), -1)
logits = self.clf(features)
return logits
class SimpleDataset(torch.utils.data.Dataset):
def __init__(self, train: bool = False):
self.mnist = MNIST(os.getcwd(), train=train)
def __len__(self) -> int:
return len(self.mnist)
def __getitem__(self, idx: int) -> Dict[str, Any]:
x, y = self.mnist.__getitem__(idx)
y_one_hot = np.zeros(10)
y_one_hot[y] = 1
return {"image": x,
"clf_targets": y,
"clf_targets_one_hot": torch.Tensor(y_one_hot)}
model = SimpleNet(1, 28, 10)
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
loaders = {
"train": DataLoader(SimpleDataset(train=True), batch_size=32),
"valid": DataLoader(SimpleDataset(train=False), batch_size=32),
}
class CustomRunner(dl.Runner):
def handle_batch(self, batch):
image = batch["image"]
clf_logits = self.model(image)
self.batch["clf_logits"] = clf_logits
runner = CustomRunner()
runner.train(
loaders=loaders,
model=model,
criterion=criterion,
optimizer=optimizer,
logdir="./logdir14",
num_epochs=2,
verbose=True,
valid_loader="valid",
valid_metric="loss",
minimize_valid_metric=True,
callbacks={
"mixup": MixupCallback(keys=["image", "clf_targets_one_hot"]),
"criterion": dl.CriterionCallback(
metric_key="loss",
input_key="clf_logits",
target_key="clf_targets_one_hot"
),
"backward": dl.BackwardCallback(metric_key="loss"),
"optimizer": dl.OptimizerCallback(metric_key="loss"),
"classification": dl.ControlFlowCallback(
dl.PrecisionRecallF1SupportCallback(
input_key="clf_logits", target_key="clf_targets", num_classes=10
),
ignore_loaders="train",
),
},
)
    .. note::
        When running this callback, many metrics (accuracy, etc.) become
        undefined, so use ``ControlFlowCallback`` in order to evaluate the
        model (see the example above).
"""
def __init__(
self, keys: Union[str, List[str]], alpha=0.2, mode="replace", on_train_only=True
):
"""Init."""
        assert isinstance(keys, (str, list, tuple)), (
            f"keys must be str or list[str], got: {type(keys)}"
        )
        assert alpha >= 0, "alpha must be >= 0"
        assert mode in (
            "add",
            "replace",
        ), f"mode must be in ('add', 'replace'), got: {mode}"
super().__init__(order=CallbackOrder.Internal)
if isinstance(keys, str):
keys = [keys]
self.keys = keys
self.on_train_only = on_train_only
self.alpha = alpha
self.mode = mode
self._is_required = True
def on_loader_start(self, runner: "IRunner") -> None:
"""Event handler."""
self._is_required = not self.on_train_only or runner.is_train_loader
def on_batch_start(self, runner: "IRunner") -> None:
"""Event handler."""
if self._is_required:
mixuped_batch = [runner.batch[key] for key in self.keys]
mixuped_batch = mixup_batch(mixuped_batch, alpha=self.alpha, mode=self.mode)
for key, mixuped_value in zip(self.keys, mixuped_batch):
runner.batch[key] = mixuped_value
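# Editor's sketch (not part of catalyst): a minimal illustration of the
# blending that ``mixup_batch`` performs in "replace" mode.
def _mixup_sketch(batch, alpha: float = 0.2):
    """Draw lam ~ Beta(alpha, alpha) once and blend every tensor in `batch`
    with the same permuted copy of itself, so inputs and one-hot targets
    stay aligned. Illustrative only, not the actual catalyst internals."""
    import numpy as np
    import torch
    lam = float(np.random.beta(alpha, alpha))
    perm = torch.randperm(batch[0].size(0))
    return [lam * t + (1.0 - lam) * t[perm] for t in batch]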
__all__ = ["MixupCallback"]
|
3966791c4b99f6f88ee09f0c7418cf435ac6e8a3
|
84ecf73288879a396d1ff67e5779e07fadde80fb
|
/projects/mmdet3d_plugin/uniad/dense_heads/motion_head_plugin/__init__.py
|
79b6df5daded235da9dfb8d529641c932765475a
|
[
"Apache-2.0"
] |
permissive
|
OpenDriveLab/UniAD
|
1ef9d2203ad35bbf8e1eb1a305d409817d90b888
|
2f38ff1357d3956af11c5609d5275db56c559c20
|
refs/heads/main
| 2023-08-09T05:28:26.497452
| 2023-08-07T07:27:30
| 2023-08-07T07:27:30
| 575,306,900
| 2,156
| 233
|
Apache-2.0
| 2023-08-29T02:44:05
| 2022-12-07T08:05:49
|
Python
|
UTF-8
|
Python
| false
| false
| 225
|
py
|
__init__.py
|
from .motion_optimization import MotionNonlinearSmoother
from .modules import MotionTransformerDecoder
from .motion_deformable_attn import MotionTransformerAttentionLayer, MotionDeformableAttention
from .motion_utils import *
|
096e88c430827e80ba3d7f7ae95753dc5a3f15c7
|
1b32a80362ce9c2d8f0eb1948637c6599d85aa99
|
/torchlayers/_dev_utils/__init__.py
|
f3634fa1f7a4561469db267d26f3a48142ad0a1a
|
[
"MIT"
] |
permissive
|
szymonmaszke/torchlayers
|
4492c628a49f4db30a76a17b5d38591a85109964
|
1eff7c55fdb3733e0acc180be79354ed35e4167c
|
refs/heads/master
| 2022-07-06T18:02:48.567112
| 2021-05-25T13:58:50
| 2022-06-13T19:09:28
| 201,987,932
| 599
| 47
|
MIT
| 2022-06-13T19:09:29
| 2019-08-12T18:35:56
|
Python
|
UTF-8
|
Python
| false
| false
| 20
|
py
|
__init__.py
|
from . import infer
|
94f6b226ed12b15947bf5ba2c614e0f4b7d29c0f
|
bdaa910baf85fba41e44849d4037d9940e03e4f6
|
/popmon/analysis/profiling/hist_profiler.py
|
b067bdd85555b9af240899a7e0781a98918ec591
|
[
"LicenseRef-scancode-free-unknown",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
ing-bank/popmon
|
428693596663405e2e3a290d4ae6afa96762b088
|
ac79d212a519368d01525950142e0a282f5287c3
|
refs/heads/master
| 2023-08-08T03:06:42.714926
| 2023-07-18T10:21:06
| 2023-07-18T10:24:07
| 258,180,016
| 463
| 38
|
MIT
| 2023-07-18T10:05:37
| 2020-04-23T11:21:14
|
Python
|
UTF-8
|
Python
| false
| false
| 8,254
|
py
|
hist_profiler.py
|
# Copyright (c) 2023 ING Analytics Wholesale Banking
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import pandas as pd
from popmon.base import Module
from popmon.hist.hist_utils import get_bin_centers, is_numeric, is_timestamp
class HistProfiler(Module):
"""Generate profiles of histograms using default statistical functions.
Profiles are:
- 1 dim histograms, all: 'count', 'filled', 'distinct', 'nan', 'most_probable_value', 'overflow', 'underflow'.
- 1 dim histograms, numeric: mean, std, min, max, p01, p05, p16, p50, p84, p95, p99.
- 1 dim histograms, boolean: fraction of true entries.
- 2 dim histograms: count, phi_k correlation constant, p-value and Z-score of contingency test.
- n dim histograms: count (n >= 3)
:param str read_key: key of the input test data to read from the datastore
:param str store_key: key of the output data to store in the datastore
:param list features: features of data-frames to pick up from input data (optional)
:param list ignore_features: features to ignore (optional)
:param list var_timestamp: list of timestamp variables (optional)
:param str hist_col: key for histogram in split dictionary
:param str index_col: key for index in split dictionary
:param dict stats_functions: function_name, function(bin_labels, bin_counts) dictionary
"""
_input_keys = ("read_key",)
_output_keys = ("store_key",)
def __init__(
self,
read_key,
store_key,
features=None,
ignore_features=None,
var_timestamp=None,
hist_col: str = "histogram",
index_col: str = "date",
stats_functions=None,
) -> None:
super().__init__()
self.read_key = read_key
self.store_key = store_key
self.features = features or []
self.ignore_features = ignore_features or []
self.var_timestamp = var_timestamp or []
self.hist_col = hist_col
self.index_col = index_col
if stats_functions is not None:
raise NotImplementedError
def _profile_1d_histogram(self, name, hist):
from popmon.analysis import Profiles
# preprocessing value counts and TS
is_num = is_numeric(hist)
is_ts = is_timestamp(hist) or name in self.var_timestamp
bin_labels, values = get_bin_centers(hist)
bin_counts = np.array([v.entries for v in values])
if len(bin_counts) == 0:
self.logger.warning(f'Histogram "{name}" is empty; skipping.')
return {}
if is_ts:
to_timestamp = np.vectorize(lambda x: pd.to_datetime(x).value)
bin_labels = to_timestamp(bin_labels)
otype = "num" if is_num else "cat"
# calc 1d-histogram statistics
profile = {}
if otype == "cat":
args = [bin_labels, bin_counts]
else:
bin_width = hist.bin_width()
args = [bin_labels, bin_counts, bin_width]
profile.update(Profiles.run(args, dim=1, htype=otype))
args = [bin_labels, bin_counts]
profile.update(Profiles.run(args, dim=1, htype="all"))
# difference between htype=None and htype="all" are arguments (bin labels vs hist)
profile.update(Profiles.run([hist], dim=1, htype=None))
profile.update(Profiles.run([hist], dim=-1, htype=None))
# postprocessing TS
if is_ts:
profile = {
k: pd.Timestamp(v) if k != "std" else pd.Timedelta(v)
for k, v in profile.items()
}
return profile
def _profile_nd_histogram(self, name, hist, dim):
from popmon.analysis import Profiles
if hist.n_dim < dim:
self.logger.warning(
f"Histogram {name} has {hist.n_dim} dimensions (<{dim}); cannot profile. Returning empty."
)
return {}
# calc nd-histogram statistics
profile = Profiles.run([hist], dim=dim, htype=None)
profile.update(Profiles.run([hist], dim=dim, htype="all"))
profile.update(Profiles.run([hist], dim=dim, htype="num"))
profile.update(Profiles.run([hist], dim=dim, htype="cat"))
profile.update(Profiles.run([hist], dim=-1, htype=None))
return profile
def _profile_hist(self, split, hist_name):
from popmon.analysis.profiling import Profiles
if len(split) == 0:
            self.logger.error(f'Split histograms dict "{hist_name}" is empty; returning empty list.')
return []
hist0 = split[0][self.hist_col]
dimension = hist0.n_dim
is_num = is_numeric(hist0)
htype = "num" if is_num else "cat"
# these are the profiled quantities we will monitor
expected_fields = (
Profiles.get_keys_by_dim_and_htype(dim=dimension, htype=htype)
+ Profiles.get_keys_by_dim_and_htype(dim=dimension, htype="all")
+ Profiles.get_keys_by_dim_and_htype(dim=dimension, htype=None)
)
# profiles regardless of dim and htype (e.g. count)
expected_fields += Profiles.get_keys_by_dim_and_htype(dim=None, htype=None)
# profiles regardless of dim
expected_fields += Profiles.get_keys_by_dim_and_htype(dim=-1, htype=htype)
expected_fields += Profiles.get_keys_by_dim_and_htype(dim=-1, htype="all")
expected_fields += Profiles.get_keys_by_dim_and_htype(dim=-1, htype=None)
expected_fields += [self.index_col, self.hist_col]
# now loop over split-axis, e.g. time index, and profile each sub-hist x:y
profile_list = []
for hist_dict in split:
index, hist = hist_dict[self.index_col], hist_dict[self.hist_col]
profile = {self.index_col: index, self.hist_col: hist}
if dimension == 1:
profile.update(self._profile_1d_histogram(hist_name, hist))
else:
profile.update(
self._profile_nd_histogram(hist_name, hist, dim=dimension)
)
if sorted(profile.keys()) != sorted(expected_fields):
self.logger.error(
f'Could not extract full profile for sub-hist "{hist_name} {index}".'
f"Differences: {set(profile.keys()).symmetric_difference(set(expected_fields))}. Skipping."
)
else:
profile_list.append(profile)
return profile_list
def transform(self, data: dict) -> dict:
self.logger.info(
f'Profiling histograms "{self.read_key}" as "{self.store_key}"'
)
features = self.get_features(list(data.keys()))
profiled = {}
for feature in features[:]:
df = self.get_datastore_object(data, feature, dtype=pd.DataFrame)
hist_split_list = df.reset_index().to_dict("records")
self.logger.debug(f'Profiling histogram "{feature}".')
profile_list = self._profile_hist(split=hist_split_list, hist_name=feature)
if len(profile_list) > 0:
profiled[feature] = pd.DataFrame(profile_list).set_index(
[self.index_col]
)
return profiled
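# Editor's note (sketch of the resulting structure): transform() returns a
# dict {feature: DataFrame}, each DataFrame indexed by self.index_col with one
# row per split (e.g. per time slot) and one column per profiled statistic,
# plus the original histogram column.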
|
6b5dd45bb0b6a2137dc60915f8e0c5eeec903854
|
a5653f0dd8e382c6df2ca0e83d324c6e1d94c2d0
|
/AppiumLibrary/locators/__init__.py
|
6542a3733792e78f4b51395bb62e15ad0cf160c7
|
[
"Apache-2.0"
] |
permissive
|
serhatbolsu/robotframework-appiumlibrary
|
b85c97a0fd379cf9a3eb86c205d539c37c783c8b
|
5f278ba0a5dcdad78e6b979ce50c7b7eb18f1648
|
refs/heads/master
| 2023-09-04T00:11:55.612669
| 2022-12-27T06:00:06
| 2022-12-27T06:00:06
| 20,088,879
| 304
| 232
|
Apache-2.0
| 2023-07-12T10:13:23
| 2014-05-23T06:18:58
|
Python
|
UTF-8
|
Python
| false
| false
| 109
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
from .elementfinder import ElementFinder
__all__ = [
"ElementFinder",
]
|
07e7ade9bc807d7ec9b34eeae858cf1455e6896c
|
cb4f118412a55c52d720bc79e4074606622920ac
|
/arcade/experimental/geo_culling_check.py
|
b7d9fabc9282d9095d0f87423e473f88fcc23a43
|
[
"MIT"
] |
permissive
|
pythonarcade/arcade
|
3e536306f0c44f911de149b58958d8b609ffad4b
|
908664efc256697d3098a347f63d217d97841782
|
refs/heads/development
| 2023-08-29T02:53:01.599145
| 2023-08-26T16:54:34
| 2023-08-26T16:54:34
| 49,003,082
| 786
| 215
|
NOASSERTION
| 2023-09-12T18:38:54
| 2016-01-04T14:46:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,673
|
py
|
geo_culling_check.py
|
"""
An experiment trying to bug out the geometry shader sprite culling.
If the culling algorithm is wrong, sprites can disappear before they
leave the screen.
Simply run the program and drag the sprites around using the mouse.
"""
from __future__ import annotations
from arcade.sprite import Sprite
import PIL
import arcade
class GeoCullingTest(arcade.Window):
def __init__(self):
super().__init__(800, 400, "Cull test", resizable=True)
self.proj = self.ctx.projection_2d
self.texture = arcade.Texture(
PIL.Image.new("RGBA", (2048, 2), (255, 255, 255, 255)),
hash="weird_texture",
)
self.spritelist = arcade.SpriteList()
self.spritelist.append(Sprite(
":resources:images/tiles/boxCrate_double.png",
center_x=400, center_y=300, scale=6)
)
for i in range(0, 360, 36):
self.spritelist.append(
arcade.Sprite(self.texture, center_x=400, center_y=300, angle=i)
)
self.spritelist.append(Sprite(":resources:images/items/gold_1.png", center_x=400, center_y=300))
def on_draw(self):
self.clear()
self.ctx.projection_2d = self.proj
self.spritelist.draw()
def on_resize(self, width, height):
super().on_resize(width, height)
self.proj = self.ctx.projection_2d
def on_mouse_drag(self, x: float, y: float, dx: float, dy: float, buttons: int, modifiers: int):
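        # projection_2d is a (left, right, bottom, top) tuple; shifting the
        # whole window against the drag makes the sprites follow the mouse.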
self.proj = (
self.proj[0] - dx,
self.proj[1] - dx,
self.proj[2] - dy,
self.proj[3] - dy,
)
window = GeoCullingTest()
arcade.run()
|
6ddacbcd39e28ef1c85a9d2aab4c47f91a33feb6
|
52a32a93942b7923b7c0c6ca5a4d5930bbba384b
|
/dojo/tools/sonarqube/parser.py
|
d05c70d040b67592b34ba227e4d8ec1355a35c05
|
[
"MIT-open-group",
"GCC-exception-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LGPL-3.0-only",
"GPL-3.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-3.0-or-later",
"IJG",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"PSF-2.0",
"LicenseRef-scancode-python-cwi",
"GPL-2.0-or-later",
"HPND",
"libtiff",
"LGPL-2.1-or-later",
"EPL-2.0",
"GPL-3.0-only",
"MIT",
"BSD-3-Clause-Modification",
"LicenseRef-scancode-public-domain-disclaimer",
"HPND-Markus-Kuhn",
"CC-BY-SA-4.0",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"LicenseRef-scancode-openssl-exception-lgpl3.0plus",
"Libpng",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"BSD-Advertising-Acknowledgement",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"MIT-Modern-Variant",
"ISC",
"GPL-2.0-only",
"LicenseRef-scancode-xfree86-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown",
"BSD-2-Clause"
] |
permissive
|
DefectDojo/django-DefectDojo
|
43bfb1c728451335661dadc741be732a50cd2a12
|
b98093dcb966ffe972f8719337de2209bf3989ec
|
refs/heads/master
| 2023-08-21T13:42:07.238370
| 2023-08-14T18:00:34
| 2023-08-14T18:00:34
| 31,028,375
| 2,719
| 1,666
|
BSD-3-Clause
| 2023-09-14T19:46:49
| 2015-02-19T17:53:47
|
HTML
|
UTF-8
|
Python
| false
| false
| 9,664
|
py
|
parser.py
|
import logging
import re
from django.utils.html import strip_tags
from lxml import etree
from dojo.models import Finding
logger = logging.getLogger(__name__)
class SonarQubeParser(object):
mode = None
def set_mode(self, mode):
self.mode = mode
def get_scan_types(self):
return ["SonarQube Scan", "SonarQube Scan detailed"]
def get_label_for_scan_types(self, scan_type):
return scan_type # no custom label for now
def get_description_for_scan_types(self, scan_type):
if scan_type == "SonarQube Scan":
return "Aggregates findings per cwe, title, description, file_path. SonarQube output file can be imported in HTML format. Generate with https://github.com/soprasteria/sonar-report version >= 1.1.0"
else:
return "Import all findings from sonarqube html report. SonarQube output file can be imported in HTML format. Generate with https://github.com/soprasteria/sonar-report version >= 1.1.0"
def get_findings(self, filename, test):
parser = etree.HTMLParser()
tree = etree.parse(filename, parser)
if self.mode not in [None, "detailed"]:
raise ValueError(
"Internal error: Invalid mode "
+ self.mode
+ ". Expected: one of None, 'detailed'"
)
return self.get_items(tree, test, self.mode)
def get_items(self, tree, test, mode):
# Check that there is at least one vulnerability (the vulnerabilities
# table is absent when no vuln are found)
detailTbody = tree.xpath(
"/html/body/div[contains(@class,'detail')]/table/tbody"
)
dupes = dict()
if len(detailTbody) == 2:
# First is "Detail of the Detected Vulnerabilities" (not present if no vuln)
# Second is "Known Security Rules"
vulnerabilities_table = list(detailTbody[0].iter("tr"))
rules_table = list(detailTbody[1].xpath("tr"))
# iterate over the rules once to get the information we need
rulesDic = dict()
for rule in rules_table:
rule_properties = list(rule.iter("td"))
rule_name = list(rule_properties[0].iter("a"))[0].text
rule_details = list(rule_properties[1].iter("details"))[0]
rulesDic[rule_name] = rule_details
for vuln in vulnerabilities_table:
vuln_properties = list(vuln.iter("td"))
vuln_rule_name = list(vuln_properties[0].iter("a"))[0].text
vuln_severity = self.convert_sonar_severity(
vuln_properties[1].text
)
vuln_file_path = vuln_properties[2].text
vuln_line = vuln_properties[3].text
vuln_title = vuln_properties[4].text
vuln_mitigation = vuln_properties[5].text
vuln_key = vuln_properties[6].text
if vuln_title is None or vuln_mitigation is None:
raise ValueError(
"Parser ValueError: can't find a title or a mitigation for vulnerability of name "
+ vuln_rule_name
)
try:
vuln_details = rulesDic[vuln_rule_name]
vuln_description = self.get_description(vuln_details)
vuln_references = self.get_references(
vuln_rule_name, vuln_details
)
vuln_cwe = self.get_cwe(vuln_references)
except KeyError:
vuln_description = "No description provided"
vuln_references = ""
vuln_cwe = 0
if mode is None:
self.process_result_file_name_aggregated(
test,
dupes,
vuln_title,
vuln_cwe,
vuln_description,
vuln_file_path,
vuln_line,
vuln_severity,
vuln_mitigation,
vuln_references,
)
else:
self.process_result_detailed(
test,
dupes,
vuln_title,
vuln_cwe,
vuln_description,
vuln_file_path,
vuln_line,
vuln_severity,
vuln_mitigation,
vuln_references,
vuln_key,
)
return list(dupes.values())
# Process one vuln from the report for "SonarQube Scan detailed"
# Create the finding and add it into the dupes list
def process_result_detailed(
self,
test,
dupes,
vuln_title,
vuln_cwe,
vuln_description,
vuln_file_path,
vuln_line,
vuln_severity,
vuln_mitigation,
vuln_references,
vuln_key,
):
# vuln_key is the unique id from tool which means that there is
# basically no aggregation except real duplicates
aggregateKeys = "{}{}{}{}{}".format(
vuln_cwe, vuln_title, vuln_description, vuln_file_path, vuln_key
)
find = Finding(
title=vuln_title,
cwe=int(vuln_cwe),
description=vuln_description,
file_path=vuln_file_path,
line=vuln_line,
test=test,
severity=vuln_severity,
mitigation=vuln_mitigation,
references=vuln_references,
false_p=False,
duplicate=False,
out_of_scope=False,
mitigated=None,
impact="No impact provided",
static_finding=True,
dynamic_finding=False,
unique_id_from_tool=vuln_key,
)
dupes[aggregateKeys] = find
# Process one vuln from the report for "SonarQube Scan"
# Create the finding and add it into the dupes list
# For aggregated findings:
# - the description is enriched with each finding line number
# - the mitigation (message) is concatenated with each finding's mitigation value
def process_result_file_name_aggregated(
self,
test,
dupes,
vuln_title,
vuln_cwe,
vuln_description,
vuln_file_path,
vuln_line,
vuln_severity,
vuln_mitigation,
vuln_references,
):
aggregateKeys = "{}{}{}{}".format(
vuln_cwe, vuln_title, vuln_description, vuln_file_path
)
descriptionOneOccurence = "Line: {}".format(vuln_line)
if aggregateKeys not in dupes:
find = Finding(
title=vuln_title,
cwe=int(vuln_cwe),
description=vuln_description
+ "\n\n-----\nOccurences:\n"
+ descriptionOneOccurence,
file_path=vuln_file_path,
# No line number because we have aggregated different
# vulnerabilities that may have different line numbers
test=test,
severity=vuln_severity,
mitigation=vuln_mitigation,
references=vuln_references,
false_p=False,
duplicate=False,
out_of_scope=False,
mitigated=None,
impact="No impact provided",
static_finding=True,
dynamic_finding=False,
nb_occurences=1,
)
dupes[aggregateKeys] = find
else:
# We have already created a finding for this aggregate: updates the
# description, nb_occurences and mitigation (message field in the
# report which may vary for each vuln)
find = dupes[aggregateKeys]
find.description = "{}\n{}".format(
find.description, descriptionOneOccurence
)
find.mitigation = "{}\n______\n{}".format(
find.mitigation, vuln_mitigation
)
find.nb_occurences = find.nb_occurences + 1
def convert_sonar_severity(self, sonar_severity):
sev = sonar_severity.lower()
if sev == "blocker":
return "Critical"
elif sev == "critical":
return "High"
elif sev == "major":
return "Medium"
elif sev == "minor":
return "Low"
else:
return "Info"
def get_description(self, vuln_details):
rule_description = etree.tostring(
vuln_details, pretty_print=True
).decode("utf-8", errors="replace")
rule_description = rule_description.split("<h2>See", 1)[0]
        rule_description = rule_description.replace("<h2>", "**")
        rule_description = rule_description.replace("</h2>", "**")
rule_description = strip_tags(rule_description).strip()
return rule_description
def get_references(self, rule_name, vuln_details):
rule_references = rule_name
for a in vuln_details.iter("a"):
rule_references += "\n" + str(a.text)
return rule_references
def get_cwe(self, vuln_references):
# Match only the first CWE!
cweSearch = re.search("CWE-([0-9]*)", vuln_references, re.IGNORECASE)
if cweSearch:
return cweSearch.group(1)
else:
return 0
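# Editor's sketch (hypothetical reference text): get_cwe returns the first CWE
# id found as a string, or the int 0 when none is present.
#   get_cwe("rule\nCWE-89: SQL Injection\nCWE-79: XSS")  # -> "89"
#   get_cwe("rule with no CWE link")                     # -> 0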
|
0051cf3be124d1ee1dc79fe88753aad7716b59dc
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/logilab/logilab/common/__pkginfo__.py
|
55a2cc37e19a2da7369629921f6ffdbb699ae679
|
[
"GPL-2.0-only",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 1,945
|
py
|
__pkginfo__.py
|
# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""logilab.common packaging information"""
__docformat__ = "restructuredtext en"
import sys
import os
distname = 'logilab-common'
modname = 'common'
subpackage_of = 'logilab'
subpackage_master = True
numversion = (0, 63, 2)
version = '.'.join([str(num) for num in numversion])
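# e.g. numversion (0, 63, 2) -> version '0.63.2'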
license = 'LGPL' # 2.1 or later
description = "collection of low-level Python packages and modules used by Logilab projects"
web = "http://www.logilab.org/project/%s" % distname
mailinglist = "mailto://python-projects@lists.logilab.org"
author = "Logilab"
author_email = "contact@logilab.fr"
from os.path import join
scripts = [join('bin', 'pytest')]
include_dirs = [join('test', 'data')]
install_requires = [
'six >= 1.4.0',
]
test_require = ['pytz']
if sys.version_info < (2, 7):
install_requires.append('unittest2 >= 0.5.1')
if os.name == 'nt':
install_requires.append('colorama')
classifiers = ["Topic :: Utilities",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
]
|
ed08efd6498666c40c1050625bc3400210941436
|
97714d8e2b019d0b68be75ea9aaf1cba374a7ef2
|
/medallion/views/manifest.py
|
3536790427733f984285add1d6f510cd2b9c9a9a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
oasis-open/cti-taxii-server
|
ae96521d20ca10c27521d9664998ce17bba05b85
|
39e76bf18be5371e9570de7e5f340c3937b69c0d
|
refs/heads/master
| 2023-07-24T04:26:32.451890
| 2023-06-06T18:01:41
| 2023-06-06T18:01:41
| 99,020,025
| 101
| 83
|
BSD-3-Clause
| 2023-07-11T13:35:38
| 2017-08-01T16:08:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,610
|
py
|
manifest.py
|
from flask import Blueprint, Response, current_app, json, request
from . import MEDIA_TYPE_TAXII_V21, validate_version_parameter_in_accept_header
from .. import auth
from .discovery import api_root_exists
from .objects import (
collection_exists, permission_to_read, validate_limit_parameter
)
manifest_bp = Blueprint("manifest", __name__)
@manifest_bp.route("/<string:api_root>/collections/<string:collection_id>/manifest/", methods=["GET"])
@auth.login_required
def get_object_manifest(api_root, collection_id):
"""
Defines TAXII API - Collections:
Get Object Manifests section (5.3) `here <https://docs.oasis-open.org/cti/taxii/v2.1/cs01/taxii-v2.1-cs01.html#_Toc31107537>`__
Args:
api_root (str): the base URL of the API Root
collection_id (str): the `identifier` of the Collection being requested
Returns:
manifest: A Manifest Resource upon successful requests. Additional information
`here <https://docs.oasis-open.org/cti/taxii/v2.1/cs01/taxii-v2.1-cs01.html#_Toc31107538>`__.
"""
validate_version_parameter_in_accept_header()
api_root_exists(api_root)
collection_exists(api_root, collection_id)
permission_to_read(api_root, collection_id)
limit = validate_limit_parameter()
manifests, headers = current_app.medallion_backend.get_object_manifest(
api_root, collection_id, request.args.to_dict(), ("id", "type", "version", "spec_version"), limit
)
return Response(
response=json.dumps(manifests),
status=200,
headers=headers,
mimetype=MEDIA_TYPE_TAXII_V21,
)
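# A hedged client-side sketch (added; the host, credentials and collection id
# are hypothetical) showing how this route is typically exercised:
#
#     import requests
#     resp = requests.get(
#         "https://taxii.example.test/api1/collections/<collection-id>/manifest/",
#         headers={"Accept": "application/taxii+json;version=2.1"},
#         auth=("user", "pass"),
#     )
#     manifest = resp.json()  # a TAXII 2.1 manifest resource upon success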
|
69dd2e3176a59d4ce068c11633ba6f2d70f452c2
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractJeruTzsBlog.py
|
919d771b5c2159c276e184bd2bd44efecccb8e84
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 409
|
py
|
feed_parse_extractJeruTzsBlog.py
|
def extractJeruTzsBlog(item):
"""
JeruTz's Blog
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'Vandread' in item['tags'] and 'Extra Stage' in item['tags']:
return buildReleaseMessageWithType(item, "VANDREAD the Extra Stage", vol, chp, frag=frag, postfix=postfix)
return False
|
32b05e6fae1299c8307a9892253d599697a691b3
|
9ed3b16b3da72e4c47a04f2f2e3ef395e9fd9f20
|
/main/xdg-desktop-portal-gnome/template.py
|
0e8618e35693c49267f440ef86e82e45ef782f9a
|
[
"BSD-2-Clause"
] |
permissive
|
chimera-linux/cports
|
fdae59dc25856942be3041e10e3533dbf8f883c3
|
714680161cd719dd047452c95fbb9b447bc23a86
|
refs/heads/master
| 2023-09-03T19:30:40.720670
| 2023-09-03T15:07:40
| 2023-09-03T15:07:40
| 374,000,317
| 118
| 37
|
BSD-2-Clause
| 2023-09-14T20:31:08
| 2021-06-05T02:07:34
|
Python
|
UTF-8
|
Python
| false
| false
| 924
|
py
|
template.py
|
pkgname = "xdg-desktop-portal-gnome"
pkgver = "44.2"
pkgrel = 0
build_style = "meson"
configure_args = ["-Dsystemduserunitdir=/tmp/delete_me"]
hostmakedepends = ["meson", "pkgconf", "gettext", "glib-devel"]
makedepends = [
"xdg-desktop-portal-devel",
"libadwaita-devel",
"gsettings-desktop-schemas-devel",
"gnome-desktop-devel",
]
depends = ["xdg-desktop-portal-gtk", "dbus"]
pkgdesc = "Backend implementation for xdg-desktop-portal for GNOME"
maintainer = "eater <=@eater.me>"
license = "LGPL-2.1-or-later"
url = "https://gitlab.gnome.org/GNOME/xdg-desktop-portal-gnome"
source = f"https://download.gnome.org/sources/xdg-desktop-portal-gnome/{pkgver.split('.')[0]}/xdg-desktop-portal-gnome-{pkgver}.tar.xz"
sha256 = "50ed1aac290303ea0bc5c840a9c1165918546ab4a73a24c52dcaa27072b5ff21"
def post_install(self):
# systemd service destination dir
self.rm(self.destdir / "tmp/delete_me", recursive=True)
|
b4961935559a738faa13ee391629cf919b82b564
|
48d1002394d233cf5932c7ef69300400af79118a
|
/kivy/lib/__init__.py
|
f99c263eb6022d61576501efd192a3606eff55c9
|
[
"LGPL-2.1-only",
"MIT",
"Apache-2.0"
] |
permissive
|
kivy/kivy
|
ba2668bffe4e125fd1c5aace54f671343802850e
|
ca1b918c656f23e401707388f25f4a63d9b8ae7d
|
refs/heads/master
| 2023-09-04T02:27:05.311875
| 2023-08-26T08:00:20
| 2023-08-26T08:00:20
| 1,049,095
| 16,076
| 4,161
|
MIT
| 2023-09-09T07:55:18
| 2010-11-03T20:27:32
|
Python
|
UTF-8
|
Python
| false
| false
| 699
|
py
|
__init__.py
|
'''
External libraries
==================
Kivy comes with other python/C libraries:
* :mod:`~kivy.lib.ddsfile` - used for parsing and saving
`DDS <https://en.wikipedia.org/wiki/DirectDraw_Surface>`_ files.
* :mod:`~kivy.lib.osc` - a modified/optimized version of PyOSC for using
the `Open Sound Control <https://en.wikipedia.org/wiki/Open_Sound_Control>`_
protocol.
* :mod:`~kivy.lib.mtdev` - provides support for the
`Kernel multi-touch transformation library <https://launchpad.net/mtdev>`_.
.. warning::
Even though Kivy comes with these external libraries, we do not provide any
support for them and they might change in the future.
Don't rely on them in your code.
'''
|
cd5f690c4eb27f70357fe68dddd0af9e4174fdd7
|
72dcf0dc76ded35dfd7fa0561f26c4b165e6ed83
|
/vpython/qtbrowser.py
|
5a293b5e78c991dc9a8baba0868f29789618b1ee
|
[
"MIT"
] |
permissive
|
vpython/vpython-jupyter
|
69e6029b461d6eadb36670545e35a8719a5d033e
|
55aab58357a4ecbd1a6daced50bf548b859d3603
|
refs/heads/master
| 2023-06-23T01:55:38.455580
| 2023-06-08T23:22:32
| 2023-06-08T23:22:32
| 187,905,226
| 127
| 64
|
MIT
| 2023-06-08T23:22:33
| 2019-05-21T20:01:34
|
Python
|
UTF-8
|
Python
| false
| false
| 579
|
py
|
qtbrowser.py
|
import sys
import PyQt5.QtCore
import PyQt5.QtWebEngineWidgets
from PyQt5.QtWidgets import QApplication
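# Usage sketch (hypothetical invocation): python qtbrowser.py https://example.org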
if len(sys.argv) > 1 and sys.argv[1]:
    app = QApplication(sys.argv)
    web = PyQt5.QtWebEngineWidgets.QWebEngineView()
    web.load(PyQt5.QtCore.QUrl(sys.argv[1]))
    web.show()
    sys.exit(app.exec_())
else:
    print("Please give a URL as the first command-line argument "
          "when running the program.")
|
248202decb18c87d9246a582795018b7fc3b3d98
|
12fa6ac5fb9472acbe2eb1871d3bfaa893940335
|
/intermediate_source/spatial_transformer_tutorial.py
|
49b6b0f0a2b60a74bcf25c318e919ddef9b30956
|
[
"BSD-3-Clause"
] |
permissive
|
pytorch/tutorials
|
e6670dc301fd48edbca5a554df2af44e016712c7
|
32d834139b8627eeacb5fb2862be9f095fcb0b52
|
refs/heads/main
| 2023-08-31T19:27:17.448171
| 2023-08-28T15:45:25
| 2023-08-28T15:45:25
| 69,709,572
| 7,919
| 4,731
|
BSD-3-Clause
| 2023-09-14T17:25:49
| 2016-09-30T23:48:36
|
Python
|
UTF-8
|
Python
| false
| false
| 8,560
|
py
|
spatial_transformer_tutorial.py
|
# -*- coding: utf-8 -*-
"""
Spatial Transformer Networks Tutorial
=====================================
**Author**: `Ghassen HAMROUNI <https://github.com/GHamrouni>`_
.. figure:: /_static/img/stn/FSeq.png
In this tutorial, you will learn how to augment your network using
a visual attention mechanism called spatial transformer
networks. You can read more about the spatial transformer
networks in the `DeepMind paper <https://arxiv.org/abs/1506.02025>`__
Spatial transformer networks are a generalization of differentiable
attention to any spatial transformation. Spatial transformer networks
(STN for short) allow a neural network to learn how to perform spatial
transformations on the input image in order to enhance the geometric
invariance of the model.
For example, it can crop a region of interest, scale and correct
the orientation of an image. It can be a useful mechanism because CNNs
are not invariant to rotation and scale and more general affine
transformations.
One of the best things about STN is the ability to simply plug it into
any existing CNN with very little modification.
"""
# License: BSD
# Author: Ghassen Hamrouni
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
plt.ion() # interactive mode
######################################################################
# Loading the data
# ----------------
#
# In this tutorial we experiment with the classic MNIST dataset, using a
# standard convolutional network augmented with a spatial transformer
# network.
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Training dataset
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(root='.', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])), batch_size=64, shuffle=True, num_workers=4)
# Test dataset
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(root='.', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])), batch_size=64, shuffle=True, num_workers=4)
######################################################################
# Depicting spatial transformer networks
# --------------------------------------
#
# Spatial transformer networks boil down to three main components:
#
# - The localization network is a regular CNN which regresses the
#   transformation parameters. The transformation is never learned
#   explicitly from this dataset; instead, the network automatically learns
#   the spatial transformations that enhance the global accuracy.
# - The grid generator generates a grid of coordinates in the input
# image corresponding to each pixel from the output image.
# - The sampler uses the parameters of the transformation and applies
# it to the input image.
#
# .. figure:: /_static/img/stn/stn-arch.png
#
# .. Note::
#    We need a recent version of PyTorch that provides the
#    ``affine_grid`` and ``grid_sample`` functions.
#
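######################################################################
# As a quick added illustration (a sketch, separate from the model below):
# the grid generator and the sampler correspond directly to
# ``F.affine_grid`` and ``F.grid_sample``. With an identity 2 x 3 affine
# matrix, resampling reproduces the input batch.
theta_identity = torch.eye(2, 3).unsqueeze(0)  # identity transform, shape (1, 2, 3)
dummy_batch = torch.rand(1, 1, 28, 28)  # one fake MNIST-sized image
identity_grid = F.affine_grid(theta_identity, dummy_batch.size(), align_corners=False)
resampled = F.grid_sample(dummy_batch, identity_grid, align_corners=False)
assert torch.allclose(resampled, dummy_batch, atol=1e-5)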
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
# Spatial transformer localization-network
self.localization = nn.Sequential(
nn.Conv2d(1, 8, kernel_size=7),
nn.MaxPool2d(2, stride=2),
nn.ReLU(True),
nn.Conv2d(8, 10, kernel_size=5),
nn.MaxPool2d(2, stride=2),
nn.ReLU(True)
)
# Regressor for the 3 * 2 affine matrix
self.fc_loc = nn.Sequential(
nn.Linear(10 * 3 * 3, 32),
nn.ReLU(True),
nn.Linear(32, 3 * 2)
)
# Initialize the weights/bias with identity transformation
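        # (the identity 2x3 affine [[1, 0, 0], [0, 1, 0]] makes the STN start
        # as a no-op, so early training is stable while the regressor learns)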
self.fc_loc[2].weight.data.zero_()
self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))
# Spatial transformer network forward function
def stn(self, x):
xs = self.localization(x)
xs = xs.view(-1, 10 * 3 * 3)
theta = self.fc_loc(xs)
theta = theta.view(-1, 2, 3)
grid = F.affine_grid(theta, x.size())
x = F.grid_sample(x, grid)
return x
def forward(self, x):
# transform the input
x = self.stn(x)
# Perform the usual forward pass
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
model = Net().to(device)
######################################################################
# Training the model
# ------------------
#
# Now, let's use the SGD algorithm to train the model. The network is
# learning the classification task in a supervised way. At the same time,
# the model is learning the STN automatically in an end-to-end fashion.
optimizer = optim.SGD(model.parameters(), lr=0.01)
def train(epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 500 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
#
# A simple test procedure to measure the STN performances on MNIST.
#
def test():
with torch.no_grad():
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
# sum up batch loss
            test_loss += F.nll_loss(output, target, reduction='sum').item()
# get the index of the max log-probability
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'
.format(test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
######################################################################
# Visualizing the STN results
# ---------------------------
#
# Now, we will inspect the results of our learned visual attention
# mechanism.
#
# We define a small helper function in order to visualize the
# transformations while training.
def convert_image_np(inp):
"""Convert a Tensor to numpy image."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return inp
# To visualize the output of the spatial transformer layer after
# training, we display a batch of input images alongside the
# corresponding batch transformed by the STN.
def visualize_stn():
with torch.no_grad():
# Get a batch of training data
data = next(iter(test_loader))[0].to(device)
input_tensor = data.cpu()
transformed_input_tensor = model.stn(data).cpu()
in_grid = convert_image_np(
torchvision.utils.make_grid(input_tensor))
out_grid = convert_image_np(
torchvision.utils.make_grid(transformed_input_tensor))
# Plot the results side-by-side
f, axarr = plt.subplots(1, 2)
axarr[0].imshow(in_grid)
axarr[0].set_title('Dataset Images')
axarr[1].imshow(out_grid)
axarr[1].set_title('Transformed Images')
for epoch in range(1, 20 + 1):
train(epoch)
test()
# Visualize the STN transformation on some input batch
visualize_stn()
plt.ioff()
plt.show()
|
eea70bc887ea6241656ff760c5123faa58e12adf
|
f411bc79eabd7ed1ef6694ca14860a9cce78ac2e
|
/src/mstodo/handlers/due.py
|
edf196c1fde0d42a42f3b907d1a4b353d02e4688
|
[
"MIT"
] |
permissive
|
johandebeurs/alfred-mstodo-workflow
|
f9c1056a0902d6e219096f2156a39687fc483023
|
80aa4e0fdd54977abbd0278b221ddfdf94c9ddb7
|
refs/heads/master
| 2023-07-09T20:27:42.692927
| 2023-06-08T15:36:33
| 2023-06-08T15:36:33
| 247,481,243
| 125
| 5
|
MIT
| 2023-06-08T15:42:45
| 2020-03-15T14:21:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,598
|
py
|
due.py
|
# encoding: utf-8
from datetime import datetime, timedelta
import logging
from peewee import JOIN, OperationalError
from mstodo import icons
from mstodo.models.taskfolder import TaskFolder
from mstodo.models.preferences import Preferences
from mstodo.models.task import Task
from mstodo.sync import background_sync, background_sync_if_necessary
from mstodo.util import relaunch_alfred, wf_wrapper
log = logging.getLogger('mstodo')
_due_orders = (
{
'due_order': ['order', 'due_date', 'TaskFolder.id'],
'title': 'Most overdue within each folder',
'subtitle': 'Sort tasks by increasing due date within folders (Default)'
},
{
'due_order': ['order', '-due_date', 'TaskFolder.id'],
'title': 'Most recently due within each folder',
'subtitle': 'Sort tasks by decreasing due date within folders'
},
{
'due_order': ['order', 'due_date'],
'title': 'Most overdue at the top',
'subtitle': 'All tasks sorted by increasing due date'
},
{
'due_order': ['order', '-due_date'],
'title': 'Most recently due at the top',
'subtitle': 'All tasks sorted by decreasing due date'
}
)
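# A leading '-' on a sort key means descending order; filter() below maps each
# key onto the corresponding peewee model field.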
def filter(args):
wf = wf_wrapper()
prefs = Preferences.current_prefs()
command = args[1] if len(args) > 1 else None
# Show sort options
if command == 'sort':
for i, order_info in enumerate(_due_orders):
wf.add_item(order_info['title'], order_info['subtitle'], arg='-due sort %d' % (i + 1), valid=True,
icon=icons.RADIO_SELECTED if order_info['due_order'] == prefs.due_order else icons.RADIO)
wf.add_item('Highlight skipped recurring tasks',
'Hoists recurring tasks that have been missed multiple times over to the top',
arg='-due sort toggle-skipped', valid=True,
icon=icons.CHECKBOX_SELECTED if prefs.hoist_skipped_tasks else icons.CHECKBOX)
wf.add_item('Back', autocomplete='-due ', icon=icons.BACK)
return
background_sync_if_necessary()
    conditions = True
    # Build the task title/folder filter from the remaining args
    for arg in args[1:]:
        if len(arg) > 1:
            conditions = conditions & (Task.title.contains(arg) | TaskFolder.title.contains(arg))
tasks = Task.select().join(TaskFolder).where(
(Task.status != 'completed') &
(Task.dueDateTime < datetime.now() + timedelta(days=1)) &
Task.list.is_null(False) &
conditions
)
# Sort the tasks according to user preference
for key in prefs.due_order:
order = 'asc'
field = None
if key[0] == '-':
order = 'desc'
key = key[1:]
if key == 'due_date':
field = Task.dueDateTime
        elif key.lower() == 'taskfolder.id':
field = TaskFolder.id
elif key == 'order':
field = Task.lastModifiedDateTime
if field:
if order == 'asc':
tasks = tasks.order_by(field.asc())
else:
tasks = tasks.order_by(field.desc())
try:
if prefs.hoist_skipped_tasks:
log.debug('hoisting skipped tasks')
tasks = sorted(tasks, key=lambda t: -t.overdue_times)
for task in tasks:
wf.add_item(
f"{task.list_title} – {task.title}", task.subtitle(), autocomplete=f"-task {task.id} ",
icon=icons.TASK_COMPLETED if task.status == 'completed' else icons.TASK
)
except OperationalError:
background_sync()
wf.add_item(
'Sort order', 'Change the display order of due tasks',
autocomplete='-due sort', icon=icons.SORT
)
wf.add_item('Main menu', autocomplete='', icon=icons.BACK)
def commit(args, modifier=None):
action = args[1]
prefs = Preferences.current_prefs()
relaunch_command = None
if action == 'sort' and len(args) > 2:
command = args[2]
if command == 'toggle-skipped':
prefs.hoist_skipped_tasks = not prefs.hoist_skipped_tasks
relaunch_command = 'td-due sort'
else:
try:
index = int(command)
order_info = _due_orders[index - 1]
prefs.due_order = order_info['due_order']
relaunch_command = 'td-due '
except IndexError:
pass
except ValueError:
pass
if relaunch_command:
relaunch_alfred(relaunch_command)
|
bc657269b8abe459521a879eeb75bc5809f845b9
|
74965e70e72ec9f198c2b63120a368ead1e6c113
|
/neurokit2/misc/find_outliers.py
|
fb0d22a89c587a391d38cffa3a5accbfb45cd458
|
[
"MIT"
] |
permissive
|
neuropsychology/NeuroKit
|
ceef2128296431d417bffee9a5dc177bd7b345f0
|
366583e1e22a4ea06a18e5e162675c2a33334a19
|
refs/heads/master
| 2023-08-24T02:33:31.035399
| 2023-07-09T07:03:43
| 2023-07-09T07:03:43
| 218,212,111
| 1,120
| 358
|
MIT
| 2023-09-12T10:11:39
| 2019-10-29T05:39:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,051
|
py
|
find_outliers.py
|
import numpy as np
import scipy
from ..stats import standardize
def find_outliers(data, exclude=2, side="both", method="sd", **kwargs):
"""**Identify outliers (abnormal values)**
Extreme values identification using different methods, such as:
* **sd**: Data is :func:`standardized <.standardize>`, i.e., centered and
scaled, and absolute value beyond a certain SD threshold are considered as outliers.
* **norm**: Extreme values identified using theoretical percentiles to identify outliers
beyond a certain theoretical percentile (assuming the data comes from a normal distribution).
For example, with this method, ``exclude=0.025`` (one-sided) corresponds to the 2.5% lower
bound of the normal distribution, which corresponds to approx. -1.96 SD. This method is
related to the **SD** one, but instead of specifying the threshold in SDs, it is specified in
percentiles.
* **percentile**: Extreme values identified using percentiles.
Parameters
----------
data : list or ndarray
Data array
exclude : int, float
Amount of outliers to detect (depends on the chosen method).
side: str
Can be ``"both"``, ``"left"`` or ``"right"``. If ``exclude=0.05`` and ``side="both"`` and
``method="norm"``, 2.5% of extreme observation of each side will be marked as outliers.
method: str
        Can be ``"sd"``, ``"norm"`` or ``"percentile"``. The default is ``"sd"``.
**kwargs : optional
Other arguments to be passed to :func:`standardize`.
Returns
----------
outliers : ndarray
        A boolean vector in which ``True`` marks the outliers.
See Also
----------
.standardize
Example
----------
.. ipython:: python
import neurokit2 as nk
data = [-12, 2, 1, 3, 66.6, 2, 1, 3, 2, -42, 2, 4, 1, 12]
# Outliers beyond 2 SD of the mean
outliers = nk.find_outliers(data, exclude=2, side="both", method="sd")
np.where(outliers)[0]
# Outliers beyond 1 MAD of the Median on one side
outliers = nk.find_outliers(data, exclude=1, side="left", method="sd", robust=True)
np.where(outliers)[0]
# 2.5% theoretical percentiles on each side
outliers = nk.find_outliers(data, exclude=0.05, method="norm")
np.where(outliers)[0]
# Outliers are beyond interquartile range
outliers = nk.find_outliers(data, exclude=(0.25, 0.75), method="percentile")
np.where(outliers)[0]
# Outliers are beyond interdecile range
outliers = nk.find_outliers(data, exclude=(0.1, 0.9), method="percentile")
np.where(outliers)[0]
"""
# Sanity checks
if side not in ["both", "left", "right"]:
raise ValueError("side must be 'both', 'left' or 'right'.")
method = method.lower()
if method not in ["standardize", "z", "sd", "percentile", "norm"]:
        raise ValueError("method must be one of 'standardize', 'z', 'sd', 'norm' or 'percentile'.")
# Force array
data = np.array(data)
# Find thresholds
if method in ["percentile"]:
if isinstance(exclude, (list, tuple, np.ndarray)):
right = np.percentile(data, exclude[1] * 100)
left = np.percentile(data, exclude[0] * 100)
else:
right = np.percentile(data, (1 - (exclude / 2)) * 100)
left = np.percentile(data, (exclude / 2) * 100)
elif method in ["sd"]:
if isinstance(exclude, (list, tuple, np.ndarray)):
right = exclude[1]
left = exclude[0]
else:
right = exclude
left = -right
else:
if side == "both":
exclude = exclude / 2
right = scipy.stats.norm.ppf(1 - exclude)
left = -right
if method in ["standardize", "z", "sd", "norm"]:
data = np.array(standardize(data, **kwargs))
if side == "both":
outliers = (data < left) | (data > right)
elif side == "left":
outliers = data < left
elif side == "right":
outliers = data > right
return outliers
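# A worked note (added; not part of the original module): with
# ``method="norm"`` and ``side="both"``, ``exclude=0.05`` places the cut-off
# at scipy.stats.norm.ppf(1 - 0.05 / 2) ~= 1.96, so standardized values
# beyond roughly +/- 1.96 SD are flagged as outliers.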
|