code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from __future__ import annotations
import sys
from typing import Final
if sys.version_info >= (3, 11):
from enum import StrEnum
else:
from backports.strenum import StrEnum
# Property names available from a live charging-session state query;
# used to build the GraphQL selection set for live-session subscriptions.
LIVE_SESSION_PROPERTIES: Final[set[str]] = {
    "chargerId",
    "current",
    "currentCurrency",
    "currentMiles",
    "currentPrice",
    "isFreeSession",
    "isRivianCharger",
    "kilometersChargedPerHour",
    "locationId",
    "power",
    "rangeAddedThisSession",
    "soc",
    "startTime",
    "timeElapsed",
    "timeRemaining",
    "totalChargedEnergy",
    "vehicleChargerState",
}
# Property names available from a vehicle state query/subscription,
# grouped by the GraphQL type that wraps each field.
VEHICLE_STATE_PROPERTIES: Final[set[str]] = {
    # VehicleCloudConnection
    "cloudConnection",
    # VehicleLocation
    "gnssLocation",
    # TimeStamped(String|Float|Int)
    "alarmSoundStatus",
    "batteryHvThermalEvent",
    "batteryHvThermalEventPropagation",
    "batteryCapacity",
    "batteryLevel",
    "batteryLimit",
    "brakeFluidLow",
    "cabinClimateDriverTemperature",
    "cabinClimateInteriorTemperature",
    "cabinPreconditioningStatus",
    "cabinPreconditioningType",
    "chargerDerateStatus",
    "chargerState",
    "chargerStatus",
    "closureFrunkClosed",
    "closureFrunkLocked",
    "closureFrunkNextAction",
    "closureLiftgateClosed",
    "closureLiftgateLocked",
    "closureLiftgateNextAction",
    "closureSideBinLeftClosed",
    "closureSideBinLeftLocked",
    "closureSideBinRightClosed",
    "closureSideBinRightLocked",
    "closureTailgateClosed",
    "closureTailgateLocked",
    "closureTonneauClosed",
    "closureTonneauLocked",
    "defrostDefogStatus",
    "distanceToEmpty",
    "doorFrontLeftClosed",
    "doorFrontLeftLocked",
    "doorFrontRightClosed",
    "doorFrontRightLocked",
    "doorRearLeftClosed",
    "doorRearLeftLocked",
    "doorRearRightClosed",
    "doorRearRightLocked",
    "driveMode",
    "gearGuardLocked",
    "gearGuardVideoMode",
    "gearGuardVideoStatus",
    "gearGuardVideoTermsAccepted",
    "gearStatus",
    "gnssBearing",
    "gnssSpeed",
    "otaAvailableVersion",
    "otaAvailableVersionGitHash",
    "otaAvailableVersionNumber",
    "otaAvailableVersionWeek",
    "otaAvailableVersionYear",
    "otaCurrentStatus",
    "otaCurrentVersion",
    "otaCurrentVersionGitHash",
    "otaCurrentVersionNumber",
    "otaCurrentVersionWeek",
    "otaCurrentVersionYear",
    "otaDownloadProgress",
    "otaInstallDuration",
    "otaInstallProgress",
    "otaInstallReady",
    "otaInstallTime",
    "otaInstallType",
    "otaStatus",
    "petModeStatus",
    "petModeTemperatureStatus",
    "powerState",
    "rangeThreshold",
    "remoteChargingAvailable",
    "seatFrontLeftHeat",
    "seatFrontLeftVent",
    "seatFrontRightHeat",
    "seatFrontRightVent",
    "seatRearLeftHeat",
    "seatRearRightHeat",
    "seatThirdRowLeftHeat",
    "seatThirdRowRightHeat",
    "serviceMode",
    "steeringWheelHeat",
    "timeToEndOfCharge",
    "tirePressureStatusFrontLeft",
    "tirePressureStatusFrontRight",
    "tirePressureStatusRearLeft",
    "tirePressureStatusRearRight",
    "tirePressureStatusValidFrontLeft",
    "tirePressureStatusValidFrontRight",
    "tirePressureStatusValidRearLeft",
    "tirePressureStatusValidRearRight",
    "vehicleMileage",
    "windowFrontLeftCalibrated",
    "windowFrontLeftClosed",
    "windowFrontRightCalibrated",
    "windowFrontRightClosed",
    "windowRearLeftCalibrated",
    "windowRearLeftClosed",
    "windowRearRightCalibrated",
    "windowRearRightClosed",
    "wiperFluidState",
}
class VehicleCommand(StrEnum):
    """Supported vehicle commands.

    Member values are the literal command identifiers sent to the API;
    as a StrEnum, each member compares equal to its string value.
    """

    WAKE_VEHICLE = "WAKE_VEHICLE"
    HONK_AND_FLASH_LIGHTS = "HONK_AND_FLASH_LIGHTS"
    UNLOCK_USER_PREFERENCES_AND_DISABLE_ALARM = (
        "UNLOCK_USER_PREFERENCES_AND_DISABLE_ALARM"
    )
    # Charging
    CHARGING_LIMITS = "CHARGING_LIMITS"
    START_CHARGING = "START_CHARGING"
    STOP_CHARGING = "STOP_CHARGING"
    # Climate
    CABIN_HVAC_DEFROST_DEFOG = "CABIN_HVAC_DEFROST_DEFOG"
    CABIN_HVAC_LEFT_SEAT_HEAT = "CABIN_HVAC_LEFT_SEAT_HEAT"
    CABIN_HVAC_LEFT_SEAT_VENT = "CABIN_HVAC_LEFT_SEAT_VENT"
    CABIN_HVAC_REAR_LEFT_SEAT_HEAT = "CABIN_HVAC_REAR_LEFT_SEAT_HEAT"
    CABIN_HVAC_REAR_RIGHT_SEAT_HEAT = "CABIN_HVAC_REAR_RIGHT_SEAT_HEAT"
    CABIN_HVAC_RIGHT_SEAT_HEAT = "CABIN_HVAC_RIGHT_SEAT_HEAT"
    CABIN_HVAC_RIGHT_SEAT_VENT = "CABIN_HVAC_RIGHT_SEAT_VENT"
    CABIN_HVAC_STEERING_HEAT = "CABIN_HVAC_STEERING_HEAT"
    CABIN_PRECONDITIONING_SET_TEMP = "CABIN_PRECONDITIONING_SET_TEMP"
    VEHICLE_CABIN_PRECONDITION_DISABLE = "VEHICLE_CABIN_PRECONDITION_DISABLE"
    VEHICLE_CABIN_PRECONDITION_ENABLE = "VEHICLE_CABIN_PRECONDITION_ENABLE"
    # Closures
    LOCK_ALL_CLOSURES_FEEDBACK = "LOCK_ALL_CLOSURES_FEEDBACK"
    UNLOCK_ALL_CLOSURES = "UNLOCK_ALL_CLOSURES"
    UNLOCK_DRIVER_DOOR = "UNLOCK_DRIVER_DOOR"
    UNLOCK_PASSENGER_DOOR = "UNLOCK_PASSENGER_DOOR"
    # Frunk
    CLOSE_FRUNK = "CLOSE_FRUNK"
    OPEN_FRUNK = "OPEN_FRUNK"
    # Gear guard
    ENABLE_GEAR_GUARD = "ENABLE_GEAR_GUARD"
    ENABLE_GEAR_GUARD_VIDEO = "ENABLE_GEAR_GUARD_VIDEO"
    DISABLE_GEAR_GUARD = "DISABLE_GEAR_GUARD"
    DISABLE_GEAR_GUARD_VIDEO = "DISABLE_GEAR_GUARD_VIDEO"
    # Liftgate (R1S only)
    CLOSE_LIFTGATE = "CLOSE_LIFTGATE"
    # Liftgate/tailgate
    OPEN_LIFTGATE_UNLATCH_TAILGATE = "OPEN_LIFTGATE_UNLATCH_TAILGATE"
    # OTA
    OTA_INSTALL_NOW_ACKNOWLEDGE = "OTA_INSTALL_NOW_ACKNOWLEDGE"
    # Panic
    PANIC_OFF = "PANIC_OFF"
    PANIC_ON = "PANIC_ON"
    # Side bin (R1T only)
    RELEASE_LEFT_SIDE_BIN = "RELEASE_LEFT_SIDE_BIN"
    RELEASE_RIGHT_SIDE_BIN = "RELEASE_RIGHT_SIDE_BIN"
    # Tonneau (Only for R1T with powered tonneau)
    CLOSE_TONNEAU_COVER = "CLOSE_TONNEAU_COVER"
    OPEN_TONNEAU_COVER = "OPEN_TONNEAU_COVER"
    # Windows
    CLOSE_ALL_WINDOWS = "CLOSE_ALL_WINDOWS"
    OPEN_ALL_WINDOWS = "OPEN_ALL_WINDOWS"
    UNLOCK_ALL_AND_OPEN_WINDOWS = "UNLOCK_ALL_AND_OPEN_WINDOWS" | /rivian_python_client-1.0.4.tar.gz/rivian_python_client-1.0.4/src/rivian/const.py | 0.527803 | 0.230238 | const.py | pypi
from __future__ import annotations
import asyncio
import logging
from collections.abc import Awaitable, Callable
from datetime import datetime, timezone
from json import loads
from random import uniform
from typing import TYPE_CHECKING, Any
from uuid import uuid4
import async_timeout
from aiohttp import ClientWebSocketResponse, WSMessage, WSMsgType
if TYPE_CHECKING:
from .rivian import Rivian
_LOGGER = logging.getLogger(__name__)
async def cancel_task(*tasks: asyncio.Task | None) -> None:
    """Cancel each pending task and wait for its cancellation to settle.

    `None` entries and tasks that already finished are skipped; the
    resulting CancelledError is swallowed.
    """
    for task in tasks:
        if task is None or task.done():
            continue
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass
class WebSocketMonitor:
    """Web socket monitor for a vehicle.

    Maintains a "graphql-transport-ws" subscription connection: opens the
    socket, tracks the server's connection acknowledgement, restarts the
    connection with capped exponential backoff when it drops, and replays
    all active subscriptions after every reconnect.
    """

    def __init__(
        self,
        account: Rivian,
        url: str,
        connection_init: Callable[[ClientWebSocketResponse], Awaitable[None]],
    ) -> None:
        """Initialize a web socket monitor.

        Args:
            account: Rivian client whose aiohttp session is used to connect.
            url: Web socket endpoint URL.
            connection_init: Coroutine that performs the protocol
                "connection_init" handshake on a freshly opened socket.
        """
        self._account = account
        self._url = url
        self._connection_init = connection_init
        # Set when the server sends "connection_ack"; cleared when the
        # receiver loop exits.
        self._connection_ack: asyncio.Event = asyncio.Event()
        self._disconnect = False
        self._ws: ClientWebSocketResponse | None = None
        self._monitor_task: asyncio.Task | None = None
        self._receiver_task: asyncio.Task | None = None
        self._last_received: datetime | None = None
        # subscription id -> (message callback, subscribe payload); the
        # payload is retained so subscriptions can be replayed on reconnect.
        self._subscriptions: dict[
            str, tuple[Callable[[dict[str, Any]], None], dict[str, Any]]
        ] = {}

    @property
    def connected(self) -> bool:
        """Return `True` if the web socket is connected."""
        if self._disconnect:
            return False
        return False if self._ws is None else not self._ws.closed

    @property
    def connection_ack(self) -> asyncio.Event:
        """Return the event that is set once the connection is authenticated and acknowledged."""
        return self._connection_ack

    @property
    def websocket(self) -> ClientWebSocketResponse | None:
        """Return the web socket."""
        return self._ws

    @property
    def monitor(self) -> asyncio.Task | None:
        """Return the monitor task."""
        return self._monitor_task

    async def new_connection(self, start_monitor: bool = False) -> None:
        """Create a new connection and, optionally, start the monitor."""
        # Stop any receiver still draining the previous socket.
        await cancel_task(self._receiver_task)
        self._disconnect = False
        # pylint: disable=protected-access
        assert self._account._session
        self._ws = await self._account._session.ws_connect(
            url=self._url, headers={"sec-websocket-protocol": "graphql-transport-ws"}
        )
        await self._connection_init(self._ws)
        self._receiver_task = asyncio.ensure_future(self._receiver())
        if start_monitor:
            await self.start_monitor()

    async def start_subscription(
        self, payload: dict[str, Any], callback: Callable[[dict[str, Any]], None]
    ) -> Callable[[], Awaitable[None]] | None:
        """Start a subscription.

        Returns an async "unsubscribe" callable, or `None` when the socket
        is not currently connected.
        """
        if not self.connected:
            return None
        _id = str(uuid4())
        self._subscriptions[_id] = (callback, payload)
        await self._subscribe(_id, payload)

        async def unsubscribe() -> None:
            """Unsubscribe."""
            if _id in self._subscriptions:
                del self._subscriptions[_id]
            if self.connected:
                assert self._ws
                # "complete" tells the server to stop this subscription.
                await self._ws.send_json({"id": _id, "type": "complete"})

        return unsubscribe

    async def _subscribe(self, _id: str, payload: dict[str, Any]) -> None:
        """Send a subscribe request."""
        assert self._ws
        await self._ws.send_json({"id": _id, "payload": payload, "type": "subscribe"})

    async def _resubscribe_all(self) -> None:
        """Resubscribe all subscriptions."""
        try:
            # Wait until the (re)connected socket has been acknowledged.
            async with async_timeout.timeout(self._account.request_timeout):
                await self.connection_ack.wait()
        except asyncio.TimeoutError:
            _LOGGER.error("A timeout occurred while attempting to resubscribe")
            return
        for _id, (_, payload) in self._subscriptions.items():
            await self._subscribe(_id, payload)

    async def _receiver(self) -> None:
        """Receive a message from a web socket."""
        if not (websocket := self._ws):
            return
        while not websocket.closed:
            try:
                msg = await websocket.receive(timeout=60)
                if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED):
                    self._log_message(msg)
                    if msg.extra == "Unauthenticated":
                        # Credentials rejected: stop the monitor reconnecting.
                        self._disconnect = True
                    break
                self._last_received = datetime.now(timezone.utc)
                if msg.type == WSMsgType.TEXT:
                    data = loads(msg.data)
                    if (data_type := data.get("type")) == "connection_ack":
                        self._connection_ack.set()
                    elif data_type == "next":
                        # Route the data frame to its subscription callback.
                        if (_id := data.get("id")) in self._subscriptions:
                            self._subscriptions[_id][0](data)
                    else:
                        self._log_message(msg)
                elif msg.type == WSMsgType.ERROR:
                    self._log_message(msg, True)
                    continue
            except asyncio.TimeoutError:
                # No frame for 60s: re-send subscriptions as a keep-alive.
                await self._resubscribe_all()
        self._connection_ack.clear()
        self._log_message("web socket stopped")

    async def _monitor(self) -> None:
        """Monitor a web socket connection."""
        attempt = 0
        while not self._disconnect:
            while self.connected:
                if self._receiver_task and self._receiver_task.done():
                    # Need to restart the receiver
                    self._receiver_task = asyncio.ensure_future(self._receiver())
                await asyncio.sleep(1)
            if not self._disconnect:
                try:
                    await self.new_connection()
                except Exception as ex:  # pylint: disable=broad-except
                    self._log_message(ex, True)
                if not self._ws or self._ws.closed:
                    # Reconnect failed: exponential backoff with jitter,
                    # capped at 300 seconds.
                    await asyncio.sleep(min(1 * 2**attempt + uniform(0, 1), 300))
                    attempt += 1
                    continue
                attempt = 0
                self._log_message("web socket connection reopened")
                await self._resubscribe_all()

    async def start_monitor(self) -> None:
        """Start or restart the monitor task."""
        if self._monitor_task is None or self._monitor_task.done():
            self._monitor_task = asyncio.ensure_future(self._monitor())

    async def stop_monitor(self) -> None:
        """Stop the monitor task."""
        await cancel_task(self._monitor_task)

    async def close(self) -> None:
        """Close the web socket."""
        self._disconnect = True
        if self._ws:
            await self._ws.close()
        await cancel_task(self._monitor_task, self._receiver_task)

    def _log_message(
        self, message: str | Exception | WSMessage, is_error: bool = False
    ) -> None:
        """Log a message."""
        # Errors log at ERROR level; everything else at DEBUG.
        log_method = _LOGGER.error if is_error else _LOGGER.debug
        log_method(message) | /rivian_python_client-1.0.4.tar.gz/rivian_python_client-1.0.4/src/rivian/ws_monitor.py | 0.812942 | 0.153296 | ws_monitor.py | pypi
from __future__ import annotations
import hashlib
import hmac
from base64 import b64decode, b64encode
from typing import cast
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
def base64_encode(data: bytes) -> str:
    """Return *data* encoded as a Base64 ASCII string."""
    encoded = b64encode(data)
    return encoded.decode("utf-8")
def decode_private_key(private_key_str: str) -> ec.EllipticCurvePrivateKey:
    """Decode a Base64-wrapped PEM (PKCS#8) EC private key."""
    pem_bytes = b64decode(private_key_str)
    loaded = serialization.load_pem_private_key(pem_bytes, password=None)
    return cast(ec.EllipticCurvePrivateKey, loaded)
def decode_public_key(public_key_str: str) -> ec.EllipticCurvePublicKey:
    """Decode an EC public key.

    `public_key_str` is the hex encoding of an X9.62 uncompressed point on
    the SECP256R1 curve, as produced by `encode_public_key`.
    """
    return ec.EllipticCurvePublicKey.from_encoded_point(
        ec.SECP256R1(), bytes.fromhex(public_key_str)
    )
def encode_private_key(private_key: ec.EllipticCurvePrivateKey) -> str:
    """Encode an EC private key as a Base64-wrapped PKCS#8 PEM string."""
    return base64_encode(
        private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            # Stored unencrypted; callers are responsible for keeping it secret.
            encryption_algorithm=serialization.NoEncryption(),
        )
    )
def encode_public_key(public_key: ec.EllipticCurvePublicKey) -> str:
    """Encode an EC public key as the hex of its X9.62 uncompressed point."""
    return public_key.public_bytes(
        encoding=serialization.Encoding.X962,
        format=serialization.PublicFormat.UncompressedPoint,
    ).hex()
def generate_key_pair() -> tuple[str, str]:
    """Generate an ECDH public-private key pair on SECP256R1.

    Returns `(public_key_str, private_key_str)` in the serialized string
    formats used throughout this module.

    Copied from https://rivian-api.kaedenb.org/app/controls/enroll-phone/
    """
    private_key = ec.generate_private_key(ec.SECP256R1())
    public_key = private_key.public_key()
    # Serialize both halves in the module's standard string formats.
    return (encode_public_key(public_key), encode_private_key(private_key))
def generate_vehicle_command_hmac(
    command: str, timestamp: str, vehicle_key: str, private_key: str
) -> str:
    """Generate vehicle command hmac.

    Derives a shared secret from the phone's `private_key` and the
    vehicle's public `vehicle_key`, then returns the hex HMAC-SHA256
    signature of `command + timestamp`.
    """
    message = (command + timestamp).encode("utf-8")
    secret_key = get_secret_key(private_key, vehicle_key)
    return get_message_signature(secret_key, message)
def get_message_signature(secret_key: bytes, message: bytes) -> str:
    """Return the hex HMAC-SHA256 digest of *message* under *secret_key*."""
    digest = hmac.new(secret_key, message, hashlib.sha256)
    return digest.hexdigest()
def get_secret_key(private_key_str: str, public_key_str: str) -> bytes:
    """Get HKDF-derived secret key from private/public key pair."""
    private_key = decode_private_key(private_key_str)
    public_key = decode_public_key(public_key_str)
    # ECDH shared secret, stretched to 32 bytes with HKDF-SHA256.
    secret = private_key.exchange(ec.ECDH(), public_key)
    hkdf = HKDF(algorithm=hashes.SHA256(), length=32, salt=None, info=b"")
    return hkdf.derive(secret) | /rivian_python_client-1.0.4.tar.gz/rivian_python_client-1.0.4/src/rivian/utils.py | 0.931392 | 0.193967 | utils.py | pypi
import re
from rivr.http import Http404
from rivr_rest.resource import Resource
# Matches a simple URI Template variable such as "{id}".
EXTRACT_VARIABLE_REGEX = re.compile(r'\{([\w]+)\}')


def extract(uri_template, uri):
    """
    Reverse URI Template implementation.

    Returns a dict mapping template variable names to the values found in
    `uri`, `{}` when `uri` equals the template exactly, or `None` when the
    URI does not match the template.

    Note, only simple variable templates are currently supported.
    """
    if uri == uri_template:
        return {}

    # re.escape() escapes "{" and "}"; un-escape them so the variable
    # markers can still be found by EXTRACT_VARIABLE_REGEX. Raw strings
    # avoid the invalid "\{" escape sequences the original used.
    escaped_uri_template = re.escape(uri_template).replace(r'\{', '{').replace(r'\}', '}')

    def replace(match):
        # Turn "{name}" into a named capturing group.
        return r'(?P<{}>[\w]+)'.format(match.group(1))

    pattern = '^{}$'.format(
        re.sub(EXTRACT_VARIABLE_REGEX, replace, escaped_uri_template))

    match = re.match(pattern, uri)
    if match:
        return match.groupdict()

    return None
class RootResource(Resource):
    """Resource served at "/" that exposes the router's root relations."""

    uri_template = '/'

    def __init__(self, resources):
        self.resources = resources

    def get_relations(self):
        """Return the relation map, instantiating any resource classes."""
        relations = {}
        for name, resource in self.resources.items():
            # Accept either a Resource instance or a Resource subclass.
            relations[name] = resource() if isinstance(resource, type) else resource
        return relations

    def can_embed(self, relation):
        # The root document only links to its relations; never embed them.
        return False
class Router(object):
    """Dispatches requests to registered resources by URI template."""

    # Resource class used to serve the "/" root document.
    root_resource = RootResource

    def __init__(self, *resources):
        self.root_resources = {}
        self.resources = list(resources)

    def register(self, resource):
        """Add a resource to the routing table."""
        self.resources.append(resource)

    def add_root_resource(self, relation, resource):
        """Expose `resource` under `relation` in the "/" root document."""
        self.root_resources[relation] = resource

    def resolve(self, path):
        """Return (resource, uri parameters) for `path`, or None.

        Resources are tried in registration order; first match wins.
        """
        for resource in self.resources:
            parameters = extract(resource.uri_template, path)
            if parameters is not None:
                return (resource, parameters)
        return None

    def __call__(self, request):
        """Handle a request, raising Http404 when no resource matches."""
        if request.path == '/' and len(self.root_resources) > 0:
            resource = self.root_resource(resources=self.root_resources)
            return resource.dispatch(request)
        match = self.resolve(request.path)
        if not match:
            raise Http404
        resource, parameters = match
        return resource(parameters=parameters).dispatch(request) | /rivr_rest-0.1.0.tar.gz/rivr-rest-0.1.0/rivr_rest/router.py | 0.514644 | 0.248956 | router.py | pypi
<!--
Copyright (c) 2016, RivuletStudio, The University of Sydney, AU
All rights reserved.
This file is part of Rivuletpy <https://github.com/RivuletStudio/rivuletpy>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-->
# Rivuletpy
## Example Neuron Tracings

## Example Lung Airway Tracing

## Rivuletpy == Rivulet2
Rivuletpy is a Python3 toolkit for automatically reconstructing single neuron models from 3D microscopic image stacks & other tree structures from 3D medical images.
It is actively maintained and being used in industry scale image analysis applications.
The project was initiated in the [BigNeuron project](https://alleninstitute.org/bigneuron/about/)
The `rtrace` command is powered by the Rivulet2 algorithm published in IEEE Trans. TMI:
[1] S. Liu, D. Zhang, Y. Song, H. Peng and W. Cai, "Automated 3D Neuron Tracing with Precise Branch Erasing and Confidence Controlled Back-Tracking," in IEEE Transactions on Medical Imaging. URL: <http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354803&isnumber=4359023>
PDF [https://www.biorxiv.org/content/biorxiv/early/2017/11/27/109892.full.pdf]
The predecessor Rivulet1 was published on Neuroinformatics:
[2] Siqi Liu, Donghao Zhang, Sidong Liu, Dagan Feng, Hanchuan Peng, Weidong Cai,
"Rivulet: 3D Neuron Morphology Tracing with Iterative Back-Tracking",
Neuroinformatics, Vol.14, Issue 4, pp387-401, 2016.
A C++ implementation of the Rivulet2 algorithm is also available in the lastest [Vaa3D](https://github.com/Vaa3D) sources under the [Rivulet Plugin](https://github.com/Vaa3D/vaa3d_tools/tree/master/released_plugins/v3d_plugins/bigneuron_siqi_rivuletv3d) (Not yet available in the released build). However you can build Vaa3D easily on Mac/Linux following the [Vaa3D wiki](https://github.com/Vaa3D/Vaa3D_Wiki/wiki/Build-Vaa3D-on-Linux) carefully.
## Issues / questions / pull requests
Issues should be reported to the
[Rivuletpy github repository issue tracker](https://github.com/RivuletStudio/rivuletpy/issues).
The ability and speed with which issues can be resolved depends on how complete and
succinct the report is. For this reason, it is recommended that reports be accompanied
with a minimal but self-contained code sample that reproduces the issue, the observed and
expected output, and if possible, the commit ID of the version used. If reporting a
regression, the commit ID of the change that introduced the problem is also extremely valuable
information.
Questions are also welcomed in the [Rivuletpy github repository issue tracker](https://github.com/RivuletStudio/rivuletpy/issues).
Simply add a `question` label. We treat every question as an issue, since it means we should have made things clearer or easier for users.
Pull requests are definitely welcomed! Before you make a pull requests, please kindly create an issue first to discuss the optimal solution.
## Installation
### Setting up virtual environment
It is recommended to install rivulet in a virtual enviornment.
```bash
# create env and activate it
conda create -n riv
conda activate riv
# install pip and git
conda install pip git
```
### Install from PyPI
To install `rivuletpy` from **PyPI** simply activate your virtual environment and run:
```bash
pip install rivuletpy
```
### Install from GitHub
Optionally, you can use `pip` to install the latest version directly from GitHub:
```bash
pip install git+https://github.com/RivuletStudio/rivuletpy
```
## Test Installation
In ./rivuletpy/
`sh quicktest.sh`
This will download a simple neuron image and perform a neuron tracing with rivulet2 algorithm. If you encountered any issues while installing Rivuletpy, you are welcome to raise an issue for the developers in the [issue tracker](https://github.com/RivuletStudio/rivuletpy/issues)
## Usage
* Reconstruct single neuron file.
The script rtrace command will be installed
```bash
$ rtrace --help
usage: rtrace [-h] -f FILE [-o OUT] [-t THRESHOLD] [-z ZOOM_FACTOR]
[--save-soma] [--no-save-soma] [--speed]
[--quality] [--no-quality] [--clean] [--no-clean] [--silent]
[--no-silent] [-v] [--no-view]
[--tracing_resolution TRACING_RESOLUTION] [--vtk]
Arguments to perform the Rivulet2 tracing algorithm.
optional arguments:
-h, --help show this help message and exit
-f FILE, --file FILE The input file. A image file (*.tif, *.nii, *.mat).
-o OUT, --out OUT The name of the output file
-t THRESHOLD, --threshold THRESHOLD
threshold to distinguish the foreground and
background. Default 0. If threshold<0, otsu will be
used.
-z ZOOM_FACTOR, --zoom_factor ZOOM_FACTOR
The factor to zoom the image to speed up the whole
thing. Default 1.
--save-soma Save the automatically reconstructed soma volume along
with the SWC.
--no-save-soma Don't save the automatically reconstructed soma volume
along with the SWC (default)
--speed Use the input directly as speed image
--quality Reconstruct the neuron with higher quality and
slightly more computing time
--no-quality Reconstruct the neuron with lower quality and slightly
more computing time
--clean Remove the unconnected segments (default). It is
relatively safe to do with the Rivulet2 algorithm
--no-clean Keep the unconnected segments
--silent Omit the terminal outputs
--no-silent Show the terminal outputs & the nice logo (default)
-v, --view View the reconstructed neuron when rtrace finishes
--no-view
--tracing_resolution TRACING_RESOLUTION
Only valid for mhd input files. Will resample the mhd
array into isotropic resolution before tracing.
Default 1mm
--vtk Store the world coordinate vtk format along with the
swc
```
Example Usecases with single neurons in a TIFF image
```bash
rtrace -f example.tif -t 10 # Simple like this. Reconstruct a neuron in example.tif with a background threshold of 10
rtrace -f example.tif -t 10 --quality # Better results with longer running time
rtrace -f example.tif -t 10 --quality -v # Open a 3D swc viewer after reconstruction
```
Example Usecases with general tree structures in a mhd image
```bash
rtrace -f example.mhd -t 10 --tracing_resolution 1.5 --vtk # Perform the tracing under an isotropic resolution of 1.5mmx1.5mmx1.5mm and output a vtk output file under the world coordinates along side the swc.
rtrace -f example.mhd -t 10 --tracing_resolution 1.5 --vtk --speed # Use the input image directly as the source of making speed image. Recommended if the input mhd is a probablity map of centerlines.
```
Please note that since Rivulet2 is good at handling noise, a relatively low intensity threshold is preferred so that all candidate voxels are included.
* Compare a swc reconstruction against the manual ground truth
```bash
$ compareswc --help
usage: compareswc [-h] --target TARGET --groundtruth GROUNDTRUTH
[--sigma SIGMA]
Arguments for comparing two swc files.
optional arguments:
-h, --help show this help message and exit
--target TARGET The input target swc file.
--groundtruth GROUNDTRUTH
The input ground truth swc file.
--sigma SIGMA The sigma value to use for the Gaussian function in
NetMets.
$ compareswc --target r2_tracing.swc --groundtruth hand_tracing.swc
0.9970 0.8946 0.9865 1 3
```
The `compareswc` command outputs five numbers which are in order:
precision, recall, f1-score, No. connection error type A, No. connection error type B
## FAQ
### What if I see on Mac OS ```ImportError: Failed to find TIFF library. Make sure that libtiff is installed and its location is listed in PATH|LD_LIBRARY_PATH|..```
Try
```bash
brew install libtiff
```
### What if I see ```...version `GLIBCXX_3.4.21' not found...``` when I run `rtrace` under Anaconda?
Try
```bash
(riv)$ conda install libgcc # Upgrades the gcc in your conda environment to the newest
```
### What if I see ```Intel MKL FATAL ERROR: Cannot load libmkl_avx2.so or libmkl_def.so.```?
Try to get rid of the mkl in your conda, it has been reported to cause many issues
```bash
(riv)$ conda install nomkl numpy scipy scikit-learn numexpr
(riv)$ conda remove mkl mkl-service
```
## Dependencies
The build-time and runtime dependencies of Rivuletpy are:
* [numpy](http://www.numpy.org/)
* [scipy](http://www.scipy.org/)
* [Cython](http://cython.org/)
* [scikit-fmm](https://github.com/scikit-fmm)
* [scikit-image](https://github.com/scikit-image)
* [matplotlib](http://www.matplotlib.org/)
* [tqdm](https://github.com/noamraph/tqdm)
* [nibabel](http://nipy.org/nibabel/)
| /rivuletpy-0.3.0.tar.gz/rivuletpy-0.3.0/README.md | 0.620852 | 0.650384 | README.md | pypi |
from dataclasses import dataclass
@dataclass(frozen=True)
class Interval:
    """A closed integer interval [start, end]; both endpoints included."""

    start: int
    end: int

    def __post_init__(self):
        """Assures correct ordering of start and end"""
        a, b = self.start, self.end
        # Frozen dataclass: bypass the immutability guard to normalize
        # the endpoints so that start <= end always holds.
        object.__setattr__(self, "start", min(a, b))
        object.__setattr__(self, "end", max(a, b))

    def clone(self) -> "Interval":
        """Return a copy of this interval."""
        return Interval(self.start, self.end)

    def get_length(self) -> int:
        """
        [DEPRECATED]
        Length of the interval, ends included
        """
        return len(self)

    def __len__(self) -> int:
        # +1 because both endpoints belong to the interval.
        return self.end - self.start + 1

    def __lt__(self, other: "Interval") -> bool:
        """
        Is this interval completely contained within other? (no ovelapping border)
        """
        if not isinstance(other, Interval):
            return False
        a, b, c, d = self.tup() + other.tup()
        return a > c and b < d

    def __le__(self, other: "Interval") -> bool:
        """
        Is this interval completely contained within other? (allows ovelapping border)
        """
        if not isinstance(other, Interval):
            return False
        a, b, c, d = self.tup() + other.tup()
        return a >= c and b <= d

    def tup(self) -> tuple[int, int]:
        """Get tuple of extremes"""
        return (self.start, self.end)

    def __and__(self, other: "Interval") -> "Interval":
        """
        Get the intersection of the two intervals, as an Interval
        Returns None if the two intervals don't intersect
        """
        if not isinstance(other, Interval):
            return None
        a, b, c, d = self.tup() + other.tup()
        # Disjoint intervals have no intersection.
        if b < c or d < a:
            return None
        new_min, new_max = max(a, c), min(b, d)
        return Interval(new_min, new_max)

    def __sub__(self, other: "Interval") -> list["Interval"]:
        """
        List of 0-2 intervals that belong to this Interval, but not to the other
        """
        if not isinstance(other, Interval):
            return []  # Exclude non-intervals
        if self <= other:
            return []  # Exclude cases when self is completely in other
        a, b, c, d = self.tup() + other.tup()  # Get quick refs for interval limits
        if b < c or d < a:
            return [self.clone()]  # Cover cases where intvs don't overlap
        if other < self:  # Cover cases where there's exactly two sub-intervals
            return [Interval(a, c - 1), Interval(d + 1, b)]
        else:
            # Partial overlap on one side only: keep the uncovered end.
            if d < b:
                return [Interval(d + 1, b)]
            else:
                return [Interval(a, c - 1)]

    def segment(self, other: "Interval") -> tuple["Interval", "Interval", "Interval"]:
        """
        Get 3-tuple of (Interval|None) representing (leftmost,a&b,rightmost)
        """
        # NOTE(review): unfinished implementation — for Interval arguments
        # this falls through and implicitly returns None instead of the
        # documented 3-tuple. TODO: complete before relying on it.
        if not isinstance(other, Interval):
            return (None, self.clone(), None)
        a, b, c, d = self.tup() + other.tup()
        # TODO

    def __str__(self) -> str:
        """Common interval representation (e.g. '1..5')"""
        return f"{self.start}..{self.end}"
def str_to_iv(s: str) -> Interval:
    """
    Parse an interval literal such as "1..5" into an Interval.

    Leading/trailing whitespace is ignored; any text after a second ".."
    separator is discarded, matching the original lenient behaviour.
    Raises ValueError when fewer than two ".."-separated parts are present
    or when a part is not an integer literal.
    """
    lo, hi, *_ignored = s.strip().split("..")
    return Interval(int(lo), int(hi))
**This project is still under construction**
# Table of Contents
- [Motivation](#motivation)
- [Requirements](#requirements)
- [Installation](#installation)
* [Installation through PyPi (Recommended)](#installation-through-pypi-recommended)
* [Installation through Github](#installation-through-github)
* [But I really really need a binary](#but-i-really-really-need-a-binary)
* [Running Tests](#running-tests)
- [Configuration](#configuration)
* [Configuration File](#configuration-file)
- [Sample Configuration File](#sample-configuration-file)
* [Overriding configuration at runtime](#overriding-configuration-at-runtime)
- [Usage](#usage)
* [Execution](#execution)
* [Menu Items](#menu-items)
- [Generate a strong password](#generate-a-strong-password)
- [Generate a password](#generate-a-password)
- [Add a credential](#add-a-credential)
- [Retrieve credential using id](#retrieve-credential-using-id)
- [Copy credential to clipboard](#copy-credential-to-clipboard)
- [Filter credentials](#filter-credentials)
- [List all credentials](#list-all-credentials)
- [Modify credential](#modify-credential)
- [Remove credential](#remove-credential)
- [Remove all credentials](#remove-all-credentials)
- [Change master password](#change-master-password)
- [Export credentials to a JSON file](#export-credentials-to-a-json-file)
- [Import credentials from a JSON file](#import-credentials-from-a-json-file)
- [List all raw credentials](#list-all-raw-credentials)
- [Password checkup](#password-checkup)
* [File Mode](#file-mode)
* [Actions](#actions)
* [Other](#other)
# Motivation
The motivation behind this project was to create a password manager that could get up and running without much setup while still providing useful features to the end user.
This is in fact the sole reason behind the file mode which allows you to safely store and retrieve your credentials using a json file.
# Requirements
- Python3
- Mariadb / MySQL / MongoDB (Optional)
# Installation
If you want to install Rizpass for personal use, you can either install it as a pip package or use the source code / binary provided with the latest release
## Installation through PyPi (Recommended)
The following command will upgrade an existing installation of Rizpass if it is already installed else it will install Rizpass
```bash
pip install --upgrade rizpass
```
## Installation through Github
1. Clone this repository
```bash
git clone https://github.com/rizwanmustafa/Rizpass.git
cd Rizpass
```
2. Create a new virtual environment in a folder called 'venv' for this project (This will prevent your binary size and compilation time from being too long).
```bash
python3 -m venv venv
```
3. Activate the virtual environment:
```bash
source venv/bin/activate
```
4. Install the package
```bash
pip install .
```
5. Start Rizpass
```bash
python3 -m rizpass
```
Note: You can also start rizpass by executing `rizpass` in the terminal directly however this may require modification to the `$PATH` variable
Bonus - If you want to do it all in one step:
```bash
git clone https://github.com/rizwanmustafa/Rizpass.git
cd Rizpass
python3 -m venv venv
source venv/bin/activate
pip install .
python3 -m rizpass
```
## But I really really need a binary
So you want to use Rizpass on the go.
Since Python doesn't have an official compiler, we are going to rely on one of its modules, `PyInstaller`.
1. Follow the steps in the [Installation through Github](#installation-through-github)
2. Install `PyInstaller`:
```bash
pip install PyInstaller
```
3. In the same virtual environment that we created, run the following command while in the root directory of the package:
```
python3 -m PyInstaller --onefile rizpass.py
```
4. Upon completion, this will create a binary file for your OS which will be located in `dist/`
Congratulations, you now have a huge sized binary
## Running tests
It is recommended that you run tests after installation to ensure the best experience. You can run all unit tests through the following command:
```bash
python3 -m rizpass.tests
```
# Configuration
Configuring Rizpass is as simple as running the following command and answering the questions asked
```bash
python3 -m rizpass --setup
```
## Configuration File
Rizpass uses a json object for storing its configuration. The setup command creates a configuration file at `~/.rizpass.json`
Here is a list of the fields contained in the configuration file and their description:
```
db_type (string, Required) : Name of the database. 'mysql' for MySQL or MariaDB and 'mongo' for MongoDB.
db_host (string, Required) : Address at which the database is hosted e.g 'localhost'
db_name (string, Required) : Name of the database created specifically for Rizpass to store your credentials in.
db_user (string, Required) : Name of the database user created specifically for Rizpass (Should have read and write permissions on the database).
db_port (integer, Optional): Port number for communication with the database. Defaults to 3306 for 'mysql' and 27017 for 'mongo'.
```
#### Sample Configuration File
```json
{"db_type": "mongo", "db_host": "localhost", "db_user": "passMan", "db_name": "rizpass", "db_port": 7000}
```
## Overriding configuration at runtime
You can override the configurations stored in a file on runtime using the following cli options:
```
--db-host <host> Database host
--db-type <type> Database type (mongo, mysql)
--db-user <user> Database user
--db-name <name> Database name
--db-port <port> Database port
```
You can also use all these options together to use Rizpass without a configuration file.
# Usage
## Execution
You can execute Rizpass through the following command:
```bash
python3 -m rizpass
```
## Menu Items
#### Generate a strong password
This menu item allows one to generate a strong password that contains all kinds of characters (uppercase, lowercase, special, digit) to enhance security. The generated passwords stand strong against dictionary attacks as they are truly random and not easy to guess. In this option the minimum length of a generated password can be 16 characters
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass generate-strong
```
#### Generate a password
This menu item allows one to generate a password with the traits of their choice. The user can choose the length and the type of characters they want to include in the generated password. This option is less secure than the option mentioned above but allows for greater customizability.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass generate
```
#### Add a credential
This menu item allows one to store a new credential. Users can store the credential data in the following fields: 'title', 'username', 'email' and 'password'. Rizpass automatically adds a unique 'id' field to the credential for use in other menu items.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass add
```
#### Retrieve credential using id
This menu item takes the 'id' of a credential as an input from the user and prints the credential if it exists.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass retrieve
```
#### Copy credential to clipboard
This menu item takes the 'id' of a credential as an input from the user and copies the password to the clipboard if it exists. For this menu item to work, pyperclip must be able to find a copy mechanism for your system.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass copy
```
#### Filter credentials
This menu item allows the users to provide 'filters' for the following fields: 'title', 'username', 'email'. A 'filter' for a field is just character(s) that each credential's matching field must contain. If no 'filter' is provided for a particular field, all values for that field will be considered valid. If no 'filters' are provided at all, all stored credentials will be returned.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass filter
```
#### List all credentials
This menu item prints all the stored credentials to the screen.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass list-all
```
#### Modify credential
This menu item takes the credential 'id' as an input. It then takes in replacement values for each field for that credential. If a replacement value for a field is empty, the field value will not be modified.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass modify
```
#### Remove credential
This menu item takes the credential 'id' as an input and removes the stored credential if it exists.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass remove
```
#### Remove all credentials
This menu item first confirms if you are sure about what you intend to do. It then prompts you to re-enter the master password; if the master password is incorrect, it exits, else it removes all stored credentials. Remember this is a permanent change.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass remove-all
```
#### Change master password
This menu item changes the master password you use to log in to Rizpass. It first confirms your intentions and then prompts you for the current master password. If incorrect, it exits, else it continues with the process. It then asks you for the new master password. If you are using the database option, it will ask for the root credentials to change the password of the database user. It then re-encrypts the stored credentials using the new master password. Remember this is a permanent change.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass change-master-pass
```
#### Export credentials to a JSON file
This menu item allows you to export your encrypted credentials to a JSON file to allow for portability. It will ask you for the file path and the master password for this file. You can choose a separate master password for the exported credentials but if you do not provide a separate master password, it will encrypt the credentials with your current master password. You can then use this file with the file mode of Rizpass to access your credentials on the go.
We recommend you use this feature to backup your credentials regularly. It is also recommended to store this file in a safe place.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass export
```
#### Import credentials from a JSON file
This menu item allows you to import your encrypted credentials from a JSON file that was exported using the export menu item of Rizpass. Rizpass will prompt you to provide the path of the file and the master password for this file when you try to import it. It will then try to re-encrypt all credentials in the file and store them.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass import
```
#### List all raw credentials
This menu item is similar to the "List all credentials" menu item but there is one key difference. It prints the encrypted version of the stored credentials on the screen rather than the usual decrypted version of the credentials.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass list-raw
```
#### Password checkup
This menu item goes over your stored credentials and scans for passwords that are either weak or duplicate. For detecting weak passwords, it just checks if a password follows the guidelines that this item prints before beginning the checkup. It is completely possible for this item to miss weak passwords like "p@$$w0rdp@$$w0rd123". This is why it is recommended to use passwords generated from the "Generate a strong password" menu item.
You can access this feature through the commandline by the following command:
```bash
python3 -m rizpass pass-checkup
```
## File Mode
A major reason behind the creation of Rizpass was to have ease of use and to prevent confusion among the users. Rizpass supports file mode whereby all operations are performed on a JSON file instead of a database. This can help those who don't want to go through the process of setting up a database and those who want portability
You can access the file mode using the following command:
```bash
python3 -m rizpass --file <file_name>
```
## Actions
Since Rizpass is a CLI tool, it is designed to be as cli-friendly as possible. Hence, if you don't like the extensive menu and know exactly what you want to do, you can use actions. For example if you want to add a credential, you can do so through the terminal:
```
python3 -m rizpass add
```
Plus you can use this feature to perform multiple tasks in one go without touching the menu:
```
python3 -m rizpass add list-all generate-strong pass-checkup
```
However with great power comes great responsibility.
## Other
You can print the help menu through the following command:
```bash
python3 -m rizpass --help
```
You can print the version of Rizpass you are using through the following command:
```bash
python3 -m rizpass --version
```
You can have verbose output through the following command:
```bash
python3 -m rizpass --verbose
```
| /rizpass-0.0.5.tar.gz/rizpass-0.0.5/README.md | 0.577019 | 0.831006 | README.md | pypi |
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
rizwanmustafa0000@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| /rizpass-0.0.5.tar.gz/rizpass-0.0.5/CODE_OF_CONDUCT.md | 0.57344 | 0.683964 | CODE_OF_CONDUCT.md | pypi |
from collections import namedtuple
# Text menu shown when the user is asked to pick one of the seven diatonic modes.
modes_prompt = ('Mode options:\n'
                ' (Ionian)\n'
                ' (Dorian)\n'
                ' (Phrygian)\n'
                ' (Lydian)\n'
                ' (Mixolydian)\n'
                ' (Aeolian)\n'
                ' (Locrian)\n'
                '\n'
                'Enter mode: ')
# Canonical mode names, ordered by the scale degree each mode starts on (I..VII).
mode_list = ('Ionian', 'Dorian', 'Phrygian', 'Lydian', 'Mixolydian', 'Aeolian', 'Locrian')
# Ionian (mode I) — the pattern of the plain Major scale.
# *_degrees holds the scale-degree labels; *_semitones the gaps (in semitones)
# between successive degrees. Same layout is repeated for every mode below.
ionian_degrees = ('1', '2', '3', '4', '5', '6', '7')
ionian_semitones = (2, 2, 1, 2, 2, 2, 1)
# Explanatory text printed when the user selects this mode.
ionian_mode_prompt = ('Ionian Mode:\n'
                      '\n'
                      'The Ionian Mode is the most common mode of them all. This is because it\'s the mode that\n'
                      'naturally occurs when you play through a Major scale. Without knowing anything about\n'
                      'modes, it\'s pretty easy to accidentally create music within the Ionian Mode.\n'
                      'Ionian uses the first scale degree as the root note.\n'
                      '\n'
                      'The following are the Ionian degrees and semitones:\n'
                      ' (1) (1)\n'
                      'Degrees: 1 2 3 4 5 6 7 8 ==== 8 7 6 5 4 3 2 1\n'
                      '\n'
                      'Semitones: 2 2 1 2 2 2 1 ==== 1 2 2 2 1 2 2\n'
                      '\n'
                      'This is 1 of the 3 \'Major\' scale modes and is often considered the 2nd happiest of the\n'
                      'modes, mostly because the 7th degree has a weird dissonance.\n\n')
# Named access to the semitone gaps, keyed by degree name.
ionian_mode_contents = namedtuple('ionian_mode_stuff', ('First', 'Second', 'Third', 'Fourth', 'Fifth', 'Sixth',
                                                        'Seventh'))
ionian_mode_stuff = ionian_mode_contents(2, 2, 1, 2, 2, 2, 1)
# Dorian (mode II) — natural-minor flavour with a raised 6th.
# '\u266D' renders as the flat sign in the degree labels.
dorian_degrees = ('1', '2', '\u266D3', '4', '5', '6', '\u266D7')
dorian_semitones = (2, 1, 2, 2, 2, 1, 2)
dorian_mode_prompt = ('Dorian Mode:\n'
                      '\n'
                      'The Dorian Mode uses the second scale degree as the root note. From what I\'ve read from\n'
                      'somewhere, it\'s similar to a natural minor scale, but it has a raised sixth instead.\n'
                      'Apparently, Simon and Garfunkel\'s "Scarborough Fair" was composed entirely in Dorian,\n'
                      'which gives it like an old, celtic vibe, but it also sounds really dreamy and chill.\n'
                      '\n'
                      'The following are the Dorian degrees and semitones:\n'
                      ' (1) (1)\n'
                      'Degrees: 1 2 \u266D3 4 5 6 \u266D7 8 ==== 8 \u266D7 6 5 4 \u266D3 2 1\n'
                      '\n'
                      'Semitones: 2 1 2 2 2 1 2 ==== 2 1 2 2 2 1 2\n'
                      '\n'
                      'This is 1 of 4 \'minor\' scale modes and can be thought of as the happiest of the \'minor\'\n'
                      'modes.\n\n')
# Named access to the semitone gaps, keyed by degree name.
dorian_mode_contents = namedtuple('dorian_mode_stuff', ('First', 'Second', 'FlatThird', 'Fourth', 'Fifth', 'Sixth',
                                                        'FlatSeventh'))
dorian_mode_stuff = dorian_mode_contents(2, 1, 2, 2, 2, 1, 2)
# Phrygian (mode III) — natural-minor flavour with a lowered 2nd.
phrygian_degrees = ('1', '\u266D2', '\u266D3', '4', '5', '\u266D6', '\u266D7')
phrygian_semitones = (1, 2, 2, 2, 1, 2, 2)
phrygian_mode_prompt = ('Phrygian Mode:\n'
                        '\n'
                        'The Phrygian Mode is similar to Dorian in how they both sound like natural minor scales,\n'
                        'but unlike Dorian, Phrygian has a lowered 2nd (as opposed to Dorian\'s raised 6th. This is\n'
                        'kind of interesting considering the Dorian scale degree being altered is the second degree\n'
                        'if you were to run through the scale backwards. The Phrygian Mode degree that gets altered\n'
                        'is also the second degree, but running through the scale normal AND it\'s lowered, not\n'
                        'raised. Not sure if that observation is important, but still a cool little thing anyway.\n'
                        '\n'
                        'The following are the Phrygian degrees and semitones:\n'
                        ' (1) (1)\n'
                        'Degrees: 1 \u266D2 \u266D3 4 5 \u266D6 \u266D7 8 == 8 \u266D7 \u266D6 5 4 \u266D3 \u266D2 1\n'
                        '\n'
                        'Semitones: 1 2 2 2 1 2 2 == 2 2 1 2 2 2 1\n'
                        '\n'
                        'This is 1 of 4 \'minor\' scale modes and can be thought of as 3rd happiest of the \'minor\'\n'
                        'modes, or the 2nd \'heaviest\'.\n\n')
# Named access to the semitone gaps, keyed by degree name.
phrygian_mode_contents = namedtuple('phrygian_mode_stuff', ('First', 'FlatSecond', 'FlatThird', 'Fourth', 'Fifth',
                                                            'FlatSixth', 'FlatSeventh'))
phrygian_mode_stuff = phrygian_mode_contents(1, 2, 2, 2, 1, 2, 2)
# Lydian (mode IV) — Major-scale flavour with a raised 4th.
# '\u266F' renders as the sharp sign in the degree labels.
lydian_degrees = ('1', '2', '3', '\u266F4', '5', '6', '7')
lydian_semitones = (2, 2, 2, 1, 2, 2, 1)
lydian_mode_prompt = ('Lydian Mode:\n'
                      '\n'
                      'The Lydian Mode can be described as an \'almost\' Major scale that has a raised 4th. Which is\n'
                      'fitting considering that Lydian is the 4th mode. So just remember \'Lydian, 4th mode: Raised\n'
                      '4th.\n'
                      '\n'
                      'The following are the Lydian degrees and semitones:\n'
                      ' (1) (1)\n'
                      'Degrees: 1 2 3 \u266F4 5 6 7 8 ==== 8 7 6 5 \u266F4 3 2 1\n'
                      '\n'
                      'Semitones: 2 2 2 1 2 2 1 ==== 1 2 2 1 2 2 2\n'
                      '\n'
                      'This is 1 of the 3 \'Major\' scale modes and is often considered THE happiest of the modes\n'
                      'because the 4th degree gets augmented into a regular 5th Major degree, which gives it a sense\n'
                      'of urgency and kind of a whimsical vibe.\n\n')
# Named access to the semitone gaps, keyed by degree name.
lydian_mode_contents = namedtuple('lydian_mode_stuff', ('First', 'Second', 'Third', 'SharpFourth', 'Fifth',
                                                        'Sixth', 'Seventh'))
lydian_mode_stuff = lydian_mode_contents(2, 2, 2, 1, 2, 2, 1)
# Mixolydian (mode V) — Major-scale flavour with a lowered 7th.
mixolydian_degrees = ('1', '2', '3', '4', '5', '6', '\u266D7')
mixolydian_semitones = (2, 2, 1, 2, 2, 1, 2)
mixolydian_mode_prompt = ('Mixolydian Mode:\n'
                          '\n'
                          'It\'s Major-sounding like Lydian, except it has a lowered 7th. In fact, it\'s literally\n'
                          'a Major scale but \u266D7. The flattened 7 gets rid of the dissonance that\'s often\n'
                          'created when using a normal 7th.\n'
                          'The flat 7 can give it a \'bluesy\' sound.\n'
                          '\n'
                          'The following are the Mixolydian degrees and semitones:\n'
                          ' (1) (1)\n'
                          'Degrees: 1 2 3 4 5 6 \u266D7 8 ==== 8 \u266D7 6 5 4 3 2 1\n'
                          '\n'
                          'Semitones: 2 2 1 2 2 1 2 ==== 2 1 2 2 1 2 2\n'
                          '\n'
                          'This is 1 of the 3 \'Major\' scale modes and is often considered the 3rd happiest mode\n'
                          'because of that smoothed out 7th degree.\n\n')
# Named access to the semitone gaps, keyed by degree name.
mixolydian_mode_contents = namedtuple('mixolydian_mode_stuff', ('First', 'Second', 'Third', 'Fourth', 'Fifth', 'Sixth',
                                                                'FlatSeventh'))
mixolydian_mode_stuff = mixolydian_mode_contents(2, 2, 1, 2, 2, 1, 2)
# Aeolian (mode VI) — identical to the Natural Minor scale.
aeolian_degrees = ('1', '2', '\u266D3', '4', '5', '\u266D6', '\u266D7')
aeolian_semitones = (2, 1, 2, 2, 1, 2, 2)
aeolian_mode_prompt = ('Aeolian Mode:\n'
                       '\n'
                       'Aeolian is a pretty swell fella, it\'s actually the exact notes you play when you\'re\n'
                       'switching to the relative minor scale of the Major scale you\'re working with.\n'
                       '\n'
                       'The following are the Aeolian degrees and semitones:\n'
                       ' (1) (1)\n'
                       'Degrees: 1 2 \u266D3 4 5 \u266D6 \u266D7 8 ==== 8 \u266D7 \u266D6 5 4 \u266D3 2 1\n'
                       '\n'
                       'Semitones: 2 1 2 2 1 2 2 ==== 2 2 1 2 2 1 2\n'
                       '\n'
                       'This is 1 of the 4 \'minor\' modes and is identical to Natural Minor. Aeolian can be\n'
                       'thought of as the 2nd happiest of the \'minor\' modes (similar to how Ionian is the 2nd\n'
                       'happiest of the \'Major\' modes).\n\n')
# Named access to the semitone gaps, keyed by degree name.
aeolian_mode_contents = namedtuple('aeolian_mode_stuff', ('First', 'Second', 'FlatThird', 'Fourth', 'Fifth',
                                                          'FlatSixth', 'FlatSeventh'))
aeolian_mode_stuff = aeolian_mode_contents(2, 1, 2, 2, 1, 2, 2)
# Locrian (mode VII) — the only mode with a diminished (flattened) 5th.
locrian_degrees = ('1', '\u266D2', '\u266D3', '4', '\u266D5', '\u266D6', '\u266D7')
locrian_semitones = (1, 2, 2, 1, 2, 2, 2)
locrian_mode_prompt = ('Locrian Mode:\n'
                       '\n'
                       'Locrian is wack. Don\'t use Locrian. It\'s the only mode with a diminished 5th.\n'
                       '\n'
                       'The following are the Locrian degrees and semitones:\n'
                       ' (1)\n'
                       'Degrees: 1 \u266D2 \u266D3 4 \u266D5 \u266D6 \u266D7 8\n'
                       '\n'
                       'Semitones: 1 2 2 1 2 2 2\n'
                       '\n'
                       'The darkest mode. 1 of the 4 \'minor\' modes. The relationship from the root to the now\n'
                       'diminished fifth makes it not at all ideal to use in tonal music, since the fifth should stay\n'
                       'natural or \'perfect\' for the use of a strong cadence. A flattened fifth already completely\n'
                       'removes the strongest possible cadence.\n\n')
# Named access to the semitone gaps, keyed by degree name.
locrian_mode_contents = namedtuple('locrian_mode_stuff', ('First', 'FlatSecond', 'FlatThird', 'Fourth', 'FlatFifth',
                                                          'FlatSixth', 'FlatSeventh'))
# Named semitone gaps for Locrian (stray extraction metadata removed from the
# end of this line — it was not valid Python).
locrian_mode_stuff = locrian_mode_contents(1, 2, 2, 1, 2, 2, 2)
from collections import namedtuple
# Menu text shown when asking the user to pick a scale.
scales_prompt = ('Scale options:\n'
                 ' (Major)\n'
                 ' (Major Pentatonic)\n'
                 ' (Minor)\n'
                 ' (Melodic Minor)\n'
                 ' (Harmonic Minor)\n'
                 ' (Minor Pentatonic)\n'
                 '\n'
                 'Enter scale: ')
# Valid scale names, matching the options listed in scales_prompt.
scale_list = ['Major', 'Major Pentatonic', 'Minor', 'Melodic Minor', 'Harmonic Minor', 'Minor Pentatonic']
# Major Scale
# Whole/half step formula for the Major scale.
major_steps = ('Whole', 'Whole', 'Half', 'Whole', 'Whole', 'Whole', 'Half')
# Abbreviated step formula (W = whole step, H = half step).
simple_major_steps = ('W', 'W', 'H', 'W', 'W', 'W', 'H')
# Scale-degree labels.
major_degrees = ('1', '2', '3', '4', '5', '6', '7')
# Number of notes in the scale.
major_scale_num = 7
# Semitone gaps between successive degrees.
major_semitones = (2, 2, 1, 2, 2, 2, 1)
# User-facing help text describing the Major scale.
major_scale_prompt = ('The Major Scale:\n'
                      '\n'
                      'The Major Scale is the most important scale within music theory.\n'
                      '\n'
                      'The formula for the Major Scale is as follows:\n'
                      'Whole, Whole, Half, Whole, Whole, Whole, Half\n'
                      'or for simplicity\'s sake,\n'
                      'W, W, H, W, W, W, H\n'
                      '\n'
                      'The scale degrees of a major scale are pretty straightforward:\n'
                      '1, 2, 3, 4, 5, 6, 7\n'
                      '\n'
                      'The semitones BETWEEN each scale degree, which can be thought of as the amount of frets\n'
                      'shifted to or from along the fretboard of a guitar, are equal in value to steps:\n'
                      '2, 2, 1, 2, 2, 2, 1\n'
                      '\n'
                      'The number of notes within the major scale is 7 which is pretty standard for common scales.\n'
                      'One way to think of it is every note counted ascending/descending from a root up until\n'
                      'reaching the octave is 12, which ideally would be 14, but because B/C and E/F don\'t have\n'
                      'any notes in between them, 2 is subtracted from the 14 (1 for B/C, 1 for E/F), thus leaving\n'
                      '12. 7 just so happens to be exactly half of 14, which is good ideology for chord building\n'
                      '(more on that elsewhere).\n'
                      '\n'
                      'Ascending and descending using the Major Scale looks like this:\n'
                      ' (1) (1)\n'
                      'Scale Degrees: 1 2 3 4 5 6 7 8 ==== 8 7 6 5 4 3 2 1\n'
                      '\n'
                      'Semitones: 2 2 1 2 2 2 1 ==== 1 2 2 2 1 2 2\n'
                      '\n'
                      'This is the Major Scale pattern on guitar:\n'
                      '\n'
                      'E|--7|--1|---|--2|\n'
                      ' | | | | |\n'
                      'B|---|--5|---|--6|\n'
                      ' | | | | |\n'
                      'G|--2|---|--3|--4|\n'
                      ' | | | | |\n'
                      'D|--6|---|--7|--1|\n'
                      ' | | | | |\n'
                      'A|--3|--4|---|--5|\n'
                      ' | | | | |\n'
                      'E|---|--1|---|--2|\n\n')
# Named-tuple type whose fields label the semitone step that follows each degree.
major_scale_contents = namedtuple('major_scale_stuff', ('First', 'Second', 'Third', 'Fourth', 'Fifth', 'Sixth',
                                                        'Seventh'))
# Instance holding the Major-scale semitone pattern keyed by degree name.
major_scale_stuff = major_scale_contents(2, 2, 1, 2, 2, 2, 1)
# Major Pentatonic
# Whole/half step formula for the Major Pentatonic scale.
major_penta_steps = ('Whole', 'Whole', 'Whole + Half', 'Whole', 'Whole + Half')
# Abbreviated step formula (W = whole step, H = half step).
simple_major_penta_steps = ('W', 'W', 'W+H', 'W', 'W+H')
# Scale-degree labels (the Major scale's 4th and 7th are omitted).
major_penta_degrees = ('1', '2', '3', '5', '6')
# Number of notes in the scale.
major_penta_scale_num = 5
# Semitone gaps between successive degrees.
major_penta_semitones = (2, 2, 3, 2, 3)
# User-facing help text describing the Major Pentatonic scale.
major_penta_scale_prompt = ('The Major Pentatonic Scale:\n'
                            '\n'
                            'The Major Pentatonic Scale can be thought of as the Major Scale, but without the\n'
                            '4th and 7th scale degrees of the Major Scale.\n'
                            'Your basic scale typically has 7 scale degrees. However, with this scale being\n'
                            '\'penta\' tonic, there are only 5 scale degrees.\n'
                            '\n'
                            'The formula for the Major Pentatonic Scale is as follows:\n'
                            'Whole, Whole, Whole + Half, Whole, Whole + Half\n'
                            'or for simplicity\'s sake,\n'
                            'W, W, W+H, W, W+H\n'
                            '\n'
                            'The scale degrees of the Major Pentatonic Scale only differentiate from the Major\n'
                            'Scale by removing the 4th and 7th scale degrees:\n'
                            '1, 2, 3, 5, 6\n'
                            '\n'
                            'The semitones between each scale degree:\n'
                            '2, 2, 3, 2, 3\n'
                            '\n'
                            'I don\'t know a whole lot about the Major Pentatonic Scale, but from what I do, the 5\n'
                            'remaining scale degrees are the more likeable ones, the ones that most people tend to\n'
                            'use when creating music. The fourth scale degree within a normal scale can be used to\n'
                            'create a Plagal Cadence when resolving to the root in a chord progression. The fifth\n'
                            'scale degree within a normal scale can be used to create a Perfect Cadence instead,\n'
                            'which naturally has a stronger resolution than a Plagal Cadence because of that slight\n'
                            'difference in semitones (the distance is bigger). Because of that, it makes sense to\n'
                            'choose the \'Dominant\' cadence of the two, which could be why the fourth was removed\n'
                            'from the Major Pentatonic. The seventh scale degree is usually really bizarre to base\n'
                            'intervals off of, so I can see how it was also removed from the Major Pentatonic.\n'
                            '\n'
                            'Ascending and descending using the Major Pentatonic Scale looks like this:\n'
                            ' (1) (1)\n'
                            'Scale Degrees: 1 2 3 5 6 8 ==== 8 6 5 3 2 1\n'
                            '\n'
                            # Fixed: added the "====" ascending/descending separator that
                            # every other scale prompt uses on its semitone line.
                            'Semitones: 2 2 3 2 3 ==== 3 2 3 2 2\n'
                            '\n'
                            'This is the Major Pentatonic Scale pattern on guitar:\n'
                            '\n'
                            'E|---|--1|---|--2|\n'
                            ' | | | | |\n'
                            'B|---|--5|---|--6|\n'
                            ' | | | | |\n'
                            'G|--2|---|--3|---|\n'
                            ' | | | | |\n'
                            'D|--6|---|---|--1|\n'
                            ' | | | | |\n'
                            'A|--3|---|---|--5|\n'
                            ' | | | | |\n'
                            'E|---|--1|---|--2|\n\n')
# Named-tuple type whose fields label the semitone step that follows each degree.
major_penta_scale_contents = namedtuple('major_penta_scale_stuff', ('First', 'Second', 'Third', 'Fifth', 'Sixth'))
# Instance holding the Major Pentatonic semitone pattern keyed by degree name.
major_penta_scale_stuff = major_penta_scale_contents(2, 2, 3, 2, 3)
# Minor Scale (Natural Minor)
# Whole/half step formula for the Natural Minor scale.
minor_steps = ('Whole', 'Half', 'Whole', 'Whole', 'Half', 'Whole', 'Whole')
# Abbreviated step formula (W = whole step, H = half step).
simple_minor_steps = ('W', 'H', 'W', 'W', 'H', 'W', 'W')
# Scale-degree labels (the 3rd, 6th and 7th are flattened).
minor_degrees = ('1', '2', '\u266D3', '4', '5', '\u266D6', '\u266D7')
# Number of notes in the scale.
minor_scale_num = 7
# Semitone gaps between successive degrees.
minor_semitones = (2, 1, 2, 2, 1, 2, 2)
# User-facing help text describing the Natural Minor scale.
minor_scale_prompt = ('The Natural Minor Scale:\n'
                      '\n'
                      'The Natural Minor Scale is the basis of all minor scales.\n'
                      'The Minor scales have a sadder vibe to them as opposed to Major scales.\n'
                      'If you\'re wanting to create music that\'s a bit more dramatic and sharp, you might would\n'
                      'be inclined to use a scale that\'s Minor based. The main difference between Major and Minor is\n'
                      'that in Major, the distance from the root to the 3rd is a Major interval, whereas with Minor,\n'
                      'the distance from the root to the 3rd is a Minor interval. The root note in Natural Minor, for\n'
                      'lack of a better word, \'naturally\' creates a Minor triad when using the 1,3,5 degree formula\n'
                      'because of the flattened third.\n'
                      '\n'
                      'The formula for the Natural Minor Scale is as follows:\n'
                      'Whole, Half, Whole, Whole, Half, Whole, Whole\n'
                      'or for simplicity\'s sake,\n'
                      'W, H, W, W, H, W, W\n'
                      '\n'
                      'The Natural Minor Scale is just like the Major Scale degrees, but with a flattened third,\n'
                      'sixth, and seventh:\n'
                      '1, 2, \u266D3, 4, 5, \u266D6, \u266D7\n'
                      '\n'
                      'The semitones between each scale degree:\n'
                      '2, 1, 2, 2, 1, 2, 2\n'
                      '\n'
                      'The 1, 4, 5 degrees of a Major or Minor scale are pretty significant in that they all are\n'
                      'considered \'Perfect\' intervals. By default, they\'re almost always natural notes, meaning\n'
                      'they are never sharp or flat. There\'s an exception to that of course, but with that in\n'
                      'mind, almost every other scale degree in Natural Minor gets flattened (except for the second\n'
                      'degree, which is also known as the \'Supertonic\'). These changes give the Minor scales their\n'
                      '\'sad\' vibe.\n'
                      '\n'
                      'Ascending and descending using the Natural Minor Scale looks like this:\n'
                      ' (1) (1)\n'
                      'Scale Degrees: 1 2 \u266D3 4 5 \u266D6 \u266D7 8 ==== 8 \u266D7 \u266D6 5 4 \u266D3 2 1\n'
                      '\n'
                      'Semitones: 2 1 2 2 1 2 2 ==== 2 2 1 2 2 1 2\n'
                      '\n'
                      'This is the Natural Minor Scale pattern on guitar:\n'
                      '\n'
                      'E|---|--1|---|--2|-\u266D3|\n'
                      ' | | | | | |\n'
                      'B|---|--5|-\u266D6|---|-\u266D7|\n'
                      ' | | | | | |\n'
                      'G|--2|-\u266D3|---|--4|---|\n'
                      ' | | | | | |\n'
                      'D|---|-\u266D7|---|--1|---|\n'
                      ' | | | | | |\n'
                      'A|---|--4|---|--5|-\u266D6|\n'
                      ' | | | | | |\n'
                      'E|---|--1|---|--2|-\u266D3|\n\n')
# Named-tuple type whose fields label the semitone step that follows each degree.
minor_scale_contents = namedtuple('minor_scale_stuff', ('First', 'Second', 'FlatThird', 'Fourth', 'Fifth', 'FlatSixth',
                                                        'FlatSeventh'))
# Instance holding the Natural Minor semitone pattern keyed by degree name.
minor_scale_stuff = minor_scale_contents(2, 1, 2, 2, 1, 2, 2)
# Melodic Minor Scale
# Whole/half step formula for the Melodic Minor scale (ascending form).
melminor_steps = ('Whole', 'Half', 'Whole', 'Whole', 'Whole', 'Whole', 'Half')
# Abbreviated step formula (W = whole step, H = half step).
simple_melminor_steps = ('W', 'H', 'W', 'W', 'W', 'W', 'H')
# Scale-degree labels (only the 3rd is flattened).
melminor_degrees = ('1', '2', '\u266D3', '4', '5', '6', '7')
# Number of notes in the scale.
melminor_scale_num = 7
# Semitone gaps between successive degrees (ascending form).
melminor_semitones = (2, 1, 2, 2, 2, 2, 1)
# User-facing help text describing the Melodic Minor scale.
melminor_scale_prompt = ('The Melodic Minor Scale:\n'
                         '\n'
                         'The Melodic Minor Scale is an interesting scale for sure because typically when musicians\n'
                         "use it, they use it in more ways than one. You may ask \"Well doesn't every musician use\n"
                         "scales in more ways than one anyway?\", and to that I say...\"Well, yeah...\"\n"
                         'BUT, other scales typically use the same notes and scale degrees when both ascending\n'
                         'AND descending... except for the Melodic Minor Scale... well kinda. Like I said,\n'
                         "it's an interesting scale. You can choose to descend and ascend it using the same\n"
                         'notes/scale degrees, OR you can choose to revert back to the Natural Minor Scale\n'
                         'formula when descending. Either way, the formula for the Melodic Minor Scale is\n'
                         'as follows:\n'
                         'Whole, Half, Whole, Whole, Whole, Whole, Half\n'
                         "or for simplicity's sake,\n"
                         'W, H, W, W, W, W, H\n'
                         '\n'
                         'For Melodic Minor, the only scale degree that gets altered is the third:\n'
                         '1, 2, \u266D3, 4, 5, 6, 7\n'
                         '\n'
                         'The semitones between each scale degree:\n'
                         '2, 1, 2, 2, 2, 2, 1\n'
                         '\n'
                         'Idk man, somethin\' \'bout keeping everything but the third unflat when ascending makes it\n'
                         'melodically very nice\n'
                         '\n'
                         'Ascending and descending using the Melodic Minor Scale can look like this:\n'
                         ' (1) (1)\n'
                         'Scale Degrees: 1 2 \u266D3 4 5 6 7 8 ==== 8 \u266D7 \u266D6 5 4 \u266D3 2 1\n'
                         '\n'
                         # Fixed: this first example descends via the NATURAL minor degrees
                         # (8 b7 b6 5 4 b3 2 1), so its descending semitones are 2 2 1 2 2 1 2,
                         # not the reversed melodic pattern that was shown before.
                         'Semitones: 2 1 2 2 2 2 1 ==== 2 2 1 2 2 1 2\n'
                         '\n'
                         # Fixed: added the missing '\n' so the blank line before the second
                         # example is not swallowed by implicit string concatenation.
                         'Or this:\n'
                         '\n'
                         'Scale Degrees: 1 2 \u266D3 4 5 6 7 8 ==== 8 7 6 5 4 \u266D3 2 1\n'
                         '\n'
                         'Semitones: 2 1 2 2 2 2 1 ==== 1 2 2 2 2 1 2\n'
                         '\n'
                         'This is the Melodic Minor Scale pattern on guitar:\n'
                         '\n'
                         'E|---|---|--2|-\u266D3|---|--4|\n'
                         ' | | | | | | |\n'
                         'B|---|---|--6|---|--7|--1|\n'
                         ' | | | | | | |\n'
                         'G|-\u266D3|---|--4|---|--5|---|\n'
                         ' | | | | | | |\n'
                         'D|---|--7|--1|---|--2|---|\n'
                         ' | | | | | | |\n'
                         'A|--4|---|--5|---|--6|---|\n'
                         ' | | | | | | |\n'
                         'E|--1|---|--2|-\u266D3|---|---|\n\n')
# Named-tuple type whose fields label the semitone step that follows each degree.
melminor_scale_contents = namedtuple('melminor_scale_stuff', ('First', 'Second', 'FlatThird', 'Fourth', 'Fifth',
                                                              'Sixth', 'Seventh'))
# Instance holding the Melodic Minor semitone pattern keyed by degree name.
melminor_scale_stuff = melminor_scale_contents(2, 1, 2, 2, 2, 2, 1)
# Harmonic Minor Scale
# Whole/half step formula for the Harmonic Minor scale.
harminor_steps = ('Whole', 'Half', 'Whole', 'Whole', 'Half', 'Whole + Half', 'Half')
# Abbreviated step formula (W = whole step, H = half step).
simple_harminor_steps = ('W', 'H', 'W', 'W', 'H', 'W+H', 'H')
# Scale-degree labels (the 3rd and 6th are flattened).
harminor_degrees = ('1', '2', '\u266D3', '4', '5', '\u266D6', '7')
# Number of notes in the scale.
harminor_scale_num = 7
# Semitone gaps between successive degrees.
harminor_semitones = (2, 1, 2, 2, 1, 3, 1)
# User-facing help text describing the Harmonic Minor scale.
harminor_scale_prompt = ('The Harmonic Minor Scale:\n'
                         '\n'
                         'Don\'t really have much to say about this scale except it sounds super cool and it\'s used\n'
                         'in the alternate picking part of the sweep/alternate picking solo in Machine by Born of\n'
                         'Osiris (Descending, to be specific).\n'
                         '\n'
                         'The formula for the Harmonic Minor Scale is as follows:\n'
                         'Whole, Half, Whole, Whole, Half, Whole + Half, Half\n'
                         "or for simplicity's sake,\n"
                         'W, H, W, W, H, W+H, H\n'
                         '\n'
                         'The scale degrees for Harmonic Minor only have the third and the sixth altered:\n'
                         '1, 2, \u266D3, 4, 5, \u266D6, 7\n'
                         '\n'
                         'The semitones between each scale degree:\n'
                         # Fixed: added the missing '\n' so the next paragraph is not glued
                         # onto this line by implicit string concatenation.
                         '2, 1, 2, 2, 1, 3, 1\n'
                         '\n'
                         'Harmonic Minor can be thought of as like halfway between Melodic and Natural Minor. It\'s\n'
                         'not as flat as Natural Minor, and it\'s not as un-flat as Melodic Minor.\n'
                         '\n'
                         'Ascending and descending using the Harmonic Minor Scale looks like this:\n'
                         ' (1) (1)\n'
                         'Scale Degrees: 1 2 \u266D3 4 5 \u266D6 7 8 ==== 8 7 \u266D6 5 4 \u266D3 2 1\n'
                         '\n'
                         # Fixed: added the "====" separator used by every other prompt and
                         # dropped the stray extra newline.
                         'Semitones: 2 1 2 2 1 3 1 ==== 1 3 1 2 2 1 2\n'
                         '\n'
                         'This is the Harmonic Minor Scale pattern on guitar:\n'
                         '\n'
                         'E|---|---|--2|-\u266D3|---|--4|\n'
                         ' | | | | | | |\n'
                         'B|---|-\u266D6|---|---|--7|--1|\n'
                         ' | | | | | | |\n'
                         'G|-\u266D3|---|--4|---|--5|---|\n'
                         ' | | | | | | |\n'
                         'D|---|--7|--1|---|--2|---|\n'
                         ' | | | | | | |\n'
                         'A|--4|---|--5|-\u266D6|---|---|\n'
                         ' | | | | | | |\n'
                         'E|--1|---|--2|-\u266D3|---|---|\n\n')
# Named-tuple type whose fields label the semitone step that follows each degree.
harminor_scale_contents = namedtuple('harminor_scale_stuff', ('First', 'Second', 'FlatThird', 'Fourth', 'Fifth',
                                                              'FlatSixth', 'Seventh'))
# Instance holding the Harmonic Minor semitone pattern keyed by degree name.
harminor_scale_stuff = harminor_scale_contents(2, 1, 2, 2, 1, 3, 1)
# Minor Pentatonic Scale
# Whole/half step formula for the Minor Pentatonic scale.
minor_penta_steps = ('Whole + Half', 'Whole', 'Whole', 'Whole + Half', 'Whole')
# Abbreviated step formula (W = whole step, H = half step).
simple_minor_penta_steps = ('W+H', 'W', 'W', 'W+H', 'W')
# Scale-degree labels (Natural Minor without the 2nd and 6th degrees).
minor_penta_degrees = ('1', '\u266D3', '4', '5', '\u266D7')
# Number of notes in the scale.
minor_penta_scale_num = 5
# Semitone gaps between successive degrees.
minor_penta_semitones = (3, 2, 2, 3, 2)
# User-facing help text describing the Minor Pentatonic scale.
minor_penta_scale_prompt = ('The Minor Pentatonic Scale:\n'
                            '\n'
                            'Similar to how the Major Pentatonic removes two scale degrees, the Minor Pentatonic\n'
                            "removes the 2nd and 6th scale degrees, leaving 5 notes, or 'penta' 'tonics'.\n"
                            '\n'
                            'The formula for the Minor Pentatonic Scale is as follows:\n'
                            'Whole + Half, Whole, Whole, Whole + Half, Whole\n'
                            "or for simplicity's sake,\n"
                            'W+H, W, W, W+H, W\n'
                            '\n'
                            'The scale degrees for the Minor Pentatonic Scale follow the scale degrees from Natural\n'
                            'Minor, but remove the 2nd and 6th scale degrees:\n'
                            # Fixed: added the missing '\n' that let implicit concatenation
                            # glue the following blank line onto the degree list.
                            '1, \u266D3, 4, 5, \u266D7\n'
                            '\n'
                            'The semitones between each scale degree:\n'
                            '3, 2, 2, 3, 2\n'
                            '\n'
                            'Minor Pentatonic is a chill little guitar warmup when you got nothin\' else in mind to\n'
                            'play.\n'
                            '\n'
                            'Ascending and descending using the Minor Pentatonic Scale looks like this:\n'
                            ' (1) (1)\n'
                            # Fixed: added the missing '\n' at the end of the degree line.
                            'Scale Degrees: 1 \u266D3 4 5 \u266D7 8 ==== 8 \u266D7 5 4 \u266D3 1\n'
                            '\n'
                            # Fixed: added the "====" separator used by every other prompt.
                            'Semitones: 3 2 2 3 2 ==== 2 3 2 2 3\n'
                            '\n'
                            'This is the Minor Pentatonic Scale pattern on guitar:\n'
                            '\n'
                            'E|--1|---|---|-\u266D3|\n'
                            ' | | | | |\n'
                            'B|--5|---|---|-\u266D7|\n'
                            ' | | | | |\n'
                            'G|-\u266D3|---|--4|---|\n'
                            ' | | | | |\n'
                            'D|-\u266D7|---|--1|---|\n'
                            ' | | | | |\n'
                            'A|--4|---|--5|---|\n'
                            ' | | | | |\n'
                            'E|--1|---|---|-\u266D3|\n\n')
# Named-tuple type whose fields label the semitone step that follows each degree.
minor_penta_scale_contents = namedtuple('minor_penta_scale_stuff', ('First', 'FlatThird', 'Fourth', 'Fifth',
                                                                    'FlatSeventh'))
# Instance holding the Minor Pentatonic semitone pattern keyed by degree name.
# (Fixed: removed stray extraction residue that had been appended to this line.)
minor_penta_scale_stuff = minor_penta_scale_contents(3, 2, 2, 3, 2)
# Menu text shown when asking the user to pick an interval-related topic.
interval_menu = ('Interval options:\n'
                 ' (Interval Basics) Lists interval basics.\n'
                 ' (Chord Basics) Shows the basics of how chords are created.\n'
                 ' (Chord Progressions) Shows how to build progressions and shows\n'
                 'some popular chord progressions.\n'
                 ' (Arpeggio Basics) Displays basic arpeggio stuff.\n'
                 '\n'
                 'Enter choice: ')
# Valid topic names, matching the options listed in interval_menu.
interval_topics = ['Interval Basics', 'Chord Basics', 'Chord Progressions', 'Arpeggio Basics']
# Interval Basics
# Reference table of interval names and their distance from the root in semitones.
interval_basics = ('Interval Basics\n'
                   '\n'
                   'Below is a list of interval names, as well as their respective amount of\n'
                   'semitones from the root note:\n'
                   '\n'
                   'Perfect Unison: 0\n'
                   'Minor Second: 1\n'
                   'Major Second: 2\n'
                   'Minor Third: 3\n'
                   'Major Third: 4\n'
                   'Perfect Fourth: 5\n'
                   'Augmented Fourth/Diminished Fifth: 6\n'
                   'Perfect Fifth: 7\n'
                   'Minor Sixth: 8\n'
                   'Major Sixth: 9\n'
                   'Minor Seventh: 10\n'
                   'Major Seventh: 11\n'
                   'Octave: 12\n'
                   'Minor Ninth: 13\n'
                   'Major Ninth: 14\n'
                   'Minor Tenth: 15\n'
                   'Major Tenth: 16\n'
                   'Perfect Eleventh: 17\n'
                   'Augmented Eleventh/Diminished Twelfth: 18\n'
                   'Perfect Twelfth: 19\n'
                   'Minor Thirteenth: 20\n'
                   'Major Thirteenth: 21\n'
                   'Minor Fourteenth: 22\n'
                   'Major Fourteenth: 23\n'
                   '\n'
                   'Unison, Fourth, Fifth, and Octave are all considered perfect intervals, and the others after that\n'
                   'are essentially just stacked on top of the Octave\n\n')
# Chord Basics
# Explanatory text on how triads/chords are built from scale degrees.
chord_basics = ('Chord Basics:\n'
                '\n'
                'A \'chord\' is the result of playing 3 (or more) notes at once. Technically any 3 notes can\n'
                'create a chord, but they won\'t necessarily sound good or make any sense when notating/composing\n'
                'music. The creation of a chord with 3 notes, also known as a \'triad\', is achieved when you take\n'
                'the first, third, and fifth scale degrees and play those notes at once. For Major, the third is\n'
                'a normal, or \'natural\' third, but for Minor, the third gets flattened so instead of having\n'
                'a 1, 3, 5 foundation, you have 1, \u266D3, 5. For tonal music, the third scale degree makes a big\n'
                'difference in the way a chord sounds, and this is at a low-level form of chord-building, it only\n'
                'gets more complicated when you start implementing other intervals. But in general, you got Major and\n'
                'Minor chords that are created and built upon for whatever purpose is needed for the music.\n'
                '\n'
                'The primary chords of a scale are the 1, 4, and 5 scale degrees made into simple major chords.\n'
                'When it comes to frequencies, all of these are considered \'perfect\' solely because of the ratio\n'
                'they create when measuring the scale degree in comparison to all of the intervals. You don\'t\n'
                'necessarily need to know why or what makes those scale degrees perfect, but it IS important to know\n'
                'that they are like that. The 7th degree is naturally a diminished chord, and all the others are made\n'
                'into Minor chords.\n\n')
# Chord Progressions
# List of common chord progressions in Roman-numeral notation.
chord_progs = ('Chord Progressions:\n'
               '\n'
               'The list below provides some common chord progressions to help with practicing improv or to serve\n'
               'as a basis for creating/composing some ideas:\n'
               '\n'
               'I-V-vi-IV\n'
               'I-IV-V-IV\n'
               'I-IV-V\n'
               'I-IV-V-V\n'
               'I-vi-IV-V\n'
               'I-ii-vi-V\n'
               'I-vi-ii-V\n'
               'I-iii-vi-V\n'
               'IV-V-IV\n'
               'I-iii-IV-vi\n'
               'I-IV-I-V\n'
               'I-I-IV-vi\n\n')
# Arpeggio Basics
# Explanatory text describing arpeggios (broken chords).
arpeggio_basics = ('Arpeggio Basics:\n'
                   '\n'
                   'An arpeggio is just a chord, but each note rings out separate from each other instead of all\n'
                   'at once like in a regular chord. They are sometimes referred to as broken chords. In this version\n'
                   'of the program, no arpeggio tabs will be provided, but just know that a regular arpeggio follows\n'
                   'the 1,3,5 formula like a basic chord. All you gotta do to play a simple Major/Minor arpeggio is\n'
                   'play the corresponding scale degrees for whatever Major/Minor scale you\'re playing (1,3,5 for\n'
                   'Major; 1,\u266D3,5 for Minor).\n\n')
# MORE INTERVALS
# Menu text for the extra interval topics (Circle of Fifths/Fourths, Orders).
more_intervals_menu = ('More interval options:\n'
                       ' (Circles) Shows Circle of Fifths/Fourths.\n'
                       ' (Orders) Shows Order of Sharps/Flats.\n'
                       '\n'
                       'Enter choice: ')
# Valid topic names, matching the options listed in more_intervals_menu.
more_intervals = ['Circles', 'Orders']
# Explanatory text describing the Circle of Fifths/Fourths.
circles_prompt = ('Circle of Fifths/Fourths\n'
                  '\n'
                  'The Circle of Fifths/Fourths is a very convenient way to navigate through different keys and such.\n'
                  'When practicing guitar patterns, a good way to get more comfortable with playing things in\n'
                  'different spots along the fretboard is by circling around the circle to get to the next root note\n'
                  'of a key.\n'
                  'The circle typically has C at the 12 o\'clock position because it has no sharps or flats, and then\n'
                  'goes around the circle like a clock to get to the next key that increments the number of sharps or\n'
                  'flats by one. If you go clockwise on the circle, the \'fifth\' degree of C is the next key in the\n'
                  'sequence, which is G. Taking a closer look at the key of G will reveal it has 1 sharp note within\n'
                  'the scale. On the contrary, if you were to go counter-clockwise on the circle, the next note would\n'
                  'be F. F is the \'fourth\' scale degree of C so going backwards within the circle is now a fourth,\n'
                  # Fixed typo in user-visible text: "fixth" -> "fifth".
                  'instead of a fifth. That\'s the only difference between the two circles, the direction in which it\n'
                  'goes. Anyway, when going counter-clockwise to F, the numbers of flats get incremented.\n'
                  'Forwards is sharps, backwards is flats. Visually, the flats are on the left side of the circle;\n'
                  'the sharps are on the right.\n'
                  '\n'
                  'The notes going clockwise are:\n'
                  'C, G, D, A, E, B, F\u266F, D\u266D, A\u266D, E\u266D, B\u266D, F\n'
                  '\n'
                  'The notes counter-clockwise are:\n'
                  'C, F, B\u266D, E\u266D, A\u266D, D\u266D, F\u266F, B, E, A, D, G\n\n')
# Explanatory text describing the Order of Sharps and the Order of Flats.
orders_prompt = ('Order of Sharps/Flats\n'
                 '\n'
                 'The Order of Sharps and Flats is similar to how the Circle of Fifths goes clockwise and counter-\n'
                 'clockwise. A simple way to find the number of sharps/flats of a key without having to run through\n'
                 'the circle is by using a little system that starts on F of the circle and goes clockwise for\n'
                 'the order of sharps. On paper, it goes F, C, G, D, A, E, B. A mnemonic device for this is Fat Cats\n'
                 'Go Down Alleys Eating Birds. Starting from C on the list, you would number each note incrementing\n'
                 'by 1 starting from 0, which is C because it has no sharps or flats. Once you loop around to the\n'
                 'beginning, you would just continue numbering \'til you hit 7 (the amount of notes within a scale).\n'
                 'The 7 would fall on C again, but when looping around, you also assign a sharp to the note of the\n'
                 'sequence, which for 6 and 7 is F and C. The F now becomes F\u266F having 6 sharps. C\u266F has 7,\n'
                 'or \'all\' notes sharpened. Now when you\'re looking at the note\'s number, you just include that\n'
                 'amount of characters when reading the sequence. C has none, so you would move on to the next\n'
                 'note in the sequence, which is G. Because G\'s number is 1, you read only 1 character starting from\n'
                 'the beginning of the sequence, which is F. The key of G has 1 sharp, and that sharp lands on F.\n'
                 'Please note that these exact examples only work for major keys, although could probably work in\n'
                 'minor with some tweaks.\n'
                 ' (F\u266F)(C\u266F)\n'
                 'Order of Sharps: F C G D A E B\n'
                 ' 6 0 1 2 3 4 5\n'
                 ' (7)\n'
                 # Fixed punctuation typo in user-visible text: "reversed. but" -> "reversed, but".
                 'Now for the Order of Flats, the sequence is reversed, but read the same.\n'
                 ' \u266D \u266D \u266D \u266D \u266D\n'
                 'Order of Flats: B E A D G C F\n'
                 ' 2 3 4 5 6 0 1\n'
                 ' (7)\n'
                 '\n'
                 'F Major has 1 flat, which is B\u266D, B\u266D Major has 2, which are B\u266D and E\u266D, etc.\n\n')
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        # NOTE(review): raises ZeroDivisionError when self.data is empty —
        # callers are expected to load data first.
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n - 1 for a sample estimate.
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)

        # Recomputes (and stores) the mean as a side effect.
        mean = self.calculate_mean()

        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)

        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []

        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)

        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Fixed: this labelled axes[0] a second time instead of the pdf subplot.
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances add for independent Gaussians, so stdevs combine in quadrature.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
Part of a subclass definition\"""
def _arg_name(self, p):
p.add_argument(
'--name',
type=str,
help="Name of person to greet",
default=self.DEFAULT_NAME)
The method is called for its side-effect on the parser object passed in; the return
value is ignored.
To use the above, you'd write something like this::
class HelloCommand(MyCommand):
arguments = 'name'
def run(self, args):
print(f"Hello, {args.name}")
Parser building methods are free to add as many arguments as they need, and even to add
subparsers and other exotic things. They can of course also call other parser
building methods.
Each of your :class:`~rjgtoys.cli.Command` subclasses should define its
own :attr:`arguments` attribute (or property) that lists the parser builders
that need to be called.
This will work best if you create an intermediate class, e.g. ``MyCommand``, that
defines a `library` of ``_arg_`` methods for your ``MyCommand`` subclasses to
choose from.
You are of course free to override :meth:`~rjgtoys.cli.Command.add_arguments` in
your subclasses, and do parser building some other way.
The YAML tool specification language
====================================
The YAML used to describe a :class:`~rjgtoys.cli.Tool` using
:meth:`~rjgtoys.cli.Tool.from_yaml` basically defines a mapping
from command phrases to Python classes.
The Python classes are named using the dotted name that you'd use in a module
that had imported the module that provides the class; ``top.inner.CommandClass``
refers to a class ``CommandClass`` in the module (or subpackage) ``inner`` within
the package ``top``.
Instead of a command phrase, a 'special keyword' can be used. The keywords
all start with underscore, ``_``, because nobody in their right mind (or me) would
ever create a command language in which command names started with underscore.
Currently the only 'special keyword' that is used is ``_package``; this sets a package
'prefix' for all commands; the prefix is prepended to the class paths specified
for the commands, allowing them to be shorter, and avoiding
repetition.
So the following two specification are equivalent::
# Example 1: no _package
say hello: rjgtoys.cli.examples.hello.HelloCommand
wave goodbye: rjgtoys.cli.examples.goodbye.GoodbyeCommand
# Example 2: with _package
_package: rjgtoys.cli.examples
say hello: hello.HelloCommand
wave goodbye: goodbye.GoodbyeCommand
In place of a patch to a command implementation, a mapping may be given, that
defines a 'sublanguage', prefixed by the phrase that labels it.
This can save on repetition of prefixes for commands. The following two
specifications are equivalent::
# Example 1: flat language
_package: discovery
podbay doors open: podbay.doors.OpenCommand
podbay doors close: podbay.door.CloseCommand
podbay hatch open: podbay.hatch.OpenCommand
podbay hatch close: podbay.hatch.CloseCommand
# Example 2: nested languages
_package: discovery
podbay:
_package: podbay
doors:
_package: doors
open: OpenCommand
close: CloseCommand
hatch:
_package: hatch
open: OpenCommand
close: CloseCommand
The 'nested' version takes more lines, but there is much less repetition of
'navigational' detail.
Note that sublanguages can be nested as deep as necessary.
The nested structure can be combined with YAML file inclusion, so that the
language definition itself can be modularised::
# File doors.yaml
_package: discovery.podbay.doors
open: OpenCommand
close: CloseCommand
# File hatch.yaml
_package: discovery.podbay.hatch
open: OpenCommand
close: CloseCommand
# File podbay.yaml
podbay:
doors: !include doors.yaml
hatch: !include hatch.yaml
The ``!include`` directive is implemented by rjgtoys.yaml_.
.. _rjgtoys.yaml: https://rjgtoys.readthedocs.io/projects/yaml/en/latest/
"""
import sys
import argparse
from collections import defaultdict
import importlib
from rjgtoys.yaml import yaml_load, yaml_load_path
# Public API of this module: the names exported by `from rjgtoys.cli import *`.
__all__ = (
    'Command', 'Tool',
    'CommaList',
    'add_to_set',
    'SpecificationError',
    'HelpNeeded'
)
class SpecificationError(Exception):
    """Raised if a YAML tool specification is ambiguous.

    A specification is ambiguous if a single phrase is associated
    with more than one Python class.

    It is not an error for a single phrase to be mentioned more than
    once for the *same* Python class.
    """

    def __init__(self, errors):
        # errors: iterable of (phrase, implementations) pairs, where
        # implementations is the collection of conflicting class paths.
        errs = []
        for (phrase, impls) in errors:
            impls = ",".join(impls)
            errs.append(f"'{phrase}' implemented by [{impls}]")
        # Fixed: join the individual problems into one readable message;
        # interpolating the raw list rendered it as "['...']".
        super().__init__(f"Specification error (ambiguous): {'; '.join(errs)}")
class HelpNeeded(Exception):
    """Signal that help text should be delivered to the user.

    When :meth:`Command.main` catches a :exc:`HelpNeeded` it prints the
    exception text to standard output and exits successfully.  This lets
    command implementations surface helpful messages without embedding
    explicit print calls of their own.
    """
class Command(object):
"""
This is the base class for command actions.
Each command subclass should override some of the
following attributes:
:py:attr:`description` (:class:`str`)
A one-line short description of what the command does.
:py:attr:`epilog` (:class:`str`)
A 'tail' for the help text of the command.
:py:attr:`usage` (:class:`str`)
A longer description of how to use the command.
:py:attr:`formatter_class` (`argparse formatter class`) = :class:`argparse.ArgumentDefaultsHelpFormatter`
The class to be used to format help for this command.
:py:attr:`arguments` (:class:`str or iterable`)
Either an iterable producing a sequence of parser-building method names, or
a string containing a comma-separated list of parser-building method names.
Methods (most likely overridden first):
.. automethod:: run
.. automethod:: add_arguments
.. automethod:: check_arguments
.. automethod:: handle_arguments
.. automethod:: build_parser
.. automethod:: parse_args
.. automethod:: main
"""
description = None
epilog = None
usage = None
formatter_class = argparse.ArgumentDefaultsHelpFormatter
arguments = ()
# Useful for suppressing defaults in parameters
SUPPRESS = argparse.SUPPRESS
def __init__(self, name=None):
self._name = name
def build_parser(self):
# Return an argument parser
p = argparse.ArgumentParser(
self._name,
description=self.description,
epilog=self.epilog,
usage=self.usage,
formatter_class=self.formatter_class
)
p.set_defaults(_action=self.run)
self.add_arguments(p)
return p
def add_arguments(self, p):
"""Add arguments to the parser for this command.
The default implementation uses the :py:attr:`arguments`
attribute to produce a list of 'argument factories' to
invoke.
"""
args = self.arguments
if isinstance(args, str):
args = args.split(',')
for argname in args:
argname = argname.strip()
if not argname:
continue
action = getattr(self, '_arg_'+argname)
action(p)
def check_arguments(self, args):
"""Check parsed arguments for validity.
Called by :meth:`main` once the arguments have been parsed, but before
:meth:`handle_arguments`.
Exceptions raised by this method will not be caught by :meth:`main`.
The default implementation does nothing.
"""
pass
def handle_arguments(self, args):
"""Process parsed arguments.
Called by :meth:`main` once the arguments have been checked by
:meth:`check_arguments`, and just before the main action method, :meth:`run`
is called.
If this method raises :exc:`HelpNeeded` the exception will be printed
(as help), but no other exceptions will be caught.
The default implementation does nothing.
"""
pass
def parse_args(self,argv=None):
"""Build a parser and parse the arguments.
Why is it structured this way? I don't know.
"""
p = self.build_parser()
args = p.parse_args(argv)
return args
def main(self, argv=None):
"""The main entrypoint for a :class:`Command`.
Parses the arguments, checks them, calls any handlers,
and then calls the main action.
Delivers help if needed.
"""
args = self.parse_args(argv)
self.check_arguments(args)
try:
self.handle_arguments(args)
return args._action(args) or 0
except HelpNeeded as help:
print(str(help))
return 0
    def run(self, args):
        """This performs the command action, and should be overridden by subclasses.

        By the time :meth:`run` is called, both :meth:`check_arguments` and
        :meth:`handle_arguments` have been called and returned successfully.

        Should return an integer status code of 0 for success, anything else
        for failure.

        A return of `None` is interpreted as 0.
        """
        pass
class Tool(object):
    """Maps command-line phrases to :class:`Command` implementations.

    A :class:`Tool` matches the leading tokens of a command line
    against its known phrases, then imports and runs the class that
    implements the matched command.
    """

    # Command line tokens that cause help to be generated
    HELP_TOKENS = ('help', '--help', '-h')

    def __init__(self, commands):
        """The ``commands`` parameter is a list of ('phrase', 'classpath')
        pairs that define the 'command language' of this tool. Each
        acceptable phrase is written out in full, along with a dotted 'classpath'
        that defines the Python class that will implement the command (and should
        be a subclass of :class:`rjgtoys.cli.Command`)::

            tool = Tool((
                ('make tea', 'home_essentials.MakeTeaCommand'),
                ('open podbay doors', 'discovery.hal.OpenPodbayDoors'),
                ('eject core', 'galaxy.emergencies.CoreEject')
            ))
        """
        # Each entry is (token-list, phrase, classpath), sorted by tokens.
        self.cmds = sorted((p.split(' '), p, c) for (p, c) in commands)

    @classmethod
    def from_yaml(cls, text=None, path=None):
        """Create a tool definition from some yaml."""
        spec = cls.spec_from_yaml(text=text, path=path)
        return cls(spec)

    @classmethod
    def spec_from_yaml(cls, text=None, path=None):
        """Load a (phrase, classpath) specification from YAML text or a file."""
        if None not in (text, path):
            raise ValueError("Tool specification may be text or path, not both")
        data = None
        if path:
            data = yaml_load_path(path)
        elif text:
            data = yaml_load(text)
        if not data:
            raise ValueError("Tool specification is missing")
        # Reduce the spec to something usable by the constructor.
        #
        # Example:
        #   _package: path.to.package
        #   _title: Name of this group
        #   _description: |
        #     Longer description of this command group
        #   # Other keys define commands by naming the class that implements each
        #   word-or-phrase: name-of-class
        #   # Or by defining subcommands, using a nested structure:
        #   word-or-phrase:
        #     _package: optional
        #     _title: optional
        #     _description: optional
        #     word-or-phrase: module.suffix
        return cls.spec_from_dict(data)

    @classmethod
    def spec_from_dict(cls, data):
        """Build and validate a (phrase, classpath) spec from a mapping."""
        spec = list(cls._spec_from_dict(data))
        return cls.validate_spec(spec)

    @classmethod
    def _spec_from_dict(cls, data):
        # Walk the nested mapping, starting with an empty namespace/phrase.
        yield from cls._parse_part('', tuple(), data)

    @classmethod
    def validate_spec(cls, spec):
        """Return ``spec`` unchanged, or raise :exc:`SpecificationError`."""
        errors = list(cls._spec_errors(spec))
        if errors:
            raise SpecificationError(errors)
        return spec

    @classmethod
    def _spec_errors(cls, spec):
        """Generate a sequence of all errors found in a spec.

        Identifies phrases that have more than one implementation,
        i.e. are ambiguous.
        """
        targets = defaultdict(set)
        for (phrase, impl) in spec:
            targets[phrase].add(impl)
        for (phrase, impls) in targets.items():
            # Is this phrase ambiguous?
            if len(impls) > 1:
                yield (phrase, impls)

    @classmethod
    def _parse_part(cls, namespace, tokens, data):
        try:
            # Extend the package namespace; don't leave a leading '.'.
            namespace = (namespace + '.' + data._package).lstrip('.')
        except AttributeError:
            # No '_package' attribute at this level.
            pass
        for (phrase, body) in data.items():
            if phrase.startswith('_'):
                # Metadata keys (_title, _description, ...) aren't phrases.
                continue
            tokens = tokens + (phrase,)
            try:
                if isinstance(body, str):
                    # Leaf: the phrase maps directly to a class path.
                    yield (' '.join(tokens), (namespace + '.' + body).lstrip('.'))
                    continue
                assert isinstance(body, dict)
                yield from cls._parse_part(namespace, tokens, body)
            finally:
                tokens = tokens[:-1]

    def do_help(self, possible=None, heading=None):
        """Print a help listing for the commands in ``possible``."""
        if possible is None:
            possible = self.cmds
        print(heading or "Valid commands:")
        w = max(len(p) for (_, p, _) in possible)
        for (_, p, c) in possible:
            try:
                desc = resolve(c).description
            except Exception as e:
                # BUGFIX: a stray 'raise' here made this fallback
                # unreachable, so help crashed if any command class
                # failed to import.
                desc = "BUG: %s" % (e)
            print(" %s - %s" % (p.ljust(w), desc))

    def main(self, argv=None):
        """Parse command line tokens, run the command, return the result.

        Parses the tokens in ``argv`` (or by default, in ``sys.argv[1:]``)
        and takes appropriate action, either:

        1. Run the command action (causing further process of the command line) or
        2. Print help, if requested to do so (``help``, ``--help`` or ``-h``
           on the command line, before a command is recognised)
        3. Print help about an unrecognised or incomplete command
        """
        possible = self.cmds
        prefix = []
        argv = argv or sys.argv[1:]
        tokens = iter(argv)
        while len(possible):
            if len(possible) == 1:
                # Only one option: have we seen the entire phrase?
                if possible[0][0] == prefix:
                    break
            try:
                t = next(tokens)
            except StopIteration:
                # BUGFIX: was a bare 'except:'; only exhaustion of the
                # token stream should mean 'incomplete command'.
                return self.handle_incomplete(prefix, possible)
            if t in self.HELP_TOKENS:
                # do some help
                self.do_help(possible)
                return
            prefix.append(t)
            # Keep only the commands whose phrase still matches the prefix.
            next_state = [(p, s, c) for (p, s, c) in possible if p[:len(prefix)] == prefix]
            if not next_state:
                return self.handle_unrecognised(prefix, possible)
            possible = next_state
        # A unique phrase was matched; the rest of argv belongs to it.
        cmdargv = argv[len(prefix):]
        target = resolve(possible[0][2])
        cmd = target(name=' '.join(prefix))
        return cmd.main(cmdargv)

    def handle_unrecognised(self, prefix, possible):
        """Handle an unrecognised command.

        The default implementation prints some help.
        """
        if prefix:
            prefix = " ".join(prefix)
            heading = f"Unrecognised command '{prefix}', valid options are:"
        else:
            heading = "Unrecognised command, valid options are:"
        self.do_help(possible, heading=heading)

    def handle_incomplete(self, prefix, possible):
        """Handle an incomplete command.

        The default implementation prints some help.
        """
        if prefix:
            prefix = " ".join(prefix)
            heading = f"Incomplete command '{prefix}', could be one of:"
        else:
            heading = "Incomplete command, could be one of:"
        self.do_help(possible, heading=heading)
def resolve(name):
    """Convert a dotted module path to an object.

    This is used to do the importing, when :class:`Tool` resolves
    the path to a command implementation.

    Non-string values are returned unchanged.  A dotted string has
    everything before the last dot imported as a module, and the final
    component looked up on it; a string without a usable module prefix
    is looked up in this module's globals.
    """
    if not isinstance(name, str):
        return name
    module_path, _, attr = name.rpartition('.')
    if not module_path:
        # No module prefix: resolve within this module.
        return globals()[name]
    module = importlib.import_module(module_path)
    return getattr(module, attr)
class CommaList(argparse.Action):
    """An :class:`argparse.Action` that allows an option to be used to specify
    multiple values, either as a comma-separated list, or by using the option
    multiple times, or a combination of those.
    """

    separator = ','

    def __call__(self, parser, ns, value, option_string=None):
        # Accumulate onto any values collected by earlier uses of the option.
        collected = getattr(ns, self.dest) or []
        collected.extend(self._split(value))
        setattr(ns, self.dest, collected)

    def _split(self, value):
        """Split ``value`` on the separator, strip blanks, convert each part."""
        parts = (item.strip() for item in value.split(self.separator))
        return [self._check(item) for item in parts if item]

    def _check(self, value):
        """Hook: validate/convert one value; the default is the identity."""
        return value
class add_to_set(argparse.Action):
    """An :class:`argparse.Action` that builds a set.

    Use this as an ``action`` parameter to :meth:`~argparse.ArgumentParser.add_argument`
    when you want to build a set from multiple uses of an option, instead of, for example,
    a list (which you would do by passing ``action='append'``)
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # BUGFIX: was a bare 'try/except:' around getattr, which would
        # silently swallow any exception; getattr's default does the job.
        v = getattr(namespace, self.dest, None)
        if v is None:
            v = set()
        setattr(namespace, self.dest, v)
        if isinstance(values, (list, tuple)):
            # nargs>1 delivers a sequence of values.
            v.update(values)
        else:
            v.add(values)
class splitlist(object):
    """Callable that parses a comma-separated string into a typed list.

    Suitable as an argparse ``type=``: each comma-separated part is
    stripped, empty parts are dropped, and the remainder converted by
    ``itemtype`` (default :class:`str`).
    """

    def __init__(self, itemtype=None):
        self.itemtype = itemtype or str

    def __call__(self, value):
        parts = (piece.strip() for piece in value.split(","))
        return [self.itemtype(piece) for piece in parts if piece]
import os
from typing import List
from rjgtoys.xc import Error, Title
from rjgtoys.yaml import yaml_load_path
class ConfigSearchFailed(Error):
    """Raised when no configuration file could be found."""

    # The candidate paths that were tried, in the order searched.
    paths: List[str] = Title('List of paths that were searched')

    # Message template; '{paths}' is filled in by the rjgtoys.xc machinery.
    detail = "Configuration search failed, tried: {paths}"
class ConfigSource:
    """Base class (interface definition) for configuration sources.

    It provides a single method, :meth:`fetch`, that subclasses
    override to deliver data from a particular kind of source.
    """

    # A future version may grow a publish/subscribe interface so a
    # source can notify listeners when it is updated; for now this
    # simple pull interface is all there is.

    def fetch(self):
        """Return the current data from the source (an empty mapping here)."""
        return {}
def resolve_noop(path):
    """Default 'resolve path' action: return ``path`` unchanged."""
    return path
class YamlFileConfigSource(ConfigSource):
    """A :class:`ConfigSource` that reads its configuration from a YAML file."""

    def __init__(self, path, resolve=None):
        """
        `path`
            The path to the file to be read.

        `resolve`
            If not `None`, is a callable that will be passed
            `path`, to allow it to be 'resolved' to an absolute pathname.
            The library function :func:`os.path.expanduser` would be a possible
            candidate.
        """
        super().__init__()
        self.path = path
        self.resolve = resolve or resolve_noop

    def fetch(self):
        """Resolve the configured path and load YAML data from it."""
        return yaml_load_path(self.resolve(self.path))
class SearchPathConfigSource(ConfigSource):
    """Searches a number of candidate paths for a configuration file."""

    DEFAULT_LOADER = YamlFileConfigSource

    def __init__(self, *paths, resolve=None, loader=None):
        """
        `paths`
            A list of paths to be tried, in order.

        `resolve`
            If not `None`, a callable that will be passed each
            path before it is tried, to allow it to 'resolve' the
            path into an absolute path.
            The library function :func:`os.path.expanduser` would be a possible
            candidate.

        `loader`
            The :class:`ConfigSource` implementation to use to try to
            load each possible path. Must be a class or callable that
            can accept a single pathname parameter. The default
            is ``self.DEFAULT_LOADER``, which is :class:`YamlFileConfigSource`.
        """
        self.loader = loader or self.DEFAULT_LOADER
        self.resolve = resolve or resolve_noop
        # Drop empty/None entries up front.
        self.paths = list(filter(None, paths))

    def fetch(self):
        """Search for a readable file and return the data from it.

        Raises :exc:`ConfigSearchFailed` (listing every resolved path
        that was tried) when no candidate exists.
        """
        tried = []
        for candidate in self.paths:
            candidate = self.resolve(candidate)
            tried.append(candidate)
            if os.path.exists(candidate):
                return self.loader(candidate).fetch()
        raise ConfigSearchFailed(paths=tried)
import collections.abc
from rjgtoys.thing import Thing
from copy import deepcopy
def config_normalise(raw):
    """Normalise a config object to make it easier to process later.

    Ensure it has both 'defaults' and '__view__' entries, that
    'defaults' is a single map, and '__view__' represents a merge
    of any 'local' '__view__' with that of the 'defaults'.
    """
    result = Thing(raw)
    # Collapse any layered defaults into a single mapping.
    defaults = normalise_defaults(raw)
    result.defaults = defaults
    # Start from the defaults' view; deep-copied so the defaults'
    # own '__view__' is never mutated by the merge below.
    view = deepcopy(defaults.get('__view__', {}))
    local_view = raw.get('__view__')
    if local_view:
        # Let the local view override/extend the defaults' view.
        config_merge(local_view, view)
    result.__view__ = view
    return result
def normalise_defaults(raw):
    """Return the normalised 'defaults' of ``raw`` as a single mapping.

    Returns an empty dict when ``raw`` has no ``defaults`` attribute.
    """
    try:
        defaults = raw.defaults
    except AttributeError:
        # No defaults present at all.
        return {}
    # If only a single set of defaults, work around it
    if isinstance(defaults, collections.abc.Mapping):
        return config_normalise(defaults)
    # Otherwise merge the normalised layers in order; later layers win.
    result = {}
    for layer in defaults:
        layer = config_normalise(layer)
        config_merge(layer, result)
    return result
def config_resolve(raw):
    """Resolve 'defaults' in some raw config data.

    NOTE(review): when defaults are present this mutates ``raw``
    (deletes its 'defaults' key) and returns a different object -
    confirm callers expect that.
    """
    # If there are no defaults to apply, just return the raw data
    defaults = resolve_defaults(raw)
    if not defaults:
        return raw
    del raw['defaults']
    # override defaults with raw data, return result
    config_merge(raw, defaults)
    return defaults
def resolve_defaults(raw):
    """Return the merged 'defaults' of ``raw``, resolved recursively.

    Returns an empty dict when ``raw`` has no ``defaults`` attribute.
    """
    # If there are no defaults to apply, just return an empty dict
    # print("resolve_defaults %s" % (raw))
    try:
        defaults = raw.defaults
    except AttributeError:
        return {}
    # If only a single set of defaults, work around it
    if isinstance(defaults, collections.abc.Mapping):
        defaults = (defaults,)
    result = {}
    for layer in defaults:
        # Each layer may itself contain nested defaults.
        layer = config_resolve(layer)
        config_merge(layer, result)
    return result
def config_merge(part, result):
    """Recursively merge mapping ``part`` into ``result``, in place.

    Non-mapping values in ``part`` overwrite those in ``result``;
    mapping values are merged recursively into any existing entry
    under the same key (which is assumed to also be a mapping).
    """
    for key, value in part.items():
        if not isinstance(value, collections.abc.Mapping):
            # Scalars (and sequences) simply override.
            result[key] = value
        elif key in result:
            # Both sides are mappings: merge recursively.
            config_merge(value, result[key])
        else:
            # Nothing to merge with: adopt the mapping as-is.
            result[key] = value
import collections.abc
class Thing(dict):
    """
    A :class:`dict`-like thing that behaves like a JavaScript object;
    attribute access and item access are equivalent. This makes writing
    code that operates on things read from JSON or YAML much simpler
    because there's no need to use lots of square brackets and string
    quotes.

    It also understands about getting items with dots in their names:
    ``something['x.y']`` will return an item called ``x.y`` if one exists,
    but otherwise will try to return ``something['x']['y']``.
    """

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __getitem__(self, name):
        """Get an item, treating dots in the key as path separators."""
        try:
            return super(Thing, self).__getitem__(name)
        except KeyError:
            if '.' not in name:
                raise
        # An exact key wasn't found but the name is dotted: descend.
        head, rest = name.split('.', 1)
        try:
            return self[head][rest]
        except (TypeError, KeyError):
            raise KeyError(name)

    def __getattr__(self, name):
        """As :meth:`__getitem__` but raise AttributeError rather than KeyError."""
        try:
            return self.__getitem__(name)
        except KeyError:
            raise AttributeError(name)

    @classmethod
    def from_object(cls, src=None, **kwargs):
        """Convert `src` to a :class:`~rjgtoys.thing.Thing`, replacing all
        internal mapping objects by instances of :class:`~rjgtoys.thing.Thing` too.

        If `src` is ``None``, treat it as an empty :class:`dict`.

        If there are ``kwargs``, do ``Thing.from_object(kwargs)`` and merge
        the result into the converted ``src``.
        """
        if kwargs:
            result = cls.from_object(src) if src is not None else cls()
            result.update(cls.from_object(kwargs))
            return result
        if isinstance(src, cls):
            return src
        if isinstance(src, collections.abc.Mapping):
            return cls((key, cls.from_object(value)) for (key, value) in src.items())
        if isinstance(src, (str, bytes)):
            # Strings are iterable but must not be decomposed.
            return src
        if isinstance(src, collections.abc.Iterable):
            # Rebuild the same container type with converted members.
            return src.__class__(cls.from_object(item) for item in src)
        return src
import os
import queue
import threading
import tkinter as tk
import logging
log = logging.getLogger(__name__)
class EventQueue(queue.Queue):
    """This is a subclass of the standard library :class:`queue.Queue`.

    An :class:`~rjgtoys.tkthread.EventQueue` feeds any objects sent to it into a
    handler function that is called from the main Tk event loop.

    **NOTE**:
    The constructor for :class:`EventQueue` must be called from the main Tk thread.

    ``handler``
        A callable that will be called to handle an event.
        It is called as ``handler(event)`` where ``event`` is a value that has previously
        been :meth:`put` to the :class:`EventQueue`. The ``handler`` is called from the tkinter
        event loop and may interact with tkinter objects, however note that it is
        *not* passed the ``widget`` that was passed to the :class:`EventQueue` constructor.

    ``widget``
        A tkinter widget, or ``None`` to use the default root widget.
        This widget reference is used to create a Tk event handler; it
        doesn't really have to be associated with the events that are
        to be generated or handled.

    ``maxsize``
        The maximum size of the queue.
        If ``maxsize <= 0`` the size is not limited.

    Exceptions raised by the handler are logged (via this module's
    logger) and otherwise ignored.  An :class:`EventQueue` implements
    the context manager protocol, which means it can be used like this::

        with EventQueue(handler=handle_event) as q:
            invoke_process_to_feed_events_to(q)

    The context manager exit operation calls :meth:`drain` on the queue, so all events have
    been processed by the time the ``with`` completes.

    .. automethod:: put
    .. automethod:: put_nowait
    .. automethod:: drain
    """

    def __init__(self, handler, widget=None, maxsize=0):
        super().__init__(maxsize)
        # Self-pipe: one byte is written per queued event, so the Tk
        # event loop wakes up and calls _readable().
        self._pipe_r, self._pipe_w = os.pipe()
        self._handler = handler
        # NOTE(review): tk._default_root is a private tkinter name.
        widget = widget or tk._default_root
        widget.tk.createfilehandler(self._pipe_r, tk.READABLE, self._readable)

    def drain(self):
        """Close the queue for further events, and process any that are waiting."""
        # Close the pipe
        for p in (self._pipe_r, self._pipe_w):
            try:
                os.close(p)
            except OSError:
                # Already closed: drain() is safe to call more than once.
                pass
        # Process all pending events
        while True:
            try:
                event = self.get(block=False)
            except queue.Empty:
                break
            try:
                self._handler(event)
            except Exception as e:
                # A failing handler must not stop the drain.
                log.exception("Exception raised by event handler")

    def __enter__(self):
        return self

    def __exit__(self, typ, val, tbk):
        # Ensure every queued event has been handled before leaving.
        self.drain()

    def _readable(self, what, how):
        # Called by Tk when the pipe has data: consume one wake-up byte
        # and handle (at most) one queued event.
        _ = os.read(self._pipe_r, 1)
        try:
            event = self.get(block=False)
        except queue.Empty:
            return
        try:
            self._handler(event)
        except Exception as e:
            log.exception("Exception raised by event handler")

    def put(self, event, block=True, timeout=None):
        """Add an event to the queue.

        ``block``
            If ``True`` (the default), then wait if the queue is full,
            otherwise raise :exc:`queue.Full` immediately.

        ``timeout``
            If ``block`` is ``True``, specifies the maximum time to wait,
            in seconds, before raising :exc:`queue.Full` if the queue
            remains full. A value of ``None`` means wait indefinitely.
            Ignored if ``block`` is ``False``.
        """
        super().put(event, block=block, timeout=timeout)
        # Wake the Tk event loop; the byte's value is irrelevant.
        os.write(self._pipe_w, b"x")

    def put_nowait(self, event):
        """Add an event to the queue without waiting.

        Either puts the event, or raises :exc:`queue.Full` immediately.
        """
        return self.put(event, block=False)
"""
target is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called.
name is the thread name. By default, a unique name is constructed of the form “Thread-N” where N is a small decimal number.
args is the argument tuple for the target invocation. Defaults to ().
kwargs is a dictionary of keyword arguments for the target invocation. Defaults to {}.
If a subclass overrides the constructor, it must make sure to invoke the base class constructor (Thread.__init__()) before doing anything else to the thread.
"""
# Name construction stuff copied from threading.py
from itertools import count as _count

# Helper to generate new EventGenerator names
_counter = _count().__next__
_counter() # Consume 0 so that generated names start at 1.

def _newname(template="EventGenerator-%d"):
    # Return a fresh default thread name, e.g. 'EventGenerator-1'.
    return template % _counter()
class EventGenerator(threading.Thread):
    """
    An :class:`~rjgtoys.tkthread.EventGenerator` is a subclass of :class:`threading.Thread`.

    The constructor creates and optionally starts a thread that fetches values
    from an iterable, and feeds each into a :class:`~rjgtoys.tkthread.EventQueue`
    which in turn will make callbacks to a handler function, to process the
    events in the main Tk thread.

    ``generator``
        An iterable that will provide the events to be processed.
        It will be called from a new thread, and each value that it generates
        will be put into a queue.

    ``queue``
        The queue into which to put the generated events.
        If `None` is passed, a new :class:`~rjgtoys.tkthread.EventQueue` is created,
        using the ``handler``, ``widget`` and ``maxsize`` parameters - see
        :class:`~rjgtoys.tkthread.EventQueue` for descriptions of those.

    ``start``
        A boolean that indicates whether the thread should be started.
        The default is to start the thread immediately. This saves having
        to write an explicit call to :meth:`start`.

    ``group``
        Should be ``None``.
        This is reserved for a future extension to :class:`threading.Thread`.

    ``name``
        A name for the thread.
        If ``None`` is passed, a name of the form ``EventGenerator-N`` is
        used, where ``N`` is an integer.

    Most of the above parameters will rarely be needed. The most
    typical pattern is expected to be::

        EventGenerator(
            generator=source_of_events,
            handler=handler_of_events,
            widget=my_toplevel_widget
        )

    This creates a (notionally) unlimited sized :class:`~rjgtoys.tkthread.EventQueue`
    which handles events put into it by calling ``handler_of_events``
    from an event handler associated with ``my_toplevel_widget``. The events
    are generated by a thread that calls ``source_of_events`` until it is exhausted.

    :class:`~rjgtoys.tkthread.EventGenerator` implements the context manager protocol.
    If used as a context manager, exiting the context implies calling :meth:`drain`
    and so exit is delayed until the generator is exhausted and the queue drained -
    i.e. until all events have been processed.

    .. py:method:: start()

        Starts the event collection thread (a loop that calls the ``generator``
        specified in the constructor).
        This call is unnecessary if ``start=True`` was passed (or defaulted) to
        the constructor.

    .. py:method:: join(timeout=None)

        Waits (for up to ``timeout`` seconds, or indefinitely if ``timeout is None``)
        for completion of the event generator. Note: does *not* drain the queue.

    .. automethod:: drain
    .. automethod:: run
    """

    def __init__(
        self, *,
        generator=None,
        handler=None,
        start=True,
        queue=None,
        widget=None,
        group=None,
        name=None,
        maxsize=0,
    ):
        name = str(name or _newname())
        # daemon=True: this thread must not keep the interpreter alive.
        super().__init__(group=group, name=name, daemon=True)
        self._generator = generator
        # Note: the 'queue' parameter shadows the module of the same
        # name, but only within this method.
        self._queue = queue or EventQueue(handler=handler, widget=widget, maxsize=maxsize)
        if start:
            self.start()

    def run(self):
        """Consumes the generator iterable and sends each value to the queue.

        Terminates when and if the generator terminates.

        You might want to override this in a subclass if you want to use generators
        that have unusual ways of signalling (early?) completion.
        """
        for work in self._generator:
            self._queue.put(work)

    def __enter__(self):
        return self

    def __exit__(self, typ, val, tbk):
        self.drain()

    def drain(self, timeout=None):
        """Wait until the generator and queue have been processed.

        Calls ``join(timeout)`` and then calls ``drain`` on the queue (even if the ``join`` timed out).
        """
        self.join(timeout=timeout)
        self._queue.drain()
import collections
class Thing(dict):
    """
    A :class:`dict`-like thing that behaves like a JavaScript object;
    attribute access and item access are equivalent. This makes writing
    code that operates on things read from JSON or YAML much simpler
    because there's no need to use lots of square brackets and string
    quotes.
    """

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __getitem__(self, name):
        """Get an item, allowing dots to separate path components."""
        try:
            return super(Thing, self).__getitem__(name)
        except KeyError:
            if '.' not in name:
                raise
            # Otherwise try harder...
            (prefix, tail) = name.split('.', 1)
            return self[prefix][tail]

    # NOTE(review): attribute access raises KeyError (not AttributeError)
    # on a missing key, so getattr(thing, name, default) and hasattr()
    # won't behave as usual; kept for backward compatibility.
    __getattr__ = __getitem__

    def merge(self, other):
        """A recursive 'update'.

        Any members that are themselves mappings or sets
        are also updated.
        """
        self.dict_merge(self, other)

    @classmethod
    def dict_merge(cls, dest, other):
        """Merge one dict-like object into another, in place."""
        for (k, v) in other.items():
            try:
                orig = dest[k]
            except KeyError:
                # New key: just adopt the value.
                dest[k] = v
                continue
            # Maybe it's another Thing, or similar
            try:
                orig.merge(v)
                continue
            except AttributeError:
                pass
            # Maybe it's a dict or similar
            if isinstance(orig, dict):
                # BUGFIX: was a bare 'dict_merge(orig, v)', which raised
                # NameError at runtime - the function is a classmethod
                # and must be called via cls.
                cls.dict_merge(orig, v)
                continue
            # Can't do lists or sets yet
            # By default, other takes precedence
            dest[k] = v
class ThingChain(collections.ChainMap):
    """A :class:`collections.ChainMap` adapted for :class:`Thing` -
    it adds attribute-style access and dotted-name lookup.

    This is an abandoned experiment. See test_thing.py for a test
    case that demonstrates why it was abandoned.
    """

    def __getitem__(self, name):
        try:
            return super(ThingChain, self).__getitem__(name)
        except KeyError:
            if '.' not in name:
                raise
        # Dotted name: try each underlying map in turn.
        (head, rest) = name.split('.', 1)
        for layer in self.maps:
            try:
                return layer[head][rest]
            except KeyError:
                pass
        return self.__missing__(name)

    __getattr__ = __getitem__
import urllib
import json
import string
from typing import Any
from pydantic import BaseModel, Field
from pydantic.fields import FieldInfo
from rjgtoys.xc._json import json_loads, json_dumps
def Title(t):
    """Shorthand for declaring a required pydantic field with a title.

    Simplifies model declarations a little.
    """
    field = Field(..., title=t)
    return field
class ImpliedFieldInfo(FieldInfo):
    """Marker subclass used to identify fields declared via :func:`Implied`."""
def Implied(
    alias: str = None,
    title: str = None,
    description: str = None,
    const: bool = None,
    gt: float = None,
    ge: float = None,
    lt: float = None,
    le: float = None,
    multiple_of: float = None,
    min_items: int = None,
    max_items: int = None,
    min_length: int = None,
    max_length: int = None,
    regex: str = None,
    **extra: Any
):
    """Declare a field to be 'implied' - i.e. optional because the
    class will compute a value for itself, if no value is provided.
    """
    constraints = dict(
        alias=alias,
        title=title,
        description=description,
        const=const,
        gt=gt,
        ge=ge,
        lt=lt,
        le=le,
        multiple_of=multiple_of,
        min_items=min_items,
        max_items=max_items,
        min_length=min_length,
        max_length=max_length,
        regex=regex,
    )
    # The None default marks the field as optional for pydantic.
    return ImpliedFieldInfo(None, **constraints, **extra)
class _XCContentModel(BaseModel):
    """
    This is the base class for exception content - the values
    of parameters passed to their constructors.

    It's essentially :class:`pydantic.BaseModel`.
    """

    class Config:
        # Exception parameters may carry values pydantic has no
        # built-in validator for (arbitrary project objects).
        arbitrary_types_allowed = True
    pass
"""
The Exception base class and the BaseModel don't
play nicely together; make Exceptions that carry
a BaseModel instance around, so that the two interfaces
can be kept separate.
"""
class _XCBase(Exception):
    """A hidden base class for exceptions.

    The Exception base class and the BaseModel don't play nicely
    together, so each exception carries a BaseModel instance (its
    '_content') and delegates attribute access to it, keeping the
    two interfaces separate.
    """

    # The pydantic model class holding this exception's content;
    # replaced per-subclass by the metaclass.
    _model = _XCContentModel

    def __init__(self, **kwargs):
        super(_XCBase, self).__init__()
        # Careful with this one because __setattr__ is overridden
        super().__setattr__('_content', self._model.parse_obj(kwargs))
        self._infer()

    def __getattr__(self, name):
        # Unknown attributes are looked up on the content model.
        return getattr(self._content, name)

    def __setattr__(self, name, value):
        # All attribute writes go to the (validating) content model.
        return setattr(self._content, name, value)

    def _infer(self):
        """An optional method that is intended to add computed attributes to an exception.

        This method is called during construction of the exception, and before
        any of the kwargs have been validated, so there is scope for surprise
        type checking errors.

        When an exception is serialised, all its fields are encoded, including
        the ones that are provided by this method. So when an exception
        is de-serialised, there is nothing for this method to do; it should
        provide values for any parameters (kwargs) that are missing, but should
        probably not override any that are present.

        For example, if the exception takes a 'hostname' parameter and this
        method is capable of adding an 'ipaddress' field, that should only take
        place if the 'ipaddress' parameter is not passed in. As a result,
        if an exception is serialised on one host, sent to another, and deserialised
        there, the 'ipaddress' value seen in the destination host will be the value
        that was computed at the sender, not at the recipient.
        """
        pass

    @classmethod
    def parse_json(cls, data):
        """Build an instance from a JSON object of constructor kwargs."""
        return cls(**json_loads(data))

    def __eq__(self, other):
        """Two exceptions are identical if they are the same class and have the same content."""
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable (they cannot go in sets/dict keys) - confirm intended.
        return (self.__class__ is other.__class__) and (self._content == other._content)
class _XCType(type):
    """Metaclass for exceptions.

    Splits the attributes of each new exception class between the
    exception class itself and an internal pydantic '_model' class
    that carries the (validated) content fields.
    """

    def __new__(cls, name, bases, attrs):
        """Generate a new BaseException subclass.

        The attrs passed in are reorganised so that
        most are moved to an internal '_model' class
        that is derived from BaseModel (from the
        _model classes of the bases, in fact).
        """
        # Default 'typename' to the module-qualified class name.
        # Should this 'fully qualify'?
        qualname = '.'.join((attrs['__module__'], name))
        attrs.setdefault('typename', qualname)
        # Default 'title' to the first line of the docstring.
        # Does this 'inherit' correctly?
        if 'title' not in attrs:
            title = attrs.get('__doc__', '\n').splitlines()[0]
            attrs['title'] = title
        exc_attrs = {}
        model_attrs = {}
        # These names always stay on the exception class itself.
        exc_attr_forced = ('typename', 'title', 'detail', 'status')
        for (n, v) in attrs.items():
            # Some go only to the exception class
            if n in exc_attr_forced:
                exc_attrs[n] = v
                continue
            # Private names (including dunders) stay on the exception.
            if n.startswith('_'):
                exc_attrs[n] = v
                continue
            # Content items can't be callable
            if callable(v) or isinstance(v, (classmethod, staticmethod)):
                exc_attrs[n] = v
                continue
            # Otherwise, move it to model
            model_attrs[n] = v
        # UGLY: fix up the annotations of the model and the exception
        anns = attrs.get('__annotations__', {})
        # Capture annotations of any attributes that were put into the exception
        exc_ann = {k: anns[k] for k in exc_attrs if k in anns}
        # and anyway copy those for the forced attributes
        exc_ann.update({k: anns[k] for k in exc_attr_forced if k in anns})
        exc_attrs['__annotations__'] = exc_ann
        # Move all the rest to the model
        model_ann = {k: v for (k, v) in anns.items() if k not in exc_ann}
        model_attrs['__annotations__'] = model_ann
        # print("Build %s exception %s from %s" % (cls.__name__, name, attrs))
        # print(" Exception attrs %s" % (exc_attrs,))
        # print(" Model attrs %s" % (model_attrs,))
        # The model shares the exception's docstring (or its title).
        exc_doc = exc_attrs.get('__doc__', exc_attrs['title'])
        model_attrs['__doc__'] = exc_doc
        # Build the content model class
        model = type('_model', tuple(s._model for s in bases), model_attrs)
        exc_attrs['_model'] = model
        return type.__new__(cls, name, bases, exc_attrs)
class MissingAttributeBug(Exception):
    """Raised when formatting the detail of an exception fails.

    It can't be a Bug or Error because those are not available yet
    (they are defined in terms of this module's machinery).
    """

    def __init__(self, cls, name):
        self.cls = cls
        self.name = name

    def __str__(self):
        return "'{0}' exception has no attribute '{1}'".format(
            self.cls.__name__, self.name
        )
class _XCFormatter(string.Formatter):
    """A string formatter that resolves field names against an exception.

    The attributes of an exception are not all stored directly on the
    exception itself (most live on an internal content model), so a
    plain ``format`` call can't see them; this formatter hides that
    from the authors of 'detail' templates.
    """

    def __init__(self, exc):
        self._exc = exc

    def get_value(self, name, *args, **kwargs):
        try:
            return getattr(self._exc, name)
        except AttributeError:
            # Report the template/attribute mismatch explicitly.
            raise MissingAttributeBug(cls=self._exc.__class__, name=name)
import urllib
from ._xc import _XCBase, _XCType, _XCFormatter, Title, Implied
from ._raises import raises, may_raise, raises_exception
from ._json import json_loads, json_dumps
# Public API of rjgtoys.xc.
# NOTE(review): 'BadExceptionBug' and 'BadExceptionsInTestBug' are not
# defined in this part of the module - confirm they are defined further
# down, otherwise 'from rjgtoys.xc import *' will raise AttributeError.
__all__ = (
    'XC',
    'Error',
    'Title',
    'Implied',
    'raises',
    'may_raise',
    'raises_exception',
    'BadExceptionBug',
    'BadExceptionsInTestBug'
)
# The following are put here simply so that their fully qualified
# names do not include _xc
class XC(_XCBase, metaclass=_XCType):
    """The base class for 'structured' exceptions.

    Provides a bit of structure on top of the language-provided
    :class:`Exception`.

    Each (sub-)class defines a set of parameters that become
    attributes of the exception, available to handlers.

    Those parameters are type-checked and may have associated
    defaults and descriptions that are available to generate
    documentation and other forms of help.

    In particular, :class:`XC` subclasses can be serialised
    and deserialised as described in RFC7807, which makes
    them easy to use for building REST APIs.

    Each subclass should define the following attributes:

    typename
        The 'problem identifier' - defaults to the name of the class.
        This is used to generate the RFC7807 `type` attribute.
        If no value is set explicitly, the fully qualified name of the class is used.

    title
        A short human-readable description of the problem type.
        This is used as the RFC7807 `title` attribute.

    detail
        A format template that can produce a human-readable explanation
        specific to a particular instance of this exception.
        This is used to define the string representation of the exception (the `__str__` method)
        and also (via :func:`str`) to generate the RFC7807 `detail` attribute.

    status
        An HTTP status code associated with this exception. Defaults to 400.
        This is used when the exception is transported over HTTP.

    The above attributes are defined in RFC 7807.
    """

    # The following are magically kept in the exception class, not the content
    typename: str
    title: str
    detail: str
    status: int = 400

    def __str__(self):
        """Render the ``detail`` template using this exception's attributes."""
        try:
            fmt = _XCFormatter(self)
            return fmt.format(self.detail)
            # BUGFIX: removed an unreachable second
            # 'return self.detail.format(**self._content.dict())'
            # that followed the return above (dead code).
        except Exception as e:
            # __str__ must never raise; report the formatting failure.
            return "%s.__str__() -> %s" % (self.__class__.__name__, e)

    def to_dict(self):
        """Produce a JSON-encodable dict representing this exception.

        Returns an RFC7807-compliant JSON object.
        """
        content = self._content.dict()
        data = dict(
            type=self.typename,
            title=self.title,
            status=self.status,
            detail=str(self),
            # 'instance' identifies this specific occurrence (RFC7807).
            instance="%s?%s" % (self.typename, urllib.parse.urlencode(content)),
            content=content,
        )
        return data

    @classmethod
    def from_obj(cls, data):
        """Reconstruct an exception from some data.

        Expects an object such as might be produced by
        parsing the result of calling :meth:`to_dict()` on
        an instance of this class or a subclass of it.

        Returns an instance of the appropriate class, or
        raises :exc:`TypeError` if no class can be identified.
        """
        typename = data['type']
        for kls in all_subclasses(cls):
            if kls.typename == typename:
                return kls(**data['content'])
        raise TypeError("No %s type %s" % (cls.__name__, typename))

    @classmethod
    def from_json(cls, data):
        """Reconstruct an exception from its JSON encoding (see :meth:`from_obj`)."""
        return cls.from_obj(json_loads(data))
def all_subclasses(cls):
    # pylint: disable=line-too-long
    # the following comment is simply too wide
    """Return the set of all (direct and indirect) subclasses of `cls`.

    See: https://stackoverflow.com/questions/3862310/how-to-find-all-the-subclasses-of-a-class-given-its-name
    """
    found = set(cls.__subclasses__())
    for child in cls.__subclasses__():
        found.update(all_subclasses(child))
    return found
class Bug(XC):
    """
    This is the base class for exceptions that should never occur at runtime.

    They indicate a programming error that is not recoverable.
    """

    pass
class Error(XC):
    """
    This is the base class for exceptions that may be recoverable.

    Packages should create subclasses of this.
    """

    pass
class _ExceptionField:
"""Allows a Pydantic model to have fields that hold an exception value."""
@classmethod
def __get_validators__(cls):
yield cls._validate
@classmethod
def _validate(cls, v):
assert isinstance(v, Exception)
return v
class BadExceptionBug(Bug):
    """Raised when some function or method raises an exception
    that it has not declared itself capable of raising.

    :param raised: The exception that was raised (and which is
        not in the allowed set)
    """

    # Parameter declaration: the metaclass turns this into a typed,
    # documented attribute of the raised instance.
    raised: _ExceptionField = Title("The disallowed exception")

    # Message template; ``{raised}`` is filled from the parameter above.
    detail = "Disallowed exception raised: {raised}"
class BadExceptionsInTestBug(Bug):
    """Raised on an attempt to patch something to
    raise an exception that it is not allowed to raise

    Args:
        name: Name of the object being tested
        exceptions: The exceptions that may not be raised
            by this object
    """

    # NOTE(review): sibling classes (e.g. BadExceptionBug) use ``detail``
    # as their message template; ``oneline`` may be a separate base-class
    # hook — confirm _XCBase consumes it, otherwise this should be ``detail``.
    oneline = "Test case for {name} raises bad exception(s): {exclist}"

    def unpack(self):
        # Derive the human-readable list referenced by the template above.
        # assumes ``self.exceptions`` is populated from the declared
        # parameters — TODO confirm against _XCBase.
        self.exclist = ", ".join(map(repr, self.exceptions))
from typing import Union
from pydantic import BaseModel
from starlette.requests import Request
from starlette.responses import Response, JSONResponse
from starlette.routing import BaseRoute
from starlette.types import ASGIApp
from fastapi import routing, params
from fastapi.encoders import DictIntStrAny, SetIntStr
# This is just a convenience so that clients can avoid
# importing both helper packages
from rjgtoys.xc.starlette import *
from rjgtoys.xc import Error
# Keep this for reference
class ErrorResponse(BaseModel):
    """Document returned with an error.

    Mirrors the dict produced by ``XC.to_dict()`` (RFC7807 problem shape).
    """

    type: str = Title("Name of the error class")
    title: str = Title("Readable description of the error class")
    instance: str = Title("URI for this instance of the error")
    detail: str = Title("Description of this instance of the error")
    status: int = Title("HTTP status code")
    content: dict = Title("Content of the error - depends on type")
ErrorResponses = {400: {'model': ErrorResponse}}
class APIRoute(routing.APIRoute):
    """A version of the :cls:`fastapi.routing.APIRoute` that figures out the
    response model from an annotation on the route method."""

    # About the 'unexpected_args` parameter: this is to capture any parameters
    # that are passed around by fastapi but not known to me when I wrote this
    # code. To keep the type checking straight it seems I have to repeat
    # all the parameter declarations of the base class, but that means I end
    # up with a subclass that only accepts the parameters that I knew about
    # at the time I wrote the code. In an attempt to have this code remain
    # usable with later versions of fastapi I have added the 'unexpected_args'
    # parameter; any, um..., unexpected args will end up in there, and will be
    # passed on to the superclass. If there's a nicer way to do this, especially
    # a more concise way, that doesn't simply give up on being type-checkable,
    # I'd like to hear about it.

    def __init__(
        self,
        path: str,
        endpoint: Callable,
        *,
        response_model: Type[Any] = None,
        status_code: int = 200,
        tags: List[str] = None,
        dependencies: Sequence[params.Depends] = None,
        summary: str = None,
        description: str = None,
        response_description: str = "Successful Response",
        responses: Dict[Union[int, str], Dict[str, Any]] = None,
        deprecated: bool = None,
        name: str = None,
        methods: Optional[Union[Set[str], List[str]]] = None,
        operation_id: str = None,
        response_model_include: Union[SetIntStr, DictIntStrAny] = None,
        response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
        response_model_by_alias: bool = True,
        response_model_exclude_unset: bool = False,
        response_model_exclude_defaults: bool = False,
        response_model_exclude_none: bool = False,
        include_in_schema: bool = True,
        response_class: Optional[Type[Response]] = None,
        dependency_overrides_provider: Any = None,
        callbacks: Optional[List["APIRoute"]] = None,
        **unexpected_args
    ) -> None:
        # Prefer the endpoint's return annotation as the response model,
        # falling back to the explicitly supplied ``response_model``.
        # Fixes two bugs: the fallback assigned a typo'd name (``rype``)
        # via a bare ``except:``, and the computed model was never passed
        # to the superclass, so the annotation was silently ignored.
        try:
            rtype = endpoint.__annotations__['return']
        except (AttributeError, KeyError):
            rtype = response_model
        responses = responses or {}
        # Always advertise the RFC7807 error response alongside the
        # caller-provided responses.
        combined_responses = {**responses, **ErrorResponses}
        if unexpected_args:
            print(f"xc.APIRoute unexpected_args: {unexpected_args}")
        super().__init__(
            path=path,
            endpoint=endpoint,
            response_model=rtype,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=combined_responses,
            deprecated=deprecated,
            name=name,
            methods=methods,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            dependency_overrides_provider=dependency_overrides_provider,
            callbacks=callbacks,
            **unexpected_args
        )
class APIRouter(routing.APIRouter):
    """An :cls:`fastapi.routing.APIRouter` that uses :cls:`APIRoute`
    (and hence the annotation-derived response model) by default."""

    def __init__(
        self,
        routes: List[BaseRoute] = None,
        redirect_slashes: bool = True,
        default: ASGIApp = None,
        dependency_overrides_provider: Any = None,
        route_class: Type[APIRoute] = APIRoute,
        default_response_class: Type[Response] = None,
        **unexpected_args
    ) -> None:
        if unexpected_args:
            # Bug fix: this referenced an undefined name ``more_args``,
            # raising NameError whenever unexpected arguments actually
            # arrived - precisely when the message was needed.
            print(f"xc.APIRouter unexpected_args: {unexpected_args}")
        super().__init__(
            routes=routes,
            redirect_slashes=redirect_slashes,
            default=default,
            dependency_overrides_provider=dependency_overrides_provider,
            route_class=route_class,
            default_response_class=default_response_class,
            **unexpected_args
        )
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n-1 for a sample estimate.
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        mu = self.mean
        sigma = self.stdev

        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []

        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: this previously re-labelled axes[0], leaving the pdf
        # subplot (axes[1]) without a y-axis label.
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances (not stdevs) add for independent Gaussians.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
u"""
=====================
Javascript Minifier
=====================
rJSmin is a javascript minifier written in python.
The minifier is based on the semantics of `jsmin.c by Douglas Crockford`_\\.
:Copyright:
Copyright 2011 - 2022
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The module is a re-implementation aiming for speed, so it can be used at
runtime (rather than during a preprocessing step). Usually it produces the
same results as the original ``jsmin.c``. It differs in the following ways:
- there is no error detection: unterminated string, regex and comment
literals are treated as regular javascript code and minified as such.
- Control characters inside string and regex literals are left untouched; they
are not converted to spaces (nor to \\n)
- Newline characters are not allowed inside string and regex literals, except
for line continuations in string literals (ECMA-5).
- "return /regex/" is recognized correctly.
- More characters are allowed before regexes.
- Line terminators after regex literals are handled more sensibly
- "+ +" and "- -" sequences are not collapsed to '++' or '--'
- Newlines before ! operators are removed more sensibly
- (Unnested) template literals are supported (ECMA-6)
- Comments starting with an exclamation mark (``!``) can be kept optionally
- rJSmin does not handle streams, but only complete strings. (However, the
module provides a "streamy" interface).
Since most parts of the logic are handled by the regex engine it's way faster
than the original python port of ``jsmin.c`` by Baruch Even. The speed factor
varies between about 6 and 55 depending on input and python version (it gets
faster the more compressed the input already is). Compared to the
speed-refactored python port by Dave St.Germain the performance gain is less
dramatic but still between 3 and 50 (for huge inputs). See the docs/BENCHMARKS
file for details.
rjsmin.c is a reimplementation of rjsmin.py in C and speeds it up even more.
Supported python versions are 2.7 and 3.6+.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
"""
__author__ = u"Andr\xe9 Malo"
__license__ = "Apache License, Version 2.0"
__version__ = '1.2.1'
__all__ = ['jsmin']
import functools as _ft
import re as _re
def _make_jsmin(python_only=False):
    """
    Generate JS minifier based on `jsmin.c by Douglas Crockford`_

    .. _jsmin.c by Douglas Crockford:
       http://www.crockford.com/javascript/jsmin.c

    Parameters:
      python_only (bool):
        Use only the python variant. If true, the c extension is not even
        tried to be loaded.

    Returns:
      callable: Minifier
    """
    # pylint: disable = unused-variable, possibly-unused-variable
    # pylint: disable = too-many-locals, too-many-statements

    if not python_only:
        try:
            import _rjsmin  # pylint: disable = import-outside-toplevel
        except ImportError:
            pass
        else:
            # Ensure that the C version is in sync
            # https://github.com/ndparker/rjsmin/issues/11
            if getattr(_rjsmin, '__version__', None) == __version__:
                return _rjsmin.jsmin
    try:
        xrange  # pylint: disable = used-before-assignment
    except NameError:
        xrange = range  # pylint: disable = redefined-builtin

    # Building blocks for the one big substitution regex below.
    # Characters are spelled in octal (\047 = ', \140 = `) for source
    # portability.
    space_chars = r'[\000-\011\013\014\016-\040]'

    line_comment = r'(?://[^\r\n]*)'
    space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
    space_comment_nobang = r'(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/)'
    bang_comment = r'(?:/\*![^*]*\*+(?:[^/*][^*]*\*+)*/)'

    string1 = r"(?:'[^'\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^'\\\r\n]*)*')"
    string1 = string1.replace("'", r'\047')  # portability
    string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")'
    string3 = r'(?:`[^`\\]*(?:\\(?:[^\r\n]|\r?\n|\r)[^`\\]*)*`)'
    string3 = string3.replace('`', r'\140')  # portability
    strings = r'(?:%s|%s|%s)' % (string1, string2, string3)

    charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])'
    nospecial = r'[^/\\\[\r\n]'
    regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/[a-z]*)' % (
        nospecial, charclass, nospecial
    )
    space = r'(?:%s|%s)' % (space_chars, space_comment)
    newline = r'(?:%s?[\r\n])' % line_comment

    def fix_charclass(result):
        """ Fixup string of chars to fit into a regex char class """
        # '-' must be the last character inside a class to stay literal.
        pos = result.find('-')
        if pos >= 0:
            result = r'%s%s-' % (result[:pos], result[pos + 1:])

        def sequentize(string):
            """
            Notate consecutive characters as sequence

            (1-4 instead of 1234)
            """
            first, last, result = None, None, []
            for char in map(ord, string):
                if last is None:
                    first = last = char
                elif last + 1 == char:
                    last = char
                else:
                    result.append((first, last))
                    first = last = char
            if last is not None:
                result.append((first, last))
            return ''.join(['%s%s%s' % (
                chr(first),
                last > first + 1 and '-' or '',
                last != first and chr(last) or ''
            ) for first, last in result])  # noqa

        return _re.sub(
            r"([\000-\040'`])",  # ' and ` for better portability
            lambda m: '\\%03o' % ord(m.group(1)), (
                sequentize(result)
                .replace('\\', '\\\\')
                .replace('[', '\\[')
                .replace(']', '\\]')
            )
        )

    def id_literal_(what):
        """ Make id_literal like char class """
        match = _re.compile(what).match
        result = ''.join([
            chr(c) for c in xrange(127) if not match(chr(c))
        ])
        return '[^%s]' % fix_charclass(result)

    def not_id_literal_(keep):
        """ Make negated id_literal like char class """
        match = _re.compile(id_literal_(keep)).match
        result = ''.join([
            chr(c) for c in xrange(127) if not match(chr(c))
        ])
        return r'[%s]' % fix_charclass(result)

    not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]')
    preregex1 = r'[(,=:\[!&|?{};\r\n+*-]'
    preregex2 = r'%(not_id_literal)sreturn' % locals()

    id_literal = id_literal_(r'[a-zA-Z0-9_$]')
    id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(!+-]')
    id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047\140+-]')
    post_regex_off = id_literal_(r'[^\000-\040}\])?:|,;.&=+-]')

    dull = r'[^\047"\140/\000-\040]'

    # Variant that drops ALL comments (numbers mark capture groups used
    # by the substitution callback below).
    space_sub_simple = _re.compile((
        # noqa pylint: disable = bad-option-value, bad-continuation
        r'(%(dull)s+)'  # 0
        r'|(%(strings)s%(dull)s*)'  # 1
        r'|(?<=[)])'
        r'%(space)s*(?:%(newline)s%(space)s*)*'
        r'(%(regex)s)'  # 2
        r'(?=%(space)s*(?:%(newline)s%(space)s*)*'
        r'\.'
        r'%(space)s*(?:%(newline)s%(space)s*)*[a-z])'
        r'|(?<=%(preregex1)s)'
        r'%(space)s*(?:%(newline)s%(space)s*)*'
        r'(%(regex)s)'  # 3
        r'(%(space)s*(?:%(newline)s%(space)s*)+'  # 4
        r'(?=%(post_regex_off)s))?'
        r'|(?<=%(preregex2)s)'
        r'%(space)s*(?:(%(newline)s)%(space)s*)*'  # 5
        r'(%(regex)s)'  # 6
        r'(%(space)s*(?:%(newline)s%(space)s*)+'  # 7
        r'(?=%(post_regex_off)s))?'
        r'|(?<=%(id_literal_close)s)'
        r'%(space)s*(?:(%(newline)s)%(space)s*)+'  # 8
        r'(?=%(id_literal_open)s)'
        r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)'  # 9
        r'|(?<=\+)(%(space)s)+(?=\+)'  # 10
        r'|(?<=-)(%(space)s)+(?=-)'  # 11
        r'|%(space)s+'
        r'|(?:%(newline)s%(space)s*)+'
    ) % locals()).sub
    # print(space_sub_simple.__self__.pattern)

    def space_subber_simple(match):
        """ Substitution callback """
        # pylint: disable = too-many-return-statements

        groups = match.groups()
        if groups[0]:
            return groups[0]
        elif groups[1]:
            return groups[1]
        elif groups[2]:
            return groups[2]
        elif groups[3]:
            if groups[4]:
                return groups[3] + '\n'
            return groups[3]
        elif groups[6]:
            return "%s%s%s" % (
                groups[5] and '\n' or '',
                groups[6],
                groups[7] and '\n' or '',
            )
        elif groups[8]:
            return '\n'
        elif groups[9] or groups[10] or groups[11]:
            return ' '
        else:
            return ''

    # Variant that preserves ``/*! ... */`` comments; the extra capture
    # groups carry the whitespace runs that may contain bang comments.
    space_sub_banged = _re.compile((
        # noqa pylint: disable = bad-option-value, bad-continuation
        r'(%(dull)s+)'  # 0
        r'|(%(strings)s%(dull)s*)'  # 1
        r'|(?<=[)])'
        r'(%(space)s*(?:%(newline)s%(space)s*)*)'  # 2
        r'(%(regex)s)'  # 3
        r'(?=%(space)s*(?:%(newline)s%(space)s*)*'
        r'\.'
        r'%(space)s*(?:%(newline)s%(space)s*)*[a-z])'
        r'|(?<=%(preregex1)s)'
        r'(%(space)s*(?:%(newline)s%(space)s*)*)'  # 4
        r'(%(regex)s)'  # 5
        r'(%(space)s*(?:%(newline)s%(space)s*)+'  # 6
        r'(?=%(post_regex_off)s))?'
        r'|(?<=%(preregex2)s)'
        r'(%(space)s*(?:(%(newline)s)%(space)s*)*)'  # 7, 8
        r'(%(regex)s)'  # 9
        r'(%(space)s*(?:%(newline)s%(space)s*)+'  # 10
        r'(?=%(post_regex_off)s))?'
        r'|(?<=%(id_literal_close)s)'
        r'(%(space)s*(?:%(newline)s%(space)s*)+)'  # 11
        r'(?=%(id_literal_open)s)'
        r'|(?<=%(id_literal)s)(%(space)s+)(?=%(id_literal)s)'  # 12
        r'|(?<=\+)(%(space)s+)(?=\+)'  # 13
        r'|(?<=-)(%(space)s+)(?=-)'  # 14
        r'|(%(space)s+)'  # 15
        r'|((?:%(newline)s%(space)s*)+)'  # 16
    ) % locals()).sub
    # print(space_sub_banged.__self__.pattern)

    # ``keep`` strips a whitespace run down to just its bang comments.
    keep = _re.compile((
        r'%(space_chars)s+|%(space_comment_nobang)s+|%(newline)s+'
        r'|(%(bang_comment)s+)'
    ) % locals()).sub
    keeper = lambda m: m.groups()[0] or ''
    # print(keep.__self__.pattern)

    def space_subber_banged(match):
        """ Substitution callback """
        # pylint: disable = too-many-return-statements

        groups = match.groups()
        if groups[0]:
            return groups[0]
        elif groups[1]:
            return groups[1]
        elif groups[3]:
            return "%s%s" % (
                keep(keeper, groups[2]),
                groups[3],
            )
        elif groups[5]:
            return "%s%s%s%s" % (
                keep(keeper, groups[4]),
                groups[5],
                keep(keeper, groups[6] or ''),
                groups[6] and '\n' or '',
            )
        elif groups[9]:
            return "%s%s%s%s%s" % (
                keep(keeper, groups[7]),
                groups[8] and '\n' or '',
                groups[9],
                keep(keeper, groups[10] or ''),
                groups[10] and '\n' or '',
            )
        elif groups[11]:
            return keep(keeper, groups[11]) + '\n'
        elif groups[12] or groups[13] or groups[14]:
            return keep(keeper, groups[12] or groups[13] or groups[14]) or ' '
        else:
            return keep(keeper, groups[15] or groups[16])

    banged = _ft.partial(space_sub_banged, space_subber_banged)
    simple = _ft.partial(space_sub_simple, space_subber_simple)

    def jsmin(script, keep_bang_comments=False):
        r"""
        Minify javascript based on `jsmin.c by Douglas Crockford`_\.

        Instead of parsing the stream char by char, it uses a regular
        expression approach which minifies the whole script with one big
        substitution regex.

        .. _jsmin.c by Douglas Crockford:
           http://www.crockford.com/javascript/jsmin.c

        Parameters:
          script (str):
            Script to minify

          keep_bang_comments (bool):
            Keep comments starting with an exclamation mark? (``/*!...*/``)

        Returns:
          str: Minified script
        """
        # pylint: disable = redefined-outer-name

        is_bytes, script = _as_str(script)
        # Pad with newlines so the lookbehinds/lookaheads have context at
        # the script boundaries, then strip the padding again.
        script = (banged if keep_bang_comments else simple)(
            '\n%s\n' % script
        ).strip()
        if is_bytes:
            script = script.encode('latin-1')
            if is_bytes == 2:
                script = bytearray(script)
        return script

    return jsmin
jsmin = _make_jsmin()
def _as_str(script):
""" Make sure the script is a text string """
is_bytes = False
if str is bytes:
if not isinstance(script, basestring): # noqa pylint: disable = undefined-variable
raise TypeError("Unexpected type")
elif isinstance(script, bytes):
is_bytes = True
script = script.decode('latin-1')
elif isinstance(script, bytearray):
is_bytes = 2
script = script.decode('latin-1')
elif not isinstance(script, str):
raise TypeError("Unexpected type")
return is_bytes, script
def jsmin_for_posers(script, keep_bang_comments=False):
    r"""
    Minify javascript based on `jsmin.c by Douglas Crockford`_\.

    Instead of parsing the stream char by char, it uses a regular
    expression approach which minifies the whole script with one big
    substitution regex.

    .. _jsmin.c by Douglas Crockford:
       http://www.crockford.com/javascript/jsmin.c

    :Warning: This function is the digest of a _make_jsmin() call. It just
              utilizes the resulting regexes. It's here for fun and may
              vanish any time. Use the `jsmin` function instead.

    Parameters:
      script (str):
        Script to minify

      keep_bang_comments (bool):
        Keep comments starting with an exclamation mark? (``/*!...*/``)

    Returns:
      str: Minified script
    """
    if not keep_bang_comments:
        # Expanded form of the pattern _make_jsmin builds for the
        # drop-all-comments case; do not edit by hand.
        rex = (
            r'([^\047"\140/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^'
            r'\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^'
            r'\r\n]|\r?\n|\r)[^"\\\r\n]*)*")|(?:\140[^\140\\]*(?:\\(?:[^\r\n'
            r']|\r?\n|\r)[^\140\\]*)*\140))[^\047"\140/\000-\040]*)|(?<=[)])'
            r'(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+'
            r')*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\0'
            r'40]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*((?:/(?![\r\n/*])[^/'
            r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]'
            r'\r\n]*)*\]))[^/\\\[\r\n]*)*/[a-z]*))(?=(?:[\000-\011\013\014\0'
            r'16-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n'
            r']*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^'
            r'/*][^*]*\*+)*/))*)*\.(?:[\000-\011\013\014\016-\040]|(?:/\*[^*'
            r']*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\00'
            r'0-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)'
            r'*[a-z])|(?<=[(,=:\[!&|?{};\r\n+*-])(?:[\000-\011\013\014\016-'
            r'\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)'
            r'?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*]'
            r'[^*]*\*+)*/))*)*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|'
            r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*'
            r'/[a-z]*))((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/'
            r'*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013'
            r'\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000'
            r'-\040&)+,.:;=?\]|}-]))?|(?<=[\000-#%-,./:-@\[-^\140{-~-]return'
            r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*'
            r'+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\014\016'
            r'-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*((?:/(?![\r\n/*])'
            r'[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^'
            r'\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/[a-z]*))((?:[\000-\011\013\014'
            r'\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r'
            r'\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:'
            r'[^/*][^*]*\*+)*/))*)+(?=[^\000-\040&)+,.:;=?\]|}-]))?|(?<=[^\0'
            r'00-!#%&(*,./:-@\[\\^{|~])(?:[\000-\011\013\014\016-\040]|(?:/'
            r'\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))'
            r'(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+'
            r')*/))*)+(?=[^\000-\040"#%-\047)*,./:-@\\-^\140|-~])|(?<=[^\000'
            r'-#%-,./:-@\[-^\140{-~-])((?:[\000-\011\013\014\016-\040]|(?:/'
            r'\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^\000-#%-,./:-@\[-^\140{-'
            r'~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:'
            r'[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\013\014\016-\0'
            r'40]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\000-\011\0'
            r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?'
            r'://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]'
            r'*\*+(?:[^/*][^*]*\*+)*/))*)+'
        )

        def subber(match):
            """ Substitution callback """
            groups = match.groups()
            return (
                groups[0] or
                groups[1] or
                groups[2] or
                (groups[4] and (groups[3] + '\n')) or
                groups[3] or
                (groups[6] and "%s%s%s" % (
                    groups[5] and '\n' or '',
                    groups[6],
                    groups[7] and '\n' or '',
                )) or
                (groups[8] and '\n') or
                (groups[9] and ' ') or
                (groups[10] and ' ') or
                (groups[11] and ' ') or
                ''
            )
    else:
        # Expanded form of the pattern for the keep-bang-comments case;
        # do not edit by hand.
        rex = (
            r'([^\047"\140/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^'
            r'\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^'
            r'\r\n]|\r?\n|\r)[^"\\\r\n]*)*")|(?:\140[^\140\\]*(?:\\(?:[^\r\n'
            r']|\r?\n|\r)[^\140\\]*)*\140))[^\047"\140/\000-\040]*)|(?<=[)])'
            r'((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*'
            r'+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-'
            r'\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*)((?:/(?![\r\n/*])'
            r'[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^'
            r'\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/[a-z]*))(?=(?:[\000-\011\013\0'
            r'14\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^'
            r'\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+('
            r'?:[^/*][^*]*\*+)*/))*)*\.(?:[\000-\011\013\014\016-\040]|(?:/'
            r'\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?'
            r':[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*'
            r'/))*)*[a-z])|(?<=[(,=:\[!&|?{};\r\n+*-])((?:[\000-\011\013\014'
            r'\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r'
            r'\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:'
            r'[^/*][^*]*\*+)*/))*)*)((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^'
            r'\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r'
            r'\n]*)*/[a-z]*))((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+'
            r'(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\01'
            r'1\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=['
            r'^\000-\040&)+,.:;=?\]|}-]))?|(?<=[\000-#%-,./:-@\[-^\140{-~-]r'
            r'eturn)((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*]['
            r'^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\0'
            r'14\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*)((?:/(?!['
            r'\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^'
            r'\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/[a-z]*))((?:[\000-\011'
            r'\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:('
            r'?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*'
            r']*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040&)+,.:;=?\]|}-]))?|'
            r'(?<=[^\000-!#%&(*,./:-@\[\\^{|~])((?:[\000-\011\013\014\016-\0'
            r'40]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?['
            r'\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^'
            r'*]*\*+)*/))*)+)(?=[^\000-\040"#%-\047)*,./:-@\\-^\140|-~])|(?<'
            r'=[^\000-#%-,./:-@\[-^\140{-~-])((?:[\000-\011\013\014\016-\040'
            r']|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+)(?=[^\000-#%-,./:-@\[-^'
            r'\140{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*'
            r'\*+(?:[^/*][^*]*\*+)*/))+)(?=\+)|(?<=-)((?:[\000-\011\013\014'
            r'\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+)(?=-)|((?:[\00'
            r'0-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+)'
            r'|((?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|'
            r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+)'
        )

        # Strips a whitespace run down to just its ``/*!...*/`` comments.
        keep = _re.compile(
            r'[\000-\011\013\014\016-\040]+|(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*'
            r'\*+)*/)+|(?:(?://[^\r\n]*)?[\r\n])+|((?:/\*![^*]*\*+(?:[^/*][^'
            r'*]*\*+)*/)+)'
        ).sub
        keeper = lambda m: m.groups()[0] or ''

        def subber(match):
            """ Substitution callback """
            groups = match.groups()
            return (
                groups[0] or
                groups[1] or
                groups[3] and "%s%s" % (
                    keep(keeper, groups[2]),
                    groups[3],
                ) or
                groups[5] and "%s%s%s%s" % (
                    keep(keeper, groups[4]),
                    groups[5],
                    keep(keeper, groups[6] or ''),
                    groups[6] and '\n' or '',
                ) or
                groups[9] and "%s%s%s%s%s" % (
                    keep(keeper, groups[7]),
                    groups[8] and '\n' or '',
                    groups[9],
                    keep(keeper, groups[10] or ''),
                    groups[10] and '\n' or '',
                ) or
                groups[11] and (keep(keeper, groups[11]) + '\n') or
                groups[12] and (keep(keeper, groups[12]) or ' ') or
                groups[13] and (keep(keeper, groups[13]) or ' ') or
                groups[14] and (keep(keeper, groups[14]) or ' ') or
                keep(keeper, groups[15] or groups[16])
            )

    is_bytes, script = _as_str(script)
    # Pad with newlines for boundary context, then strip the padding.
    script = _re.sub(rex, subber, '\n%s\n' % script).strip()
    if is_bytes:
        script = script.encode('latin-1')
        if is_bytes == 2:
            script = bytearray(script)
    return script
if __name__ == '__main__':
    def main():
        """Command line entry point: minify stdin to stdout.

        Flags: ``-b`` keeps ``/*!...*/`` comments, ``-p`` forces the pure
        python implementation; ``-bp``/``-pb`` combine both.
        """
        import sys as _sys  # pylint: disable = import-outside-toplevel

        options = set(_sys.argv[1:])
        keep_bang_comments = bool(options & {'-b', '-bp', '-pb'})
        if options & {'-p', '-bp', '-pb'}:
            xjsmin = _make_jsmin(python_only=True)
        else:
            xjsmin = jsmin
        _sys.stdout.write(xjsmin(
            _sys.stdin.read(), keep_bang_comments=keep_bang_comments
        ))

    main()
u"""
=================================
Benchmark jsmin implementations
=================================
Benchmark jsmin implementations.
:Copyright:
Copyright 2011 - 2022
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Usage::
python -mbench.main [-c COUNT] [-p file] jsfile ...
-c COUNT number of runs per jsfile and minifier. Defaults to 10.
-p file File to write the benchmark results in (pickled)
"""
__author__ = u"Andr\xe9 Malo"
__version__ = "1.0.0"
import sys as _sys
import time as _time
import_notes = []
class jsmins(object):
    # Namespace class collecting the minifier candidates as attributes;
    # the ``p_NN_`` prefixes fix the ordering used by the benchmark report.
    from bench import jsmin as p_01_simple_port

    if _sys.version_info >= (2, 4):
        # jsmin_2_0_9 uses syntax unavailable before python 2.4.
        from bench import jsmin_2_0_9 as p_02_jsmin_2_0_9
    else:
        import_notes.append(
            "jsmin_2_0_9 available for python 2.4 and later..."
        )
        print(import_notes[-1])

    import rjsmin as p_05_rjsmin
    try:
        # The optional C extension; benchmarked separately when present.
        import _rjsmin as p_06__rjsmin
    except ImportError:
        import_notes.append("_rjsmin (C-Port) not available")
        print(import_notes[-1])
# Force the pure-python rjsmin variant so it is benchmarked even when the
# C extension is importable (the C port is timed via p_06__rjsmin).
jsmins.p_05_rjsmin.jsmin = jsmins.p_05_rjsmin._make_jsmin(
    python_only=True
)

print("Python Release: %s" % ".".join(map(str, _sys.version_info[:3])))
print("")
def slurp(filename):
    """Read and return the entire contents of *filename* as text."""
    # try/finally rather than ``with`` keeps ancient pythons working.
    stream = open(filename)
    try:
        content = stream.read()
    finally:
        stream.close()
    return content
def print_(*value, **kwargs):
    """Join *value* and write it to ``file`` (default stdout) plus ``end``."""
    stream = kwargs.get('file') or _sys.stdout
    stream.write(''.join(value) + kwargs.get('end', '\n'))
def bench(filenames, count):
    """
    Benchmark the minifiers with given javascript samples

    :Parameters:
      `filenames` : sequence
        List of filenames

      `count` : ``int``
        Number of runs per js file and minifier

    :Return: per-file result dicts with keys ``filename``, ``size``,
             ``sizes``, ``messages`` and ``times``
    :Rtype: ``list``

    :Exceptions:
      - `RuntimeError` : empty filenames sequence
    """
    if not filenames:
        raise RuntimeError("Missing files to benchmark")
    # python 2/3 compatibility shims, resolved once per call
    try:
        xrange
    except NameError:
        xrange = range
    try:
        cmp
    except NameError:
        cmp = lambda a, b: (a > b) - (a < b)
    # collect the ``p_NN_<name>`` attributes of the jsmins namespace; the
    # numeric prefix fixes the benchmark order
    ports = [item for item in dir(jsmins) if item.startswith('p_')]
    ports.sort()
    # column width used to align the timing output
    space = max(map(len, ports)) - 4
    # ``item[5:]`` strips the ``p_NN_`` prefix to get the display name
    ports = [(item[5:], getattr(jsmins, item).jsmin) for item in ports]
    flush = _sys.stdout.flush
    struct = []
    # slurp all inputs up front so I/O does not skew the timings
    inputs = [(filename, slurp(filename)) for filename in filenames]
    for filename, script in inputs:
        print_("Benchmarking %r..." % filename, end=" ")
        flush()
        # dry run of every minifier; a failure is recorded as ``None`` and
        # excluded from timing below
        outputs = []
        for _, jsmin in ports:
            try:
                outputs.append(jsmin(script))
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                outputs.append(None)
        struct.append(dict(
            filename=filename,
            sizes=[
                (item is not None and len(item) or None) for item in outputs
            ],
            size=len(script),
            messages=[],
            times=[],
        ))
        print_("(%.1f KiB)" % (struct[-1]['size'] / 1024.0,))
        flush()
        times = []
        for idx, (name, jsmin) in enumerate(ports):
            if outputs[idx] is None:
                print_(" FAILED %s" % (name,))
                struct[-1]['times'].append((name, None))
            else:
                # the trailing marker compares the minified size against the
                # first port's output ('*' marks the reference itself)
                print_(" Timing %s%s... (%5.1f KiB %s)" % (
                    name,
                    " " * (space - len(name)),
                    len(outputs[idx]) / 1024.0,
                    idx == 0 and '*' or ['=', '>', '<'][
                        cmp(len(outputs[idx]), len(outputs[0]))
                    ],
                ), end=" ")
                flush()
                # grow the iteration count by 10x until the total run takes
                # at least 10 ms, then report the per-iteration average
                xcount = count
                while True:
                    counted = [None for _ in xrange(xcount)]
                    start = _time.time()
                    for _ in counted:
                        jsmin(script)
                    end = _time.time()
                    result = (end - start) * 1000
                    if result < 10: # avoid measuring within the error range
                        xcount *= 10
                        continue
                    times.append(result / xcount)
                    break
                print_("%8.2f ms" % times[-1], end=" ")
                flush()
                if len(times) <= 1:
                    print_()
                else:
                    # speed factors relative to the current (last) port
                    print_("(factor: %s)" % (', '.join([
                        '%.2f' % (timed / times[-1]) for timed in times[:-1]
                    ])))
                struct[-1]['times'].append((name, times[-1]))
                flush()
    print_()
    return struct
def main(argv=None):
    """Parse command line options and run the benchmark.

    :Parameters:
      `argv` : sequence or None
        Command line arguments. If None, ``sys.argv[1:]`` is used.
    """
    import getopt as _getopt
    import os as _os
    import pickle as _pickle

    if argv is None:
        argv = _sys.argv[1:]
    try:
        opts, args = _getopt.getopt(argv, "hc:p:", ["help"])
    except _getopt.GetoptError:
        # bug fix: was ``except getopt.GetoptError`` which raised a
        # NameError -- the module is imported as ``_getopt``
        e = _sys.exc_info()[0](_sys.exc_info()[1])
        # ``print >> stream`` is python-2-only syntax (a syntax error under
        # python 3); write to stderr directly instead
        _sys.stderr.write("%s\nTry %s -mbench.main --help\n" % (
            e,
            _os.path.basename(_sys.executable),
        ))
        _sys.exit(2)

    # defaults: 10 runs per file, no pickle output
    count, pickle = 10, None
    for key, value in opts:
        if key in ("-h", "--help"):
            _sys.stderr.write(
                "%s -mbench.main [-c count] [-p file] cssfile ...\n" % (
                    _os.path.basename(_sys.executable),
                )
            )
            _sys.exit(0)
        elif key == '-c':
            count = int(value)
        elif key == '-p':
            pickle = str(value)

    struct = bench(args, count)

    if pickle:
        # protocol 0 keeps the pickle readable by any python version
        fp = open(pickle, 'wb')
        try:
            fp.write(_pickle.dumps((
                ".".join(map(str, _sys.version_info[:3])),
                import_notes,
                struct,
            ), 0))
        finally:
            fp.close()
u"""
=========================
Write benchmark results
=========================
Write benchmark results.
:Copyright:
Copyright 2014 - 2022
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Usage::
python -mbench.write [-p plain] [-t table] <pickled
-p plain Plain file to write to (like docs/BENCHMARKS).
-t table Table file to write to (like docs/_userdoc/benchmark.txt).
"""
__author__ = u"Andr\xe9 Malo"
__version__ = "1.0.0"
import os as _os
import re as _re
import sys as _sys
# Python 2/3 compatibility shims.
try:
    unicode
except NameError:
    # Python 3 branch: ``unicode`` does not exist.  Byte strings are
    # decoded as latin-1, everything else is stringified.
    def uni(v):
        if hasattr(v, 'decode'):
            return v.decode('latin-1')
        return str(v)
else:
    # Python 2 branch: unicode strings are encoded to UTF-8 byte strings.
    # NOTE(review): the charsets differ between branches (latin-1 decode vs
    # utf-8 encode) -- presumably intentional, verify against the pickles.
    def uni(v):
        if isinstance(v, unicode):
            return v.encode('utf-8')
        return str(v)

try:
    cmp
except NameError:
    # Python 3: emulate the removed ``cmp`` builtin
    cmp = lambda a, b: (a > b) - (a < b)
def write_table(filename, results):
    """
    Output tabled benchmark results

    Renders the pickled benchmark structure as reST grid tables (one table
    row group per python version, ``benched_per_table`` js files per table)
    and splices them into *filename* between the ``.. begin tables`` and
    ``.. end tables`` marker lines.

    :Parameters:
      `filename` : ``str``
        Filename to write to

      `results` : ``list``
        Results
    """
    try:
        next
    except NameError:
        # python < 2.6 shim for the ``next`` builtin
        next = lambda i: (getattr(i, 'next', None) or i.__next__)()

    # (attribute name, reST display label) per benchmarked implementation
    names = [
        ('simple_port', 'Simple Port'),
        ('jsmin_2_0_9', 'jsmin 2.0.9'),
        ('rjsmin', '|rjsmin|'),
        ('_rjsmin', r'_\ |rjsmin|'),
    ]
    benched_per_table = 2
    # newest python version first
    results = sorted(results,
        key=(lambda x: [int(a) for a in x[0].split('.')]),
        reverse=True)
    # First we transform our data into a table (list of lists)
    pythons, widths = [], [0] * (benched_per_table + 1)
    versions = []
    for version, _, result in results:
        version = uni(version)
        # NOTE(review): after a 3.10.x entry only 2.x entries are kept --
        # presumably to limit the docs to newest-3.x plus 2.x; confirm.
        if versions and versions[-1].startswith('3.10.') \
                and not version.startswith('2.'):
            continue
        versions.append(version)
        # strips a trailing version suffix and extension, e.g. "-1.2.js"
        namesub = _re.compile(r'(?:-\d+(?:\.\d+)*)?\.js$').sub
        result = iter(result)
        tables = []
        # given our data it's easier to create the table transposed...
        for benched in result:
            rows = [['Name'] + [desc for _, desc in names]]
            for _ in range(benched_per_table):
                if _:
                    # pull the next js file into the same table; pad with an
                    # empty column set if there is none left
                    try:
                        benched = next(result)
                    except StopIteration:
                        rows.append([''] + ['' for _ in names])
                        continue
                # port name -> (time in ms or None, minified size)
                times = dict((
                    uni(port), (time, benched['sizes'][idx])
                ) for idx, (port, time) in enumerate(benched['times']))
                columns = ['%s (%.1f)' % (
                    namesub('', _os.path.basename(uni(benched['filename']))),
                    benched['size'] / 1024.0,
                )]
                for idx, (port, _) in enumerate(names):
                    if port not in times:
                        columns.append('n/a')
                        continue
                    time, size = times[port]
                    if time is None:
                        columns.append('(failed)')
                        continue
                    # size marker compares against the first port's output
                    columns.append('%s%.2f ms (%.1f %s)' % (
                        idx == 0 and ' ' or '',
                        time,
                        size / 1024.0,
                        idx == 0 and '\\*' or ['=', '>', '<'][
                            cmp(size, benched['sizes'][0])
                        ],
                    ))
                rows.append(columns)
            # calculate column widths (global for all tables)
            for idx, row in enumerate(rows):
                widths[idx] = max(widths[idx], max(map(len, row)))
            # ... and transpose it back.
            tables.append(zip(*rows))
        pythons.append((version, tables))
        if versions[-1].startswith('2.'):
            break
    # Second we create a rest table from it
    lines = []
    separator = lambda c='-': '+'.join([''] + [
        c * (width + 2) for width in widths
    ] + [''])
    for idx, (version, tables) in enumerate(pythons):
        if idx:
            lines.append('')
            lines.append('')
        line = 'Python %s' % (version,)
        lines.append(line)
        lines.append('~' * len(line))
        for table in tables:
            lines.append('')
            lines.append('.. rst-class:: benchmark')
            lines.append('')
            for idx, row in enumerate(table):
                if idx == 0:
                    # header
                    lines.append(separator())
                    # negative ``%*s`` width pads each cell to column width
                    lines.append('|'.join([''] + [
                        ' %s%*s ' % (col, len(col) - width, '')
                        for width, col in zip(widths, row)
                    ] + ['']))
                    lines.append(separator('='))
                else: # data
                    # first column left-aligned, the rest right-aligned
                    lines.append('|'.join([''] + [
                        j == 0 and (
                            ' %s%*s ' % (col, len(col) - widths[j], '')
                        ) or (
                            ['%*s ', ' %*s '][idx == 1] % (widths[j], col)
                        )
                        for j, col in enumerate(row)
                    ] + ['']))
                    lines.append(separator())
    # splice the generated tables between the marker lines of the existing
    # file; if the end marker is missing, the original content is kept and
    # a warning is emitted
    fplines = []
    fp = open(filename)
    try:
        fpiter = iter(fp)
        for line in fpiter:
            line = line.rstrip()
            if line == '.. begin tables':
                buf = []
                for line in fpiter:
                    line = line.rstrip()
                    if line == '.. end tables':
                        fplines.append('.. begin tables')
                        fplines.append('')
                        fplines.extend(lines)
                        fplines.append('')
                        fplines.append('.. end tables')
                        buf = []
                        break
                    else:
                        buf.append(line)
                else:
                    fplines.extend(buf)
                    _sys.stderr.write("Placeholder container not found!\n")
            else:
                fplines.append(line)
    finally:
        fp.close()
    fp = open(filename, 'w')
    try:
        fp.write('\n'.join(fplines) + '\n')
    finally:
        fp.close()
def write_plain(filename, results):
    """
    Output plain benchmark results

    Renders the pickled benchmark structure as the plain-text report
    (one section per python version, newest first) and writes it to
    *filename*.

    :Parameters:
      `filename` : ``str``
        Filename to write to

      `results` : ``list``
        Results
    """
    lines = []
    # newest python version first
    results = sorted(results,
        key=(lambda x: [int(a) for a in x[0].split('.')]),
        reverse=True)
    for idx, (version, import_notes, result) in enumerate(results):
        if idx:
            lines.append('')
            lines.append('')
        lines.append('$ python%s -OO bench/main.py bench/*.js' % (
            '.'.join(version.split('.')[:2])
        ))
        lines.append('~' * 72)
        for note in import_notes:
            lines.append(uni(note))
        lines.append('Python Release: %s' % (version,))
        for single in result:
            lines.append('')
            lines.append('Benchmarking %r... (%.1f KiB)' % (
                uni(single['filename']), single['size'] / 1024.0
            ))
            for msg in single['messages']:
                lines.append(msg)
            times = []
            space = max([len(uni(port)) for port, _ in single['times']])
            for idx, (port, time) in enumerate(single['times']):
                port = uni(port)
                if time is None:
                    lines.append(" FAILED %s" % (port,))
                else:
                    times.append(time)
                    # size marker compares against the first port's output
                    lines.append(
                        " Timing %s%s ... (%5.1f KiB %s) %8.2f ms" % (
                            port,
                            " " * (space - len(port)),
                            single['sizes'][idx] / 1024.0,
                            idx == 0 and '*' or ['=', '>', '<'][
                                cmp(single['sizes'][idx], single['sizes'][0])
                            ],
                            time
                        )
                    )
                # NOTE(review): ``time`` here is the current loop value; if
                # the last port FAILED (time is None) this division would
                # raise -- presumably ports never fail after succeeding once.
                if len(times) > 1:
                    lines.append(" (factor: %s)" % (', '.join([
                        '%.2f' % (timed / time) for timed in times[:-1]
                    ])))
    lines.append('')
    lines.append('')
    lines.append('# vim: nowrap')
    fp = open(filename, 'w')
    try:
        fp.write('\n'.join(lines) + '\n')
    finally:
        fp.close()
def main(argv=None):
    """Parse command line options, read pickled benchmark results from
    stdin and write them out as a plain report and/or a reST table.

    :Parameters:
      `argv` : sequence or None
        Command line arguments. If None, ``sys.argv[1:]`` is used.
    """
    import getopt as _getopt
    import pickle as _pickle

    if argv is None:
        argv = _sys.argv[1:]
    try:
        opts, args = _getopt.getopt(argv, "hp:t:", ["help"])
    except _getopt.GetoptError:
        # bug fix: was ``except getopt.GetoptError`` which raised a
        # NameError -- the module is imported as ``_getopt``
        e = _sys.exc_info()[0](_sys.exc_info()[1])
        # ``print >> stream`` is python-2-only syntax (a syntax error under
        # python 3); write to stderr directly instead
        _sys.stderr.write("%s\nTry %s -mbench.write --help" % (
            e,
            _os.path.basename(_sys.executable),
        ) + "\n")
        _sys.exit(2)

    plain, table = None, None
    for key, value in opts:
        if key in ("-h", "--help"):
            _sys.stderr.write(
                "%s -mbench.write [-p plain] [-t table] <pickled\n" % (
                    _os.path.basename(_sys.executable),
                )
            )
            _sys.exit(0)
        elif key == '-p':
            plain = str(value)
        elif key == '-t':
            table = str(value)

    struct = []
    # pickles are binary; on python 3 switch stdin to the raw buffer
    _sys.stdin = getattr(_sys.stdin, 'detach', lambda: _sys.stdin)()
    try:
        while True:
            version, import_notes, result = _pickle.load(_sys.stdin)
            if hasattr(version, 'decode'):
                version = version.decode('latin-1')
            struct.append((version, import_notes, result))
    except EOFError:
        pass

    if plain:
        write_plain(plain, struct)
    if table:
        write_table(table, struct)
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        if sample:
            # Bessel's correction for sample standard deviation
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []
        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # bug fix: the original repeated ``axes[0].set_ylabel`` here,
        # leaving the PDF subplot unlabelled
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # variances of independent Gaussians add
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
import numpy
import zfit
import matplotlib.pyplot as plt
import utils_noroot as utnr
from logzero import logger as log
from zutils.plot import plot as zfp
#--------------------------------
class extractor:
    """Extracts rk from a simultaneous extended-likelihood fit of muon and
    electron channel datasets.

    For every dataset the electron signal yield is reparametrized as
    ``nsg_ee = nsg_mm * ck / rk`` with ``ck = (eff_ee / eff_mm) / rjpsi``,
    so a single ``rk`` parameter is shared across all datasets.  An
    optional covariance matrix over the ck values is used to add a
    Gaussian constraint to the likelihood.
    """
    def __init__(self):
        self._d_eff   = None
        self._cov     = None
        self._d_rjpsi = None
        self._d_data  = None
        self._d_model = None
        self._d_ck    = None
        self._l_dsname= []
        self._rk      = zfit.Parameter('rk', 1.0, 0.0, 2.0)

        self._plt_dir = None

        self._initialized = False
    #--------------------------------
    def _initialize(self):
        """Validate inputs, build per-dataset ck parameters and
        reparametrize the electron models; runs only once."""
        if self._initialized:
            return

        self._l_dsname = list(self._d_eff.keys())
        self._check_inputs()

        self._d_ck = { dsname : zfit.Parameter(f'ck_{dsname}', 1.0, 0.0, 2.0) for dsname in self._l_dsname }

        d_model = {}
        for ds, (mod_mm, mod_ee) in self._d_model.items():
            mod_ee      = self._reparametrize_ee_pdf(ds, mod_mm, mod_ee)
            d_model[ds] = (mod_mm, mod_ee)

        self._d_model = d_model

        self._initialized = True
    #--------------------------------
    def _check_inputs(self):
        """Validate efficiencies, models, data and their mutual consistency"""
        for ds, (eff_mm, eff_ee) in self._d_eff.items():
            self._check_eff(eff_mm)
            self._check_eff(eff_ee)

        for ds, (mod_mm, mod_ee) in self._d_model.items():
            self._check_model(mod_mm)
            self._check_model(mod_ee)

        for ds, (dat_mm, dat_ee) in self._d_data.items():
            self._check_data(dat_mm)
            self._check_data(dat_ee)

        # all three input dicts must cover the same datasets
        self._check_keys(self._d_eff , self._d_model)
        self._check_keys(self._d_model, self._d_data )
        self._check_keys(self._d_data , self._d_eff )

        self._check_cov()
        self._check_rjpsi()
    #--------------------------------
    def _check_keys(self, d1, d2):
        """Raise ValueError unless both dicts have identical key sets"""
        if d1.keys() != d2.keys():
            log.error('Keys differ:')
            log.error(d1.keys())
            log.error(d2.keys())
            raise ValueError
    #--------------------------------
    def _check_cov(self):
        """Validate the optional ck covariance matrix (type and shape)"""
        if self._cov is None:
            return

        if not isinstance(self._cov, numpy.ndarray):
            log.error('Covariance matrix is not a numpy array')
            # bug fix: was a bare ``raise`` outside an except block, which
            # raises RuntimeError('No active exception...') instead
            raise ValueError('Covariance matrix is not a numpy array')

        ndset = len(self._d_eff)
        if self._cov.shape != (ndset, ndset):
            # bug fix: the message referenced the undefined name
            # ``shape._cov.shape``
            log.error(f'Covariance matrix has wrong shape: {self._cov.shape}')
            raise ValueError(f'Covariance matrix has wrong shape: {self._cov.shape}')
    #--------------------------------
    def _check_rjpsi(self):
        """Default rjpsi to 1 for every dataset when not provided"""
        if self._d_rjpsi is None:
            self._d_rjpsi = {dsname : 1 for dsname in self._l_dsname}
            log.warning('rjpsi not found, using 1 for all datasets')
    #--------------------------------
    def _reparametrize_ee_pdf(self, dsname, mod_mm, mod_ee):
        """Rebuild the electron PDF with its signal yield expressed as
        ``nsg_mm * ck / rk`` and return the new extended SumPDF."""
        nsg_mm = self._get_yld(mod_mm, 'nsg_mm')
        nsg_ee = self._get_yld(mod_ee, 'nsg_ee')

        l_model = mod_ee.models
        s_param = mod_ee.get_params(is_yield=True)
        l_param = list(s_param)
        index   = l_param.index(nsg_ee)

        eff_mm, eff_ee = self._d_eff[dsname]
        rjpsi          = self._d_rjpsi[dsname]
        ck             = self._d_ck[dsname]
        ck.set_value( (eff_ee / eff_mm) / rjpsi )
        if self._cov is None:
            # without a covariance matrix there is no constraint, so ck
            # cannot float freely
            ck.floating = False
            log.warning('Covariance matrix not specified, will fix ck in fit')

        nsg_ee = zfit.ComposedParameter(f'nsg_ee_rk_{dsname}', lambda a, b, c: b * c / a, params=[self._rk, nsg_mm, ck])
        log.debug(f'nsg_ee_rk_{dsname} -> {nsg_mm.name} * {ck.name} / {self._rk.name}')
        l_param[index] = nsg_ee

        l_model_ext = [ model.copy().create_extended(nevt) for model, nevt in zip(l_model, l_param) ]
        mod_ee      = zfit.pdf.SumPDF(l_model_ext)

        return mod_ee
    #--------------------------------
    def _get_yld(self, model, preffix):
        """Return the single yield parameter whose name starts with *preffix*"""
        l_param   = model.get_params(is_yield=True)
        l_sig_yld = [ param for param in l_param if param.name.startswith(preffix)]

        [sig_yld] = l_sig_yld

        return sig_yld
    #--------------------------------
    @property
    def plt_dir(self):
        """Directory where fit plots are saved; None disables plotting"""
        return self._plt_dir

    @plt_dir.setter
    def plt_dir(self, value):
        try:
            self._plt_dir = utnr.make_dir_path(value)
        except:
            log.error(f'Cannot create: {value}')
            raise
    #--------------------------------
    @property
    def rjpsi(self):
        """dict: dataset name -> rjpsi correction factor"""
        return self._d_rjpsi

    @rjpsi.setter
    def rjpsi(self, value):
        self._d_rjpsi = value
    #--------------------------------
    @property
    def eff(self):
        """dict: dataset name -> (muon efficiency, electron efficiency)"""
        return self._d_eff

    @eff.setter
    def eff(self, value):
        self._d_eff = value
    #--------------------------------
    @property
    def cov(self):
        """numpy.ndarray: covariance matrix of the ck values, or None"""
        return self._cov

    @cov.setter
    def cov(self, value):
        self._cov = value
    #--------------------------------
    @property
    def data(self):
        """dict: dataset name -> (muon zfit data, electron zfit data)"""
        return self._d_data

    @data.setter
    def data(self, value):
        self._d_data = value
    #--------------------------------
    @property
    def model(self):
        """dict: dataset name -> (muon SumPDF, electron SumPDF)"""
        return self._d_model

    @model.setter
    def model(self, value):
        self._d_model = value
    #--------------------------------
    def _check_data(self, obj):
        """Raise ValueError unless *obj* is a zfit dataset"""
        if not isinstance(obj, zfit.data.Data):
            log.error('Object introduced is not a zfit dataset')
            raise ValueError
    #--------------------------------
    def _check_model(self, obj):
        """Raise ValueError unless *obj* is an extended SumPDF with exactly
        one ``nsg_`` signal yield."""
        if not isinstance(obj, zfit.pdf.SumPDF):
            log.error('Object introduced is not a zfit PDF')
            raise ValueError

        pdf = obj
        if not pdf.is_extended:
            log.error('PDF is not extended:')
            print(pdf)
            raise ValueError

        l_yld      = pdf.get_params(is_yield=True)
        l_yld_name = [ yld.name for yld in l_yld if yld.name.startswith('nsg_') ]
        try:
            [yld_name] = l_yld_name
        except ValueError:
            log.error('Not found one and only one signal yield:')
            print(l_yld_name)
            raise ValueError

        log.debug(f'Picking up component with signal yield: {yld_name}')
    #--------------------------------
    def _check_eff(self, eff):
        """Raise ValueError unless *eff* is a float in the open (0, 1)"""
        if not isinstance(eff, float):
            log.error(f'Efficiency is not a float: {eff}')
            raise ValueError

        if not ( 0 < eff < 1 ):
            log.error(f'Efficiency is not in (0, 1): {eff:.3f}')
            raise ValueError
    #--------------------------------
    def _plot(self, data, model, results, component):
        """Save a data/model overlay plot for *component* if plt_dir is set"""
        if self._plt_dir is None:
            return

        obj=zfp(data=data, model=model, result=results)
        obj.plot(nbins=50)

        plt_path = f'{self._plt_dir}/fit_{component}.png'
        log.info(f'Saving to: {plt_path}')
        plt.savefig(plt_path, bbox_inches='tight')
    #--------------------------------
    def _finalize(self):
        """Drop the zfit parameters created here so the extractor can be
        instantiated again in the same process."""
        self._delete_par('rk')
        for ck in self._d_ck.values():
            self._delete_par(ck.name)

        for dsname in self._l_dsname:
            self._delete_par(f'nsg_ee_rk_{dsname}')
    #--------------------------------
    def _delete_par(self, par_name):
        """Remove *par_name* from zfit's global parameter registry"""
        if par_name in zfit.Parameter._existing_params:
            del zfit.Parameter._existing_params[par_name]
    #--------------------------------
    def _add_const(self, nll):
        """Attach a Gaussian constraint on the ck parameters, when a
        covariance matrix is available, and return the new NLL."""
        if self._cov is None:
            log.warning('Covariance matrix is missing, not adding constraints to fit')
            return nll

        l_ck_par=[]
        l_ck_val=[]
        for dsname in self._l_dsname:
            eff_mm, eff_ee = self._d_eff[dsname]
            rjpsi          = self._d_rjpsi[dsname]
            ck_val         = (eff_ee / eff_mm) / rjpsi
            ck_par         = self._d_ck[dsname]

            l_ck_val.append(ck_val)
            l_ck_par.append(ck_par)

        log.debug(f'Using covariance:\n{self._cov}')
        log.debug(f'Using ck:\n{l_ck_val}')

        cns_ck = zfit.constraint.GaussianConstraint(params = l_ck_par,
                                                    observation= l_ck_val,
                                                    uncertainty= self._cov)
        nll = nll.create_new(constraints=cns_ck)

        return nll
    #--------------------------------
    def _plot_fit(self):
        """Save the per-dataset fit plots, if a plot directory was set"""
        if self._plt_dir is None:
            return

        for dsname in self._l_dsname:
            mod_mm, mod_ee = self._d_model[dsname]
            dat_mm, dat_ee = self._d_data [dsname]

            self._plot(dat_mm, mod_mm, None, f'mm_{dsname}')
            self._plot(dat_ee, mod_ee, None, f'ee_{dsname}')
    #--------------------------------
    def get_rk(self):
        """Build the combined NLL over all datasets, minimize it and
        return the zfit FitResult (rk is one of its parameters)."""
        self._initialize()

        nll = None
        for dsname in self._l_dsname:
            mod_mm, mod_ee = self._d_model[dsname]
            dat_mm, dat_ee = self._d_data [dsname]

            nll_mm = zfit.loss.ExtendedUnbinnedNLL(model=mod_mm, data=dat_mm)
            nll_ee = zfit.loss.ExtendedUnbinnedNLL(model=mod_ee, data=dat_ee)

            if nll is None:
                nll = nll_mm + nll_ee
            else:
                nll+= nll_mm + nll_ee

        nll = self._add_const(nll)
        mnm = zfit.minimize.Minuit()
        res = mnm.minimize(nll)

        self._plot_fit()
        self._finalize()

        return res
#--------------------------------
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        if sample:
            # Bessel's correction for sample standard deviation
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []
        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # bug fix: the original repeated ``axes[0].set_ylabel`` here,
        # leaving the PDF subplot unlabelled
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # variances of independent Gaussians add
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
def reverse_num(num):
    """Returns an integer

    Reverse of a number passed as argument
    """
    reversed_value = 0
    while num > 0:
        num, digit = divmod(num, 10)
        reversed_value = reversed_value * 10 + digit
    return reversed_value
def sum_of_digits(num):
    """Returns an integer

    Sum of the digits of a number passed as argument
    """
    total = 0
    while num > 0:
        num, digit = divmod(num, 10)
        total += digit
    return total
def is_prime(num):
    """Returns a boolean

    Checks whether the number passed as argument is a prime or composite number
    """
    if num == 0 or num == 1:
        return False
    i = 2
    # bug fix: trial division must include sqrt(num); the original used
    # ``i*i < num`` which wrongly reported perfect squares (4, 9, 25, ...)
    # as prime
    while i*i <= num:
        if num % i == 0:
            return False
        i += 1
    return True
def generate_primes(num1, num2):
    """Returns a list

    Prime numbers generated in the inclusive range [num1, num2].
    Assumes num1 >= 1 (zero is rejected, negative bounds are unsupported).
    """
    if num1 > num2:
        raise Exception(
            "num1 can't be greater than num2. Specify the correct range.")
    if num1 == 0 or num2 == 0:
        raise Exception("Specify the correct range.")
    # bug fix: the original allocated a window-sized list (num2 - num1 + 1)
    # but indexed it with absolute values, giving wrong results (or an
    # IndexError) whenever num1 > 1, and it also never included num2.
    # Sieve of Eratosthenes over 0..num2, then slice the requested window.
    is_prime_flags = [True] * (num2 + 1)
    is_prime_flags[0] = False
    if num2 >= 1:
        is_prime_flags[1] = False
    p = 2
    while p * p <= num2:
        if is_prime_flags[p]:
            for multiple in range(p * p, num2 + 1, p):
                is_prime_flags[multiple] = False
        p += 1
    return [n for n in range(num1, num2 + 1) if is_prime_flags[n]]
def gcd(num1, num2):
    """Returns an integer

    Greatest common divisor of the two numbers passed as arguments
    """
    # iterative Euclidean algorithm
    while num2:
        num1, num2 = num2, num1 % num2
    return num1
def lcm(num1, num2):
    """Returns an integer

    Least common multiple of the two numbers passed as arguments
    """
    # inline Euclidean gcd, then lcm = a*b / gcd(a, b)
    a, b = num1, num2
    while b:
        a, b = b, a % b
    return num1 * num2 // a
def get_factors(num):
    """Returns a list

    Factors of the number passed as argument.  Factors are found in
    divisor/quotient pairs, so the result is not sorted.
    """
    factors = []
    candidate = 1
    while candidate * candidate <= num:
        quotient, remainder = divmod(num, candidate)
        if remainder == 0:
            factors.append(candidate)
            if quotient != candidate:
                factors.append(quotient)
        candidate += 1
    return factors
def factorial(num):
    """Returns an integer

    Factorial of the number passed as argument; factorial(0) == 1
    """
    result = 1
    remaining = num
    while remaining > 1:
        result *= remaining
        remaining -= 1
    return result
def fibonacci(n):
    """Returns an integer

    Nth fibonacci number, 1-indexed: fibonacci(1) == 0, fibonacci(2) == 1.
    """
    # bug fix: the original raised NameError for n <= 2 because ``c`` was
    # only assigned inside the loop body
    if n <= 1:
        return 0
    if n == 2:
        return 1
    a = 0
    b = 1
    for i in range(n-2):
        a, b = b, a + b
    return b
def number_of_digits(num):
    """Returns an integer

    Count of decimal digits in the number passed as argument.
    """
    # bug fix: the original returned 0 for num == 0, but zero has one digit
    if num == 0:
        return 1
    count = 0
    while num > 0:
        count += 1
        num //= 10
    return count
def is_armstrong(num):
    """Returns a boolean

    Checks whether the number equals the sum of its digits, each raised
    to the power of the digit count (an Armstrong number).
    """
    original = num
    digits = []
    while num > 0:
        digits.append(num % 10)
        num //= 10
    power = len(digits)
    total = 0
    for digit in digits:
        total += digit ** power
    return total == original
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        if sample:
            # Bessel's correction for sample standard deviation
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []
        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # bug fix: the original repeated ``axes[0].set_ylabel`` here,
        # leaving the PDF subplot unlabelled
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # variances of independent Gaussians add
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
import os
from typing import List
from typing import Optional
from typing import Dict
from traceback import format_exc
from rkd.api.inputoutput import IO
from .expressions import safe_eval
from .exception import ProfileNotFoundException
from .exception import ServiceNotFoundInYaml
from .exception import ServiceNotFoundInYamlLookedByCriteria
DEFAULT_SELECTOR = 'service is not None' # passes all containers
BOOLEANS = ['true', 'TRUE', 'True', True]
class ServiceDeclaration(object):
    """Model of a parsed service declaration from YAML"""

    # service name (key in the YAML services mapping)
    _name: str
    # raw service definition dict as parsed from YAML
    _definition: dict

    def __init__(self, name: str, definition: dict):
        self._name = name
        self._definition = definition

    def get_name(self) -> str:
        """Name of the service as declared in the YAML file"""
        return self._name

    def get_definition(self) -> dict:
        """Raw YAML definition dict of the service"""
        return self._definition

    def get_domains(self) -> list:
        """Domains from the comma-separated VIRTUAL_HOST environment
        variable; empty list when it is not declared"""
        try:
            return str(self.get_definition()['environment']['VIRTUAL_HOST']).split(',')
        except KeyError:
            return []

    def is_using_watchtower(self) -> bool:
        """True when the watchtower enable label is set to a truthy value"""
        try:
            return self.get_definition()['labels']['com.centurylinklabs.watchtower.enable'] in BOOLEANS
        except KeyError:
            return False

    def is_using_maintenance_mode(self) -> bool:
        """True when the org.riotkit.useMaintenanceMode label is truthy"""
        try:
            return self.get_definition()['labels']['org.riotkit.useMaintenanceMode'] in BOOLEANS
        except KeyError:
            return False

    def get_ports(self) -> list:
        """Declared port mappings; empty list when none are declared"""
        try:
            return self.get_definition()['ports']
        except KeyError:
            return []

    def get_desired_replicas_count(self) -> int:
        """Replica count from the org.riotkit.replicas label; defaults to 1"""
        try:
            return int(self.get_definition()['labels']['org.riotkit.replicas'])
        except KeyError:
            return 1

    def get_update_strategy(self, default: str = 'compose') -> str:
        """Update strategy from the org.riotkit.updateStrategy label,
        falling back to *default*"""
        try:
            return str(self.get_definition()['labels']['org.riotkit.updateStrategy'])
        except KeyError:
            return default

    def get_priority_number(self):
        """Ordering priority from the org.riotkit.priority label; services
        without the label default to 1000"""
        try:
            return int(self.get_definition()['labels']['org.riotkit.priority'])
        except KeyError:
            return 1000

    def get_image(self):
        """Image reference, or a local-build placeholder when ``image``
        is not declared"""
        try:
            return str(self.get_definition()['image'])
        except KeyError:
            return '_docker_build_local:latest'

    def get_declared_version(self):
        """Image tag (text after ':'); 'latest (build)' when no image is
        declared, 'latest' when the image has no explicit tag"""
        try:
            return str(self.get_definition()['image'].split(':')[1])
        except KeyError:
            return 'latest (build)'
        except IndexError:
            return 'latest'

    def has_domain(self, domain: str):
        """True when *domain* is one of the VIRTUAL_HOST domains"""
        return domain in self.get_domains()
class ServiceSelector(object):
    """Acts as a service filter. Simple reduce() implementation"""

    _selector: str
    _io: IO

    def __init__(self, selector: str, io: IO):
        self._selector = selector
        self._io = io

    def is_service_matching(self, definition: dict, name: str) -> bool:
        """Asks the profile filter - is service of a given definition and name matching?"""
        try:
            return safe_eval(self._selector, {'service': definition, 'name': name})

        except Exception:
            # a broken selector expression must not crash the whole listing
            self._io.errln(format_exc())
            self._io.error_msg('Exception raised, while attempting to evaluate --profile selector')
            return False

    def find_matching_services(self, services: dict) -> List[ServiceDeclaration]:
        """Find names of matching services by current Service Selector"""
        matched = [
            ServiceDeclaration(service_name, service_definition)
            for service_name, service_definition in services.items()
            if self.is_service_matching(service_definition, service_name)
        ]
        matched.sort(key=lambda declaration: declaration.get_priority_number())

        return matched
class ProfileLoader(object):
    """Parses profiles from ./apps/profiles
    """

    _io: IO
    _apps_path: str

    def __init__(self, io: IO, apps_path: str):
        self._io = io
        self._apps_path = apps_path

    def load_profile(self, name: str) -> ServiceSelector:
        """Load a selector by profile name; an empty name yields the match-all selector."""
        if name is None or name == '':
            return ServiceSelector(DEFAULT_SELECTOR, self._io)

        profile_path = '{}/profile/{}.profile.py'.format(self._apps_path, name)

        if not os.path.isfile(profile_path):
            raise ProfileNotFoundException(profile_path)

        return self.load_profile_from_path(profile_path)

    def load_profile_from_path(self, path: str) -> ServiceSelector:
        """Read a profile file and wrap its content as a selector expression."""
        with open(path, 'r') as handle:
            return ServiceSelector(handle.read(), self._io)
class ServiceLocator(object):
    """Declarations repository"""

    _services: Dict[str, ServiceDeclaration]

    def __init__(self, services: dict):
        self._services = {
            service_name: ServiceDeclaration(service_name, yaml_dict)
            for service_name, yaml_dict in services.items()
        }

    def get_by_name(self, name: str) -> Optional[ServiceDeclaration]:
        """Find a service by exact name, raising ServiceNotFoundInYaml when absent."""
        try:
            return self._services[name]
        except KeyError:
            raise ServiceNotFoundInYaml(name)

    def find_by_domain(self, domain: str) -> Optional[ServiceDeclaration]:
        """Find the first service that declares the given domain in VIRTUAL_HOST."""
        for declaration in self._services.values():
            if declaration.has_domain(domain):
                return declaration

        raise ServiceNotFoundInYamlLookedByCriteria('has domain "%s"' % domain)
import subprocess
from argparse import ArgumentParser
from typing import Dict
from rkd.api.contract import ExecutionContext
from .base import HarborBaseTask
from ..formatting import prod_formatting
class GatewayBaseTask(HarborBaseTask):
    """Common base for gateway tasks (NGINX gateway + LetsEncrypt companion)."""

    def configure_argparse(self, parser: ArgumentParser):
        super().configure_argparse(parser)

    def get_declared_envs(self) -> Dict[str, str]:
        declared = super().get_declared_envs()
        declared['DISABLE_SSL'] = 'False'

        return declared

    def make_sure_ssl_service_is_up(self, ctx: ExecutionContext):
        """Bring up gateway, proxy-gen and the letsencrypt companion, then wait until it idles."""
        ssl_service = self.services(ctx).get_by_name('gateway_letsencrypt')

        self.containers(ctx).up(self.services(ctx).get_by_name('gateway'), norecreate=True)
        self.containers(ctx).up(self.services(ctx).get_by_name('gateway_proxy_gen'), norecreate=True)
        self.containers(ctx).up(ssl_service, norecreate=True)

        # the companion logs "Sleep for" when it finished its work cycle
        self.containers(ctx).wait_for_log_message('Sleep for', ssl_service, timeout=60)

    def format_task_name(self, name) -> str:
        return prod_formatting(name)
class ReloadGatewayTask(GatewayBaseTask):
    """Reload gateway, regenerate missing SSL certificates"""

    def run(self, ctx: ExecutionContext) -> bool:
        # validate before reloading, so a broken config never takes the gateway down
        for header, command in (
            ('Validating NGINX configuration', 'nginx -t'),
            ('Reloading NGINX configuration', 'nginx -s reload'),
        ):
            self.io().h2(header)
            self.containers(ctx).exec_in_container('gateway', command)

        if ctx.get_env('DISABLE_SSL').lower() != 'true':
            self.io().h2('Reloading SSL configuration')
            self.make_sure_ssl_service_is_up(ctx)
            self.containers(ctx).exec_in_container('gateway_letsencrypt', '/app/signal_le_service')

        return True

    def get_name(self) -> str:
        return ':reload'

    def get_group_name(self) -> str:
        return ':harbor:gateway'
class ShowSSLStatusTask(GatewayBaseTask):
    """Show status of SSL certificates"""

    def run(self, ctx: ExecutionContext) -> bool:
        ssl_enabled = ctx.get_env('DISABLE_SSL').lower() != 'true'

        if ssl_enabled:
            self.make_sure_ssl_service_is_up(ctx)

        status = self.containers(ctx).exec_in_container('gateway_letsencrypt', '/app/cert_status')
        self.io().out(status)

        return True

    def get_name(self) -> str:
        return ':status'

    def get_group_name(self) -> str:
        return ':harbor:gateway:ssl'
class ForceReloadSSLTask(GatewayBaseTask):
    """Regenerate all certificates with force"""

    def run(self, ctx: ExecutionContext) -> bool:
        """Force renewal of all LetsEncrypt certificates.

        Returns False when the renew command fails, True otherwise.
        """
        if ctx.get_env('DISABLE_SSL').lower() == 'true':
            # bugfix: this message was previously printed unconditionally,
            # even after a successful regeneration with SSL enabled
            self.io().info_msg('SSL is disabled, not regenerating anything')
            return True

        self.make_sure_ssl_service_is_up(ctx)

        try:
            self.io().out(self.containers(ctx).exec_in_container('gateway_letsencrypt', '/app/force_renew'))
        except subprocess.CalledProcessError as err:
            self.io().error_msg(str(err))
            self.io().error('Output: ' + str(err.output))

            return False

        return True

    def get_name(self) -> str:
        return ':regenerate'

    def get_group_name(self) -> str:
        return ':harbor:gateway:ssl'
import subprocess
from typing import Dict
from argparse import ArgumentParser
from rkd.api.contract import ExecutionContext
from ...formatting import development_formatting
from .base import BaseDeploymentTask
class EditVaultTask(BaseDeploymentTask):
    """Edits an encrypted file

    Example usage:
        # edit ".env-prod" file
        harbor :vault:edit .env-prod --vault-passwords="./.vault-password"

        # usage of environment variable (NOTICE: paths to password files must begin with "/" or "./")
        VAULT_PASSWORDS="./.vault-password-file||second-some-plaintext-password-there" harbor :vault:edit .env-prod

        # use a different text editor (you can also put EDITOR variable to your .env file)
        EDITOR=vim harbor :vault:edit deployment.yml

    HINT: You can avoid writing the path in commandline each time by putting `VAULT_PASSWORDS=./path-to-password-file.txt` to the .env file
    HINT: You can store vault password file on encrypted flash drive, and make a symbolic link. Every time when you mount an encrypted drive you will gain access to the project

    NOTICE: When at least one of vault password files does not exist, then there will be a password prompt
    """

    def get_group_name(self) -> str:
        return ':harbor:vault'

    def get_name(self) -> str:
        return ':edit'

    def get_declared_envs(self) -> Dict[str, str]:
        # NOTE(review): super(BaseDeploymentTask, self) skips BaseDeploymentTask itself
        # in the MRO - the same pattern is used in sibling vault tasks, looks deliberate; confirm
        declared = super(BaseDeploymentTask, self).get_declared_envs()
        declared['VAULT_PASSWORDS'] = ''

        return declared

    def configure_argparse(self, parser: ArgumentParser):
        parser.add_argument('filename', help='Filename')
        self._add_vault_arguments_to_argparse(parser)

    def run(self, context: ExecutionContext) -> bool:
        filename = context.get_arg('filename')
        vault_opts = self._get_vault_opts(context)

        subprocess.check_call(f'ansible-vault edit {vault_opts} {filename}', shell=True)

        return True
class EncryptVaultTask(BaseDeploymentTask):
    """Encrypts/Decrypts a file using strong AES-256 algorithm,
    output files are suitable to be kept in GIT repository

    See the documentation for :harbor:vault:edit task for general file encryption documentation
    """

    def get_group_name(self) -> str:
        return ':harbor:vault'

    def get_name(self) -> str:
        return ':encrypt'

    def get_declared_envs(self) -> Dict[str, str]:
        # NOTE(review): super(BaseDeploymentTask, self) skips BaseDeploymentTask itself
        # in the MRO - consistent with sibling vault tasks; confirm intent
        declared = super(BaseDeploymentTask, self).get_declared_envs()
        declared['VAULT_PASSWORDS'] = ''

        return declared

    def configure_argparse(self, parser: ArgumentParser):
        parser.add_argument('--decrypt', '-d', action='store_true', help='Decrypt instead of encrypting')
        parser.add_argument('filename', help='Filename')
        self._add_vault_arguments_to_argparse(parser)

    def run(self, context: ExecutionContext) -> bool:
        filename = context.get_arg('filename')
        vault_opts = self._get_vault_opts(context)
        action = 'decrypt' if context.get_arg('--decrypt') else 'encrypt'

        self.sh(f'ansible-vault {action} {vault_opts} {filename}', capture=False)

        return True
class EnvEncryptTask(BaseDeploymentTask):
    """Manages the encryption of .env-prod file

    The .env-prod file is a file that could be kept securely in GIT repository while containing passwords
    required for services to work.
    """

    def get_group_name(self) -> str:
        return ':harbor:env'

    def get_name(self) -> str:
        return ':encrypt'

    def format_task_name(self, name) -> str:
        return development_formatting(name)

    def get_declared_envs(self) -> Dict[str, str]:
        # NOTE(review): super(BaseDeploymentTask, self) skips BaseDeploymentTask itself
        # in the MRO - consistent with sibling vault tasks; confirm intent
        envs = super(BaseDeploymentTask, self).get_declared_envs()
        envs['VAULT_PASSWORDS'] = ''

        return envs

    def configure_argparse(self, parser: ArgumentParser):
        parser.add_argument('--decrypt', '-d', action='store_true', help='Decrypt instead of encrypting')
        self._add_vault_arguments_to_argparse(parser)

    def run(self, context: ExecutionContext) -> bool:
        """Copy .env <-> .env-prod through a temporary file and run ansible-vault on it."""
        vault_opts = self._get_vault_opts(context)
        mode = 'decrypt' if context.get_arg('--decrypt') else 'encrypt'

        src = '.env'
        dst = '.env-prod'

        if mode == 'decrypt':
            src = '.env-prod'
            dst = '.env'

        # work on a temporary copy, so a failed ansible-vault run does not corrupt the target
        self.sh('cp %s %s-tmp' % (src, dst))
        self.sh('ansible-vault %s %s %s-tmp' % (mode, vault_opts, dst), capture=False)
        self.sh('mv %s-tmp %s' % (dst, dst))

        if mode == 'encrypt':
            # best-effort "git add" - we may not be inside a git repository.
            # bugfix: was a bare "except:", which also swallowed KeyboardInterrupt/SystemExit
            try:
                self.sh('git add %s' % dst)
            except Exception:
                pass

        return True
from typing import List
from jsonschema import ValidationError
from .argparsing.model import TaskArguments
class ContextException(Exception):
    """Base for errors raised while building/resolving the task context."""
    pass


class TaskNotFoundException(ContextException):
    """Requested task name could not be resolved in the context."""
    pass


class TaskExecutionException(Exception):
    """Base for errors raised while executing a task."""
    pass


class InterruptExecution(TaskExecutionException):
    """Signal that stops further tasks execution."""
    pass
class ExecutionRetryException(TaskExecutionException):
    """Internal signal to retry a task"""

    # tasks (with their commandline arguments) to execute again
    args: List[TaskArguments]

    def __init__(self, args: List[TaskArguments] = None):
        # default is None (not []) to avoid the shared mutable-default pitfall
        if args is None:
            args = []

        self.args = args
class ExecutionRescheduleException(TaskExecutionException):
    """Internal signal to put extra task into resolve/schedule queue of TaskResolver"""

    # tasks to be appended to the scheduling queue
    tasks_to_schedule: List[TaskArguments]

    def __init__(self, tasks_to_schedule: List[TaskArguments]):
        self.tasks_to_schedule = tasks_to_schedule


class ExecutionRescueException(ExecutionRescheduleException):
    """Internal signal to call a rescue set of tasks in case of given task fails"""


class ExecutionErrorActionException(ExecutionRescheduleException):
    """Internal signal to call an error notification in case when given task fails"""
class TaskException(ContextException):
    """Base for errors related to a single task's declaration or configuration."""
    pass


class UndefinedEnvironmentVariableUsageError(TaskException):
    """A task references an environment variable that was never declared."""
    pass


class EnvironmentVariableNotUsed(TaskException):
    """A declared environment variable is not referenced anywhere."""
    pass
class EnvironmentVariableNameNotAllowed(TaskException):
    """Raised when a task tries to declare a reserved environment variable name."""

    def __init__(self, var_name: str):
        # bugfix: corrected grammar of the user-facing message ("it probably a" -> "it is probably a")
        super().__init__('Environment variable with this name "' + var_name + '" cannot be declared, ' +
                         'it is probably a commonly reserved name by operating systems')
class UserInputException(Exception):
    """Base for errors caused by invalid user input (commandline, env, config)."""
    pass
class BlockDefinitionLogicError(Exception):
    """Raised when a {@block ...} declares a logically invalid combination of modifiers."""

    @staticmethod
    def from_both_rescue_and_error_defined(task=None):
        """Create the error for a block that declares both @rescue and @error.

        Bugfix: the original body referenced an undefined global name "task",
        so calling this factory raised a NameError instead of returning the error.
        The new optional parameter keeps argument-less calls working.
        """
        try:
            body = task.block().body
        except AttributeError:
            body = '<unknown>'

        return BlockDefinitionLogicError('Block "{0:s}" cannot define both @rescue and @error'.format(str(body)))
class NotSupportedEnvVariableError(UserInputException):
    """User supplied an environment variable that is not supported by the task."""
    pass
class YamlParsingException(ContextException):
    """Logic or syntax errors in makefile.yaml"""


class YAMLFileValidationError(YamlParsingException):
    """Errors related to schema validation"""

    def __init__(self, err: ValidationError):
        # renders the JSON-schema path (e.g. "tasks.0.name") next to the validator message
        super().__init__('YAML schema validation failed at path "%s" with error: %s' % (
            '.'.join(list(map(str, list(err.path)))),
            str(err.message)
        ))
class ParsingException(ContextException):
    """Errors related to parsing YAML/Python syntax"""

    @classmethod
    def from_import_error(cls, import_str: str, class_name: str, error: Exception) -> 'ParsingException':
        """Build an error for an import that failed to load."""
        return cls(
            f'Import "{import_str}" is invalid - cannot import class "{class_name}" - error: {error}'
        )

    @classmethod
    def from_class_not_found_in_module_error(cls, import_str: str, class_name: str,
                                             import_path: str) -> 'ParsingException':
        """Build an error for a module that loaded, but lacks the requested symbol."""
        return cls(
            f'Import "{import_str}" is invalid. Class or method "{class_name}" not found in module "{import_path}"'
        )
class DeclarationException(ContextException):
    """Something wrong with the makefile.py/makefile.yaml """


class ContextFileNotFoundException(ContextException):
    """When makefile.py, makefile.yaml, makefile.yml not found (at least one needed)"""

    def __init__(self, path: str):
        super().__init__('The directory "%s" should contain at least makefile.py, makefile.yaml or makefile.yml' % path)


class PythonContextFileNotFoundException(ContextFileNotFoundException):
    """When makefile.py is not found"""

    def __init__(self, path: str):
        super().__init__('Python context file not found at "%s"' % path)


class NotImportedClassException(ContextException):
    """When class was not imported"""

    def __init__(self, exc: ImportError):
        # wraps the raw ImportError into a user-friendly, Makefile-centric message
        super().__init__(
            'Your Makefile contains a reference to not available or not installed Python module "%s"' % str(exc)
        )
class EnvironmentVariablesFileNotFound(ContextException):
    """.env file specified, but not existing"""

    def __init__(self, path: str, lookup_paths: list):
        # lists all searched directories to make the misconfiguration easy to spot
        super().__init__(
            'Specified file "%s" as environment variable provider does not exist. Looked in: %s' % (
                path, str(lookup_paths)
            )
        )
class RuntimeException(Exception):
    """Base for errors happening at task execution time (not at parsing time)."""
    pass


class MissingInputException(RuntimeException):
    """Raised when neither the commandline switch nor the fallback env variable was provided."""

    def __init__(self, arg_name: str, env_name: str):
        super().__init__('Either "%s" switch not used, either "%s" was not defined in environment' % (
            arg_name, env_name
        ))
class CommandlineParsingError(RuntimeException):
    """Errors in parsing the commandline block syntax, e.g. {@retry 3}:task{/@}"""

    @staticmethod
    def from_block_header_parsing_exception(block_header: str) -> 'CommandlineParsingError':
        return CommandlineParsingError('Cannot parse block header "{}"'.format(block_header))

    @staticmethod
    def from_block_modifier_declared_twice(name: str, block_header: str) -> 'CommandlineParsingError':
        # bugfix: the original message was missing the closing quote after the block header
        return CommandlineParsingError('Cannot declare "{}" twice in block "{}"'.format(name, block_header))

    @staticmethod
    def from_block_unknown_modifier(header: str, e: Exception) -> 'CommandlineParsingError':
        return CommandlineParsingError('Block "{}" contains invalid modifier, raised error: {}'.format(header, str(e)))

    @staticmethod
    def from_nested_blocks_not_allowed(token: str, header: str) -> 'CommandlineParsingError':
        return CommandlineParsingError('Nesting blocks "{}" not allowed, attempted inside block "{}"'
                                       .format(token, header))

    @staticmethod
    def from_block_closing_not_found(pos: int):
        return CommandlineParsingError('Parsing exception: Closing character "}" not found for {@ opened at %i' % pos)

    @staticmethod
    def from_block_ending_not_found(block: str):
        return CommandlineParsingError('Parsing exception: Block ending - %s not found' % block)
import os
import sys
from typing import Union
from subprocess import check_output, Popen, DEVNULL, CalledProcessError
from tempfile import NamedTemporaryFile
from abc import ABC as AbstractClass, abstractmethod
from copy import deepcopy
from rkd.process import check_call
from .api.inputoutput import IO
from . import env
class TaskUtilities(AbstractClass):
    """
    Internal helpers for TaskInterface implementations
    """

    @abstractmethod
    def io(self) -> IO:
        pass

    def silent_sh(self, cmd: str, verbose: bool = False, strict: bool = True,
                  env: dict = None) -> bool:
        """
        sh() shortcut that catches errors and displays using IO().error_msg()

        :return: True on success, False when the subprocess exits with a non-zero code
        """

        # kwargs is not used on purpose. For static analysis.
        try:
            self.sh(cmd=cmd, capture=False, verbose=verbose, strict=strict, env=env)
            return True

        except CalledProcessError as e:
            self.io().error_msg(str(e))
            return False

    @staticmethod
    def get_rkd_binary():
        """Gets the command how RKD was launched"""

        if env.binary_name():
            return env.binary_name()

        binary = sys.argv[0]
        sys_executable_basename = os.path.basename(sys.executable)

        if "-m unittest" in binary or "-m pytest" in binary:
            return binary.split(' ')[0] + ' -m rkd.core'

        if binary.endswith('/pytest') or binary.endswith('/py.test'):
            return sys_executable_basename + ' -m rkd.core'

        # as a Python module: "python -m rkd.core" for example
        # bugfix: the original compared binary[:-3] (everything BUT the last 3 chars)
        # to '.py', so this branch was effectively dead code - use the suffix instead
        if binary[-3:] == '.py':
            return '%s -m %s' % (sys.executable, os.path.basename(os.path.dirname(binary)))

        if sys_executable_basename.startswith('python'):
            return binary

        # using a script eg. "rkd"
        return sys.executable

    def sh(self, cmd: str, capture: bool = False, verbose: bool = False, strict: bool = True,
           env: dict = None, use_subprocess: bool = False) -> Union[str, None]:
        """ Executes a shell script in bash. Throws exception on error.
            To capture output set capture=True

        :raises CalledProcessError: when the shell script exits with a non-zero code
        """

        self.io().debug('sh(%s)' % cmd)
        is_debug = self.io().is_log_level_at_least('debug')

        # cmd without environment variables
        original_cmd = deepcopy(cmd)

        # set strict mode, it can be disabled manually
        if strict:
            cmd = 'set -euo pipefail; ' + cmd

        if verbose:
            cmd = 'set -x; ' + cmd

        # append environment variables in order
        if env:
            env_str = ""

            for name, value in env.items():
                value = '' if value is None else str(value).replace('"', '\\"')
                env_str = env_str + (" export %s=\"%s\";\n" % (name, value))

            cmd = env_str + cmd

        bash_script = "#!/bin/bash -eopipefail \n" + cmd
        # %RKD% is a placeholder that expands to the command RKD itself was started with
        bash_script = bash_script.replace('%RKD%', self.get_rkd_binary())

        if not capture:
            with NamedTemporaryFile() as bash_temp_file:
                bash_temp_file.write(bash_script.encode('utf-8'))
                bash_temp_file.flush()

                # in debug mode show the full generated script, not only the user's command
                check_call('bash ' + bash_temp_file.name,
                           script_to_show=original_cmd if not is_debug else bash_script,
                           use_subprocess=use_subprocess)

            return

        # capture mode: feed the script through a pipe as bash's stdin
        read, write = os.pipe()
        os.write(write, bash_script.encode('utf-8'))
        os.close(write)

        return check_output('bash', shell=True, stdin=read).decode('utf-8')

    def py(self, code: str = '', become: str = None, capture: bool = False,
           script_path: str = None, arguments: str = '') -> Union[str, None]:
        """Executes a Python code in a separate process

        Provide exactly one of "code" (inline source) or "script_path" (existing file).
        """

        if (not code and not script_path) or (code and script_path):
            raise Exception('You need to provide only one of "code" or "script_path"')

        read, write = os.pipe()
        os.write(write, code.encode('utf-8'))
        os.close(write)

        cmd = 'python'
        py_temp_file = None

        if script_path:
            cmd += ' ' + script_path + ' '

        if code:
            # delete=False: the file must survive past the "with" so the subprocess can read it
            with NamedTemporaryFile(delete=False) as py_temp_file:
                py_temp_file.write(code.encode('utf-8'))
                py_temp_file.flush()

                cmd += ' ' + py_temp_file.name

        if become:
            cmd = "sudo -E -u %s %s" % (become, cmd)

        # expose RKD's own launch command and python path to the child process
        os.environ['RKD_BIN'] = self.get_rkd_binary()
        os.environ['RKD_CTX_PY_PATH'] = ":".join(reversed(sys.path))

        if not capture:
            check_call(cmd + ' ' + arguments, script_to_show=code)

            if py_temp_file:
                os.unlink(py_temp_file.name)

            return

        out = check_output(cmd + ' ' + arguments, shell=True, stdin=read).decode('utf-8')

        if py_temp_file:
            os.unlink(py_temp_file.name)

        return out

    def exec(self, cmd: str, capture: bool = False, background: bool = False) -> Union[str, None]:
        """ Starts a process in shell. Throws exception on error.
            To capture output set capture=True
        """

        if background:
            if capture:
                raise Exception('Cannot capture output from a background process')

            Popen(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL)
            return

        if not capture:
            check_call(cmd)
            return

        return check_output(cmd, shell=True).decode('utf-8')

    def rkd(self, args: list, verbose: bool = False, capture: bool = False) -> str:
        """ Spawns an RKD subprocess
        """

        bash_opts = 'set -x; ' if verbose else ''
        args_str = ' '.join(args)

        return self.sh(bash_opts + ' %%RKD%% --no-ui %s' % args_str, capture=capture)
from typing import List, Dict, Optional
from copy import deepcopy
from .contract import TaskDeclarationInterface
from .contract import GroupDeclarationInterface
from .contract import TaskInterface
from .inputoutput import get_environment_copy
from ..argparsing.model import ArgumentBlock
from ..exception import DeclarationException
from uuid import uuid4
class TaskDeclaration(TaskDeclarationInterface):
    """Binds a TaskInterface instance with its runtime metadata: env, args, workdir and block."""

    _task: TaskInterface
    _env: Dict[str, str]      # environment at all
    _user_defined_env: list   # list of env variables overridden by user
    _args: List[str]
    _block: ArgumentBlock = None
    _unique_id: str
    _workdir: Optional[str]

    def __init__(self, task: TaskInterface, env: Dict[str, str] = None, args: List[str] = None, workdir: str = None):
        if env is None:
            env = {}

        if args is None:
            args = []

        if not isinstance(task, TaskInterface):
            raise DeclarationException('Invalid class: TaskDeclaration needs to take TaskInterface as task argument')

        self._unique_id = uuid4().hex
        self._task = task
        self._env = merge_env(env)
        self._args = args
        self._workdir = workdir
        self._user_defined_env = list(env.keys())

    def to_full_name(self):
        return self._task.get_full_name()

    def with_env(self, envs: Dict[str, str]):
        """ Immutable environment setter. Produces new object each time. """

        copy = self._clone()
        copy._env = envs

        return copy

    def with_args(self, args: List[str]):
        """ Immutable arguments setter. Produces new object each time """

        copy = self._clone()
        copy._args = args

        return copy

    def with_user_overridden_env(self, env_list: list):
        """ Immutable arguments setter. Produces new object each time """

        copy = self._clone()
        copy._user_defined_env = env_list

        return copy

    def with_connected_block(self, block: ArgumentBlock):
        """Immutable arguments setter. Produces new object each time
        Block should be a REFERENCE to an object, not a copy
        """

        copy = self._clone()
        copy._block = block

        return copy

    def _clone(self) -> 'TaskDeclaration':
        """Clone securely the object. There fields shared across objects as references could be kept"""

        copy = deepcopy(self)
        copy._unique_id = uuid4().hex
        # the block must stay a SHARED reference (not deep-copied) so its retry counters are common
        copy._block = self._block

        return copy

    def get_args(self) -> List[str]:
        return self._args

    def get_task_to_execute(self) -> TaskInterface:
        return self._task

    def to_list(self) -> list:
        return [self]

    def get_env(self):
        return self._env

    def get_user_overridden_envs(self) -> list:
        """ Lists environment variables which were overridden by user """

        return self._user_defined_env

    def get_group_name(self) -> str:
        split = self.to_full_name().split(':')
        return split[1] if len(split) >= 3 else ''

    def get_task_name(self) -> str:
        split = self.to_full_name().split(':')

        if len(split) >= 3:
            return split[2]

        try:
            return split[1]
        except IndexError:
            # bugfix: lists raise IndexError, not KeyError - the original fallback
            # was unreachable and a name without ":" crashed instead of returning the full name
            return self.to_full_name()

    def get_description(self) -> str:
        """Short description: explicit description or first line of the task's docstring."""
        task = self.get_task_to_execute()

        if task.get_description():
            return task.get_description()

        return task.__doc__.strip().split("\n")[0] if task.__doc__ else ''

    def get_full_description(self) -> str:
        """Full description: explicit description or whole docstring of the task."""
        task = self.get_task_to_execute()

        if task.get_description():
            return task.get_description()

        return task.__doc__.strip() if task.__doc__ else ''

    def block(self) -> ArgumentBlock:
        return self._block

    @staticmethod
    def parse_name(name: str) -> tuple:
        """Split a full name like ":harbor:gateway:reload" into (":reload", ":harbor:gateway")."""
        split = name.split(':')
        task_name = ":" + split[-1]
        group = ":".join(split[:-1])

        return task_name, group

    def format_task_name(self, name: str) -> str:
        return self.get_task_to_execute().format_task_name(name)

    def get_unique_id(self) -> str:
        """
        Unique ID of a declaration is a TEMPORARY ID created during runtime to distinct even very similar declarations
        """
        return self._unique_id

    @property
    def workdir(self) -> str:
        if not self._workdir:
            return '.'

        return self._workdir

    def __str__(self):
        return 'TaskDeclaration<%s>' % self.get_task_to_execute().get_full_name()
class GroupDeclaration(GroupDeclarationInterface):
    """ Internal DTO: Processed definition of TaskAliasDeclaration into TaskDeclaration """

    _name: str
    _declarations: List[TaskDeclaration]
    _description: str

    def __init__(self, name: str, declarations: List[TaskDeclaration], description: str):
        self._name = name
        self._declarations = declarations
        self._description = description

    def get_declarations(self) -> List[TaskDeclaration]:
        return self._declarations

    def get_name(self) -> str:
        return self._name

    def get_group_name(self) -> str:
        split = self._name.split(':')
        return split[1] if len(split) >= 3 else ''

    def get_task_name(self) -> str:
        split = self._name.split(':')

        if len(split) >= 3:
            return split[2]

        try:
            return split[1]
        except IndexError:
            # bugfix: lists raise IndexError, not KeyError - the original fallback
            # was unreachable and an empty name crashed instead of returning the name
            return self._name

    def to_full_name(self):
        return self.get_name()

    def get_description(self) -> str:
        return self._description

    def format_task_name(self, name: str) -> str:
        return name
class TaskAliasDeclaration:
    """ Allows to define a custom task name that triggers other tasks in proper order """

    _name: str
    _arguments: List[str]
    _env: Dict[str, str]
    _user_defined_env: list  # list of env variables overridden by user
    _description: str

    def __init__(self, name: str, to_execute: List[str], env: Dict[str, str] = None, description: str = ''):
        """
        :param name: alias task name
        :param to_execute: task names (with arguments) to run when the alias is invoked
        :param env: per-alias environment variables.
                    Bugfix: the default used to be a shared mutable dict ``{}``;
                    it is now None to avoid cross-instance state sharing
        :param description: human-readable description
        """
        if env is None:
            env = {}

        self._name = name
        self._arguments = to_execute
        self._env = merge_env(env)
        self._user_defined_env = list(env.keys())
        self._description = description

    def get_name(self):
        return self._name

    def get_arguments(self) -> List[str]:
        return self._arguments

    def get_env(self) -> Dict[str, str]:
        return self._env

    def get_user_overridden_envs(self) -> list:
        """ Lists environment variables which were overridden by user """

        return self._user_defined_env

    def get_description(self) -> str:
        return self._description
def merge_env(env: Dict[str, str]):
    """Merge custom environment variables set per-task with system environment

    NOTE(review): the system environment takes PRECEDENCE over the per-task values
    (update() overwrites duplicated keys) - looks intentional, confirm before changing.
    """

    merged_dict = deepcopy(env)
    merged_dict.update(get_environment_copy())

    return merged_dict
from copy import deepcopy
from typing import List, Dict
class TaskArguments(object):
    """Value object pairing a task name with its commandline switches."""

    _name: str
    _args: list

    def __init__(self, task_name: str, args: list):
        self._name = task_name
        self._args = args

    def __repr__(self):
        return f'TaskCall<{self._name} ({self._args})>'

    def name(self):
        return self._name

    def args(self):
        return self._args

    def with_args(self, new_args: list) -> 'TaskArguments':
        """Immutable setter - returns a deep copy with the arguments replaced."""
        replaced = deepcopy(self)
        replaced._args = new_args

        return replaced
class ArgumentBlock(object):
    """
    ArgumentBlock
    =============

    Stores information about construction of blocks:
        {@block @error :notify @retry 2}:task1 --param1=value1 :task2{/@block}

    Lifetime:
      - Initially could store *body* (raw string, from example: ":task1 --param1=value1 :task2")
      - Later parsers are filling up the *_tasks* attribute with parsed TaskArguments
      - At last stage the *RKD's Executor component* is reading from ArgumentBlock and deciding if task should be
        retried, if there should be any error handling. The *_retry_counter_per_task* and
        *_retry_counter_on_whole_block* fields are mutable to track the progress of error handling

    Notice: Fields like on_error, on_rescue are filled up after block creation ex. in CommandlineParsingHelper class
            See usages of set_parsed_error_handler(), set_parsed_rescue()
    """

    body: List[str]
    on_rescue: List[TaskArguments]
    on_error: List[TaskArguments]
    retry_per_task: int = 0
    retry_whole_block: int  # set in __init__ from the "retry_block" modifier

    # NOTE: _tasks is set lazily via clone_with_tasks(); tasks() raises AttributeError before that
    _tasks: List[TaskArguments]
    _raw_attributes: dict
    _retry_counter_per_task: Dict['TaskDeclaration', int]
    _retry_counter_on_whole_block: int

    def __init__(self, body: List[str] = None, rescue: str = '', error: str = '', retry: int = 0,
                 retry_block: int = 0):
        """
        :param body: Can be empty - it means that block will have tasks filled up later
        :param rescue: raw @rescue modifier value, parsed later (see set_parsed_rescue())
        :param error: raw @error modifier value, parsed later (see set_parsed_error_handler())
        :param retry: how many times EACH task of the block may be retried
        :param retry_block: how many times the WHOLE block may be retried
        """

        if body is None:
            body = []

        self.body = body

        # non-numeric modifier values silently fall back to 0 (= no retry)
        try:
            self.retry_per_task = int(retry)
        except ValueError:
            self.retry_per_task = 0

        try:
            self.retry_whole_block = int(retry_block)
        except ValueError:
            self.retry_whole_block = 0

        # lazy-filled by parser on later stage
        self.on_rescue = []
        self.on_error = []
        self._retry_counter_per_task = {}
        self._retry_counter_on_whole_block = 0

        # those attributes will be lazy-parsed on later processing stage
        self._raw_attributes = {
            'rescue': rescue,
            'error': error,
        }

    @staticmethod
    def from_empty() -> 'ArgumentBlock':
        """Dummy instance"""

        instance = ArgumentBlock(
            body=[], rescue='', error='', retry=0
        )
        instance.set_parsed_rescue([])
        instance.set_parsed_error_handler([])

        return instance

    def clone_with_tasks(self, tasks_arguments: List[TaskArguments]):
        # deepcopy: retry counters of the clone must be independent from this instance
        cloned = deepcopy(self)
        cloned._tasks = tasks_arguments

        return cloned

    def tasks(self) -> List[TaskArguments]:
        # raises AttributeError when called before clone_with_tasks() - see __str__
        return self._tasks

    def with_tasks_from_first_block(self, blocks: List['ArgumentBlock']):
        """Copy tasks from the first given block; returns self unchanged when the list is empty."""
        try:
            return self.clone_with_tasks(blocks[0]._tasks)
        except IndexError:
            return self

    def raw_attributes(self) -> dict:
        return self._raw_attributes

    def set_parsed_error_handler(self, tasks_arguments: List[TaskArguments]) -> None:
        self.on_error = tasks_arguments

    def set_parsed_rescue(self, tasks_arguments: List[TaskArguments]) -> None:
        self.on_rescue = tasks_arguments

    def should_task_be_retried(self, declaration):
        # no retry available at all
        if self.retry_per_task < 1:
            return False

        # has to retry, but it is a first time
        if declaration not in self._retry_counter_per_task:
            return True

        return self._retry_counter_per_task[declaration] < self.retry_per_task

    def task_retried(self, declaration):
        """Takes notification from external source to internally note that given task was retried"""

        if declaration not in self._retry_counter_per_task:
            self._retry_counter_per_task[declaration] = 0

        self._retry_counter_per_task[declaration] += 1

    def whole_block_retried(self, declaration):
        # NOTE(review): no-op - _retry_counter_on_whole_block is never incremented anywhere,
        # so should_block_be_retried() always compares against 0. Confirm whether whole-block
        # retry accounting was supposed to happen here.
        pass

    def should_rescue_task(self):
        """
        Decides if given task should have executed a rescue set of tasks
        """

        return len(self.on_rescue) > 0

    def has_action_on_error(self):
        """
        Answers if there is a set of tasks that should be notified on error
        """

        return len(self.on_error) > 0

    def should_block_be_retried(self) -> bool:
        """
        Can the whole block of tasks be repeated from scratch?
        """

        if self.retry_whole_block < 1:
            return False

        # actual state < declared maximum
        return self._retry_counter_on_whole_block < self.retry_whole_block

    def __str__(self):
        text = str(self.body)

        try:
            text += ', ' + str(self.tasks())
        except AttributeError:
            # _tasks may not be filled yet - see the lifetime description above
            pass

        return 'ArgumentBlock<' + text + '>'
from typing import Union, Dict
from ..api.syntax import TaskDeclaration
from ..api.syntax import GroupDeclaration
from ..argparsing.model import ArgumentBlock
from ..inputoutput import SystemIO
STATUS_STARTED = 'started'
STATUS_ERRORED = 'errored'
STATUS_FAILURE = 'failure'
STATUS_SUCCEED = 'succeed'

# "in-rescue" can be treated as "succeed": while rescuing we no longer check
# the failed task's status - a new rescue-task is started and tracked instead
STATUS_RESCUE_STATE = 'in-rescue'


class TaskResult(object):
    """Recorded outcome of a single task execution."""

    task: TaskDeclaration
    status: str

    def __init__(self, task: TaskDeclaration, status: str):
        self.task = task
        self.status = status

    def has_succeed(self) -> bool:
        """Rescued tasks count as succeeded - see STATUS_RESCUE_STATE."""
        return self.status in (STATUS_SUCCEED, STATUS_RESCUE_STATE)
class ProgressObserver(object):
    """
    Carefully tracks tasks execution progress, have answers to questions such as:
    - were there any tasks failed?

    This service is a REGISTRY.
    """

    _io: SystemIO
    # execution results, keyed by TaskDeclaration.get_unique_id()
    _executed_tasks: Dict[str, TaskResult]

    def __init__(self, io: SystemIO):
        self._io = io
        self._executed_tasks = {}

    @staticmethod
    def _format_parent_task(parent: Union[GroupDeclaration, None]) -> str:
        # empty string when the task is not a part of any group/alias
        return ('[part of ' + parent.get_name() + ']') if parent else ''
def task_started(self, declaration: TaskDeclaration, parent: Union[GroupDeclaration, None], args: list):
""" When task is just started """
self._executed_tasks[declaration.get_unique_id()] = TaskResult(declaration, STATUS_STARTED)
self._io.info_msg(' >> Executing %s %s %s' % (
declaration.to_full_name(),
' '.join(args),
self._format_parent_task(parent)
))
def task_errored(self, declaration: TaskDeclaration, exception: Exception):
""" On exception catched in task execution """
self._set_status(declaration, STATUS_ERRORED)
self._io.print_opt_line()
self._io.error_msg('The task "%s" was interrupted with an %s' % (
declaration.to_full_name(),
str(exception.__class__)
))
self._io.print_separator()
self._io.print_opt_line()
def task_failed(self, declaration: TaskDeclaration, parent: Union[GroupDeclaration, None]):
""" When task returns False """
self._set_status(declaration, STATUS_FAILURE)
if not declaration.get_task_to_execute().is_silent_in_observer():
self._io.print_opt_line()
self._io.error_msg('The task "%s" %s ended with a failure' % (
declaration.to_full_name(),
self._format_parent_task(parent)
))
self._io.print_separator()
self._io.print_opt_line()
def task_succeed(self, declaration: TaskDeclaration, parent: Union[GroupDeclaration, None]):
""" When task success """
self._set_status(declaration, STATUS_SUCCEED)
if not declaration.get_task_to_execute().is_silent_in_observer():
self._io.print_opt_line()
self._io.success_msg('The task "%s" %s succeed.' % (
declaration.to_full_name(),
self._format_parent_task(parent)
))
self._io.print_separator()
self._io.print_opt_line()
def execution_finished(self):
"""
When all tasks were executed - the TaskExecutor finished its job
"""
if self.is_at_least_one_task_failing():
self._io.error_msg('Execution failed with %i failed tasks of %i total tasks scheduled for execution' % (
self.count_failed_tasks(), len(self._executed_tasks)
))
else:
self._io.success_msg('Successfully executed %i tasks.' % len(self._executed_tasks))
self._io.print_opt_line()
def _set_status(self, declaration: TaskDeclaration, status: str):
"""Internally mark given task as done + save status"""
self._io.internal('{} task, unique_id={}, status={}'.format(str(declaration), declaration.get_unique_id(), status))
self._executed_tasks[declaration.get_unique_id()] = TaskResult(declaration, status)
def is_at_least_one_task_failing(self) -> bool:
return self.count_failed_tasks() >= 1
def count_failed_tasks(self) -> int:
return len({
k: v for k, v in self._executed_tasks.items() if not v.has_succeed()
}.values())
def group_of_tasks_retried(self, block: ArgumentBlock):
"""
When a block failed and needs to be retried (even intermediate success steps)
"""
executed_tasks_that_belongs_to_block = {
k: v for k, v in self._executed_tasks.items() if v.task.block() is block
}
for declaration in executed_tasks_that_belongs_to_block.values():
self.task_retried(declaration)
def task_retried(self, declaration: TaskDeclaration):
self._io.warn_msg('Task "{}" was retried'.format(declaration.to_full_name()))
self._set_status(declaration, STATUS_STARTED)
def task_rescue_attempt(self, declaration: TaskDeclaration):
self._io.warn_msg('Task "{}" rescue attempt started'.format(declaration.to_full_name()))
self._set_status(declaration, STATUS_RESCUE_STATE) | /rkd.core-0.0.0.tar.gz/rkd.core-0.0.0/rkd/core/execution/results.py | 0.818374 | 0.188082 | results.py | pypi |
from rkgb.utils import *
from rkgb import Btools
from rkgb import Dtools
from rkgb import Stools
from rkgb import Ktools
from rkgb import Atools
import inspect
# ==========================
# ====== OUTPUT CLASS ======
# ==========================
class all_graphs():
    """Bundle of every graph representation produced by rk-GB for one module."""

    def __init__(self, bg, dg, sg, kg, list_sg, list_kg, cc, list_ano_S):
        values = (bg, dg, sg, kg, list_sg, list_kg, cc, list_ano_S)
        attribute_names = (
            "B_graph", "D_graph", "S_graph", "K_graph",
            "S_graph_list", "K_graph_list",
            "equivalent_classes", "list_ano_S",
        )
        for attr_name, value in zip(attribute_names, values):
            setattr(self, attr_name, value)
# ==========================
# ==========================
# ===== AUX FUNCTIONS ======
# ==========================
# to check if the given dict_inputs is correct
def print_inputs(model):
    """Print the signature of model.forward, one parameter per line."""
    signature = inspect.signature(model.forward)
    parameters = signature.parameters
    print(f"This module has {len(parameters)} parameters :")
    for parameter in parameters.values():
        print(parameter)
def make_inputs(model, model_inputs, model_kwargs=None):
    """Build the {parameter name: value} dict used to call model.forward.

    :param model: module whose `forward` signature is inspected
    :param model_inputs: positional input(s); either a dict of
        name -> value, an iterable of values, or a single value
    :param model_kwargs: optional dict of keyword arguments
        (default None, i.e. no kwargs — backward compatible)
    :return: dict mapping forward's parameter names to values
    :raises Exception: when model_kwargs is not a dict, when mandatory
        inputs are missing, or when too many positional values are given
    """
    # 1) Build dict_inputs
    # -- load params list --
    sign = inspect.signature(model.forward)
    params = list(sign.parameters.items())
    # -- build model_kwargs --
    if model_kwargs is None:
        model_kwargs = dict()
    elif not isinstance(model_kwargs, dict):
        raise Exception(
            f"model_kwargs must be a dict not {type(model_kwargs)}")
    # -- positional params --
    # parameters not already covered by kwargs ...
    not_kw_params = [
        p[0] for p in params
        if p[0] not in model_kwargs]
    # ... and, among them, those without a default value (mandatory)
    pos_params = [
        p[0] for p in params
        if (p[1].default is inspect._empty
            and p[0] not in model_kwargs)]
    # -- build positional inputs --
    if isinstance(model_inputs, dict):
        dict_inputs = model_inputs.copy()
        st_given = set(dict_inputs.keys())
        st_asked = set(pos_params)
        st_missing = st_asked - st_given
        nb_missing = len(st_missing)
        if nb_missing > 0:
            raise Exception(
                f"Missing {nb_missing} inputs for the model: {st_missing}")
    else:
        if isinstance(model_inputs, (set, list, tuple)):
            inputs = list(model_inputs)
        else:
            # a single bare value counts as one positional input
            inputs = [model_inputs]
        nb_given = len(inputs)
        nb_asked_pos = len(pos_params)
        nb_asked_tot = len(not_kw_params)
        if nb_given < nb_asked_pos:
            raise Exception(
                f"Too few values given in model_inputs "
                f"({nb_asked_pos - nb_given} missing).\n"
                f"You can use \"rkgb.print_inputs(<model>)\" to help you.")
        if nb_given > nb_asked_tot:
            raise Exception(
                f"Too many values given in model_inputs "
                f"({nb_given - nb_asked_tot} too many, including kwargs).\n"
                f"You can use \"rkgb.print_inputs(<model>)\" to help you.")
        # positional values are matched to parameters in signature order
        dict_inputs = dict(zip(not_kw_params, inputs))
    dict_inputs.update(model_kwargs)
    # 2) check types
    """ # -> might fail
    for (name,value) in dict_inputs.items():
        info = sign.parameters[name]
        if not info.annotation is inspect._empty:
            if not isinstance(value,info.annotation): raise Exception(
                f"According to model's signature, {name} argument "\
                f"is supposed to be of type {info.annotation}, but "\
                f"the given value has type {type(value)}")
    """
    return dict_inputs
# to check is the device is cuda
def print_cuda_warning_msg(things_not_on_cuda):
    """Warn the user that some inputs and/or the model do not live on cuda."""
    if things_not_on_cuda == []:
        return
    if len(things_not_on_cuda) == 1:
        main_line = f"{things_not_on_cuda[0]}'s device is not cuda !"
    else:
        joined = " and ".join(things_not_on_cuda)
        main_line = f"{joined}'s devices are not cuda !"
    print(
        f"/!\\/!\\=======================================/!\\/!\\\n"\
        f"/!\\/!\\= WARNING : {main_line}\n"\
        f"/!\\/!\\=======================================/!\\/!\\\n\n"\
        f"/!\\You ask rk-GB to measure the time and memory used by all\n"\
        f"/!\\the computation nodes. But measuring memory can only\n"\
        f"/!\\be done with cuda, therefore model and inputs' devices\n"\
        f"/!\\should be cuda to get relevant results. You can use the \n"\
        f"/!\\parameter \"check_device_is_gpu\" to avoid this warning.\n")
# ==========================
# ===== Main function ======
# ==========================
def make_all_graphs(model,
        model_inputs,
        model_kwargs=None,
        verbose=False,
        impose_device=True,
        bool_bg = True , bool_dg = True ,
        bool_sg = True , bool_kg = True ,
        bool_list_sg = True , bool_list_kg = True,
        check_device_is_gpu = True):
    r"""
    ***** this function returns an object with attributes *****
    -> .B_graph, .D_graph, .S_graph and .K_graph of the whole module
    -> .S_graph_list and .K_graph_list of the sequentialized module
    on which you can use :
    rkgb.print_graph and rkgb.print_graph_list or rkgb.print_all_graphs
    ***** args *****
    -> model must be a torch.nn.Module
    /!\ Most of the time errors occur because of jit.trace /!\
    /!\ so 'model' must be compatible with jit.trace /!\
    -> model_inputs :
        args of 'model', it can either be a simple
        variable or an iterable of variables.
    -> model_kwargs :
        optional dictionary in case you want to
        call 'model' with kwargs
    """
    # Each stage is built from the previous one (B -> D -> S -> K), so
    # requesting a later graph forces all earlier stages on.
    bool_list_sg = bool_list_sg or bool_list_kg
    bool_sg = bool_sg or bool_kg or bool_list_sg
    bool_dg = bool_dg or bool_sg
    bool_bg = bool_bg or bool_dg
    # check inputs
    global_vars.ref_verbose[0] = verbose
    dict_inputs = make_inputs(model,model_inputs,model_kwargs)
    # check device: memory measurement (K graphs) is only meaningful on cuda
    things_not_on_cuda = []
    if bool_kg and check_device_is_gpu:
        for (key,inp) in dict_inputs.items():
            if not isinstance(inp,torch.Tensor):
                print(f"Warning : {key} has type {type(inp)}")
            elif not inp.is_cuda:
                things_not_on_cuda.append(key)
        b = False
        for p in model.parameters():
            if not p.is_cuda: b=True
        if b: things_not_on_cuda.append("the model")
        print_cuda_warning_msg(things_not_on_cuda)
    device = small_fcts.get_device_and_check_all_same_device(
        model,dict_inputs)
    # -- protect original module from impact on eval mode --
    # -> save running stats (batch-norm-like layers mutate them on forward)
    saved_running_stats = dict()
    for m in model.modules():
        for batch_fct in global_vars.list_batch_fct:
            if isinstance(m,batch_fct):
                r_mean = m.running_mean
                r_var = m.running_var
                saved_running_stats[m] = (
                    r_mean.clone() if r_mean is not None else None,
                    r_var.clone() if r_var is not None else None,
                )
    # -- the whole module --
    if bool_bg:
        bg = Btools.make_B(model,dict_inputs,
            impose_device=impose_device,device=device)
    else: bg = None
    if bool_dg: dg = Dtools.B_to_D(bg,model,dict_inputs,device=device)
    else: dg = None
    if bool_sg: sg = Stools.D_to_S(
        dg,model=model,device=device)
    else: sg = None
    if bool_kg: kg = Ktools.S_to_K(sg,model,device=device)
    else: kg = None
    # -- sequentialized: cut into blocks, then solve each equivalence class once --
    if bool_list_sg:
        list_sg = Stools.cut(sg)
    else: list_sg = None
    if bool_list_kg:
        cc,list_kg,list_ano_S = Atools.S_list_to_K_list_eco(
            list_sg,model,device=device)
    else: list_kg = None ; cc = None ; list_ano_S = None
    # -- restore running_stats --
    for (m,(r_mean,r_var)) in saved_running_stats.items():
        m.running_mean = r_mean
        m.running_var = r_var
    return all_graphs(bg,dg,sg,kg,list_sg,list_kg,cc,list_ano_S)
# ==========================
# ==========================
# === printing functions ===
# ==========================
def print_graph(g, name=None, open=True, render_format="svg"):
    r"""To visualize a D, S or K graph.
    Builds a .gv file and renders it with Graphviz's dot into the
    "graphviz_dir" sub-directory.
    inputs:
    name (string): base name for the generated files (defaults to a name
        derived from the graph type).
    render_format (string): render format wanted for the output file.
    open (boolean): automatically open the file with the default reader.
    """
    if g is None:
        return
    dispatch = [
        (Dtools.D_graph, Dtools.print_D_graph),
        (Stools.S_graph, Stools.print_S_graph),
        (Ktools.K_graph, Ktools.print_K_graph),
    ]
    for graph_type, printer in dispatch:
        if isinstance(g, graph_type):
            printer(g, name, open, render_format)
            return
    raise Exception(
        "The graph given is neither of type D_graph, S_graph nor K_graph")
def print_graph_list(gl, name=None, open=True, render_format="svg"):
    r"""The equivalent of rkgb.print_graph for a list of graphs:
    renders all of them next to each other in a single pdf.
    Note:
        Originally intended to visualize a sequentialized graph,
        i.e. one graph cut by rkgb into blocks
        (an S_graph_list or a K_graph_list).
    """
    if gl is None:
        return
    if len(gl) == 0:
        print("Empty list, no graph to visualize")
        return
    first_type = type(gl[0])
    for index in range(1, len(gl)):
        if type(gl[index]) != first_type:
            raise Exception(
                f"All graphs in the list must share the same type"\
                f"type(gl[{index}])={type(gl[index])} and type(gl[0])={first_type}")
    if first_type == Stools.S_graph:
        Stools.print_S_graph_list(gl, name, open, render_format)
    elif first_type == Ktools.K_graph:
        Ktools.print_K_graph_list(gl, name, open, render_format)
    else:
        raise Exception(
            "The list given is neither a S_graph list nor K_graph list")
def print_all_graphs(a, name="", open=True, render_format="svg"):
    """Render every graph stored in an all_graphs object (see print_graph)."""
    single_graphs = (
        (a.D_graph, f"{name}_D_graph"),
        (a.S_graph, f"{name}_S_graph"),
        (a.K_graph, f"{name}_K_graph"),
    )
    for graph, graph_name in single_graphs:
        print_graph(graph, graph_name, open, render_format)
    print_graph_list(a.S_graph_list, f"{name}_seq_S_graph",
        open, render_format)
    print_graph_list(a.K_graph_list, f"{name}_seq_K_graph",
        open, render_format)
# ==========================
# ===================
# == TO TEST rk-GB ==
# ===================
def test_rkgb(module, model_inputs, **kwargs):
    """Build all graphs for `module` and print summary statistics.

    Returns the all_graphs result so callers can inspect it further.
    """
    rkgb_res = make_all_graphs(module, model_inputs, **kwargs)
    list_kg = rkgb_res.K_graph_list
    kg = rkgb_res.K_graph
    print("Generated all the graphs !\n")
    print(f"Equiv classes are : {rkgb_res.equivalent_classes}")
    print(
        f"So we have only {len(rkgb_res.equivalent_classes)} "
        f"blocks to solve ILP on, instead of {len(list_kg)}\n"
    )
    print("CONCERNING K_graph_list :")
    # NOTE: the comprehension variable `kg` does not leak in Python 3,
    # so the whole-module K_graph bound above is untouched here.
    list_nb_kcn = [len(kg.list_kcn) for kg in list_kg]
    list_nb_kdn = [len(kg.list_kdn) for kg in list_kg]
    tot_nb_kcn = sum(list_nb_kcn)
    tot_nb_kdn = sum(list_nb_kdn)
    str_list_nb_kcn = "+".join(str(i) for i in list_nb_kcn)
    str_list_nb_kdn = "+".join(str(i) for i in list_nb_kdn)
    print(
        f"{len(list_kg)} K_graphs in seq, with :\n"
        f"{str_list_nb_kcn} = {tot_nb_kcn} Comp nodes\n"
        f"{str_list_nb_kdn} = {tot_nb_kdn} Data nodes\n"
        f"=> total of {tot_nb_kcn + tot_nb_kdn} nodes\n"
    )
    print("CONCERNING phantoms impossible to restore :")
    # count computation nodes whose phantoms cannot be recomputed freely
    nb_ips = 0
    for kcn in kg.list_kcn:
        deps_ips = kcn.deps_impossible_to_restore
        if len(deps_ips) != 0:
            nb_ips += 1
            print(
                f"{kcn.main_target}'s phantoms must be "
                f"protected, because deps_impossible_to_restore :"
            )
            for kdn, ph_name in deps_ips:
                print(f"deps on {kdn} through {ph_name}")
    print(f"Total nb of special phantoms : {nb_ips}")
    return rkgb_res | /rkgb-1.0.1.tar.gz/rkgb-1.0.1/src/main.py | 0.502441 | 0.163813 | main.py | pypi |
# A way to recognize similar blocks
# e.g. for GPT2 -> Transformer blocks
from rkgb.utils import *
from rkgb import Stools
from rkgb import Ktools
# Note : to handle parameters anonymization :
# 1) I need to check "info" equality, -> I need the model
# 2) It's impossible to run inspection with anonymized params
# -> I need to reverse_translate the parameters first
# -> then re-translate, and finally reverse_translate
# -> for all the copies of the ano graph
# ==================
# ====== INIT ======
# ==================
class Graph_Translator():
    """Anonymizes (or de-anonymizes) the names inside S/K graphs so that
    structurally identical blocks (e.g. GPT2 transformer layers) compare equal."""

    def __init__(self,sg=None,model=None,reverse_translator=None):
        """ There are two ways to __init__ a graph_translator,
        either you give a S_graph and it creates a translator to
        anonymize the graph, or you give it a translator and it
        creates the reverse translator.
        Note: to fully translate S_graph, I try to translate
        parameters too, to do so I need to precise their shape."""
        if not reverse_translator is None:
            # build the inverse mapping of an existing translator
            self.reverse_translator = rev = reverse_translator
            self.main_dict = md = dict()
            self.param_dict = pd = dict()
            self.const_dict = cd = dict()
            for s1,s2 in rev.main_dict.items(): md[s2] = s1
            for s1,s2 in rev.const_dict.items(): cd[s2] = s1
            for s1,(s2,info) in rev.param_dict.items():
                pd[s2] = (s1,info)
        elif not sg is None:
            # we want to respect the original order of sn.num
            # -> so we first gather all the names, then sort
            # -> them based on sn.num, and anonymize them.
            ########## FIRST PART ##########
            all_real_vars = []
            all_real_csts = []
            all_real_params = []
            def handle_str(real_str):
                # classify a name: "__..." = variable, "self..." = parameter,
                # "_cst_..." = constant; collect each at most once
                if (real_str[:2] == "__"
                    and not real_str in all_real_vars):
                    all_real_vars.append(real_str)
                # NOTE(review): `and` binds tighter than `or`, so the
                # duplicate check only guards the "getattr(self." case;
                # "self."/"self[" names can be appended repeatedly —
                # looks like the whole prefix test should be parenthesized.
                # Confirm before changing (duplicates are later collapsed
                # by the param_dict keys, at the cost of skipped numbers).
                elif (real_str[:5] == "self."
                    or real_str[:5] == "self["
                    or real_str[:13] == "getattr(self."
                    and not real_str in all_real_params):
                    all_real_params.append(real_str)
                elif (real_str[:5] == "_cst_"
                    and not real_str in all_real_csts):
                    all_real_csts.append(real_str)
            def search_through(a):
                # recursively visit AST nodes / strings / iterables
                if isinstance(a,ast.AST):
                    if isinstance(a,ast.Name):
                        handle_str(a.id)
                    else:
                        for s in a._fields:
                            try: search_through(getattr(a,s))
                            except: pass
                elif isinstance(a,str): handle_str(a)
                elif hasattr(a,"__iter__"):
                    for sub_a in a: search_through(sub_a)
            snodes = [sg.init_node] + sg.nodes
            for sn in snodes:
                search_through(sn.main_code)
                search_through(sn.body_code)
            ########## SECOND PART ##########
            # Now that "all_real_vars" is complete, we generate the dict
            all_real_vars = sorted(
                all_real_vars,key = shared_methods.get_num_tar)
            self.main_dict = r_to_a = dict()
            nb_var = 0
            for real_name in all_real_vars:
                nb_var += 1
                ano_name = f"__{nb_var}_ano"
                r_to_a[real_name] = ano_name
            # Same for "all_real_csts", ie constants
            all_real_csts = sorted(
                all_real_csts,key = shared_methods.get_num_cst)
            self.const_dict = cst_r_to_a = dict()
            nb_cst = 0
            for real_name in all_real_csts:
                nb_cst += 1
                ano_name = f"_cst_{nb_cst}_ano"
                cst_r_to_a[real_name] = ano_name
            # Try to anonymize parameters:
            self.param_dict = param_dict = dict()
            nb_param = 0
            if model:
                for param_full_name in all_real_params: # strings
                    # -> e.g. param_full_name = "self.layer1.weight"
                    param = eval(param_full_name,{"self":model},{})
                    info_param = def_info.Var_info(param)
                    nb_param += 1
                    param_dict[param_full_name] = (
                        f"self.param_{nb_param}",info_param)
            # To finish, build the reverse_translator :
            self.reverse_translator = (
                Graph_Translator(reverse_translator=self))
        else:
            # identity translator (no sg, no reverse): translates nothing
            self.main_dict = dict()
            self.param_dict = dict()
            self.const_dict = dict()
            self.reverse_translator = self

    # ==================

    # ==================
    # === TRANSLATE ====
    # ==================
    def translate(self,x):
        # x's type can be :
        # -> str
        # -> ast.AST
        # -> Var_info (/!\ in place /!\)
        # -> S_node (/!\ in place /!\)
        # -> K_C/D_node (/!\ in place /!\)
        # -> S_graph
        # -> K_graph
        # -> an iterable with elts of types mentioned above
        translate = self.translate
        # -- STR --
        if isinstance(x,str):
            if x[:2] == "__" and x in self.main_dict:
                return self.main_dict[x]
            elif x[:5] == "_cst_" and x in self.const_dict:
                return self.const_dict[x]
            # NOTE(review): same precedence issue as in handle_str —
            # `and` only guards the "getattr(self." case, so a bare
            # "self." name NOT present in param_dict (e.g. when the
            # translator was built without a model) would raise KeyError
            # below. Confirm the intended grouping before touching it.
            elif (x[:5] == "self."
                or x[:5] == "self["
                or x[:13] == "getattr(self."
                and x in self.param_dict):
                return self.param_dict[x][0]
            elif ".grad_fn" in x:
                # translate only the variable part of "<var>.grad_fn..."
                var = x.split('.')[0]
                if var in self.main_dict:
                    new_var = self.main_dict[var]
                    return new_var + x[len(var):]
            return x
        # -- AST --
        elif isinstance(x,ast.AST):
            # rebuild a fresh node of the same type with translated children
            ty = type(x)
            if ty == ast.Name:
                return ty(translate(x.id))
            elif ty == ast.Call:
                return ty(x.func,translate(x.args),translate(x.keywords))
            elif ty == ast.keyword:
                return ty(x.arg,translate(x.value))
            elif ty == ast.List or ty == ast.Tuple:
                return ty(translate(x.elts))
            elif ty == ast.Subscript:
                return ty(translate(x.value),x.slice)
            elif ty == ast.UnaryOp:
                return ty(x.op,translate(x.operand))
            elif ty == ast.BinOp:
                return ty(translate(x.left),x.op,translate(x.right))
            elif ty == ast.Assign:
                return ty(translate(x.targets),translate(x.value))
            elif ty == ast.Module:
                return ast_add_on.make_ast_module(translate(x.body))
            elif ty == ast.Constant:
                return x
            else: raise Exception(
                f"{x}'s type ({ty}) is not handled by the translator")
        # -- info --
        elif isinstance(x,def_info.Var_info):
            new_x = x.copy()
            new_x.data_owner_name = translate(new_x.data_owner_name)
            new_x.data_direct_parent_name = (
                translate(new_x.data_direct_parent_name))
            return new_x
        # -- S_NODE --
        elif isinstance(x,Stools.S_node): # /!\ inplace /!\
            # op done inplace because it's impossible to change deps/users
            x.main_code = translate(x.main_code)
            x.inplace_code= translate(x.inplace_code)
            x.body_code = translate(x.body_code)
            x.main_target = translate(x.main_target)
            x.all_targets = translate(x.all_targets)
            x.tensor_targets = translate(x.tensor_targets)
            x.inplace_targets = translate(x.inplace_targets)
            x.container_targets = translate(x.container_targets)
            # Since S_node.__hash__ isn't changed, we change dict inplace
            for req_sn,st in x.deps.items():
                x.deps[req_sn] = translate(st)
            for user_sn,st in x.users.items():
                x.users[user_sn] = translate(st)
            return ()
        # -- K_C_NODE --
        elif isinstance(x,Ktools.K_C_node): # /!\ inplace like S_node /!\
            for attr in [
                "main_code","inplace_code","body_code",
                "main_target","container_targets",
                "tensor_targets","all_targets",
                "inplace_targets","phantom_names",
                "alias_in_users_phantoms"]:
                setattr(x,attr,translate(getattr(x,attr)))
            mt = x.main_target
            x.name = f"fwd_{mt}" if x.is_fwd else f"bwd_{mt}"
            return ()
        # -- K_D_NODE --
        elif isinstance(x,Ktools.K_D_node): # /!\ inplace like S_node /!\
            for attr in [
                "main_target","container_targets",
                "tensor_targets","all_targets","inplace_targets",
                "alias_in_users_phantoms"]:
                setattr(x,attr,translate(getattr(x,attr)))
            mt = x.main_target
            x.name = f"{mt} {x.kdn_type}"
            # NOTE(review): unlike the S_node/K_C_node branches this one
            # returns None implicitly instead of () — confirm callers
            # never use the return value of an in-place translation.
        # -- S_GRAPH --
        elif isinstance(x,Stools.S_graph):
            sg = Stools.copy_S_graph(x) # to protect x : NOT inplace
            snodes = [sg.init_node] + sg.nodes
            translate(snodes)
            # dict_info is currently shared by all the graphs
            # thus it contains unknown names for each block
            # -> impossible to translate -> so I clean it up.
            # -> I also disconnect inputs'info from the previous block
            dict_info_keys = set(sg.dict_info.keys())
            if len(self.main_dict) != 0: # to avoid special case
                for k in dict_info_keys:
                    if k not in self.main_dict:
                        del sg.dict_info[k]
                    elif k in sg.direct_inputs:
                        info = sg.dict_info[k]
                        info.data_owner_name = k
                        info.data_direct_parent_name = k
                        info.is_inplace = False
                        info.is_view = False
            for attr in [
                "direct_inputs","dict_info",
                "dict_rand",
                "hidden_output","direct_outputs"]:
                setattr(sg,attr,translate(getattr(sg,attr)))
            new_dict_constants = dict()
            for old,new in self.const_dict.items():
                new_dict_constants[new] = x.dict_constants[old]
            sg.dict_constants = new_dict_constants
            # -> I do NOT translate hidden/direct_inputs
            return sg
        # -- K_GRAPH --
        elif isinstance(x,Ktools.K_graph):
            kg = Ktools.copy_K_graph(x)
            translate(kg.list_kcn)
            translate(kg.list_kdn)
            translate(kg.input_kdn_data)
            translate(kg.input_kdn_grad)
            # node names changed, so rebuild dict_kn keyed by new names
            dkn = list(kg.dict_kn.values()) ; kg.dict_kn.clear()
            for kn in dkn: kg.dict_kn[kn.name] = kn
            dict_info_keys = set(kg.dict_info.keys())
            if len(self.main_dict) != 0: # to avoid special case
                for k in dict_info_keys:
                    if k not in self.main_dict: del kg.dict_info[k]
            for attr in ["init_code","dict_info"]:
                setattr(kg,attr,translate(getattr(kg,attr)))
            new_dict_constants = dict()
            for old,new in self.const_dict.items():
                new_dict_constants[new] = x.dict_constants[old]
            kg.dict_constants = new_dict_constants
            for kdn in kg.list_kdn:
                kdn.info = kg.dict_info[kdn.main_target]
                new_set = set()
                for r in kdn.users_impossible_to_restore:
                    new_set.add((r[0],translate(r[1])))
                kdn.users_impossible_to_restore = new_set
            for kcn in kg.list_kcn:
                new_set = set()
                for r in kcn.deps_impossible_to_restore:
                    new_set.add((r[0],translate(r[1])))
                kcn.deps_impossible_to_restore = new_set
            return kg
        # -- ITERABLE --
        elif type(x) in [list,tuple,set]:
            return type(x)(translate(sub_x) for sub_x in x)
        elif isinstance(x,dict):
            return dict(translate(c) for c in x.items())
        elif x is None: return None
        else: return x

    def reverse_translate(self,x):
        """Apply the inverse mapping (ano -> real names)."""
        return self.reverse_translator.translate(x)
# ==================
# ==================
# == Utilisation ===
# ==================
# cc : connexe componente
def S_list_to_K_list_eco(
        list_sg,model,verbose=None,
        device=None,print_cc = False):
    """Convert a list of S_graphs into K_graphs, building each class of
    equivalent (anonymized-identical) blocks only once.

    Returns (cc, list_kg, tab_S_repr_cc): the connected components of
    equivalent block indices, the per-block K_graphs, and one anonymized
    representative S_graph per class.
    """
    nb_sg = len(list_sg)
    # 1) anonymize S_graphs and recognize similar S_graphs
    list_translator = [None] * nb_sg
    sg_num_to_cc_num = [None] * nb_sg
    tab_S_repr_cc = [] # cc_num -> S_graph
    cc_num_to_repr_sg_num = []
    for sg_num in range(nb_sg):
        sg = list_sg[sg_num]
        list_translator[sg_num] = translator = (
            Graph_Translator(sg,model=model))
        ano_sg = translator.translate(sg)
        # linear scan over known classes until a structural + param match
        b = False ; cc_num = 0 ; nb_cc = len(tab_S_repr_cc)
        while not b and cc_num < nb_cc:
            if ano_sg == tab_S_repr_cc[cc_num]:
                # -> We also need to manualy check param_info equalities
                # sort by the number in "self.param_<n>" names
                sort_key = lambda v : int(v[0][11:])
                repr_tr = list_translator[cc_num_to_repr_sg_num[cc_num]]
                ano_param_sg = sorted(
                    translator.param_dict.values(),key=sort_key)
                ano_param_repr = sorted(
                    repr_tr.param_dict.values(),key=sort_key)
                if ano_param_sg == ano_param_repr:
                    b = True
                else: cc_num += 1
            else: cc_num += 1
        if not b:
            # a brand new class: this block becomes its representative
            tab_S_repr_cc.append(ano_sg)
            cc_num_to_repr_sg_num.append(sg_num)
        sg_num_to_cc_num[sg_num] = cc_num
    # 1') Compute and print connexe components
    nb_cc = len(tab_S_repr_cc)
    cc = [[] for _ in range(nb_cc)]
    for sg_num in range(nb_sg):
        cc[sg_num_to_cc_num[sg_num]].append(sg_num)
    if print_cc:
        for cc_num in range(nb_cc):
            print(f"Connexe component n°{cc_num}: {cc[cc_num]}")
        print(
            f"We now have {nb_cc} blocks "\
            f"to handle, instead of {nb_sg}")
    # 2) move anonymized graphs from S to K
    # -> /!\ attention to params /!\
    # inspection cannot run on anonymized params, so de-anonymize the
    # parameters only, build K, then re-anonymize (see module header note)
    dict_info_global = list_sg[0].dict_info # we lost some global info
    dict_constants_global = list_sg[0].dict_constants
    Ktools.aux_init_S_to_K(model,verbose,device)
    tab_K_repr_cc = []
    for cc_num,ano_sg in enumerate(tab_S_repr_cc):
        repr_trans = list_translator[cc_num_to_repr_sg_num[cc_num]]
        tmp_trans_to_handle_params = Graph_Translator()
        tmp_trans_to_handle_params.param_dict = repr_trans.param_dict
        tmp_trans_to_handle_params.reverse_translator = (
            Graph_Translator(
                reverse_translator=tmp_trans_to_handle_params))
        save_dict_constants = ano_sg.dict_constants
        ano_sg = tmp_trans_to_handle_params.reverse_translate(ano_sg)
        ano_sg.dict_info.update(dict_info_global)
        ano_sg.dict_constants = save_dict_constants
        ano_kg = Ktools.aux_build_S_to_K(ano_sg,model,None)
        ano_kg = tmp_trans_to_handle_params.translate(ano_kg)
        ano_kg.dict_constants = save_dict_constants
        tab_K_repr_cc.append(ano_kg)
    # de-anonymize one K_graph copy per original block
    list_kg = []
    for sg_num,cc_num in enumerate(sg_num_to_cc_num):
        ano_kg = tab_K_repr_cc[cc_num]
        real_kg = list_translator[sg_num].reverse_translate(ano_kg)
        real_kg.dict_info.update(dict_info_global)
        real_kg.dict_constants.update(dict_constants_global)
        list_kg.append(real_kg)
    # 3) link the K blocks: rewire each block's input nodes onto the
    # previous block's output nodes
    for i in range(1,nb_sg):
        prev_kg = list_kg[i-1]
        kg = list_kg[i]
        real_inp_data = prev_kg.output_kdn_data
        real_inp_grad = prev_kg.output_kdn_grad
        fake_inp_data = kg.input_kdn_data
        fake_inp_grad = kg.input_kdn_grad
        kg.input_kdn_data = real_inp_data
        kg.input_kdn_grad = real_inp_grad
        for fst_kcn in fake_inp_data.users_global:
            fst_kcn.deps_global.discard(fake_inp_data)
            fst_kcn.deps_global.add(real_inp_data)
            real_inp_data.users_global.add(fst_kcn)
        for lst_kcn in fake_inp_grad.deps_global:
            lst_kcn.users_global.discard(fake_inp_grad)
            lst_kcn.users_global.add(real_inp_grad)
            real_inp_grad.deps_global.add(lst_kcn)
    #assert(real_inp_data.main_target == fake_inp_data.main_target)
    #assert(real_inp_grad.main_target == fake_inp_grad.main_target)
    # We cannot make this assertion because I don't
    # translate hidden inputs because we don't care
    # about how direct_inputs was generated. But it
    # implies that fake_inp_data targets are dummy.
    return cc,list_kg,tab_S_repr_cc | /rkgb-1.0.1.tar.gz/rkgb-1.0.1/src/Atools.py | 0.544317 | 0.344885 | Atools.py | pypi |
from rkgb.utils.imports import torch, sys
time_min_duration = 0
time_min_repeat = 5

# -> print debug messages
ref_verbose = [False]

def print_debug(*args, **kwargs):
    """Forward *args/**kwargs to print(), but only when verbose mode is on."""
    if not ref_verbose[0]:
        return
    print(*args, **kwargs)
# -> acceptance rate for two time measures to be declared equal
ref_reasonable_rate = [0.4]

def change_reasonable_rate(x):
    """Set the acceptance rate used when comparing time measures (x >= 0)."""
    assert x >= 0
    ref_reasonable_rate[0] = x

# -> to test phantoms detection
ref_test_phantoms_detection = [False]
# ==========================
# === LISTS OF FUNCTIONS ===
# ==========================
# Registries of function NAMES (strings) used by the graph builders to
# classify traced calls; list_batch_fct holds actual module classes.
list_python_modules = [
    "torch",
    "torch.nn.functional",
    "torch.Tensor",
    "torch._C._nn",
    "torch._C._fft",
    "torch.ops.aten",
]

# functions whose output is random (cannot be recomputed identically)
list_rand_fct = [
    "torch.randn",
    "torch.dropout",
    "torch.rand",
    "torch.randint",
    "torch.randperm",
    "torch.empty",
    "torch.rrelu",
]

# -> ONLY used for root nodes
# -> ie nodes without depedencies
list_cheap_fct = [
    "torch.add",
    "torch.sub",
    "torch.mul",
    "torch.div",
    "torch.floor_divide",
]
# -> OPTIONAL
list_cheap_fct.extend(["list constructor", "tuple constructor"])
# because we treat them in the same way

# operations returning a view over their input (no new storage)
list_view_fct = [
    "torch.adjoint",
    "torch.Tensor.adjoint",
    "torch.as_strided",
    "torch.Tensor.as_strided",
    "torch.Tensor.detach",
    "torch.diagonal",
    "torch.Tensor.diagonal",
    "torch.Tensor.expand",
    "torch.Tensor.expand_as",
    "torch.movedim",
    "torch.Tensor.movedim",
    "torch.narrow",
    "torch.Tensor.narrow",
    "torch.permute",
    "torch.Tensor.permute",
    "torch.select",
    "torch.Tensor.select",
    "torch.squeeze",
    "torch.Tensor.squeeze",
    "torch.transpose",
    "torch.Tensor.transpose",
    "torch.view_as_real",
    "torch.Tensor.unflatten",
    "torch.Tensor.unfold",
    "torch.unsqueeze",
    "torch.Tensor.unsqueeze",
    "torch.Tensor.view",
    "torch.Tensor.view_as",
    "torch.unbind",
    "torch.Tensor.unbind",
    "torch.split",
    "torch.Tensor.split",
    "torch.hsplit",
    "torch.Tensor.hsplit",
    "torch.vsplit",
    "torch.Tensor.vsplit",
    "torch.tensor_split",
    "torch.Tensor.tensor_split",
    "torch.split_with_sizes",
    "torch.Tensor.split_with_sizes",
    "torch.swapaxes",
    "torch.Tensor.swapaxes",
    "torch.swapdims",
    "torch.Tensor.swapdims",
    "torch.chunk",
    "torch.Tensor.chunk",
    "torch.Tensor.values",
    "torch.Tensor.indices",
]
# list imported from https://pytorch.org/docs/stable/tensor_view.html

# normalization layers whose running statistics must be saved/restored
list_batch_fct = [
    torch.nn.BatchNorm1d,
    torch.nn.BatchNorm2d,
    torch.nn.BatchNorm3d,
    torch.nn.SyncBatchNorm,
    torch.nn.InstanceNorm1d,
    torch.nn.InstanceNorm2d,
    torch.nn.InstanceNorm3d,
]

# dtype groupings used when generating inputs / checking info
float_dtype = [
    torch.float32,
    torch.float,
    torch.float64,
    torch.complex64,
    torch.complex128,
    torch.float16,
    torch.bfloat16
]
int_dtype = [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]
bool_dtype = [torch.bool]
# TorchScript replaces torch.dtype keywords by integer codes; this table
# maps code -> dtype (index == code, None for codes with no known dtype).
torchscript_dtype_numbers = [
    torch.uint8,
    torch.int8,
    torch.int16,
    torch.int32,
    torch.int64,
    torch.float16,
    torch.float32,
    torch.float64,
    None, # 8
    torch.complex64,
    torch.complex128,
    torch.bool,
    None, # 12
    None, # 13
    None, # 14
    torch.bfloat16
]
default_dtype = torch.float32

def get_torchscript_dtype(t):
    """Translate a TorchScript integer dtype code into a torch.dtype.

    :param t: either a torch.dtype (returned unchanged) or the integer
        code TorchScript substituted for it
    :return: the corresponding torch.dtype; unknown codes fall back to
        `default_dtype` with a warning on stderr
    :raises Exception: when t is neither a dtype nor an int
    """
    if isinstance(t, torch.dtype):
        return t
    if not isinstance(t, int):
        raise Exception(
            f"TorchScript usually changes torch.dtype by "
            f"weird integers, but here it's neither a dtype "
            f"nor an integer, what is it ??? : {t}"
        )
    # BUGFIX: out-of-range codes include NEGATIVE ints, which previously
    # indexed from the end of the table (e.g. -1 -> bfloat16); treat any
    # code outside [0, len) as unknown instead.
    if 0 <= t < len(torchscript_dtype_numbers):
        dt = torchscript_dtype_numbers[t]
    else:
        dt = None
    if dt is None:
        dt = default_dtype
        print("Warning : "
            f"TorchScript usually changes torch.dtype by "
            f"weird integers. For basic dtypes we know their "
            f"corresponding numbers, but here a {t} was found "
            f"what is the corresponding dtype ?? \n"
            f"{default_dtype} is used as the default dtype",
            file = sys.stderr
        )
    return dt
"""
# torchscript_dtype_numbers tabular has been created
# using the following code. jit.trace replaces dtype keywords
# by integers, which make the code impossible to run (WTF)
def test_dtype(dtype):
class T(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self,x):
return x.to("cpu",dtype=dtype)
t = T()
x = torch.randn(5)
tm = torch.jit.trace_module(t,{"forward":x})
for c in tm.code:
try:
n = int(c)
print(n,end="")
except: pass
for dtype in float_dtype + int_dtype + bool_dtype:
print(dtype,end=" ") ; test(dtype) ; print("")
"""
default_forced_kwargs = dict(
[
(("torch.batch_norm", [("momentum", 6, 0)])),
(("torch.instance_norm", [("momentum", 6, 0)])),
]
)
# This dict is used by default when force_special_kwargs=True
# -> dict of : fct_name -> (arg_name,arg_value) list to inforce
# We change some kwargs in the code to avoid changing values
# due to recomputation. For instance batchnorm statistics.
# ========================== | /rkgb-1.0.1.tar.gz/rkgb-1.0.1/src/utils/global_vars.py | 0.471223 | 0.640158 | global_vars.py | pypi |
import json
import csv
import io
import datetime
import aiohttp
from typing import Dict
from rki_covid_parser.const import (
DISTRICTS_URL,
DISTRICTS_URL_RECOVERED,
DISTRICTS_URL_NEW_CASES,
DISTRICTS_URL_NEW_RECOVERED,
DISTRICTS_URL_NEW_DEATHS,
VACCINATIONS_URL,
HOSPITALIZATION_URL
)
from rki_covid_parser.model.district import District
from rki_covid_parser.model.state import State
from rki_covid_parser.model.country import Country
# Maps ISO 3166-2:DE state codes (as used in the RKI vaccination feed)
# to the full German state names used elsewhere in the parser.
VaccinationCode2StateMap = {
    "DE-SH": "Schleswig-Holstein",
    "DE-HH": "Hamburg",
    "DE-NI": "Niedersachsen",
    "DE-HB": "Bremen",
    "DE-NW": "Nordrhein-Westfalen",
    "DE-HE": "Hessen",
    "DE-RP": "Rheinland-Pfalz",
    "DE-BW": "Baden-Württemberg",
    "DE-BY": "Bayern",
    "DE-SL": "Saarland",
    "DE-BE": "Berlin",
    "DE-BB": "Brandenburg",
    "DE-MV": "Mecklenburg-Vorpommern",
    "DE-SN": "Sachsen",
    "DE-ST": "Sachsen-Anhalt",
    "DE-TH": "Thüringen",
}
def generator_attributes_from_features(data):
    """Yield the 'attributes' dict of every feature in an ArcGIS response."""
    assert type(data) == dict
    for feature in data.get("features", []):
        assert "attributes" in feature
        yield feature["attributes"]
class RkiCovidParser:
    """Download Covid-19 figures from the RKI endpoints and aggregate them.

    After ``await load_data()`` the results are available in ``districts``
    (per Landkreis), ``states`` (per Bundesland) and ``country``.
    """

    def __init__(self, session: aiohttp.ClientSession):
        """Remember the aiohttp session and set up empty result containers."""
        self.session = session
        # Districts are keyed by their "RS" code, a zero-padded string (e.g. "01001").
        self.districts: Dict[str, District] = {}
        # States are keyed by their German name (e.g. "Bayern").
        self.states: Dict[str, State] = {}
        self.country = Country()

    async def load_data(self) -> None:
        """Load all data sources and merge the results."""
        await self._reset_states()
        await self._load_districts()
        await self._load_districts_recovered()
        await self._load_districts_new_cases()
        await self._load_districts_new_deaths()
        await self._load_districts_new_recovered()
        await self._merge_states()
        await self._merge_country()
        await self._load_hospitalization()
        await self._load_vaccinations()

    async def _reset_states(self) -> None:
        """Drop the state aggregates of a previous load_data() run."""
        self.states = {}

    async def _load_districts(self) -> None:
        """Load and parse the base district numbers."""
        data = await self._load_from_argcis(DISTRICTS_URL)
        await self._extract_districts(data)

    async def _load_districts_recovered(self) -> None:
        """Load and parse recovered cases per district."""
        data = await self._load_from_argcis(DISTRICTS_URL_RECOVERED)
        await self._extract_districts_recovered(data)

    async def _load_districts_new_cases(self) -> None:
        """Load and parse new cases per district."""
        data = await self._load_from_argcis(DISTRICTS_URL_NEW_CASES)
        await self._extract_districts_new_cases(data)

    async def _load_districts_new_deaths(self) -> None:
        """Load and parse new deaths per district."""
        data = await self._load_from_argcis(DISTRICTS_URL_NEW_DEATHS)
        await self._extract_districts_new_deaths(data)

    async def _load_districts_new_recovered(self) -> None:
        """Load and parse newly recovered cases per district."""
        data = await self._load_from_argcis(DISTRICTS_URL_NEW_RECOVERED)
        await self._extract_districts_new_recovered(data)

    async def _load_hospitalization(self) -> None:
        """Load hospitalization numbers (csv) and merge them into the states."""
        data = await self._load_csv_from_url(HOSPITALIZATION_URL)
        await self._extract_hospitalization(data)

    async def _load_vaccinations(self) -> None:
        """Load vaccination numbers (tsv) and merge them into the states."""
        data = await self._load_from_tsv(VACCINATIONS_URL)
        await self._extract_vaccinations(data)

    async def _extract_districts(self, data: dict) -> None:
        """Create a District per feature, keyed by its "RS" code."""
        for attributes in generator_attributes_from_features(data):
            district_id = attributes["RS"]
            self.districts[district_id] = District(attributes)

    async def _extract_districts_recovered(self, data: dict) -> None:
        """Attach the recovered count to each known district."""
        for attributes in generator_attributes_from_features(data):
            # "IdLandkreis" may lack the leading zero of the "RS" key.
            district_id = str(attributes["IdLandkreis"]).rjust(5, '0')
            recovered = attributes["recovered"]
            if district_id in self.districts:
                self.districts[district_id].recovered = recovered

    async def _extract_districts_new_cases(self, data: dict) -> None:
        """Attach the new-cases count to each known district."""
        for attributes in generator_attributes_from_features(data):
            district_id = str(attributes["IdLandkreis"]).rjust(5, '0')
            new_cases = attributes["newCases"]
            if district_id in self.districts:
                self.districts[district_id].newCases = new_cases

    async def _extract_districts_new_recovered(self, data: dict) -> None:
        """Attach the newly-recovered count to each known district."""
        for attributes in generator_attributes_from_features(data):
            district_id = str(attributes["IdLandkreis"]).rjust(5, '0')
            new_recovered = attributes["recovered"]
            if district_id in self.districts:
                self.districts[district_id].newRecovered = new_recovered

    async def _extract_districts_new_deaths(self, data: dict) -> None:
        """Attach the new-deaths count to each known district."""
        for attributes in generator_attributes_from_features(data):
            district_id = str(attributes["IdLandkreis"]).rjust(5, '0')
            new_deaths = attributes["newDeaths"]
            if district_id in self.districts:
                self.districts[district_id].newDeaths = new_deaths

    async def _extract_vaccinations(self, data: csv.DictReader) -> None:
        """Merge the vaccination numbers into the matching states."""
        assert isinstance(data, csv.DictReader)
        for row in data:
            assert "code" in row
            assert "vaccinationsTotal" in row
            assert "peopleFirstTotal" in row
            assert "peopleFullTotal" in row
            state_name = VaccinationCode2StateMap.get(row["code"])
            if state_name is not None and state_name in self.states:
                state = self.states[state_name]
                state.vaccinationTotal = int(row["vaccinationsTotal"])
                state.vaccinationFirst = int(row["peopleFirstTotal"])
                state.vaccinationFull = int(row["peopleFullTotal"])

    async def _extract_hospitalization(self, data: csv.DictReader) -> None:
        """Merge today's hospitalization numbers into the matching states."""
        assert isinstance(data, csv.DictReader)
        # age group value -> attribute suffix on State
        age_attribute = {
            '00+': 'Merged',
            '00-04': 'Baby',
            '05-14': 'Children',
            '15-34': 'Teen',
            '35-59': 'Grown',
            '60-79': 'Senior',
            '80+': 'Old',
        }
        today = str(datetime.date.today())
        for row in data:
            assert "Datum" in row
            assert "Bundesland" in row
            assert "Bundesland_Id" in row
            assert "Altersgruppe" in row
            assert "7T_Hospitalisierung_Faelle" in row
            assert "7T_Hospitalisierung_Inzidenz" in row
            # skip entries that are not from today
            if str(row["Datum"]) != today:
                continue
            state_name = str(row["Bundesland"])
            # skip rows whose Bundesland is not a known state
            if state_name not in self.states:
                continue
            try:
                cases = int(row["7T_Hospitalisierung_Faelle"])
            except ValueError:
                cases = 0
            try:
                incidence = float(row["7T_Hospitalisierung_Inzidenz"])
            except ValueError:
                incidence = 0.0
            suffix = age_attribute.get(str(row["Altersgruppe"]))
            if suffix is not None:
                setattr(self.states[state_name], "hospitalizationCases" + suffix, cases)
                setattr(self.states[state_name], "hospitalizationIncidence" + suffix, incidence)

    async def _load_from_argcis(self, url: str) -> dict:
        """GET `url` and return the body parsed as JSON.

        Parsed manually because the endpoint does not send the
        'application/json' content-type that response.json() expects.
        """
        response = await self.session.get(url)
        body = await response.text()
        return json.loads(body)

    async def _load_from_tsv(self, url: str) -> csv.DictReader:
        """GET `url` and wrap the body in a tab-separated DictReader."""
        response = await self.session.get(url)
        body = await response.text()
        return csv.DictReader(io.StringIO(body), dialect=csv.excel_tab)

    async def _load_csv_from_url(self, url: str) -> csv.DictReader:
        """GET `url` and wrap the body in a comma-separated DictReader."""
        response = await self.session.get(url)
        body = await response.text()
        return csv.DictReader(io.StringIO(body), dialect=csv.excel)

    async def _merge_states(self) -> None:
        """Aggregate all districts into their states (created on demand)."""
        for district in self.districts.values():
            state = self.states.setdefault(district.state, State(district.state))
            state.accumulate(district)
            state.lastUpdate = district.lastUpdate

    async def _merge_country(self) -> None:
        """Aggregate all districts into a fresh country total."""
        self.country = Country()
        for district in self.districts.values():
            self.country.accumulate(district)
            self.country.lastUpdate = district.lastUpdate
#Import required modules
import csv
import numpy as np
import datetime
import dateutil.parser as parser
import os
# Constants: lookup tables mapping the csv's categorical values to array indices.
_AGE_GROUPS = {'A00-A04': 0, 'A05-A14': 1, 'A15-A34': 2, 'A35-A59': 3, 'A60-A79': 4, 'A80+': 5, 'unbekannt': 6}
_GENDERS = {'M': 0, 'W': 1, 'unbekannt': 2}
# 'Meldedatum' = reporting date, 'Refdatum' = reference date
_DATE_TYPES = {'Meldedatum':0 ,'Refdatum':1}
# 'Fall' = case, 'Todesfall' = death
_CASE_TYPES = {'Fall':0 ,'Todesfall':1}
#Filter class
class Filter:
    """Wraps a (7 age groups x 3 genders) case matrix for one query result.

    Offers views of the matrix grouped by gender, age, both, or as a total.
    """

    def __init__(self, cases, case_description):
        """Construct the object.

        Parameters
        ----------
        cases : numpy.ndarray
            Array of shape (7, 3) holding the covid cases of one query.
        case_description : str
            Description of the case type, used as key (prefix) in the
            dictionaries returned by the by_* methods.
        """
        self.cases = cases
        self.case_description = case_description

    def __str__(self):
        """Return the string representation of the underlying ndarray."""
        return str(self.cases)

    def values(self):
        """Return the raw ndarray."""
        return self.cases

    def _distribution(self, totals, labels, frequency, decimals):
        """Build a ``{description_label: value}`` dict from 1-D totals.

        Absolute values are rounded to one decimal place, relative ones to
        `decimals`. Returns None for an unknown `frequency` (when the total
        is non-zero) — kept for backward compatibility.
        """
        overall = totals.sum(axis=0)
        if frequency.lower() == 'absolute' or overall == 0:
            return {self.case_description + '_' + label: round(totals[index], 1)
                    for index, label in enumerate(labels)}
        if frequency.lower() == 'relative':
            return {self.case_description + '_' + label: round(totals[index] / overall, decimals)
                    for index, label in enumerate(labels)}
        return None

    def by_gender(self, frequency: str = 'absolute', decimals: int = 3):
        """Return the cases grouped by gender.

        Parameters
        ----------
        frequency : str, optional
            'relative' or 'absolute' frequency for the output.
        decimals : int, optional
            Number of decimal places (relative output only).

        Returns
        -------
        dict
            Gender as key, corresponding value as value.
        """
        # axis 0 sums over the age groups, leaving one total per gender
        return self._distribution(self.cases.sum(axis=0), _GENDERS.keys(), frequency, decimals)

    def by_age(self, frequency: str = 'absolute', decimals: int = 3):
        """Return the cases grouped by age group.

        Parameters
        ----------
        frequency : str, optional
            'relative' or 'absolute' frequency for the output.
        decimals : int, optional
            Number of decimal places (relative output only).

        Returns
        -------
        dict
            Age group as key, corresponding value as value.
        """
        # axis 1 sums over the genders, leaving one total per age group
        return self._distribution(self.cases.sum(axis=1), _AGE_GROUPS.keys(), frequency, decimals)

    def by_ageandgender(self, frequency: str = 'absolute', decimals: int = 3):
        """Return the cases grouped by age group and gender.

        NOTE: unlike by_age()/by_gender(), an unknown `frequency` yields an
        empty dict here (historical behaviour, kept for compatibility).

        Parameters
        ----------
        frequency : str, optional
            'relative' or 'absolute' frequency for the output.
        decimals : int, optional
            Number of decimal places (relative output only).

        Returns
        -------
        dict
            '<description>_<age>_<gender>' as key, corresponding value as value.
        """
        return_data = {}
        overall = self.cases.sum(axis=0).sum(axis=0)
        for age_index, age in enumerate(_AGE_GROUPS.keys()):
            for gender_index, gender in enumerate(_GENDERS.keys()):
                key = self.case_description + '_' + age + '_' + gender
                if frequency.lower() == 'absolute' or overall == 0:
                    return_data[key] = round(self.cases[age_index][gender_index], 1)
                elif frequency.lower() == 'relative' and overall != 0:
                    return_data[key] = round(self.cases[age_index][gender_index] / overall, decimals)
        return return_data

    def by_cases(self, raw: bool = False, decimals: int = 1):
        """Return the total number of cases.

        Parameters
        ----------
        raw : bool, optional
            If True the bare number is returned instead of a dict.
        decimals : int, optional
            Number of decimal places.

        Returns
        -------
        dict or number
            ``{description: total}``, or the bare total when `raw` is True.
        """
        total = round(self.cases.sum(axis=0).sum(axis=0), decimals)
        if raw:
            return total
        return {self.case_description: total}
class covid_cases:
    """Loads the RKI_COVID19.csv dataset and answers aggregated case queries.

    The data is kept in a 6-dimensional ndarray indexed by
    [district, day, case type, date type, age group, gender]
    (see _CASE_TYPES / _DATE_TYPES / _AGE_GROUPS / _GENDERS).
    """

    def __init__(self):
        """Create an empty object; call load_rki_csv() or load_fromFile() next."""
        self._loaded_rki_cases = None  # 6-D case array, see class docstring
        self.data_actuality = ''       # data status string taken from the csv

    def load_rki_csv(self, csv_path: str = ''):
        """Load the Covid19 cases from the RKI_COVID19.csv file and process the data.

        Parameters
        ----------
        csv_path : str
            Path to the RKI_COVID19.csv file.
        """
        lk_ids = _load_lk_ids()
        dates = _load_dates()
        covid_cases = np.zeros((len(lk_ids), len(dates), 2, 2, 7, 3), dtype=int)
        data_status = None
        with open(csv_path, mode='r', encoding='UTF-8') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',', quotechar='"')
            next(csv_reader)  # skip header row
            for row in csv_reader:
                # Columns used: 4=age group, 5=gender, 6=case count,
                # 7=death count, 8=Meldedatum, 9=district id, 10=data status,
                # 11/12=case/death inclusion flags, 13=Refdatum
                # (presumably NeuerFall/NeuerTodesfall — confirm against the csv header).
                meldedatum_index = dates[parser.isoparse(row[8].replace("/", "-")).strftime("%Y-%m-%d %H:%M:%S")]
                refdatum_index = dates[parser.isoparse(row[13].replace("/", "-")).strftime("%Y-%m-%d %H:%M:%S")]
                agegroup_index = _AGE_GROUPS[row[4]]
                gender_index = _GENDERS[row[5]]
                lk_index = lk_ids[row[9]]
                if data_status is None:
                    data_status = row[10].split(',')[0]
                # flag in (0, 1) -> row counts towards the current totals
                if int(row[11]) in (0, 1):
                    covid_cases[lk_index, meldedatum_index, _CASE_TYPES['Fall'], _DATE_TYPES['Meldedatum'], agegroup_index, gender_index] += int(row[6])
                    covid_cases[lk_index, refdatum_index, _CASE_TYPES['Fall'], _DATE_TYPES['Refdatum'], agegroup_index, gender_index] += int(row[6])
                if int(row[12]) in (0, 1):
                    covid_cases[lk_index, meldedatum_index, _CASE_TYPES['Todesfall'], _DATE_TYPES['Meldedatum'], agegroup_index, gender_index] += int(row[7])
                    covid_cases[lk_index, refdatum_index, _CASE_TYPES['Todesfall'], _DATE_TYPES['Refdatum'], agegroup_index, gender_index] += int(row[7])
        self.data_actuality = data_status
        self._loaded_rki_cases = covid_cases

    def save_toFile(self, path: str = ''):
        """Save the loaded ndarray to a file.

        Parameters
        ----------
        path : str, optional
            Target path. Defaults to
            'RKI_Covid19_Cases_<date of the RKI_COVID19.csv file>'.
        """
        if path == '':
            path = 'RKI_Covid19_Cases_{}'.format(self.data_actuality)
        np.save(path, self._loaded_rki_cases)

    def load_fromFile(self, path: str):
        """Load a previously saved ndarray file.

        Parameters
        ----------
        path : str
            Path to the saved file.
        """
        self._loaded_rki_cases = np.load(path)

    @staticmethod
    def _normalize_date(date):
        """Convert date/datetime objects to the string keys used by _load_dates().

        Deliberately compares with type() instead of isinstance():
        datetime.datetime is a subclass of datetime.date, and only plain
        date objects must be combined with a midnight time.
        """
        if type(date) == datetime.date:
            return str(datetime.datetime.combine(date, datetime.time()))
        elif type(date) == datetime.datetime:
            return str(date)
        return date

    def _aggregate(self, day_slice, case_index, region_id, date_type):
        """Sum the (7 age groups x 3 genders) matrix over days and region.

        Parameters
        ----------
        day_slice : slice
            Range of day indices (as produced by _load_dates()) to sum over.
        case_index : int
            _CASE_TYPES value: 0 for cases ('Fall'), 1 for deaths ('Todesfall').
        region_id : str
            '0' for Germany, '1'..'16' for a state, > '1000' for a Landkreis.
        date_type : str
            'Meldedatum' or 'Refdatum'.

        Returns
        -------
        numpy.ndarray
            Array of shape (7, 3).
        """
        covid_cases = self._loaded_rki_cases
        datetype_index = _DATE_TYPES[date_type]
        if int(region_id) == 0:
            # whole country: sum over all districts, then over the days
            result = covid_cases[0:, day_slice, case_index, datetype_index].sum(axis=0).sum(axis=0)
        elif 1 <= int(region_id) <= 16:
            # federal state: accumulate every Landkreis whose id starts with the state id
            lk_ids = _load_lk_ids()
            result = np.zeros((7, 3), dtype=int)
            for lk_key, lk_index in list(lk_ids.items()):
                if lk_key[0:2] == region_id.zfill(2):
                    result = np.add(result, covid_cases[lk_index, day_slice, case_index, datetype_index].sum(axis=0))
        elif int(region_id) > 1000:
            # single Landkreis
            lk_index = _load_lk_ids()[region_id.zfill(5)]
            result = covid_cases[lk_index, day_slice, case_index, datetype_index].sum(axis=0)
        return result

    def cumCases(self, date, region_id='0', date_type='Meldedatum'):
        """Return the cumulated Covid19 cases for the given day and region.

        Parameters
        ----------
        date : str in iso format, datetime.date obj, datetime.datetime obj
            The desired date.
        region_id : str, optional
            ID of the desired region. The default is '0'.
        date_type : str, optional
            The type of date. The default is 'Meldedatum'.

        Returns
        -------
        Filter
            A Filter wrapping the aggregated cases.
        """
        date = self._normalize_date(date)
        dates = _load_dates()
        result = self._aggregate(slice(0, dates[date] + 1), _CASE_TYPES['Fall'], region_id, date_type)
        return Filter(result, 'kumFälle_{}'.format(date_type))

    def cumDeaths(self, date, region_id='0', date_type='Meldedatum'):
        """Return the cumulated Covid19 deaths for the given day and region.

        Parameters as in cumCases(). Returns a Filter.
        """
        date = self._normalize_date(date)
        dates = _load_dates()
        result = self._aggregate(slice(0, dates[date] + 1), _CASE_TYPES['Todesfall'], region_id, date_type)
        return Filter(result, 'kumTodesfälle_{}'.format(date_type))

    def newCases(self, date, region_id='0', date_type='Meldedatum'):
        """Return the new Covid19 cases for the given day and region.

        Parameters as in cumCases(). Returns a Filter.
        """
        date = self._normalize_date(date)
        dates = _load_dates()
        # a length-1 slice keeps _aggregate's summing uniform
        result = self._aggregate(slice(dates[date], dates[date] + 1), _CASE_TYPES['Fall'], region_id, date_type)
        return Filter(result, 'neueFälle_{}'.format(date_type))

    def newDeaths(self, date, region_id='0', date_type='Meldedatum'):
        """Return the new Covid19 deaths for the given day and region.

        Parameters as in cumCases(). Returns a Filter.
        """
        date = self._normalize_date(date)
        dates = _load_dates()
        result = self._aggregate(slice(dates[date], dates[date] + 1), _CASE_TYPES['Todesfall'], region_id, date_type)
        return Filter(result, 'neueTodesfälle_{}'.format(date_type))

    def newCasesTimespan(self, date, region_id='0', date_type='Meldedatum', timespan=1):
        """Return the new Covid19 cases of the last `timespan` days.

        NOTE: unlike cumCases(), `date` must be an iso string here
        (parser.isoparse is applied directly) — historical behaviour.

        Parameters as in cumCases(); `timespan` is the number of previous
        days included. Returns a Filter.
        """
        dates = _load_dates()
        start_date = parser.isoparse(date) - datetime.timedelta(days=timespan)
        result = self._aggregate(slice(dates[str(start_date)], dates[date]), _CASE_TYPES['Fall'], region_id, date_type)
        return Filter(result, 'neueFälle_{}Tage_{}'.format(timespan, date_type))

    def newDeathsTimespan(self, date, region_id='0', date_type='Meldedatum', timespan=1):
        """Return the new Covid19 deaths of the last `timespan` days.

        See newCasesTimespan() for the date-format caveat. Returns a Filter.
        """
        dates = _load_dates()
        start_date = parser.isoparse(date) - datetime.timedelta(days=timespan)
        result = self._aggregate(slice(dates[str(start_date)], dates[date]), _CASE_TYPES['Todesfall'], region_id, date_type)
        return Filter(result, 'neueTodesfälle_{}Tage_{}'.format(timespan, date_type))

    def activeCases(self, date, region_id='0', date_type='Meldedatum', days_infectious=14):
        """Return the active Covid19 cases for the given day and region.

        Parameters as in cumCases(); `days_infectious` is the number of days
        a case is considered active after the transmission of the infection.
        Returns a Filter.
        """
        date = self._normalize_date(date)
        dates = _load_dates()
        start_date = parser.isoparse(date) - datetime.timedelta(days=days_infectious)
        result = self._aggregate(slice(dates[str(start_date)], dates[date]), _CASE_TYPES['Fall'], region_id, date_type)
        return Filter(result, 'aktiveFälle_{}'.format(date_type))

    def sevenDayCaserate(self, date, region_id='0', date_type='Meldedatum'):
        """Return the new Covid19 cases of the 7 days before the given date.

        Parameters as in cumCases(). Returns a Filter.
        """
        date = self._normalize_date(date)
        dates = _load_dates()
        start_date = parser.isoparse(date) - datetime.timedelta(days=7)
        result = self._aggregate(slice(dates[str(start_date)], dates[date]), _CASE_TYPES['Fall'], region_id, date_type)
        return Filter(result, '7-TageFallzahl_{}'.format(date_type))

    def sevenDayIncidence(self, date, region_id='0', date_type='Meldedatum'):
        """Return the 7-day Covid19 cases per 100 000 residents.

        Parameters as in cumCases(). Returns a Filter.
        """
        date = self._normalize_date(date)
        dates = _load_dates()
        start_date = parser.isoparse(date) - datetime.timedelta(days=7)
        cases = self._aggregate(slice(dates[str(start_date)], dates[date]), _CASE_TYPES['Fall'], region_id, date_type)
        # scale to cases per 100 000 residents of the region
        divisor = _get_population(region_id) * (1 / 100000)
        result = np.divide(cases, divisor)
        return Filter(result, '7-TageInzidenz_{}'.format(date_type))

    def deathRate(self, date, region_id='0', days_infectious=14):
        """Return the death rate for the given date.

        Parameters as in cumCases(); `days_infectious` is the number of days
        a case is considered active after the transmission of the infection.
        Returns a Filter.
        """
        # Deaths are reported against the Meldedatum while the pool of
        # active cases is estimated from the reference (Ref) date.
        active_cases = self.activeCases(date=date, region_id=region_id, date_type='Refdatum', days_infectious=days_infectious).values()
        new_deaths = self.newDeaths(date=date, region_id=region_id, date_type='Meldedatum').values()
        result = np.divide(new_deaths, active_cases, where=active_cases != 0)
        return Filter(result, 'Todesrate')
#internal function to create dates dict
def _load_dates():
"""Return a dict with the dates from the start_date to the current date.
Returns
-------
dates : dict
Dictionary containing dates and indicies.
"""
dates = {}
end_date = datetime.datetime.now()
curr_date = datetime.datetime(2020, 1, 1)
counter = 0
while (curr_date <= end_date):
dates[str(curr_date)] = counter
curr_date = curr_date + datetime.timedelta(days=1)
counter += 1
return dates
#internal function to create Landkreise dict
def _load_lk_ids():
    """Return a dict mapping Landkreis ids (zero-padded to 5 digits) to row indices.

    The mapping is read from the bundled data/Landkreis_id.csv; the row
    order defines the district axis of the case array.

    Returns
    -------
    dict
        Dictionary containing Landkreis ids and indices.
    """
    lk_ids = {}
    path = os.path.join(os.path.dirname(__file__), 'data/Landkreis_id.csv')
    # context manager guarantees the file is closed even on parse errors
    with open(path, mode='r', encoding='UTF-8') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',', quotechar='"')
        next(csv_reader)  # skip header
        for index, row in enumerate(csv_reader):
            lk_ids[row[0].zfill(5)] = index
    return lk_ids
#internal function to get population
def _get_population(region_id):
    """Return the population for a given region.

    Parameters
    ----------
    region_id : str
        '0'..'16' selects a Bundesland table lookup, ids > 1000 a Landkreis.

    Returns
    -------
    int
        Population for the given region_id, or 0 when the id is unknown.
    """
    if 0 <= int(region_id) <= 16:
        data_file = 'data/Bundesland_id.csv'
        key = region_id.zfill(2)
    elif int(region_id) > 1000:
        data_file = 'data/Landkreis_id.csv'
        key = region_id.zfill(5)
    else:
        return 0
    path = os.path.join(os.path.dirname(__file__), data_file)
    # with-statement fixes the original's leaked file handle
    with open(path, mode='r', encoding='UTF-8') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',', quotechar='"')
        next(csv_reader)  # skip header
        for row in csv_reader:
            if row[0] == key:
                return int(row[2])
    return 0
# rkpython
## Description
This is a general use Python module.
For now, it only contains some functions to make reading and **writing to text files** easier, as well as a function that returns **information for reading csv files**.
## Installation
Use
```
pip install rkpython
```
Then import it in Python with
```python
import rkpython as rk
```
## Functions
### `rk.to_txt(l, path = 'text_file.txt', overwrite = False, verbose = False)`
Saves a list to a .txt file.
--------------
l : list, default is None
List object to be saved.
path : str, default is 'text_file.txt'
File path (with file name) you want to save to.
overwrite : bool, default is False
Overwrites the file if it exists.
verbose : boolean, default is False
Prints out a message if the operation was successful.
--------------
Examples :
>>> rk.to_txt(var_list, './documents/vars.txt', verbose = True)
File successfully written to ./documents/vars.txt
### `rk.read_txt(path = 'text_file.txt', verbose = False)`
Reads from a text file, saving the result as a list, where each line is one item.
--------------
path : str, default = 'text_file.txt'
File path (with file name) you want to read from. Can be any type of file (.txt, .csv...)
verbose : boolean, default is False
Prints out a message if the operation was successful.
--------------
Examples :
>>> var_list = rk.read_txt('./documents/vars.txt', verbose = True)
File successfully read from ./documents/vars.txt
### `rk.h_size(size)`
Converts a size in bytes to a humanly readable size, with 1 decimal number. Input an integer, returns a string.
--------------
size : float, int
Size of the object you want to convert, in bytes.
--------------
Examples :
>>> h_size(67108864)
'64.0 Mb'
### `rk.get_mem(nb_objects = 10)`
**Warning :** this function can't access the global variables so doesn't currently work. You can however copy the function from the source code and define it in your python session so that it will access the right variables.
Prints out a list of the largest objects stored in memory, as well as the total memory usage of global variables. Returns a string.
--------------
nb_objects : int, default = 10
Maximum number of items to be printed out.
--------------
Examples :
>>> get_mem(5)
Total usage : 25.3 Gb
5 largest objects :
_477 : 1.7 Gb
_529 : 1.7 Gb
_437 : 1.4 Gb
_412 : 1.3 Gb
_415 : 1.3 Gb
### `rk.csv_info(file)`
Returns information about a csv or text file, such as the encoding and separators infered using csv's Sniffer() function.
--------------
file : str
Path to the file you want to read.
--------------
csv_info().size :
Returns the size of the file as a string.
csv_info().separator :
Returns the infered separator as a string.
csv_info().quotechar :
Returns the infered quote character as a string, defaults to ["].
csv_info().encoding :
Returns the infered encoding using chardet. Defaults to ascii.
csv_info().rawdata :
Returns a 8192 byte sample of the file, unencoded.
csv_info().rows :
Returns the number of rows in the csv file.
csv_info().columns :
Returns the columns of the csv file.
csv_info().parameters :
Returns the separator, quotechar and encoding of the file to plug them in pandas or dask.
csv_info().info() :
Prints out the main characteristics of the file.
--------------
Examples :
>>> csv_info("table.csv").encoding
'utf-8'
>>> sep, quotechar, encoding = csv_info("table.csv").parameters
>>> df = pandas.read_csv("table.csv", sep=sep, quotechar=quotechar, encoding=encoding)
>>> print(sep, quotechar, encoding)
; " utf-8
### `rk.sql_dict(con, db_filter = '.*', table_filter = '.*', col_filter = '.*', strict=False)`
Creates a dictionary listing all databases, tables and columns of a MySql server. Results can be filtered using regex.
--------------
con : SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode)
db_filter : str or list of str, default is '.*'
Filters on databases containing the specified regular expression(s).
table_filter : str or list of str, default is '.*'
Filters on tables containing the specified regular expression(s).
column_filter : str or list of str, default is '.*'
Filters on columns containing the specified regular expression(s).
strict : boolean, default is False
Returns only strict matches instead of partial matches.
--------------
Examples :
>>> temp_dict = sql_dict(con = f'mysql://{user}:{pw}@{host}:{port}', col_filter = 'price')
>>> temp_dict.keys()
dict_keys(['products'])
>>> temp_dict['products'].keys()
dict_keys(['phones', 'laptops'])
>>> temp_dict['products']['phones']
['price_without_tax',
'price_with_tax']
| /rkpython-0.0.21.tar.gz/rkpython-0.0.21/README.md | 0.464659 | 0.840783 | README.md | pypi |
[![PyPI Version][pypi-image]][pypi-url]
[![Build Status][build-image]][build-url]
[![Code Coverage][coverage-image]][coverage-url]
<!-- Badges -->
[pypi-image]: https://img.shields.io/pypi/v/rkstiff
[pypi-url]: https://pypi.org/project/rkstiff/
[build-image]: https://github.com/whalenpt/rkstiff/actions/workflows/build.yml/badge.svg
[build-url]: https://github.com/whalenpt/rkstiff/actions/workflows/build.yml
[coverage-image]: https://codecov.io/gh/whalenpt/rkstiff/branch/master/graph/badge.svg
[coverage-url]: https://codecov.io/gh/whalenpt/rkstiff
# rkstiff #
Runge-Kutta integrating factor (IF) and exponential time-differencing (ETD) methods
for solving nonlinear-PDE's of the form <code>u<sub>t</sub> = Lu + NL(u)</code>.
Some examples of non-linear PDES that can be numerically solved using these methods are:
- Nonlinear Schrodinger equation (NLS)
- Kuramoto-Sivashinsky (KS)
- Korteweg-de Vries (KdV)
- Burgers
- Allen-Cahn
- Sine-Gordon
The adaptive step solver
options provided in this package are
1. ETD35 (5<sup>th</sup> order ETD with 3<sup>rd</sup> order embedding)
2. ETD34 (4<sup>th</sup> order ETD with 3<sup>rd</sup> order embedding)
3. IF34 (4<sup>th</sup> order IF with 3<sup>rd</sup> order embedding)
4. IF45DP (5<sup>th</sup> order IF with 4<sup>th</sup> order embedding)
The constant step solver options provided are
1. ETD4 (4<sup>th</sup> order ETD - Krogstad method)
2. ETD5 (5<sup>th</sup> order ETD - same as the 5th order method in ETD35)
3. IF4 (4<sup>th</sup> order IF - same as the 4th order method in IF34)
In general, one should
prefer ETD35 as it often has the best speed and stability for diagonal systems or diagonalized
non-diagonal systems. Because the RK coefficients can be costly
to compute, IF34 or constant step methods may be preferable in certain settings.
A detailed discussion of these solvers is provided in the journal article <a href = https://www.sciencedirect.com/science/article/pii/S0021999114006743> Exponential time-differencing with embedded Runge–Kutta adaptive step control </a>.
# Dependencies
Package requires
<ul>
<li> numpy </li>
<li> scipy </li>
</ul>
Tested with versions
<ul>
<li> numpy = 1.19.2 </li>
<li> scipy = 1.6.0 </li>
</ul>
# Usage #
Each of the solvers is a python class (UPPERCASE) stored in a module of the same name (lowercase). Initializing each class requires two arguments: a linear operator `L` in the form of a numpy array, and a nonlinear function `NL(u)`. The solvers can then be propagated either by using the solver.step function (user steps through time) or by using the solver.evolve function (stepping handled internally). For example
```python
from rkstiff import etd35
L = # some linear operator
def NL(u): # nonlinear function defined here
solver = etd35.ETD35(linop=L,NLfunc=NL)
u0 = # initial field to be propagated
t0 = # initial time
tf = # final time
uf = solver.evolve(u0,t0=t0,tf=tf)
```
By default, when using the function evolve, the field is stored at each step in a python list: u0,u1,...,uf are stored in solver.u. The corresponding times t0,t1,...,tf are stored in solver.t.
# Example #
Consider the Kuramoto-Sivashinsky (KS) equation:
<br>
u<sub>t</sub> = -u<sub>xx</sub> - u<sub>xxxx</sub> - uu<sub>x</sub>.
Converting to spectral space using a Fourier transform (F) we have
<br>
v<sub>t</sub> = k<sub>x</sub><sup>2</sup>(1- k<sub>x</sub><sup>2</sup>)v - F \{ F<sup>-1</sup> \{v\} F<sup>-1</sup>\{ i k<sub>x</sub> v\} \}
<br>
where v = F{u}. We can then plug L = k<sub>x</sub><sup>2</sup>(1- k<sub>x</sub><sup>2</sup>), and NL(u) = - F \{ F<sup>-1</sup> \{v\} F<sup>-1</sup>\{ i k<sub>x</sub> v\} \} into an rkstiff solver and propagate the field u in spectral space, converting back to real space when desired. For example, the python code may look something like this
```python
import numpy as np
from rkstiff import grids
from rkstiff import if34
# uniform grid spacing, real-valued u -> construct_x_kx_rfft
N = 8192
a,b = 0,32*np.pi
x,kx = grids.construct_x_kx_rfft(N,a,b)
L = kx**2*(1-kx**2)
def NL(uFFT):
u = np.fft.irfft(uFFT)
ux = np.fft.irfft(1j*kx*uFFT)
return -np.fft.rfft(u*ux)
u0 = np.cos(x/16)*(1.+np.sin(x/16))
u0FFT = np.fft.rfft(u0)
solver = if34.IF34(linop=L,NLfunc=NL)
ufFFT = solver.evolve(u0FFT,t0=0,tf=50,store_freq=20) # store every 20th step in solver.u and solver.t
U = []
for uFFT in solver.u:
U.append(np.fft.irfft(uFFT))
U = np.array(U)
t = np.array(solver.t)
```
The grid module in rkstiff has several useful helper functions for setting up spatial and spectral grids. Here we used it to construct grids for a real-valued `u` utilizing the real-valued numpy Fourier transform (rfft). The results of the KS 'chaotic' propagation are shown below.
<br>
<img width="300" src="https://raw.githubusercontent.com/whalenpt/rkstiff/master/images/KSfig.png">
# Installation #
From the github source
```bash
git clone https://github.com/whalenpt/rkstiff.git
cd rkstiff
python3 -m pip install .
```
PyPI install with a virtualenv (see the <a href = https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/> Python Packaging Authority </a> guide)
```bash
python3 -m venv env
source env/bin/activate
python3 -m pip install rkstiff
```
For use with Anaconda using the conda-forge channel (see the <a href = https://conda.io/projects/conda/en/latest/user-guide/getting-started.html> Getting started with conda guide</a>), from the terminal
```bash
conda create --name rkstiff-env
conda activate rkstiff-env
conda install rkstiff -c conda-forge
```
The demos require installation of the python `matplotlib` and `jupyter` packages in addition to `numpy` and `scipy`. The tests require installation of the python package `pytest`. These may be installed seperately or by using
```bash
python3 -m pip install '.[demo]'
python3 -m pip install '.[test]'
```
when installing from the rkstiff source directory
# License #
This project is licensed under the MIT License - see the [LICENSE](./LICENSE) file for details.
# Citation #
```text
@article{WHALEN2015579,
title = {Exponential time-differencing with embedded Runge–Kutta adaptive step control},
journal = {Journal of Computational Physics},
volume = {280},
pages = {579-601},
year = {2015},
author = {P. Whalen and M. Brio and J.V. Moloney}
}
```
# Contact #
Patrick Whalen - whalenpt@gmail.com
| /rkstiff-0.3.0.tar.gz/rkstiff-0.3.0/README.md | 0.519765 | 0.977926 | README.md | pypi |
# Burgers equation
* Physical space
\begin{align}
u_t + uu_x = \mu u_{xx}
\end{align}
* Spectral space: $\hat{u} = \mathscr{F}\{u\}$
\begin{align}
\hat{u_t} = -\mu k_x^{2}\hat{u}
- \mathscr{F}\{ {\mathscr{F}^{-1}\{ \hat{u} \} \mathscr{F}^{-1} \{ i k_x \hat{u} \} } \}
\end{align}
# Imports
```
import numpy as np
import matplotlib.pyplot as plt
from rkstiff.grids import construct_x_kx_rfft
from rkstiff.derivatives import dx_rfft
from rkstiff.if34 import IF34
from rkstiff.etd35 import ETD35
%matplotlib inline
```
# Helper graph functions
```
def plotResults(u0,uf,u0FFT,ufFFT):
    """
    Helper function that plots the initial 1D-array along with the final
    propagated result, plus the initial spectral (fft) 1D-array along with
    the final propagated spectral result (power spectra on a log scale).

    NOTE: uses the notebook-level grids ``x`` and ``kx`` for the plot axes.

    INPUTS
        u0    - initial 1D-array
        uf    - final propagated 1D-array
        u0FFT - initial spectral 1D-array
        ufFFT - final propagated spectral 1D-array
    """
    fig = plt.figure(figsize=(12,8))
    # top row: real-space fields; bottom row: spectra (shared y per row)
    ax1 = fig.add_subplot(2,2,1)
    ax2 = fig.add_subplot(2,2,2,sharey = ax1)
    ax3 = fig.add_subplot(2,2,3)
    ax4 = fig.add_subplot(2,2,4,sharey = ax3)
    ax1.plot(x,u0)
    ax2.plot(x,uf)
    ax3.plot(kx,np.abs(u0FFT)**2)
    ax4.plot(kx,np.abs(ufFFT)**2)
    ax3.set_yscale('log')
    ax4.set_yscale('log')
    fig.tight_layout()
def waterfall(x,t,u,**kwargs):
    """Draw a 3D 'waterfall' plot: one curve u[i](x) per time value t[i].

    INPUTS
        x - 1D spatial grid
        t - 1D array of time values (one per row of u)
        u - 2D array-like of field snapshots, u[i] sampled on x
        figsize - optional keyword, figure size tuple (default (6, 6))
    Returns the 3D axes object for further customisation.
    """
    fig = plt.figure(figsize=kwargs.get('figsize', (6, 6)))
    ax = fig.add_subplot(1,1,1,projection='3d')
    # Make the 3D panes transparent. (`ax.w_xaxis` etc. were deprecated in
    # matplotlib 3.1 and removed in 3.8; `ax.xaxis`/`yaxis`/`zaxis` are the
    # supported accessors on Axes3D.)
    for axis in (ax.xaxis, ax.yaxis, ax.zaxis):
        axis.set_pane_color((0, 0, 0, 0))
    for i, snapshot in enumerate(u):
        ax.plot(x, t[i]*np.ones_like(x), snapshot, color='black')
    plt.xlim([x[0], x[-1]])
    plt.ylim([t[0], t[-1]])
    plt.tight_layout()
    return ax
```
# Construct grids
```
# uniform grid spacing, field assumed to be real-valued -> construct_x_kx_rfft
N = 8192
a,b = -np.pi, np.pi
x,kx = construct_x_kx_rfft(N,a,b)
```
# Linear operator and nonlinear function
```
mu = 0.0005
L = -mu*kx**2
def NL(uf):
u = np.fft.irfft(uf)
ux = np.fft.irfft(1j*kx*uf)
return -np.fft.rfft(u*ux)
```
# Initial field to be propagated
```
u0 = np.exp(-10*np.sin(x/2)**2)
u0FFT = np.fft.rfft(u0)
plt.plot(x,u0)
```
# Initialize IF34 solver
```
solver = IF34(linop=L,NLfunc=NL,epsilon=1e-8)
```
# Propagate step by step
```
h = 0.01
uFFT = u0FFT.copy()
t = 0
while t < 1:
uFFT,h,h_suggest = solver.step(uFFT,h)
t += h
# use suggested step
h = h_suggest
print('tf = ',t)
uf = np.fft.irfft(uFFT)
plotResults(u0,uf,u0FFT,uFFT)
```
# Propagate from time t0 to tf
```
# store_data -> propagated field stored in solver.u at times solver.t
# store_freq -> propagated field values stored on every store_freq step (default is every step)
uFFT = solver.evolve(u0FFT,t0=0,tf=1,store_data=True,store_freq=50)
```
# Graph result
```
U = []
for u in solver.u:
U.append(np.fft.irfft(u))
U = np.array(U)
t = np.array(solver.t)
ax = waterfall(x,t,U,figsize=(8,8))
ax.grid(False)
ax.axis(False)
ax.view_init(62,-69)
```
| /rkstiff-0.3.0.tar.gz/rkstiff-0.3.0/demos/burgers.ipynb | 0.568536 | 0.947672 | burgers.ipynb | pypi |
from ._interface import ABC, abstractmethod
class IModel(ABC):
    """Interface for a searchable model: search for items, then select one result by index."""
    @abstractmethod
    def search(self, search_string: str, **kwargs):
        """Search for items matching ``search_string``."""
    @abstractmethod
    def select(self, selection: int, **kwargs):
        """Select the item at index ``selection`` from the previous search results."""
class IQueue(ABC):
    """Interface for a generic entity queue."""
    @abstractmethod
    def add(self, entity):
        """Append ``entity`` to the queue."""
    @abstractmethod
    def remove(self, index: int):
        """Remove the entity at ``index`` from the queue and re-index the remainder."""
    @abstractmethod
    def fetch(self, index: int):
        """Return the entity stored at ``index`` in the queue."""
    @property
    @abstractmethod
    def get_queue(self):
        """The underlying queue (read-only property)."""
    @property
    @abstractmethod
    def get_indexed_queue(self):
        """The queue with indices attached (read-only property)."""
class ISongModel(IModel):
    """Interface for song models; adds stream-URL and related-song lookups to IModel."""
    @abstractmethod
    def get_song_url(self, data):
        """Return the stream URL for the song described by ``data``."""
    @abstractmethod
    def get_related_songs(self, data):
        """Load songs related to the song described by ``data``."""
class ISongQueue(IQueue):
    """Interface for a song queue with an auxiliary related-songs queue ("Rqueue")."""
    @abstractmethod
    def add_related_songs(self, songs):
        """Add ``songs`` to the related-songs queue."""
    @abstractmethod
    def update_qstatus(self, status, stream_url):
        """Update the main queue status and the current stream URL."""
    @abstractmethod
    def pop_rsong(self):
        """Remove and return a song from the related-songs queue."""
    @abstractmethod
    def get_rsong_index(self, index: int):
        """Return the related song stored at ``index``."""
    @abstractmethod
    def remove_rsong_index(self, index: int):
        """Remove the related song stored at ``index``."""
    @abstractmethod
    def update_rqueue(self, rsongs):
        """Replace/refresh the related-songs queue with ``rsongs``."""
    @property
    @abstractmethod
    def get_rsongs(self):
        """The list of related songs (read-only property)."""
class IAlbumModel(IModel):
    """Interface for album models; adds selecting a single song out of an album."""
    @abstractmethod
    def select_song_from_album(self, selection: int):
        """Select the song at index ``selection`` from the current album."""
class IPlaylistModel(IModel):
    """Interface for playlist models; inherits search/select from IModel."""
from typing import List, Optional
import numpy as np
import pandas as pd
from pandas import DataFrame
from rkt_lib_toolkit.logger import Logger
from rkt_lib_toolkit.config import Config
class QLearning:
    """
    Tabular Q-learning agent.

    Exploration vs. Exploitation Tradeoff:
    The agent initially has none or limited knowledge about the environment.
    The agent can choose to explore by selecting an action with an unknown
    outcome, to get more information about the environment. Or, it can choose
    to exploit and choose an action based on its prior knowledge of the
    environment to get a good reward.
    """

    def __init__(self,
                 actions: List,
                 should_load: bool = False,
                 qtable_file_to_load: str = "",
                 alpha: float = 0.1,
                 gamma: float = 0.8,
                 action_selection_method: str = "epsilon-greedy"):
        """
        Machine learning class based on q-learning.

        epsilon-greedy: https://www.baeldung.com/cs/epsilon-greedy-q-learning

        @param actions: list of available actions
        @param should_load: define if you want to load the file containing the dataframe (pkl file)
        @param qtable_file_to_load: path to the dataframe pkl file
        @param alpha: learning rate, the degree of acceptance of the new value over the old one,
                      set between 0 and 1. Setting it to 0 means that the Q-values are never
                      updated, hence nothing is learned. A high value such as 0.9 means that
                      learning can occur quickly.
        @param gamma: discount factor, generally this value varies between 0.8 and 0.99
        @param action_selection_method: strategy used by ``choose_action``
                                        (only "epsilon-greedy" is implemented)
        """
        self._me = self.__class__.__name__
        self._logger: 'Logger' = Logger(caller_class=self.me)
        self._logger.set_logger(caller_class=self.me, output="stream")
        self._config: 'Config' = Config()

        self.learning_rate: float = alpha
        self.discount_factor: float = gamma
        self.qtable: Optional['DataFrame'] = None
        self.available_actions = actions
        self.previous_state: str = "start"
        self.previous_action: str = "do-nothing"
        self.action_selection_method: str = action_selection_method
        self.load(should_load, qtable_file_to_load)

    # PROPERTIES
    # NOTE: annotations referencing project types are written as strings (lazy
    # forward references), consistent with the existing 'DataFrame' string
    # annotations, so the class body does not evaluate them at definition time.
    @property
    def me(self) -> str:
        return self._me

    @me.setter
    def me(self, value: str) -> None:
        if not isinstance(value, str):
            raise TypeError("The '_me' property must be a string")
        self._me: str = value

    @property
    def logger(self) -> 'Logger':
        return self._logger

    @logger.setter
    def logger(self, value: 'Logger') -> None:
        if not isinstance(value, Logger):
            raise TypeError("The '_logger' property must be a 'Logger'")
        self._logger: 'Logger' = value

    @property
    def config(self) -> 'Config':
        return self._config

    @config.setter
    def config(self, value: 'Config') -> None:
        if not isinstance(value, Config):
            raise TypeError("The '_config' property must be a 'Config'")
        self._config: 'Config' = value

    @property
    def learning_rate(self) -> float:
        return self._learning_rate

    @learning_rate.setter
    def learning_rate(self, value: float) -> None:
        if not isinstance(value, float):
            raise TypeError("The '_learning_rate' property must be a float")
        self._learning_rate: float = value

    @property
    def discount_factor(self) -> float:
        return self._discount_factor

    @discount_factor.setter
    def discount_factor(self, value: float) -> None:
        if not isinstance(value, float):
            raise TypeError("The '_discount_factor' property must be a float")
        self._discount_factor: float = value

    @property
    def qtable(self) -> 'DataFrame':
        return self._qtable

    @qtable.setter
    def qtable(self, value: 'DataFrame') -> None:
        if not isinstance(value, DataFrame) and value is not None:
            raise TypeError("The '_qtable' property must be a DataFrame")
        self._qtable: Optional['DataFrame'] = value

    @property
    def previous_state(self) -> str:
        return self._previous_state

    @previous_state.setter
    def previous_state(self, value: str) -> None:
        if not isinstance(value, str):
            raise TypeError("The '_previous_state' property must be a string")
        self._previous_state: str = value

    @property
    def previous_action(self) -> str:
        return self._previous_action

    @previous_action.setter
    def previous_action(self, value: str) -> None:
        if not isinstance(value, str):
            raise TypeError("The '_previous_action' property must be a string")
        self._previous_action: str = value

    @property
    def available_actions(self) -> List:
        return self._available_actions

    @available_actions.setter
    def available_actions(self, value: List) -> None:
        if not isinstance(value, List):
            raise TypeError("The '_available_actions' property must be a list")
        self._available_actions: List = value

    @property
    def action_selection_method(self) -> str:
        return self._action_selection_method

    @action_selection_method.setter
    def action_selection_method(self, value: str) -> None:
        if not isinstance(value, str):
            raise TypeError("The '_action_selection_method' property must be a str")
        self._action_selection_method: str = value

    def __repr__(self) -> str:
        return f"QLearning(alpha={self.learning_rate}, gamma={self.discount_factor})"

    # Functions
    # # Management
    def save(self, file: str) -> None:
        """Persist the Q-table to ``file`` as a pandas pickle."""
        self.qtable.to_pickle(file)

    def load(self, do_load: bool, file_to_load: str) -> None:
        """Load the Q-table from ``file_to_load`` if ``do_load``, else start with an empty one."""
        if do_load:
            self.qtable = pd.read_pickle(file_to_load)
        else:
            self.qtable = pd.DataFrame(columns=self.available_actions, dtype=np.float64)

    # # AI
    def choose_action(self, state: str):
        """
        Select an action for ``state`` using the configured selection method.

        The epsilon parameter of the epsilon-greedy procedure introduces
        randomness into the algorithm, forcing us to try different actions and
        avoid getting stuck in a local optimum: epsilon = 0 never explores and
        always exploits known Q-values, epsilon = 1 always takes random
        actions. Usually epsilon is a small number close to 0.

        @param state: current state identifier
        @return: the chosen action ("do-nothing" if the configured selection
                 method is unknown)
        """
        self.check_state_exist(state)
        chosen_action: str = "do-nothing"
        if self.action_selection_method == "epsilon-greedy":
            chosen_action = self._choose_action_epsilon_greedy(state)
        return chosen_action

    def _choose_action_epsilon_greedy(self, state: str, e_greedy: float = 0.5):
        # On the very first step there is nothing to exploit yet, so force
        # exploration by making the random threshold impossible to miss.
        e_greedy = 2 if self.previous_state == "start" else e_greedy
        if np.random.uniform() < e_greedy:
            # Explore: pick a uniformly random action.
            action = np.random.choice(self.available_actions)
        else:
            # Exploit: pick (randomly among) the action(s) with the best Q-value.
            state_rewards = self.qtable.loc[state, :]
            better_reward = np.max(state_rewards)
            better_action = state_rewards[state_rewards == better_reward]
            action = np.random.choice(better_action.index)
        return action

    def learn(self, state: str, action: str, reward: float) -> None:
        """
        Run once for each step of your bot.

        Q-learning temporal-difference update:

            Q(S, A) <- Q(S, A) + alpha * [R + gamma * max(Q(S', :)) - Q(S, A)]

        where S/A are the *previous* state/action, S' is the newly observed
        ``state``, R is the observed ``reward``, and the max runs over all
        actions available in S'.

        :param state: state observed after the previous action
        :param action: action selected for ``state`` (becomes the next "previous action")
        :param reward: reward obtained for the previous action
        :return: None
        """
        self.check_state_exist(state)
        if self.previous_state != "start":
            last_qvalue = self.qtable.loc[self.previous_state, self.previous_action]
            # Q-learning is off-policy: bootstrap on the BEST action of the new
            # state (max over the whole row). The previous implementation used
            # only Q(state, action), which contradicted the documented formula.
            best_next_qvalue = np.max(self.qtable.loc[state, :])
            td_update = self.learning_rate * (reward + self.discount_factor * best_next_qvalue - last_qvalue)
            self.qtable.loc[self.previous_state, self.previous_action] += td_update

        self.previous_state = state
        self.previous_action = action

    def check_state_exist(self, state: str):
        """Add a zero-initialised row for ``state`` if it is not in the Q-table yet."""
        if state not in self.qtable.index:
            self.qtable.loc[state] = pd.Series([0] * len(self.available_actions), index=self.qtable.columns, name=state)
from typing import List, Optional
import numpy as np
import pandas as pd
from pandas import DataFrame
from rkt_lib_toolkit.logger import Logger
from rkt_lib_toolkit.config import Config
class QLearning:
    """
    Tabular Q-learning agent.

    Exploration vs. Exploitation Tradeoff:
    The agent initially has none or limited knowledge about the environment.
    The agent can choose to explore by selecting an action with an unknown
    outcome, to get more information about the environment. Or, it can choose
    to exploit and choose an action based on its prior knowledge of the
    environment to get a good reward.
    """

    def __init__(self,
                 actions: List,
                 should_load: bool = False,
                 qtable_file_to_load: str = "",
                 alpha: float = 0.1,
                 gamma: float = 0.8,
                 action_selection_method: str = "epsilon-greedy"):
        """
        Machine learning class based on q-learning.

        epsilon-greedy: https://www.baeldung.com/cs/epsilon-greedy-q-learning

        @param actions: list of available actions
        @param should_load: define if you want to load the file containing the dataframe (pkl file)
        @param qtable_file_to_load: path to the dataframe pkl file
        @param alpha: learning rate, the degree of acceptance of the new value over the old one,
                      set between 0 and 1. Setting it to 0 means that the Q-values are never
                      updated, hence nothing is learned. A high value such as 0.9 means that
                      learning can occur quickly.
        @param gamma: discount factor, generally this value varies between 0.8 and 0.99
        @param action_selection_method: strategy used by ``choose_action``
                                        (only "epsilon-greedy" is implemented)
        """
        self._me = self.__class__.__name__
        self._logger: 'Logger' = Logger(caller_class=self.me)
        self._logger.set_logger(caller_class=self.me, output="stream")
        self._config: 'Config' = Config()

        self.learning_rate: float = alpha
        self.discount_factor: float = gamma
        self.qtable: Optional['DataFrame'] = None
        self.available_actions = actions
        self.previous_state: str = "start"
        self.previous_action: str = "do-nothing"
        self.action_selection_method: str = action_selection_method
        self.load(should_load, qtable_file_to_load)

    # PROPERTIES
    # NOTE: annotations referencing project types are written as strings (lazy
    # forward references), consistent with the existing 'DataFrame' string
    # annotations, so the class body does not evaluate them at definition time.
    @property
    def me(self) -> str:
        return self._me

    @me.setter
    def me(self, value: str) -> None:
        if not isinstance(value, str):
            raise TypeError("The '_me' property must be a string")
        self._me: str = value

    @property
    def logger(self) -> 'Logger':
        return self._logger

    @logger.setter
    def logger(self, value: 'Logger') -> None:
        if not isinstance(value, Logger):
            raise TypeError("The '_logger' property must be a 'Logger'")
        self._logger: 'Logger' = value

    @property
    def config(self) -> 'Config':
        return self._config

    @config.setter
    def config(self, value: 'Config') -> None:
        if not isinstance(value, Config):
            raise TypeError("The '_config' property must be a 'Config'")
        self._config: 'Config' = value

    @property
    def learning_rate(self) -> float:
        return self._learning_rate

    @learning_rate.setter
    def learning_rate(self, value: float) -> None:
        if not isinstance(value, float):
            raise TypeError("The '_learning_rate' property must be a float")
        self._learning_rate: float = value

    @property
    def discount_factor(self) -> float:
        return self._discount_factor

    @discount_factor.setter
    def discount_factor(self, value: float) -> None:
        if not isinstance(value, float):
            raise TypeError("The '_discount_factor' property must be a float")
        self._discount_factor: float = value

    @property
    def qtable(self) -> 'DataFrame':
        return self._qtable

    @qtable.setter
    def qtable(self, value: 'DataFrame') -> None:
        if not isinstance(value, DataFrame) and value is not None:
            raise TypeError("The '_qtable' property must be a DataFrame")
        self._qtable: Optional['DataFrame'] = value

    @property
    def previous_state(self) -> str:
        return self._previous_state

    @previous_state.setter
    def previous_state(self, value: str) -> None:
        if not isinstance(value, str):
            raise TypeError("The '_previous_state' property must be a string")
        self._previous_state: str = value

    @property
    def previous_action(self) -> str:
        return self._previous_action

    @previous_action.setter
    def previous_action(self, value: str) -> None:
        if not isinstance(value, str):
            raise TypeError("The '_previous_action' property must be a string")
        self._previous_action: str = value

    @property
    def available_actions(self) -> List:
        return self._available_actions

    @available_actions.setter
    def available_actions(self, value: List) -> None:
        if not isinstance(value, List):
            raise TypeError("The '_available_actions' property must be a list")
        self._available_actions: List = value

    @property
    def action_selection_method(self) -> str:
        return self._action_selection_method

    @action_selection_method.setter
    def action_selection_method(self, value: str) -> None:
        if not isinstance(value, str):
            raise TypeError("The '_action_selection_method' property must be a str")
        self._action_selection_method: str = value

    def __repr__(self) -> str:
        return f"QLearning(alpha={self.learning_rate}, gamma={self.discount_factor})"

    # Functions
    # # Management
    def save(self, file: str) -> None:
        """Persist the Q-table to ``file`` as a pandas pickle."""
        self.qtable.to_pickle(file)

    def load(self, do_load: bool, file_to_load: str) -> None:
        """Load the Q-table from ``file_to_load`` if ``do_load``, else start with an empty one."""
        if do_load:
            self.qtable = pd.read_pickle(file_to_load)
        else:
            self.qtable = pd.DataFrame(columns=self.available_actions, dtype=np.float64)

    # # AI
    def choose_action(self, state: str):
        """
        Select an action for ``state`` using the configured selection method.

        The epsilon parameter of the epsilon-greedy procedure introduces
        randomness into the algorithm, forcing us to try different actions and
        avoid getting stuck in a local optimum: epsilon = 0 never explores and
        always exploits known Q-values, epsilon = 1 always takes random
        actions. Usually epsilon is a small number close to 0.

        @param state: current state identifier
        @return: the chosen action ("do-nothing" if the configured selection
                 method is unknown)
        """
        self.check_state_exist(state)
        chosen_action: str = "do-nothing"
        if self.action_selection_method == "epsilon-greedy":
            chosen_action = self._choose_action_epsilon_greedy(state)
        return chosen_action

    def _choose_action_epsilon_greedy(self, state: str, e_greedy: float = 0.5):
        # On the very first step there is nothing to exploit yet, so force
        # exploration by making the random threshold impossible to miss.
        e_greedy = 2 if self.previous_state == "start" else e_greedy
        if np.random.uniform() < e_greedy:
            # Explore: pick a uniformly random action.
            action = np.random.choice(self.available_actions)
        else:
            # Exploit: pick (randomly among) the action(s) with the best Q-value.
            state_rewards = self.qtable.loc[state, :]
            better_reward = np.max(state_rewards)
            better_action = state_rewards[state_rewards == better_reward]
            action = np.random.choice(better_action.index)
        return action

    def learn(self, state: str, action: str, reward: float) -> None:
        """
        Run once for each step of your bot.

        Q-learning temporal-difference update:

            Q(S, A) <- Q(S, A) + alpha * [R + gamma * max(Q(S', :)) - Q(S, A)]

        where S/A are the *previous* state/action, S' is the newly observed
        ``state``, R is the observed ``reward``, and the max runs over all
        actions available in S'.

        :param state: state observed after the previous action
        :param action: action selected for ``state`` (becomes the next "previous action")
        :param reward: reward obtained for the previous action
        :return: None
        """
        self.check_state_exist(state)
        if self.previous_state != "start":
            last_qvalue = self.qtable.loc[self.previous_state, self.previous_action]
            # Q-learning is off-policy: bootstrap on the BEST action of the new
            # state (max over the whole row). The previous implementation used
            # only Q(state, action), which contradicted the documented formula.
            best_next_qvalue = np.max(self.qtable.loc[state, :])
            td_update = self.learning_rate * (reward + self.discount_factor * best_next_qvalue - last_qvalue)
            self.qtable.loc[self.previous_state, self.previous_action] += td_update

        self.previous_state = state
        self.previous_action = action

    def check_state_exist(self, state: str):
        """Add a zero-initialised row for ``state`` if it is not in the Q-table yet."""
        if state not in self.qtable.index:
            self.qtable.loc[state] = pd.Series([0] * len(self.available_actions), index=self.qtable.columns, name=state)
import re
from argparse import ArgumentParser
from abc import ABC
from typing import Callable
from subprocess import CalledProcessError
from rkd.api.contract import TaskInterface, ExecutionContext
from rkd.api.syntax import TaskDeclaration
class DockerBaseTask(TaskInterface, ABC):
    """Shared logic for the docker tag/push tasks: computes tag propagation."""

    def calculate_images(self, image: str, latest_per_version: bool, global_latest: bool, allowed_meta_list: str,
                         keep_prefix: bool):
        """Calculate tags propagation.

        :param image: fully qualified tagged image, e.g. quay.io/riotkit/infracheck:v2.0.0
        :param latest_per_version: also emit per-version "-latest" tags, e.g. 2.0-latest-rc
        :param global_latest: also emit the plain ":latest" tag
        :param allowed_meta_list: comma-separated allowed meta parts, e.g. "rc,alpha,beta"
        :param keep_prefix: keep a tag prefix such as "release-" or "v" in generated tags
        :return: list of tagged images (always contains the original image)
        """
        allowed_meta = allowed_meta_list.replace(' ', '').split(',')
        tag = image.split(':')[-1]

        # output
        output_tags = [image]

        pattern = re.compile('(?P<version>[0-9.]+)(-(?P<meta>[A-Za-z]+))?(?P<metanum>[0-9]+)?', re.IGNORECASE)
        matches = [m.groupdict() for m in pattern.finditer(tag)]

        if not matches:
            self._io.warn('No release version found')
            return output_tags

        meta_type = matches[0]['meta']
        meta_number = matches[0]['metanum']

        if meta_type and meta_type not in allowed_meta:
            # Consistency fix: use self._io like the rest of this class
            # (was self.io().warn here only).
            self._io.warn('Version meta part is not allowed, not calculating propagation')
            return output_tags

        original_tag = tag
        base_version = matches[0]['version']
        optional_prefix = tag[0:tag.find(matches[0]['version'])]
        base_version_with_optional_prefix = optional_prefix + base_version
        meta = '-' + meta_type if meta_type else None
        to_strip_at_beginning = optional_prefix if not keep_prefix else ''

        # :latest
        if global_latest:
            output_tags.append(image.replace(original_tag, 'latest'))

        # case 1: 1.0.0-RC1 -> 1.0.0-latest-RC
        if meta and meta_number:
            output_tags = self.generate_tags_for_numbered_pre_release(
                output_tags=output_tags,
                base_version_with_optional_prefix=base_version_with_optional_prefix,
                original_tag=original_tag,
                image=image,
                latest_per_version=latest_per_version,
                meta=meta,
                meta_number=meta_number
            )

        # case 2: 1.0.0-RC (without meta number)
        elif meta and not meta_number:
            output_tags = self.generate_tags_for_pre_release_without_number(
                output_tags=output_tags,
                base_version_with_optional_prefix=base_version_with_optional_prefix,
                original_tag=original_tag,
                image=image,
                latest_per_version=latest_per_version,
                meta=meta
            )

        # release
        elif not meta:
            output_tags = self.generate_tags_for_release(
                output_tags=output_tags,
                base_version_with_optional_prefix=base_version_with_optional_prefix,
                original_tag=original_tag,
                image=image
            )

        return self.strip_out_each_tag(output_tags, to_strip_at_beginning, image)

    @staticmethod
    def strip_out_each_tag(input_tagged_images: list, to_strip_at_beginning: str, originally_tagged_image: str):
        """
        Removes a prefix like a "release-", "v" from beginning of each tagged image

        :param input_tagged_images:
        :param to_strip_at_beginning:
        :param originally_tagged_image:
        :return:
        """
        if not to_strip_at_beginning:
            return input_tagged_images

        output_tagged_images = []
        image = originally_tagged_image[0:originally_tagged_image.find(':')]  # separate image from tag

        for tagged_image in input_tagged_images:
            tag = tagged_image[len(image) + 1:]

            # strip out the prefix eg. "release-", "v" or other
            if tag.startswith(to_strip_at_beginning):
                tag = tag[len(to_strip_at_beginning):]

            output_tagged_images.append(image + ':' + tag)

        return output_tagged_images

    @staticmethod
    def generate_originally_tagged_image(original_tag: str, to_strip_at_beginning: str, image: str):
        """Return the image with the prefix stripped from its tag (no-op when nothing to strip)."""
        if to_strip_at_beginning:
            return image.replace(original_tag, original_tag[len(to_strip_at_beginning):], 1)

        return image

    def generate_tags_for_numbered_pre_release(self, output_tags: list, base_version_with_optional_prefix: str,
                                               original_tag: str, image: str, meta: str,
                                               latest_per_version: bool, meta_number: str):
        """
        EXAMPLE CASES:
            - v2.0.0-BETA1
            - 2.0.0-BETA1
            - release-2.0.0-BETA1
        """
        output_tags.append(image.replace(original_tag, base_version_with_optional_prefix + '-latest%s' % meta, 1))

        if latest_per_version:
            output_tags = self._generate_for_each_version(
                image, original_tag, output_tags,
                lambda version: original_tag.replace(base_version_with_optional_prefix + meta + meta_number,
                                                     version + '-latest%s' % meta, 1)
            )

        return output_tags

    def generate_tags_for_pre_release_without_number(self, output_tags: list, base_version_with_optional_prefix: str,
                                                     original_tag: str, image: str, meta: str,
                                                     latest_per_version: bool):
        """
        EXAMPLE CASES:
            - v2.0.0-PRE
            - 2.0.0-PRE
            - release-2.0.0-PRE
        """
        output_tags.append(image.replace(original_tag, base_version_with_optional_prefix + '-latest%s' % meta, 1))

        if latest_per_version:
            output_tags = self._generate_for_each_version(
                image, original_tag, output_tags,
                lambda version: original_tag.replace(base_version_with_optional_prefix + meta,
                                                     version + '-latest%s' % meta, 1)
            )

        return output_tags

    def generate_tags_for_release(self, output_tags: list, base_version_with_optional_prefix: str,
                                  original_tag: str, image: str):
        """
        EXAMPLE CASES:
            - v2.0.0
            - 2.0.0
            - release-1.2.3
        """
        output_tags = self._generate_for_each_version(
            image, original_tag, output_tags,
            lambda version: original_tag.replace(base_version_with_optional_prefix, version, 1)
        )

        return output_tags

    @staticmethod
    def _generate_for_each_version(image: str, original_tag: str, output_tags: list, callback: Callable) -> list:
        """
        Generate a list of tags for each sub-version eg. 2.1.3 -> 2.1 -> 2

        :param image: Original image eg. quay.io/riotkit/infracheck
        :param original_tag: Original image tag eg. v2.0.0
        :param output_tags: List of existing output tags to append generated tags to
        :param callback: A callback that replaces version in original_tag part
        :return:
        """
        parts = original_tag.split('.')

        # range stops before len(parts), so the full version itself is not
        # regenerated here -- it is already present in output_tags.
        for part_num in range(0, len(parts)):
            version = ".".join(parts[0:part_num])

            if not version:
                continue

            output_tags.append(
                image.replace(
                    original_tag,
                    callback(version)
                )
            )

        return output_tags

    def _print_images(self, images: list, action: str):
        """Log each image that is about to be processed by ``action``."""
        for image in images:
            self._io.info(' -> Going to %s image "%s"' % (action, image))

    def get_group_name(self) -> str:
        return ':docker'

    def configure_argparse(self, parser: ArgumentParser):
        parser.add_argument('--image', '-i', help='Image name', required=True)
        parser.add_argument('--without-latest', '-wl', help='Do not tag latest per version', action='store_true')
        parser.add_argument('--without-global-latest', '-wgl', help='Do not tag :latest', action='store_true')
        parser.add_argument('--propagate', '-p', help='Propagate tags? eg. 1.0.0 -> 1.0 -> 1 -> latest',
                            action='store_true')
        parser.add_argument('--allowed-meta', '-m', help='Allowed meta part eg. rc, alpha, beta',
                            default='rc,alpha,stable,dev,prod,test,beta,build,b,pre,a,preprod,prerelease,early,ea,stage')
        # Bug fix: this is consumed as a boolean flag (bool(context.args['keep_prefix']))
        # but previously lacked action='store_true', so "-k" demanded a value.
        parser.add_argument('--keep-prefix', '-k', help='Keep prefix eg. "release-", "v" or "v." if present in tag',
                            action='store_true')
class TagImageTask(DockerBaseTask):
    """Re-tag images to propagate version tags in docker-like format eg. 1.0.1 -> 1.0 -> 1 -> latest
    Examples:
        1.0.0 -> 1.0 -> 1 -> latest
        1.0.0-RC1 -> 1.0.0-latest-rc
    """

    def get_name(self) -> str:
        return ':tag'

    def execute(self, context: ExecutionContext) -> bool:
        # Resolve the list of target tags first, then re-tag one by one.
        source_image = context.args['image']

        if not context.args['propagate']:
            targets = [source_image]
        else:
            targets = self.calculate_images(
                image=source_image,
                latest_per_version=not context.args['without_latest'],
                global_latest=not context.args['without_global_latest'],
                allowed_meta_list=context.args['allowed_meta'],
                keep_prefix=bool(context.args['keep_prefix'])
            )

        self._print_images(targets, 'tag')

        for target in targets:
            try:
                self.exec('docker tag %s %s' % (source_image, target))
            except CalledProcessError as e:
                print(e)
                return False

        return True
class PushTask(DockerBaseTask):
    """Pushes all re-tagged images
    """

    def get_name(self) -> str:
        return ':push'

    def execute(self, context: ExecutionContext) -> bool:
        # Resolve the list of images to push, then push each in turn.
        source_image = context.args['image']

        if context.args['propagate']:
            to_push = self.calculate_images(
                image=source_image,
                latest_per_version=not context.args['without_latest'],
                global_latest=not context.args['without_global_latest'],
                allowed_meta_list=context.args['allowed_meta'],
                keep_prefix=bool(context.args['keep_prefix'])
            )
        else:
            to_push = [source_image]

        self._print_images(to_push, 'push')

        for image in to_push:
            try:
                self.exec('docker push %s' % image)
            except CalledProcessError as e:
                print(e)
                return False

        return True
def imports():
    """Expose this module's task declarations to the RKD loader."""
    return [
        TaskDeclaration(TagImageTask()),
        TaskDeclaration(PushTask())
    ]
from copy import deepcopy
from logging import getLogger
from typing import TYPE_CHECKING, Any, Generator
# pylint:disable=cyclic-import # but pylint doesn't understand this feature
if TYPE_CHECKING:
from .archivist import Archivist
from .assets import Asset
from .constants import (
ACCESS_POLICIES_LABEL,
ACCESS_POLICIES_SUBPATH,
ASSETS_LABEL,
)
from .dictmerge import _deepmerge
LOGGER = getLogger(__name__)
class AccessPolicy(dict):
    """AccessPolicy object"""

    @property
    def name(self) -> "str | None":
        """str: name of the access policy"""
        display_name = self.get("display_name")
        return display_name
class _AccessPoliciesClient:
    """AccessPoliciesClient

    Access to access_policies entities using CRUD interface. This class is usually
    accessed as an attribute of the Archivist class.

    Args:
        archivist (Archivist): :class:`Archivist` instance
    """

    def __init__(self, archivist_instance: "Archivist"):
        self._archivist = archivist_instance
        self._subpath = f"{archivist_instance.root}/{ACCESS_POLICIES_SUBPATH}"
        self._label = f"{self._subpath}/{ACCESS_POLICIES_LABEL}"

    def __str__(self) -> str:
        return f"AccessPoliciesClient({self._archivist.url})"

    def create(
        self,
        props: "dict[str, Any]",
        filters: "list[dict[str, Any]]",
        access_permissions: "list[dict[str, Any]]",
    ) -> AccessPolicy:
        """Create access policy

        Creates access policy with defined attributes.

        Args:
            props (dict): properties of created access policy.
            filters (list): assets filters
            access_permissions (list): list of access permissions

        Returns:
            :class:`AccessPolicy` instance
        """
        LOGGER.debug("Create Access Policy %s", props)
        return self.create_from_data(
            self.__params(
                props, filters=filters, access_permissions=access_permissions
            ),
        )

    def create_from_data(self, data: "dict[str, Any]") -> AccessPolicy:
        """Create access policy

        Creates access policy with request body from data stream.
        Suitable for reading data from a file using json.load or yaml.load

        Args:
            data (dict): request body of access policy.

        Returns:
            :class:`AccessPolicy` instance
        """
        return AccessPolicy(
            **self._archivist.post(
                f"{self._subpath}/{ACCESS_POLICIES_LABEL}",
                data,
            )
        )

    def read(self, identity: str) -> AccessPolicy:
        """Read Access Policy

        Reads access policy.

        Args:
            identity (str): access_policies identity e.g. access_policies/xxxxxxxxxxxxxxxxxxxxxxx

        Returns:
            :class:`AccessPolicy` instance
        """
        return AccessPolicy(**self._archivist.get(f"{self._subpath}/{identity}"))

    def update(
        self,
        identity: str,
        *,
        props: "dict[str, Any] | None " = None,
        filters: "list[dict] | None " = None,
        access_permissions: "list[dict] | None " = None,
    ) -> AccessPolicy:
        """Update Access Policy

        Update access policy.

        Args:
            identity (str): access_policies identity e.g. access_policies/xxxxxxxxxxxxxxxxxxxxxxx
            props (dict): properties of created access policy.
            filters (list): assets filters
            access_permissions (list): list of access permissions

        Returns:
            :class:`AccessPolicy` instance
        """
        return AccessPolicy(
            **self._archivist.patch(
                f"{self._subpath}/{identity}",
                self.__params(
                    props, filters=filters, access_permissions=access_permissions
                ),
            )
        )

    def delete(self, identity: str) -> "dict[str, Any]":
        """Delete Access Policy

        Deletes access policy.

        Args:
            identity (str): access_policies identity e.g. access_policies/xxxxxxxxxxxxxxxxxxxxxxx

        Returns:
            response body (usually an empty dict)
        """
        return self._archivist.delete(f"{self._subpath}/{identity}")

    def __params(
        self,
        props: "dict[str, Any] | None",
        *,
        filters: "list[dict] | None" = None,
        access_permissions: "list[dict] | None" = None,
    ) -> "dict[str, Any]":
        # Merge explicit arguments over any fixture defaults configured on
        # the Archivist instance.
        params = deepcopy(props) if props else {}

        if filters is not None:
            params["filters"] = filters

        if access_permissions is not None:
            params["access_permissions"] = access_permissions

        return _deepmerge(self._archivist.fixtures.get(ACCESS_POLICIES_LABEL), params)

    def count(self, *, display_name: "str | None" = None) -> int:
        """Count access policies.

        Counts number of access policies that match criteria.

        Args:
            display_name (str): display name (optional)

        Returns:
            integer count of access policies.
        """
        params = {"display_name": display_name} if display_name is not None else None
        return self._archivist.count(self._label, params=params)

    def list(
        self, *, page_size: "int|None" = None, display_name: "str|None" = None
    ) -> Generator[AccessPolicy, None, None]:
        """List access policies.

        List access policies that match criteria.

        Args:
            display_name (str): display name (optional)
            page_size (int): optional page size. (Rarely used).

        Returns:
            iterable that returns :class:`AccessPolicy` instances
        """
        params = {"display_name": display_name} if display_name is not None else None
        return (
            AccessPolicy(**a)
            for a in self._archivist.list(
                self._label,
                ACCESS_POLICIES_LABEL,
                page_size=page_size,
                params=params,
            )
        )

    # additional queries on different endpoints

    def list_matching_assets(
        self, access_policy_id: str, *, page_size: "int|None" = None
    ) -> Generator[Asset, None, None]:
        """List matching assets.

        List assets that match access policy.

        Args:
            access_policy_id (str): e.g. access_policies/xxxxxxxxxxxxxxx
            page_size (int): optional page size. (Rarely used).

        Returns:
            iterable that returns :class:`Asset` instances
        """
        # NOTE(review): Asset is used at runtime here -- confirm the module
        # imports it unconditionally, not only under TYPE_CHECKING.
        return (
            Asset(**a)
            for a in self._archivist.list(
                f"{self._subpath}/{access_policy_id}/{ASSETS_LABEL}",
                ASSETS_LABEL,
                page_size=page_size,
            )
        )

    def list_matching_access_policies(
        self, asset_id: str, *, page_size: "int|None" = None
    ) -> Generator[AccessPolicy, None, None]:
        """List matching access policies.

        List access policies that match asset.

        Args:
            asset_id (str): e.g. assets/xxxxxxxxxxxxxxx
            page_size (int): optional page size. (Rarely used).

        Returns:
            iterable that returns :class:`AccessPolicy` instances
        """
        return (
            AccessPolicy(**a)
            for a in self._archivist.list(
                f"{self._subpath}/{asset_id}/{ACCESS_POLICIES_LABEL}",
                ACCESS_POLICIES_LABEL,
                page_size=page_size,
            )
        )
from base64 import b64decode
from json import loads as json_loads
from logging import getLogger
from typing import TYPE_CHECKING, Any
# pylint:disable=cyclic-import # but pylint doesn't understand this feature
from . import subjects_confirmer
from .constants import (
SUBJECTS_LABEL,
SUBJECTS_SELF_ID,
SUBJECTS_SUBPATH,
)
from .dictmerge import _deepmerge
if TYPE_CHECKING:
from .archivist import Archivist
LOGGER = getLogger(__name__)
class Subject(dict):
    """Subject object

    A dictionary of subject attributes, e.g. display_name, wallet_pub_key
    and tessera_pub_key (the fields used by the client's create/update).
    """
class _SubjectsClient:
    """SubjectsClient

    Access to subjects entities using CRUD interface. This class is usually
    accessed as an attribute of the Archivist class.

    Args:
        archivist (Archivist): :class:`Archivist` instance
    """

    def __init__(self, archivist_instance: "Archivist"):
        self._archivist = archivist_instance
        self._subpath = f"{archivist_instance.root}/{SUBJECTS_SUBPATH}"
        self._label = f"{self._subpath}/{SUBJECTS_LABEL}"

    def __str__(self) -> str:
        return f"SubjectsClient({self._archivist.url})"

    def create(
        self,
        display_name: str,
        wallet_pub_key: "list[str]",
        tessera_pub_key: "list[str]",
    ) -> Subject:
        """Create subject

        Creates subject with defined attributes.

        Args:
            display_name (str): display name of subject.
            wallet_pub_key (list): wallet public keys
            tessera_pub_key (list): tessera public keys

        Returns:
            :class:`Subject` instance
        """
        LOGGER.debug("Create Subject %s", display_name)
        return self.create_from_data(
            self.__params(
                display_name=display_name,
                wallet_pub_key=wallet_pub_key,
                tessera_pub_key=tessera_pub_key,
            ),
        )

    def share(
        self, name: str, other_name: str, other_archivist: "Archivist"
    ) -> "tuple[Subject, Subject]":
        """Import the self subjects from the foreign archivist connection
        from another organization - mutually share.

        Args:
            name (str): display_name of the foreign self subject in this archivist
            other_name (str): display_name of the self subject in other archivist
            other_archivist (Archivist): Archivist object

        Returns:
            2-tuple of :class:`Subject` instance
        """
        # Import each side's "self" subject into the other organization...
        subject1 = self.import_subject(
            name, other_archivist.subjects.read(SUBJECTS_SELF_ID)
        )
        subject2 = other_archivist.subjects.import_subject(
            other_name, self.read(SUBJECTS_SELF_ID)
        )
        # ...then block until both imports are confirmed.
        subject1 = self.wait_for_confirmation(subject1["identity"])
        subject2 = other_archivist.subjects.wait_for_confirmation(subject2["identity"])
        return subject1, subject2

    def import_subject(self, display_name: str, subject: Subject) -> Subject:
        """Create subject from another subject usually
        from another organization.

        Args:
            display_name (str): display_name of the subject
            subject (Subject): Subject object

        Returns:
            :class:`Subject` instance
        """
        return self.create(
            display_name,
            subject["wallet_pub_key"],
            subject["tessera_pub_key"],
        )

    def create_from_data(self, data: "dict[str, Any]") -> Subject:
        """Create subject

        Creates subject with request body from data stream.
        Suitable for reading data from a file using json.load or yaml.load

        Args:
            data (dict): request body of subject.

        Returns:
            :class:`Subject` instance
        """
        LOGGER.debug("Create Subject from data %s", data)
        return Subject(**self._archivist.post(self._label, data))

    def create_from_b64(self, data: "dict[str, Any]") -> Subject:
        """Create subject

        Creates subject with request body from b64 encoded string

        Args:
            data (dict): Dictionary with 2 fields:

            A YAML representation of the data argument would be:

            .. code-block:: yaml

                display_name: An imported subject
                subject_string: ey66...

        Returns:
            :class:`Subject` instance
        """
        decoded = b64decode(data["subject_string"])
        LOGGER.debug("decoded %s", decoded)
        # Only the public keys are taken from the encoded payload; the
        # display name always comes from the caller.
        outdata = {
            k: v
            for k, v in json_loads(decoded).items()
            if k in ("wallet_pub_key", "tessera_pub_key")
        }
        outdata["display_name"] = data["display_name"]
        LOGGER.debug("data %s", outdata)
        return Subject(**self._archivist.post(self._label, outdata))

    def wait_for_confirmation(self, identity: str) -> Subject:
        """Wait for subject to be confirmed.

        Waits for subject to be confirmed.

        Args:
            identity (str): identity of subject

        Returns:
            the confirmed :class:`Subject` instance
        """
        subjects_confirmer.MAX_TIME = self._archivist.max_time
        # pylint: disable=protected-access
        return subjects_confirmer._wait_for_confirmation(self, identity)

    def read(self, identity: str) -> Subject:
        """Read Subject

        Reads subject.

        Args:
            identity (str): subjects identity e.g. subjects/xxxxxxxxxxxxxxxxxxxxxxx

        Returns:
            :class:`Subject` instance
        """
        return Subject(**self._archivist.get(f"{self._subpath}/{identity}"))

    def update(
        self,
        identity: str,
        *,
        display_name: "str|None" = None,
        wallet_pub_key: "list[str]|None" = None,
        tessera_pub_key: "list[str]|None" = None,
    ) -> Subject:
        """Update Subject

        Update subject.

        Args:
            identity (str): subjects identity e.g. subjects/xxxxxxxxxxxxxxxxxxxxxxx
            display_name (str): display name of subject.
            wallet_pub_key (list): wallet public keys
            tessera_pub_key (list): tessera public keys

        Returns:
            :class:`Subject` instance
        """
        return Subject(
            **self._archivist.patch(
                f"{self._subpath}/{identity}",
                self.__params(
                    display_name=display_name,
                    wallet_pub_key=wallet_pub_key,
                    tessera_pub_key=tessera_pub_key,
                ),
            )
        )

    def delete(self, identity: str) -> "dict[str, Any]":
        """Delete Subject

        Deletes subject.

        Args:
            identity (str): subjects identity e.g. subjects/xxxxxxxxxxxxxxxxxxxxxxx

        Returns:
            response body (usually an empty dict)
        """
        return self._archivist.delete(f"{self._subpath}/{identity}")

    def __params(
        self,
        *,
        display_name: "str|None" = None,
        wallet_pub_key: "list[str]|None" = None,
        tessera_pub_key: "list[str]|None" = None,
    ) -> "dict[str, Any]":
        # Merge explicit arguments over any fixture defaults configured on
        # the Archivist instance.
        params = {}

        if display_name is not None:
            params["display_name"] = display_name

        if wallet_pub_key is not None:
            params["wallet_pub_key"] = wallet_pub_key

        if tessera_pub_key is not None:
            params["tessera_pub_key"] = tessera_pub_key

        return _deepmerge(self._archivist.fixtures.get(SUBJECTS_LABEL), params)

    def count(self, *, display_name: "str|None" = None) -> int:
        """Count subjects.

        Counts number of subjects that match criteria.

        Args:
            display_name (str): display name (optional)

        Returns:
            integer count of subjects.
        """
        return self._archivist.count(
            self._label,
            params=self.__params(display_name=display_name),
        )

    def list(
        self,
        *,
        page_size: "int|None" = None,
        display_name: "str|None" = None,
    ):
        """List subjects.

        List subjects that match criteria.

        Args:
            display_name (str): display name (optional)
            page_size (int): optional page size. (Rarely used).

        Returns:
            iterable that returns :class:`Subject` instances
        """
        LOGGER.debug("List '%s'", display_name)
        return (
            Subject(**a)
            for a in self._archivist.list(
                self._label,
                SUBJECTS_LABEL,
                page_size=page_size,
                params=self.__params(display_name=display_name),
            )
        )
from contextlib import suppress
from copy import deepcopy
from logging import getLogger
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
# pylint:disable=cyclic-import # but pylint doesn't understand this feature
from .archivist import Archivist
from .constants import LOCATIONS_LABEL, LOCATIONS_SUBPATH
from .dictmerge import _deepmerge
from .errors import ArchivistNotFoundError
from .utils import selector_signature
LOGGER = getLogger(__name__)
class Location(dict):
    """Location

    Location object has dictionary attributes.
    """

    @property
    def name(self) -> "str | None":
        """str: name of the location"""
        # dict.get already returns None for a missing key, so the previous
        # suppress(KeyError) boilerplate is unnecessary.
        return self.get("display_name")
class _LocationsClient:
    """LocationsClient

    Access to locations entities using CRUD interface. This class is usually
    accessed as an attribute of the Archivist class.

    Args:
        archivist (Archivist): :class:`Archivist` instance
    """

    def __init__(self, archivist_instance: "Archivist"):
        self._archivist = archivist_instance
        self._subpath = f"{archivist_instance.root}/{LOCATIONS_SUBPATH}"
        self._label = f"{self._subpath}/{LOCATIONS_LABEL}"

    def __str__(self) -> str:
        return f"LocationsClient({self._archivist.url})"

    def create(
        self, props: "dict[str, Any]", *, attrs: "dict[str, Any]|None" = None
    ) -> Location:
        """Create location

        Creates location with defined properties and attributes.

        Args:
            props (dict): properties for this location.
            attrs (dict): attributes of created location.

        Returns:
            :class:`Location` instance
        """
        LOGGER.debug("Create Location %s", props)
        return self.create_from_data(self.__params(props, attrs))

    def create_from_data(self, data: "dict[str, Any]") -> Location:
        """Create location

        Creates location with request body from data stream.
        Suitable for reading data from a file using json.load or yaml.load

        Args:
            data (dict): request body of location.

        Returns:
            :class:`Location` instance
        """
        return Location(**self._archivist.post(self._label, data))

    def create_if_not_exists(self, data: "dict[str, Any]") -> "tuple[Location, bool]":
        """
        Create a location if not already exists

        Args:
            data (dict): request body of location.

        A YAML representation of the data argument would be:

            .. code-block:: yaml

                selector:
                  - display_name
                  - attributes:
                    - wavestone_ext
                display_name: Apartements du Gare du Nord
                description: Residential apartment building in new complex above GdN station
                latitude: 48.8809
                longitude: 2.3553
                attributes:
                  address: 18 Rue de Dunkerque, 75010 Paris, France
                  wavestone_ext: managed

            The 'selector' setting is required.

        Returns:
            tuple of :class:`Location` instance, Boolean True if already exists
        """
        data = deepcopy(data)
        selector = data.pop("selector")  # must exist
        props, attrs = selector_signature(selector, data)
        try:
            location = self.read_by_signature(props=props, attrs=attrs)

        except ArchivistNotFoundError:
            LOGGER.info(
                "location with selector %s,%s does not exist - creating", props, attrs
            )

        else:
            LOGGER.info("location with selector %s,%s already exists", props, attrs)
            return location, True

        return self.create_from_data(data), False

    def read(self, identity: str) -> Location:
        """Read location

        Reads location.

        Args:
            identity (str): location identity e.g. locations/xxxxxxxxxxxxxxxxxxxxxxx

        Returns:
            :class:`Location` instance
        """
        return Location(**self._archivist.get(f"{self._subpath}/{identity}"))

    def __params(
        self, props: "dict[str, Any]|None", attrs: "dict[str, Any]|None"
    ) -> "dict[str, Any]":
        # Merge explicit arguments over any fixture defaults configured on
        # the Archivist instance.
        params = deepcopy(props) if props else {}

        if attrs:
            params["attributes"] = attrs

        return _deepmerge(self._archivist.fixtures.get(LOCATIONS_LABEL), params)

    def count(
        self,
        *,
        props: "dict[str, Any]|None" = None,
        attrs: "dict[str, Any]|None" = None,
    ) -> int:
        """Count locations.

        Counts number of locations that match criteria.

        Args:
            props (dict): e.g. {"display_name": "Macclesfield" }
            attrs (dict): e.g. {"director": "john smith" }

        Returns:
            integer count of locations.
        """
        return self._archivist.count(self._label, params=self.__params(props, attrs))

    def list(
        self,
        *,
        page_size: "int|None" = None,
        props: "dict[str, Any]|None" = None,
        attrs: "dict[str, Any]|None" = None,
    ):
        """List locations.

        Lists locations that match criteria.

        Args:
            props (dict): optional e.g. {"display_name": "Macclesfield" }
            attrs (dict): optional e.g. {"director": "john smith" }
            page_size (int): optional page size. (Rarely used)

        Returns:
            iterable that returns :class:`Location` instances
        """
        return (
            Location(**a)
            for a in self._archivist.list(
                self._label,
                LOCATIONS_LABEL,
                page_size=page_size,
                params=self.__params(props, attrs),
            )
        )

    def read_by_signature(
        self,
        *,
        props: "dict[str, Any]|None" = None,
        attrs: "dict[str, Any]|None" = None,
    ) -> Location:
        """Read location by signature.

        Reads location that meets criteria. Only one location is expected.

        Args:
            props (dict): e.g. {"display_name": "Macclesfield" }
            attrs (dict): e.g. {"director": "john smith" }

        Returns:
            :class:`Location` instance
        """
        return Location(
            **self._archivist.get_by_signature(
                self._label,
                LOCATIONS_LABEL,
                params=self.__params(props, attrs),
            )
        )
from copy import deepcopy
from logging import getLogger
from typing import TYPE_CHECKING, Any, Union
if TYPE_CHECKING:
# pylint:disable=cyclic-import # but pylint doesn't understand this feature
from .archivist import Archivist
from .compliance_policy_requests import (
CompliancePolicyCurrentOutstanding,
CompliancePolicyDynamicTolerance,
CompliancePolicyPeriodOutstanding,
CompliancePolicyRichness,
CompliancePolicySince,
)
from .constants import (
COMPLIANCE_POLICIES_LABEL,
COMPLIANCE_POLICIES_SUBPATH,
)
from .dictmerge import _deepmerge
LOGGER = getLogger(__name__)
class CompliancePolicy(dict):
    """CompliancePolicy

    CompliancePolicy object has dictionary of all the compliance policy attributes.
    """

    @property
    def name(self) -> "str | None":
        """str: name of the compliance policy"""
        # Annotation added for consistency with AccessPolicy.name /
        # Location.name elsewhere in this package.
        return self.get("display_name")
class _CompliancePoliciesClient:
    """CompliancePoliciesClient

    Access to compliance policy entities using CRUD interface. This class is usually
    accessed as an attribute of the Archivist class.

    Args:
        archivist (Archivist): :class:`Archivist` instance
    """

    def __init__(self, archivist_instance: "Archivist"):
        self._archivist = archivist_instance
        self._subpath = f"{archivist_instance.root}/{COMPLIANCE_POLICIES_SUBPATH}"
        self._label = f"{self._subpath}/{COMPLIANCE_POLICIES_LABEL}"

    def __str__(self) -> str:
        return f"CompliancePoliciesClient({self._archivist.url})"

    def create(
        self,
        policy: Union[
            "CompliancePolicySince",
            "CompliancePolicyCurrentOutstanding",
            "CompliancePolicyPeriodOutstanding",
            "CompliancePolicyDynamicTolerance",
            "CompliancePolicyRichness",
        ],
    ) -> CompliancePolicy:
        """Create A compliance policy

        Args:
            policy (CompliancePolicy): the policy object.
               One of:
                   CompliancePolicySince
                   CompliancePolicyCurrentOutstanding
                   CompliancePolicyPeriodOutstanding
                   CompliancePolicyDynamicTolerance
                   CompliancePolicyRichness

        Returns:
            :class:`CompliancePolicy` instance
        """
        return self.create_from_data(policy.dict())

    def create_from_data(self, data: "dict[str, Any]") -> "CompliancePolicy":
        """Create compliance_policy

        Creates compliance_policy with request body from data stream.
        Suitable for reading data from a file using json.load or yaml.load

        Args:
            data (dict): request body of compliance_policy.

        Returns:
            :class:`CompliancePolicy` instance
        """
        return CompliancePolicy(**self._archivist.post(self._label, data))

    def read(self, identity: str) -> CompliancePolicy:
        """Read compliance policy

        Reads compliance policy.

        Args:
            identity (str): compliance policy identity
                e.g. compliance_policies/xxxxxxxxxxxxxxxxxxxxxxx

        Returns:
            :class:`CompliancePolicy` instance
        """
        return CompliancePolicy(**self._archivist.get(f"{self._subpath}/{identity}"))

    def delete(self, identity: str) -> "dict[str, Any]":
        """Delete Compliance Policy

        Deletes compliance policy.

        Args:
            identity (str): compliance policy identity
                e.g. compliance_policies/xxxxxxxxxxxxxxxxxxxxxxx

        Returns:
            response body (usually an empty dict)
        """
        return self._archivist.delete(f"{self._subpath}/{identity}")

    def __params(self, props: "dict[str, Any]|None") -> "dict[str, Any]":
        # Merge explicit arguments over any fixture defaults configured on
        # the Archivist instance.
        params = deepcopy(props) if props else {}
        # pylint: disable=protected-access
        return _deepmerge(
            self._archivist.fixtures.get(COMPLIANCE_POLICIES_LABEL), params
        )

    def count(self, *, props: "dict[str, Any]|None" = None) -> int:
        """Count compliance policies.

        Counts number of compliance policies that match criteria.

        Args:
            props (dict): e.g. {"compliance_type": "COMPLIANCE_RICHNESS" }

        Returns:
            integer count of compliance policies.
        """
        return self._archivist.count(
            self._label,
            params=self.__params(props),
        )

    def list(
        self, *, page_size: "int|None" = None, props: "dict[str, Any]|None" = None
    ):
        """List compliance policies.

        Lists compliance policies that match criteria.

        Args:
            props (dict): optional e.g. {"compliance_type": "COMPLIANCE_DYNAMIC_TOLERANCE" }
            page_size (int): optional page size. (Rarely used).

        Returns:
            iterable that returns :class:`CompliancePolicy` instances
        """
        return (
            CompliancePolicy(**a)
            for a in self._archivist.list(
                self._label,
                COMPLIANCE_POLICIES_LABEL,
                page_size=page_size,
                params=self.__params(props),
            )
        )

    def read_by_signature(self, *, props: "dict[str, Any]|None" = None):
        """Read compliance policy by signature.

        Reads compliance policy that meets criteria. Only one compliance policy is expected.

        Args:
            props (dict): e.g. {"display_name": "foo" }

        Returns:
            :class:`CompliancePolicy` instance
        """
        return CompliancePolicy(
            **self._archivist.get_by_signature(
                self._label,
                COMPLIANCE_POLICIES_LABEL,
                params=self.__params(props),
            )
        )
from copy import deepcopy
from io import BytesIO
from logging import getLogger
from os import path
from typing import TYPE_CHECKING, Any, BinaryIO
if TYPE_CHECKING:
from requests.models import Response
# pylint:disable=cyclic-import # but pylint doesn't understand this feature
from .archivist import Archivist
from .constants import (
ATTACHMENTS_LABEL,
ATTACHMENTS_SUBPATH,
)
from .dictmerge import _deepmerge
from .utils import get_url
LOGGER = getLogger(__name__)
class Attachment(dict):
    """Attachment

    Attachment object has dictionary attributes. Returned by upload();
    callers read its 'identity' and 'hash' (alg/value) entries.
    """
class _AttachmentsClient:
"""AttachmentsClient
Access to attachments entities using CRUD interface. This class is usually
accessed as an attribute of the Archivist class.
Args:
archivist (Archivist): :class:`Archivist` instance
"""
def __init__(self, archivist_instance: "Archivist"):
    """Keep a reference to the Archivist connection and build endpoint paths."""
    subpath = f"{archivist_instance.root}/{ATTACHMENTS_SUBPATH}"
    self._archivist = archivist_instance
    self._subpath = subpath
    self._label = f"{subpath}/{ATTACHMENTS_LABEL}"
def __str__(self) -> str:
    """Human-readable description of this client and its endpoint."""
    return "AttachmentsClient({})".format(self._archivist.url)
def get_default_key(self, data: "dict[str, str]") -> str:
    """
    Return a default key derived from the attachment source.

    Uses 'filename' when present and non-empty, otherwise 'url' -- one of
    them is required. Dots are replaced with underscores.

    Args:
        data (dict): attachment description containing 'filename' or 'url'

    Returns:
        str: derived key ('' when neither field carries a value)
    """
    # Single lookup per field instead of the previous duplicated
    # data.get(...) in both the condition and the branch.
    attachment_key = data.get("filename") or data.get("url", "")
    return attachment_key.replace(".", "_")
def create(self, data: "dict[str, Any]") -> "dict[str, Any]": # pragma: no cover
"""
Create an attachment and return struct suitable for use in an asset
or event creation.
Args:
data (dict): dictionary
A YAML representation of the data argument would be:
.. code-block:: yaml
filename: functests/test_resources/doors/assets/gdn_front.jpg
content_type: image/jpg
display_name: arc_primary_image
OR
.. code-block:: yaml
url: https://secure.eicar.org/eicar.com.zip"
content_type: application/zip
display_name: Test malware
Either 'filename' or 'url' is required.
'content_type' is required.
Returns:
A dict suitable for adding to an asset or event creation
A YAML representation of the result would be:
.. code-block:: yaml
arc_display_name: Telephone
arc_blob_identity: blobs/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
arc_blob_hash_alg: SHA256
arc_blob_hash_value: xxxxxxxxxxxxxxxxxxxxxxx
arc_file_name: gdn_front.jpg
"""
result = None
file_part = None
filename = data.get("filename")
if filename is not None:
_, file_part = path.split(filename)
with open(filename, "rb") as fd:
attachment = self.upload(fd, mtype=data.get("content_type"))
else:
url = data["url"]
fd = BytesIO()
get_url(url, fd)
attachment = self.upload(fd, mtype=data.get("content_type"))
result = {
"arc_attribute_type": "arc_attachment",
"arc_blob_identity": attachment["identity"],
"arc_blob_hash_alg": attachment["hash"]["alg"],
"arc_blob_hash_value": attachment["hash"]["value"],
}
if file_part:
result["arc_file_name"] = file_part
display_name = data.get("display_name")
if display_name is not None:
result["arc_display_name"] = display_name
return result
def upload(self, fd: BinaryIO, *, mtype: "str|None" = None) -> Attachment:
"""Create attachment
Creates attachment from opened file or other data source.
Args:
fd (file): opened file descriptor or other file-type iterable.
mtype (str): mimetype of data.
Returns:
:class:`Attachment` instance
"""
LOGGER.debug("Upload Attachment")
return Attachment(
**self._archivist.post_file(
self._label,
fd,
mtype,
)
)
def __params(self, params: "dict[str, Any]|None") -> "dict[str, Any]":
params = deepcopy(params) if params else {}
# pylint: disable=protected-access
return _deepmerge(self._archivist.fixtures.get(ATTACHMENTS_LABEL), params)
def download(
self,
identity: str,
fd: BinaryIO,
*,
params: "dict[str, Any]|None" = None,
) -> "Response":
"""Read attachment
Reads attachment into data sink (usually a file opened for write)..
Note that returns the response as the body will be consumed by the
fd iterator
Args:
identity (str): attachment identity e.g. blobs/xxxxxxxxxxxxxxxxxxxxxxx
fd (file): opened file descriptor or other file-type sink..
params (dict): e.g. {"allow_insecure": "true"} OR {"strict": "true" }
Returns:
JSON as dict
"""
return self._archivist.get_file(
f"{self._subpath}/{identity}",
fd,
params=self.__params(params),
)
def info(
self,
identity: str,
) -> "dict[str, Any]":
"""Read attachment info
Reads attachment info
Args:
identity (str): attachment identity e.g. blobs/xxxxxxxxxxxxxxxxxxxxxxx
Returns:
REST response
"""
return self._archivist.get(f"{self._subpath}/{identity}/info") | /rkvst-archivist-0.25.2.tar.gz/rkvst-archivist-0.25.2/archivist/attachments.py | 0.873363 | 0.189203 | attachments.py | pypi |
from dataclasses import asdict, dataclass
from .compliance_policy_type import CompliancePolicyType
from .or_dict import and_list
# NB: the order of the fields is important. Fields with default values must
# appear after fields without. This is why the compliance_type is last
# in every case.
@dataclass(frozen=True)
class CompliancePolicyBase:
    """
    Compliance policy base definition

    Common fields shared by every concrete compliance policy request.
    """

    description: str
    display_name: str
    # list of filter clauses; wrapped by and_list() on emission
    asset_filter: "list[list]"

    def dict(self):
        """Emit dictionary representation

        The asset_filter is passed through and_list — presumably producing
        the and/or clause form expected by the API (see or_dict module).
        """
        d = asdict(self)
        d["asset_filter"] = and_list(d["asset_filter"])
        return d
@dataclass(frozen=True)
class CompliancePolicySince(CompliancePolicyBase):
    """
    Compliance policy that indicates if an event has 'expired'
    """

    event_display_type: str
    time_period_seconds: int
    # defaulted field must come last (see module-level note)
    compliance_type: str = CompliancePolicyType.COMPLIANCE_SINCE.name
@dataclass(frozen=True)
class CompliancePolicyCurrentOutstanding(CompliancePolicyBase):
    """
    Compliance policy that indicates if an event has been 'closed'
    """

    event_display_type: str
    closing_event_display_type: str
    # defaulted field must come last (see module-level note)
    compliance_type: str = CompliancePolicyType.COMPLIANCE_CURRENT_OUTSTANDING.name
@dataclass(frozen=True)
class CompliancePolicyPeriodOutstanding(CompliancePolicyBase):
    """
    Compliance policy that indicates if an event has been 'closed' within
    a specified time
    """

    event_display_type: str
    closing_event_display_type: str
    time_period_seconds: int
    # defaulted field must come last (see module-level note)
    compliance_type: str = CompliancePolicyType.COMPLIANCE_PERIOD_OUTSTANDING.name
@dataclass(frozen=True)
class CompliancePolicyDynamicTolerance(CompliancePolicyBase):
    """
    Compliance policy that indicates if the average time between opening
    and closing events in a specified period of time does not exceed a
    specified number of standard deviations from the mean.
    """

    event_display_type: str
    closing_event_display_type: str
    dynamic_window: int
    dynamic_variability: float
    # defaulted field must come last (see module-level note)
    compliance_type: str = CompliancePolicyType.COMPLIANCE_DYNAMIC_TOLERANCE.name
@dataclass(frozen=True)
class CompliancePolicyRichness(CompliancePolicyBase):
    """
    Compliance policy that indicates if an asset has an attribute that
    complies with a set of assertions.
    """

    richness_assertions: "list[list]"
    compliance_type: str = CompliancePolicyType.COMPLIANCE_RICHNESS.name

    def dict(self):
        """Emit dictionary representation"""
        # the base class already handles asset_filter; only the
        # richness_assertions need additional wrapping here
        d = super().dict()
        d["richness_assertions"] = and_list(d["richness_assertions"])
        return d
from hexbytes import HexBytes
# proof element type identifiers used in receipt 'named_proofs' entries;
# each selects a decoder helper class below
ELEMENT_ID_SLOTARRAY = "eip1186sp:1:sa"
ELEMENT_ID_FIELDVALUES = "eip1186sp:2:fv"
ELEMENT_ID_BYTESLIST = (
    "eip1186sp:3:loba"  # TODO change all these suffixes to sensibly literate values
)
class MetadataError(Exception):
    """
    Raised when there is an unexpected formatting or content issue with the metadata.
    """
class SlotArray:
    """
    Helper for eip1186:1:sa

    After construction the concatenation of all slot values is available on .value
    The individual slot values are available in .values

    No extra metadata is required for this type. However, if the number of
    significant bytes for the last slot is known it can be supplied via the
    `lenlast` constructor param. This does not impact the proof, that is for the
    full slot contents regardless of storage layout within that slot.

    This accommodates:

    * a storage proof for a solidity dynamic array of bytes (string or bytes)
    * a storage proof for a struct whose fields are all uint256
    """

    def __init__(self, storageproofs: list, lenlast: "int|None" = None):
        """
        The list of proofs in storageproof is treated a list of slots where the
        raw values are the application bytes.

        :param storageproofs: The storageProofs field from the EIP1186 response,
            its a list of merkle proofs
        :param lenlast: If the number of bytes stored in the last slot is known
            the last slot can be trimmed
        """
        # each proof's 'value' is a hex string; normalise to raw bytes
        self.values = [bytes(HexBytes(proof["value"])) for proof in storageproofs]
        if lenlast is not None and len(storageproofs):
            # keep only the significant bytes of the final slot
            self.values[-1] = self.values[-1][:lenlast]
        self.value = b"".join(self.values)
class ByteArrays:
    """
    Helper for eip1186:3:loba

    The list of proofs in storage proof proves the presence of a *list* of byte
    arrays. The metadata contains a slot count for *each* byte array.

    For example

    .. code-block::

        metadata: {
            slots: [3, 2, 5],
            lenlasts: [28, 3, 32]
        }

    Describes 3 distinct byte arrays. The first consumes the values for the
    first 3 proofs, the second proof values 3 & 4, and the last takes the
    remaining 5 proof values.

    The last slot of each respective array has 28 bytes, 3 bytes and finally
    exactly 32 bytes
    """

    def __init__(self, storageproofs: list, metadata: dict):
        """
        :param storageproofs: the list of storage proofs, typically
            ["proof"]["storageProof"] from the EIP 1186 response
        :param metadata: the metadata for id eip1186:loba describing the layout
            of the proven values
        """
        # The metadata uses associative arrays rather than structured objects
        # as it keeps the size of the metadata down. It also makes it more
        # composable and open for extension.
        slotcounts = metadata["slots"]
        lenlasts = metadata["lenlasts"]
        if len(slotcounts) != len(lenlasts):
            raise MetadataError("mismatched slots and 'length last slot' counts")

        self.arrays = []
        offset = 0
        for slotcount, lenlast in zip(slotcounts, lenlasts):
            # each byte array consumes the next `slotcount` proof values
            segment = storageproofs[offset : offset + slotcount]
            self.arrays.append(SlotArray(segment, lenlast=lenlast).value)
            offset += slotcount
class FieldValues:
    """
    Helper for eip1186:2:fv

    The list of proofs in storage proof proves the presence of an array of
    storage slots backing a solidity ABI structure. The metadata defines the
    variable field structure in those slots (but does not currently contain
    the original type info)

    For example:

    .. code-block::

        metadata: {
            fields: [
                { "name": "foo", "slot": 0, "offset": 0, "size": 2},
                { "name": "bar", "slot": 0, "offset": 2, "size": 4},
                { "name": "baz", "slot": 1, "offset": 0, "size": 32}
            }
        }

    Defines the variables foo, bar and baz. foo and bar are packed into the
    first slot. Due to occupying a full 32 byte slot, baz.

    Notes:

        a) In the current rkvst useage fields occupied by nested slots are
           typically omitted and dealt with as a special case (who & when). We
           may in future allow size to be a multiple of 32 to allow for inline
           nested structs.
        b) the offset is from the low register address, which frustratingly
           is at the right end of the word. Eg the offsets should actually be
           visualized like this. (We intend to change the backend to make this
           more intuitive)

           `|31|30 ... 2|1|0|`

        c) we plan to add the original solidity type names in a future
           backend release.
    """

    def __init__(self, storageproofs: list, metadata: dict):
        """Apply the metadata interpretation to the storageproofs

        :param storageproofs: the list of storage proofs, typically ["proof"]["storageProof"] from the EIP 1186 response
        :param metadata: the metadata for id eip1186:fv describing the layout of the proven values
        """
        self._fields = {}
        self._slotvalues = []
        for proof in storageproofs:
            # Notice: the proof values omit the 0's from the big end, so we must put them back.
            self._slotvalues.append(bytes(HexBytes(proof["value"])).rjust(32, b"\x00"))

        # Notice: the slot number in the metadata is the original storage slot
        # relative to the base storage location of the struct.
        islotvalue = 0
        storageslot = 0
        for field in metadata["fields"]:
            # every time the storage slot changes we advance to the next
            # proven slot value. The current slot must be *tracked* so that
            # several packed fields sharing the same (non zero) slot consume
            # a single proof value; previously storageslot was never updated,
            # so a second field in slot N>0 wrongly advanced islotvalue again.
            if field["slot"] != storageslot:
                storageslot = field["slot"]
                islotvalue += 1

            slotvalue = self._slotvalues[islotvalue]

            name, offset, size = field["name"], field["offset"], field["size"]

            # we should do this in the backend, the low address of the 32 byte word is on the 'right'
            offset = 32 - offset - size

            fieldvalue = slotvalue[offset : offset + size]
            self._fields[name] = dict(
                islot=islotvalue,
                value=fieldvalue
                # we don't currently include the solidity abi type in the metadata but we may do
            )

    def fields(self):
        """return the list of field names"""
        return list(self._fields)

    def value(self, name: str):
        """
        return the value of the field
        """
        return self._fields[name]["value"]

    def __getattr__(self, name: str):
        """
        dynamic attribute access to the fields by name
        """
        if name in self._fields:
            return self._fields[name]["value"]
        raise AttributeError(f"{name} is not a field or an attribute")
from eth_utils import decode_hex
from . import trie_alg
from . import ethproofs
from . import elementmetadata
class NamedProofsMissingPayloadKey(KeyError):
    """An expected payload key was missing from the receipt contents"""
class NamedProofsMissingProof(KeyError):
    """A required named proof was missing from the receipt contents[named_proofs]"""
class NamedProofsMissingApplicationParameter(KeyError):
    """An expected application parameter was missing from the receipt contents[application_parameters]"""
class NamedProofs:
    """NamedProofs

    Access the proven values referred to by the named proofs in an
    EIP1186NamedProofs receipt
    """

    def __init__(self, contents, serviceparams=None):
        """
        :param contents: json object representation of the trie-alg specific 'contents' of a receipt
        :param serviceparams: the trusted service parameters
        """
        self.contents = contents
        # NOTE(review): attribute keeps its historical (misspelled) name for
        # backwards compatibility with any external readers.
        self.serviceprams = serviceparams
        # name -> the proof elements from the receipt
        self._proofs = {}
        # the raw proof values decoded into (closer) to application format
        self._decoded = {}

    @property
    def proofs(self):
        """A copy of the collected proof elements, keyed by proof name"""
        return self._proofs.copy()

    @property
    def decodedvalues(self):
        """A copy of the decoded proven values, keyed by proof name"""
        return self._decoded.copy()

    def check_payload_keys(self):
        """checks the payload has the required top level keys

        :raises NamedProofsMissingPayloadKey: if a required key is absent
        """
        for k in trie_alg.PAYLOAD_KEYS:
            if k not in self.contents:
                raise NamedProofsMissingPayloadKey(f"{k} not found in contents")

    def check_application_parameters(self, *extras):
        """
        Check the expected application_parameters are present

        :param extras: any additional application specific parameters (described by app_id, app_content_ref)
        :raises NamedProofsMissingApplicationParameter: if a parameter is absent
        """
        for k in trie_alg.APPLICATION_PARAMETERS + list(extras):
            if k not in self.contents[trie_alg.APPLICATION_PARAMETERS_KEY]:
                raise NamedProofsMissingApplicationParameter(
                    f"{k} not found in contents[application_parameters]"
                )

    def collect_proofs(self, *required):
        """
        process the contents collecting each of the named proofs

        Note: assumes the check methods have been called

        :param required: the required set of names, there may be more but this
            list is required.
        :raises NamedProofsMissingProof: if any required name is not present
        """
        # Note: the format allows for multiple proofs with the same name. RKVST
        # khipu proofs do not make use of that so all names are known to be
        # unique.
        required = set(required)
        for proof in self.contents[trie_alg.NAMED_PROOFS_KEY]:
            name = proof["name"]
            self._proofs[name] = proof
            required.discard(name)

        if required:
            raise NamedProofsMissingProof(f"{', '.join(list(required))}")

    def verify_proofs(self, worldroot=None):
        """
        * If the worldroot is supplied, the presence of the contract storage account is verified

        If no worldroot is supplied this method simply verifies the storage
        proofs are consistent with the storage roots in the proof itself.

        :param worldroot: ethereum world state root from the block header
        :raises ethproofs.VerifyFailed: naming the first proof that fails
        """
        for name, proofelement in self._proofs.items():
            try:
                ethproofs.verify_eth_storage_proof(proofelement["proof"])
            except ethproofs.VerifyFailed as e:
                raise ethproofs.VerifyFailed(f"Failed to verify {name}") from e

            if worldroot:
                ethproofs.verify_eth_account_proof(
                    self.contents["account"],
                    proofelement["proof"],
                    decode_hex(worldroot),
                )

    def decode(self):
        """
        decode all the proven values using the metadata from the receipt.

        typically called after verifying in order to reconstruct application
        data from the proven values.

        :raises elementmetadata.MetadataError: for an unrecognised element id
        """
        for name, proofelement in self._proofs.items():
            sp = proofelement["proof"]["storageProof"]
            element_id = proofelement["id"]
            if element_id == elementmetadata.ELEMENT_ID_SLOTARRAY:
                decoded = elementmetadata.SlotArray(sp, lenlast=None)
            elif element_id == elementmetadata.ELEMENT_ID_FIELDVALUES:
                decoded = elementmetadata.FieldValues(sp, proofelement["metadata"])
            elif element_id == elementmetadata.ELEMENT_ID_BYTESLIST:
                decoded = elementmetadata.ByteArrays(sp, proofelement["metadata"])
            else:
                # previously an unknown id silently re-assigned the previous
                # iteration's decoded value (or raised NameError on the first)
                raise elementmetadata.MetadataError(
                    f"unknown proof element id '{element_id}' for proof '{name}'"
                )
            self._decoded[name] = decoded

    def decoded(self, name):
        """
        returns the decoded value container.

        Which will be a SlotArray, a FieldValues or a ByteArrays instance.
        Typically, the caller will know which type to expect based on context.
        """
        return self._decoded[name]
import uuid
from datetime import datetime
import rfc3339
from eth_utils import to_checksum_address
from . import trie_alg
from .namedproofs import NamedProofs
from .attribute_decoder import (
decode_attribute_key,
decode_attribute_value,
AttributeType,
)
class KhipuReceiptMalformedAttributes(ValueError):
    """
    The receipt encoding of the rkvst attributes is malformed
    """
class KhipuReceiptMalformedValue(ValueError):
    """
    The receipt encoding of a storage value is not as expected
    """
# application parameters specific to khipu receipts, in addition to the
# generic trie-alg parameters
EXTRA_PARAMETERS = ["monotonic_version"]
APPLICATION_PARAMETERS = trie_alg.APPLICATION_PARAMETERS + EXTRA_PARAMETERS

# the named proof elements every khipu receipt must carry
MANIFEST_ELEMENTS = "who_declared who_accepted essentials attribute_kindnames attribute_values when".split()

# well-known proof element names and essentials field names
ATTRIBUTE_KINDNAMES = "attribute_kindnames"
ATTRIBUTE_VALUES = "attribute_values"
ESSENTIALS = "essentials"
ESSENTIALS_CREATOR = "creator"
ESSENTIALS_KHIPUIDENTITY = "khipuIdentity"
ESSENTIALS_ASSETIDENTITY = "assetIdentity"
# storage-slot indices for the 'who_*' principal proof values
WHO_ISSUER = 0
WHO_SUBJECT = 1
WHO_DISPLAY_NAME = 2
WHO_EMAIL = 3

# storage-slot indices for the 'when' timestamp proof values
WHEN_DECLARED = 0
WHEN_ACCEPTED = 1
WHEN_COMMITTED = 2


def _principal_from_rawstorage(rawstorage):
    """Map the raw 4 element principal storage values to a principal dict.

    :param rawstorage: the 4 element list of utf-8 byte strings representing
        a principal
    :return: dict with issuer/subject/display_name/email keys
    """
    keys_to_slots = (
        ("issuer", WHO_ISSUER),
        ("subject", WHO_SUBJECT),
        ("display_name", WHO_DISPLAY_NAME),
        ("email", WHO_EMAIL),
    )
    return {key: rawstorage[slot].decode("utf-8") for key, slot in keys_to_slots}
def _bto3339(b: bytes, scale=1):
    """
    convert a bytes array, interpreted as a big endian integer, to a utc timestamp RFC 3339

    :param b: bytes holding the big-endian integer timestamp
    :param scale: the timestamp from the block chain has a consensus specific scale factor in the case of raft
    """
    # NOTE(review): datetime.utcfromtimestamp is deprecated (Python 3.12) and
    # returns a *naive* datetime; whether rfc3339.rfc3339 treats naive values
    # as UTC is not visible here - confirm before changing this.
    unix = int.from_bytes(b, "big") / scale
    return rfc3339.rfc3339(datetime.utcfromtimestamp(unix))
def _whens_from_rawstorage(rawstorage):
    """
    Convert the raw 'when' slot values to the three RFC 3339 timestamp fields.

    :param rawstorage: the 3 element list of slot values from the 'when' proof
    :return: dict of timestamp_declared/accepted/committed RFC 3339 strings
    """
    # rkvst_simplehash.V1_FIELDS (in v1.py) defines constants for these dict
    # keys these in alignment with the public rkvst events api
    return {
        "timestamp_declared": _bto3339(rawstorage[WHEN_DECLARED]),
        "timestamp_accepted": _bto3339(rawstorage[WHEN_ACCEPTED]),
        # scale down by 1000,000,000. raft block time stamps are in nanoseconds
        # and we need seconds to do the RFC 3339 conversion
        "timestamp_committed": _bto3339(rawstorage[WHEN_COMMITTED], scale=1000000000),
    }
def _u256touuid(b: bytes) -> uuid.UUID:
    """
    convert a 32 byte value from khipu event storage to a uuid

    :param b: exactly 32 bytes; only the low (last) 16 bytes carry the uuid
    :return: :class:`uuid.UUID` built from the low 16 bytes
        (the original annotation claimed ``str``; the function has always
        returned a UUID instance)
    :raises KhipuReceiptMalformedValue: if b is not exactly 32 bytes
    """
    if len(b) != 32:
        raise KhipuReceiptMalformedValue(
            f"expected 32 bytes for a uuid storage value not {len(b)}"
        )
    b = b[16:]  # the high bytes are zero
    return uuid.UUID(int=int.from_bytes(b, "big"))
class KhipuReceipt:
    """
    This class uses the EIP1186 *neutral* receipt format to encode a receipt for an RKVST 'khipu' event.

    serviceparams and contents are as per draft-birkholz-scitt-receipts 2. "Common parameters" & 3. "Generic Receipt Structure".
    But in essence the serviceparams identify the service and the appropriate interpretation of contents. Here,
    our trie-alg is cEIP1186NamedProofs and the basic structure of the contents is:

    .. code-block::

        {
            application_parameters: {
                app_id: trusted service application identifier,
                app_content_ref: trusted service application references,
                element_manifest: [] the complete set of app-defined-names,
                    1:1 associative with named_proofs
            },
            block: hex-str block number the proof was read from
            account: the contract account the proof was read from
            named_proofs: [ list of named proofs, 1 per entry in element_manifest
                {
                    name: app-defined-name
                    id: proof-element-id - one of the three trie alg intrinsics defined
                        in elementmetadata.py or app specific defined by app_content_ref

                    ... one or more EIP 1186 merkle inclusion proofs and supporting
                    metadata
                }
            ]
        ]

    For serviceparams to be fully compliant we need at least two items here:
    * a permanent service identifier (likely app.rkvst.io)
    * the trie alg defining the format of contents, currently EIP1186NamedProofs
    But the implementation simply assumes this for now.
    """

    def __init__(self, contents, serviceparams=None):
        """
        :param contents: this is the trie-alg "EIP1186NamedProofs" defined receipt contents
        :param serviceparams: the service parameters required by draft-birkholz-scitt-receipts 2. "Common parameters"
        """
        self.namedproofs = NamedProofs(contents, serviceparams=serviceparams)

    def verify(self, worldroot: "str|None" = None):
        """Verify the named proofs

        * If the worldroot is supplied, the presence of the contract storage account is verified

        If no parameters are supplied this method simple verifies the storage
        proofs are consistent with the storage roots in the proof itself.

        :param worldroot: ethereum world state root from the block header
        """
        # TODO: pass in stateroot and timestamp so caller can provide it from block header
        self.namedproofs.check_payload_keys()
        self.namedproofs.check_application_parameters()
        self.namedproofs.collect_proofs(*MANIFEST_ELEMENTS)
        self.namedproofs.verify_proofs(worldroot)

    def decode(self):
        """decode the application values from the proof

        Rebuilds an RKVST event dict (aligned with the public events API and
        with rkvst_simplehash.V1_FIELDS) from the proven storage values.
        """
        # ensure we have the proofs from the contents collected
        if not self.namedproofs.proofs:
            self.namedproofs.collect_proofs(*MANIFEST_ELEMENTS)
        self.namedproofs.decode()

        # Now use RKVST API assumptions to rebuild the event and asset attributes map
        kindnames = self.namedproofs.decoded(ATTRIBUTE_KINDNAMES).arrays
        values = self.namedproofs.decoded(ATTRIBUTE_VALUES).arrays
        if len(kindnames) != len(values):
            raise KhipuReceiptMalformedAttributes(
                "number of names inconsistent with number of values"
            )

        # split the flat kindname/value pairs into asset vs event attributes
        assetattributes = {}
        eventattributes = {}

        for kindname, rlpvalue in zip(kindnames, values):
            kind, name = decode_attribute_key(kindname)
            value = decode_attribute_value(rlpvalue)
            if kind == AttributeType.ASSET:
                assetattributes[name] = value
            elif kind == AttributeType.EVENT:
                eventattributes[name] = value
            else:
                raise KhipuReceiptMalformedAttributes(
                    f"unsupported kind '{kind}' for attribute '{name}'"
                )

        # Note we don't currently include the aggregate sharing policy
        # attributes in the receipt. We may do in future.

        essentials = self.namedproofs.decoded(ESSENTIALS)
        creator = to_checksum_address(
            essentials.value(ESSENTIALS_CREATOR)
        )  # aka from address
        eventUUID = _u256touuid(essentials.value(ESSENTIALS_KHIPUIDENTITY))
        assetUUID = _u256touuid(essentials.value(ESSENTIALS_ASSETIDENTITY))
        # TODO: missing the khipu schema version number 'monotonicversion'

        who_declared = self.namedproofs.decoded("who_declared")
        who_accepted = self.namedproofs.decoded("who_accepted")

        whens = _whens_from_rawstorage(self.namedproofs.decoded("when").values)

        # Note: this dict is aligned with the constants and event structure we
        # work with in the rkvst-simplehash-python package. see
        # rkvst_simplehash.V1_FIELDS (in v1.py)
        event = {
            "identity": f"assets/{assetUUID}/events/{eventUUID}",
            "asset_identity": f"assets/{assetUUID}",
            "from": creator,
            "principal_declared": _principal_from_rawstorage(who_declared.arrays),
            "principal_accepted": _principal_from_rawstorage(who_accepted.arrays),
            "asset_attributes": assetattributes,
            "event_attributes": eventattributes,
        }
        event.update(whens)
        return event
from eth_utils import keccak, to_checksum_address
import rlp
from rlp.sedes import (
Binary,
big_endian_int,
)
from trie import HexaryTrie
from trie.exceptions import BadTrieProof
from hexbytes import HexBytes
class VerifyFailed(Exception):
    """raised if a proof verification operation fails"""
def verify_eth_account_proof(account: str, ethproof: dict, root: HexBytes):
    """
    verifies the given account proof with the given root

    :param str account: the contract account address the proof was read from
    :param dict ethproof: the merkle proof as per the response
        from `eth_getProof`
    :param HexBytes root: the state root of the block to verify the
        account proof against
    :raises VerifyFailed: if the proof does not verify against the root
    """
    trie_root = Binary.fixed_length(32, allow_empty=True)
    hash32 = Binary.fixed_length(32)

    # RLP sedes for the canonical ethereum account record; the account leaf
    # in the world state trie is the RLP encoding of these four fields.
    class _Account(rlp.Serializable):
        fields = [
            ("nonce", big_endian_int),
            ("balance", big_endian_int),
            ("storage", trie_root),
            ("code_hash", hash32),
        ]

    acc = _Account(
        int(ethproof["nonce"], 16),
        int(ethproof["balance"], 16),
        HexBytes(ethproof["storageHash"]),
        HexBytes(ethproof["codeHash"]),
    )

    rlp_account = rlp.encode(acc)
    # the trie key is the keccak of the checksummed account address
    account = to_checksum_address(account)
    trie_key = keccak(hexstr=account)
    proof = [rlp.decode(bytes(HexBytes(node))) for node in ethproof["accountProof"]]

    try:
        if rlp_account != HexaryTrie.get_from_proof(root, trie_key, proof):
            raise VerifyFailed(f"Failed to verify account proof for {account}")
    except BadTrieProof as e:
        raise VerifyFailed(f"Failed to verify account proof for {account}") from e
def verify_eth_storage_proof(ethproof):
    """
    verifies the given account proof with the given root

    Each entry of storageProof is checked against the storageHash carried in
    the same proof response.

    :param ethproof: the merkle proof as per the
        response from `eth_getProof`
    :return: True if all storage proofs verify
    :raises VerifyFailed: naming the first key that fails to verify
    """
    for storage_proof in ethproof["storageProof"]:
        # storage keys are keccak'd after left-padding to a full 32-byte word
        trie_key = keccak(HexBytes(storage_proof["key"]).rjust(32, b"\x00"))
        root = HexBytes(ethproof["storageHash"])

        value = HexBytes(storage_proof["value"])
        # a zero value is absent from the trie, represented by the empty RLP
        if value == b"\x00":
            rlp_value = b""
        else:
            rlp_value = rlp.encode(value)

        proof = [rlp.decode(bytes(HexBytes(node))) for node in storage_proof["proof"]]

        if rlp_value != HexaryTrie.get_from_proof(root, trie_key, proof):
            raise VerifyFailed(f"Failed to verify storage proof {storage_proof['key']}")

    return True
from importlib import resources
import logging
from copy import copy
# pylint:disable=unused-import # To prevent cyclical import errors forward referencing is used
# pylint:disable=cyclic-import # but pylint doesn't understand this feature
from typing import TYPE_CHECKING
from . import document_files
from ..testing.assets import make_assets_create, AttachmentDescription
if TYPE_CHECKING:
from archivist.archivist import Archivist
LOGGER = logging.getLogger(__name__)
def upload_attachment(arch, attachment_description: AttachmentDescription):
    """Upload a packaged sample file and return attachment attributes.

    :param arch: Archivist connection
    :param attachment_description: (attribute_name, filename) naming a file
        packaged in the document_files resources
    :return: dict of arc_* attributes referencing the uploaded blob
    """
    # NOTE(review): importlib.resources.open_binary is deprecated since
    # Python 3.11 in favour of resources.files(...); confirm the minimum
    # supported Python before migrating.
    with resources.open_binary(document_files, attachment_description.filename) as fd:
        blob = arch.attachments.upload(fd)
    attachment = {
        # sample-specific attr to relay attachment name
        "rkvst_samples_display_name": attachment_description.attribute_name,
        "arc_file_name": attachment_description.filename,
        "arc_attribute_type": "arc_attachment",
        "arc_blob_identity": blob["identity"],
        "arc_blob_hash_alg": blob["hash"]["alg"],
        "arc_blob_hash_value": blob["hash"]["value"],
    }
    return attachment
def attachment_create(arch, attachment_description: AttachmentDescription):
    """Upload a packaged sample file and return attachment attributes.

    Unlike :func:`upload_attachment` this variant emits arc_display_name
    rather than the sample-specific rkvst_samples_display_name key.

    :param arch: Archivist connection
    :param attachment_description: (attribute_name, filename) naming a file
        packaged in the document_files resources
    :return: dict of arc_* attributes referencing the uploaded blob
    """
    # NOTE(review): importlib.resources.open_binary is deprecated since
    # Python 3.11 in favour of resources.files(...); confirm the minimum
    # supported Python before migrating.
    with resources.open_binary(document_files, attachment_description.filename) as fd:
        attachment = arch.attachments.upload(fd)
    result = {
        "arc_attribute_type": "arc_attachment",
        "arc_blob_identity": attachment["identity"],
        "arc_blob_hash_alg": attachment["hash"]["alg"],
        "arc_blob_hash_value": attachment["hash"]["value"],
        "arc_display_name": attachment_description.attribute_name,
        "arc_file_name": attachment_description.filename,
    }
    return result
# asset-creation helper bound to the attachment uploader above; confirm=True
# makes creation wait for commitment before returning
document_creator = make_assets_create(
    attachment_creator=attachment_create, confirm=True
)
class Document:
    """Wrapper for a Document-profile RKVST asset and its lifecycle events."""

    # attribute key/value pair used by the creator to find an already
    # onboarded sample asset instead of creating a duplicate
    selector_key = "OnboardingSampleID"
    selector_value = "DocumentLineage"

    def __init__(
        self,
        arch: "Archivist",
        display_type: str,
    ):
        """
        :param arch: Archivist connection; a copy is taken so the fixtures
            set here do not leak into the caller's instance
        :param display_type: value for the arc_display_type asset fixture
        """
        arch_ = copy(arch)
        arch_.fixtures = {
            "assets": {
                "attributes": {
                    "arc_display_type": display_type,
                },
            },
        }
        self._arch = arch_
        self._asset = None
        self._existed = False

    @property
    def arch(self):
        """Archivist: the fixture-configured archivist connection"""
        return self._arch

    @property
    def asset(self):
        """dict: the underlying asset; None until create() has been called"""
        return self._asset

    @property
    def existed(self):
        """bool: True when the creator reported the asset already existed"""
        return self._existed

    def create(
        self,
        name: str,
        description: str,
        *,
        attachments: "list|None" = None,
        custom_attrs: "dict|None" = None,
    ):
        """Create (or find, via the selector) the document asset.

        :param name: asset display name
        :param description: value for arc_description
        :param attachments: optional AttachmentDescription list to upload
        :param custom_attrs: extra asset attributes merged over the defaults
        :return: the created (or pre-existing) asset record
        """
        attrs = {
            "arc_description": description,
            "arc_profile": "Document",
        }
        if custom_attrs is not None:
            attrs.update(custom_attrs)

        self._asset, self._existed = document_creator(
            self.arch,
            name,
            attrs,
            attachments=attachments,
            selector_key=self.selector_key,
            selector_value=self.selector_value,
        )
        return self._asset

    # Publish new version of the document
    # pylint: disable=too-many-arguments
    def publish(
        self,
        document: dict,
        version: str,
        description: str,
        doc_hash: str,
        authors: "list[dict]",
        name: str,
        custom_attrs: "dict|None" = None,
    ):
        """Record a Publish event and update the asset's document attributes.

        :param document: attachment attributes for the new document blob
        :param version: new document version string
        :param description: event description
        :param doc_hash: sha256 of the document content
        :param authors: list of author dicts recorded on the event
        :param name: asset display name
        :param custom_attrs: extra event attributes
        :return: the created event record
        """
        props = {
            "operation": "Record",
            "behaviour": "RecordEvidence",
        }
        attrs = {
            "arc_display_type": "Publish",
            "arc_description": description,
            "document_version_authors": authors,
        }
        if custom_attrs is not None:
            attrs.update(custom_attrs)
        asset_attrs = {
            "arc_display_name": name,
            "document_document": document,
            "document_hash_value": doc_hash,
            "document_hash_alg": "sha256",
            "document_version": version,
            "document_status": "Published",
        }

        return self.arch.events.create(
            self.asset["identity"],
            props=props,
            attrs=attrs,
            asset_attrs=asset_attrs,
            confirm=True,
        )

    # Withdraw version of the document
    def withdraw(self, document: dict, version: str, doc_hash: str, name: str):
        """Record a Withdraw event marking the document version withdrawn.

        :param document: attachment attributes for the document blob
        :param version: document version being withdrawn
        :param doc_hash: sha256 of the document content
        :param name: asset display name
        :return: the created event record
        """
        props = {
            "operation": "Record",
            "behaviour": "RecordEvidence",
        }
        attrs = {"arc_display_type": "Withdraw", "document_status": "Withdrawn"}
        asset_attrs = {
            "arc_display_name": name,
            "document_document": document,
            "document_hash_value": doc_hash,
            "document_version": version,
        }

        return self.arch.events.create(
            self.asset["identity"],
            props=props,
            attrs=attrs,
            asset_attrs=asset_attrs,
            confirm=True,
        )
from typing import Optional
# pylint:disable=unused-import # To prevent cyclical import errors forward referencing is used
# pylint:disable=cyclic-import # but pylint doesn't understand this feature
from archivist import archivist as type_helper
from .software_package import sboms_creator
class SoftwareDeployment:
    def __init__(
        self,
        arch: "type_helper.Archivist",
    ):
        """
        :param arch: :class:`Archivist` instance used for all API calls
        """
        self._arch = arch
        # populated by create(); the asset backing this deployment
        self._asset = None
        # populated by _add_attachments() before recording events
        self._attachments = None
        # last sbom environment seen; used as a fallback default
        self._environment = None
    @property
    def arch(self):
        """Archivist: the underlying archivist connection"""
        return self._arch

    @property
    def asset(self):
        """dict: the deployment asset; None until create() has been called"""
        return self._asset

    @property
    def attachments(self):
        """list: attachments recorded so far (managed by _add_attachments)"""
        return self._attachments

    @property
    def environment(self):
        """str: the most recently recorded sbom environment"""
        return self._environment
# Create Software Deployment
def create(
self,
sbom_name: str,
sbom_description: str,
*,
sbom_environment: Optional[str],
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
if sbom_environment is not None:
self._environment = sbom_environment
else:
sbom_environment = self._environment
attrs = {
"arc_description": sbom_description,
"arc_display_type": "Software Deployment",
"sbom_environment": sbom_environment,
}
if custom_attrs is not None:
attrs.update(custom_attrs)
self._asset = sboms_creator(
self.arch,
sbom_name,
attrs,
attachments=attachments,
)
return self._asset
    # Installation Event
    def installation(
        self,
        sbom_installation: dict,
        *,
        attachments: Optional[list] = None,
        custom_attrs: Optional[dict] = None,
        custom_asset_attrs: Optional[dict] = None,
    ):
        """Record an Installation event for this deployment.

        :param sbom_installation: dict with description/name/hash/version/
            author/supplier/uuid/environment keys describing the installed sbom
        :param attachments: optional attachments to upload for this event
        :param custom_attrs: extra event attributes
        :param custom_asset_attrs: extra asset attributes
        :return: the created event record
        """
        # remember or fall back to the last known environment
        if sbom_installation["environment"] is not None:
            self._environment = sbom_installation["environment"]
        else:
            sbom_installation["environment"] = self._environment

        self._add_attachments(attachments)

        props = {
            "operation": "Record",
            "behaviour": "RecordEvidence",
        }
        attrs = {
            "arc_description": sbom_installation["description"],
            "arc_evidence": "Installation",
            "arc_display_type": "Installation",
            "sbom_installation_component": sbom_installation["name"],
            "sbom_installation_hash": sbom_installation["hash"],
            "sbom_installation_version": sbom_installation["version"],
            "sbom_installation_author": sbom_installation["author"],
            "sbom_installation_supplier": sbom_installation["supplier"],
            "sbom_installation_uuid": sbom_installation["uuid"],
            "sbom_installation_environment": sbom_installation["environment"],
        }
        # one arc_attachment attribute per uploaded blob
        for i, attachment in enumerate(self._attachments):
            attrs[f"attachment_attr_{i}"] = {
                "arc_display_name": sbom_installation["description"],
                "arc_attribute_type": "arc_attachment",
                "arc_blob_identity": attachment["identity"],
                "arc_blob_hash_alg": attachment["hash"]["alg"],
                "arc_blob_hash_value": attachment["hash"]["value"],
            }
        if custom_attrs is not None:
            attrs.update(custom_attrs)

        # mirror the installed sbom onto the asset itself
        asset_attrs = {
            "sbom_component": sbom_installation["name"],
            "sbom_hash": sbom_installation["hash"],
            "sbom_version": sbom_installation["version"],
            "sbom_author": sbom_installation["author"],
            "sbom_supplier": sbom_installation["supplier"],
            "sbom_uuid": sbom_installation["uuid"],
            "sbom_environment": sbom_installation["environment"],
        }
        if custom_asset_attrs is not None:
            asset_attrs.update(custom_asset_attrs)

        return self.arch.events.create(
            self._asset["identity"],
            props=props,
            attrs=attrs,
            asset_attrs=asset_attrs,
            confirm=True,
        )
def decommission(
self,
sbom_decomission: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
if sbom_decomission["environment"] is not None:
self._environment = sbom_decomission["environment"]
else:
sbom_decomission["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_decomission["description"],
"arc_evidence": "Decomission",
"arc_display_type": "Decomission",
"sbom_decomission_component": sbom_decomission["name"],
"sbom_decomission_version": sbom_decomission["version"],
"sbom_decomission_author": sbom_decomission["author"],
"sbom_decomission_supplier": sbom_decomission["supplier"],
"sbom_decomission_uuid": sbom_decomission["uuid"],
"sbom_decomission_target_date": sbom_decomission["target_date"],
"sbom_decomission_status": sbom_decomission["status"],
"sbom_decomission_environment": sbom_decomission["environment"],
}
for i, attachment in enumerate(self._attachments):
attrs[f"attachment_attr_{i}"] = {
"arc_display_name": sbom_decomission["description"],
"arc_attribute_type": "arc_attachment",
"arc_blob_identity": attachment["identity"],
"arc_blob_hash_alg": attachment["hash"]["alg"],
"arc_blob_hash_value": attachment["hash"]["value"],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"sbom_decomission_target_date": sbom_decomission["target_date"],
"sbom_decomission_status": sbom_decomission["status"],
"sbom_environment": sbom_decomission["environment"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, asset_attrs=asset_attrs
)
# Upgrade Events
def upgrade(
self,
sbom_upgrade: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
if sbom_upgrade["environment"] is not None:
self._environment = sbom_upgrade["environment"]
else:
sbom_upgrade["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_upgrade["description"],
"arc_evidence": "Upgrade",
"arc_display_type": "Upgrade",
"sbom_upgrade_component": sbom_upgrade["name"],
"sbom_upgrade_hash": sbom_upgrade["hash"],
"sbom_upgrade_version": sbom_upgrade["version"],
"sbom_upgrade_author": sbom_upgrade["author"],
"sbom_upgrade_supplier": sbom_upgrade["supplier"],
"sbom_upgrade_uuid": sbom_upgrade["uuid"],
"sbom_upgrade_environment": sbom_upgrade["environment"],
}
for i, attachment in enumerate(self._attachments):
attrs[f"attachment_attr_{i}"] = {
"arc_display_name": sbom_upgrade["description"],
"arc_attribute_type": "arc_attachment",
"arc_blob_identity": attachment["identity"],
"arc_blob_hash_alg": attachment["hash"]["alg"],
"arc_blob_hash_value": attachment["hash"]["value"],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"sbom_component": sbom_upgrade["name"],
"sbom_hash": sbom_upgrade["hash"],
"sbom_version": sbom_upgrade["version"],
"sbom_author": sbom_upgrade["author"],
"sbom_supplier": sbom_upgrade["supplier"],
"sbom_uuid": sbom_upgrade["uuid"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self._asset["identity"],
props=props,
attrs=attrs,
asset_attrs=asset_attrs,
confirm=True,
)
def upgrade_plan(
self,
sbom_planned: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
if sbom_planned["environment"] is not None:
self._environment = sbom_planned["environment"]
else:
sbom_planned["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_planned["description"],
"arc_evidence": "Upgrade Plan",
"arc_display_type": "Upgrade Plan",
"sbom_planned_date": sbom_planned["date"],
"sbom_planned_captain": sbom_planned["captain"],
"sbom_planned_component": sbom_planned["name"],
"sbom_planned_version": sbom_planned["version"],
"sbom_planned_reference": sbom_planned["reference"],
"sbom_planned_environment": sbom_planned["environment"],
}
for i, attachment in enumerate(self._attachments):
attrs[f"attachment_attr_{i}"] = {
"arc_display_name": sbom_planned["description"],
"arc_attribute_type": "arc_attachment",
"arc_blob_identity": attachment["identity"],
"arc_blob_hash_alg": attachment["hash"]["alg"],
"arc_blob_hash_value": attachment["hash"]["value"],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def upgrade_accepted(
self,
sbom_accepted: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
if sbom_accepted["environment"] is not None:
self._environment = sbom_accepted["environment"]
else:
sbom_accepted["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_accepted["description"],
"arc_evidence": "Upgrade Accepted",
"arc_display_type": "Upgrade Accepted",
"sbom_accepted_date": sbom_accepted["date"],
"sbom_accepted_captain": sbom_accepted["captain"],
"sbom_accepted_component": sbom_accepted["name"],
"sbom_accepted_version": sbom_accepted["version"],
"sbom_accepted_reference": sbom_accepted["reference"],
"sbom_accepted_environment": sbom_accepted["environment"],
}
for i, attachment in enumerate(self._attachments):
attrs[f"attachment_attr_{i}"] = {
"arc_display_name": sbom_accepted["description"],
"arc_attribute_type": "arc_attachment",
"arc_blob_identity": attachment["identity"],
"arc_blob_hash_alg": attachment["hash"]["alg"],
"arc_blob_hash_value": attachment["hash"]["value"],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
# Rollback Events
def rollback(
self,
sbom_rollback: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
if sbom_rollback["environment"] is not None:
self._environment = sbom_rollback["environment"]
else:
sbom_rollback["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_rollback["description"],
"arc_evidence": "Rollback",
"arc_display_type": "Rollback",
"sbom_rollback_component": sbom_rollback["name"],
"sbom_rollback_hash": sbom_rollback["hash"],
"sbom_rollback_version": sbom_rollback["version"],
"sbom_rollback_author": sbom_rollback["author"],
"sbom_rollback_supplier": sbom_rollback["supplier"],
"sbom_rollback_uuid": sbom_rollback["uuid"],
"sbom_rollback_environment": sbom_rollback["environment"],
}
for i, attachment in enumerate(self._attachments):
attrs[f"attachment_attr_{i}"] = {
"arc_display_name": sbom_rollback["description"],
"arc_attribute_type": "arc_attachment",
"arc_blob_identity": attachment["identity"],
"arc_blob_hash_alg": attachment["hash"]["alg"],
"arc_blob_hash_value": attachment["hash"]["value"],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"sbom_component": sbom_rollback["name"],
"sbom_hash": sbom_rollback["hash"],
"sbom_version": sbom_rollback["version"],
"sbom_author": sbom_rollback["author"],
"sbom_supplier": sbom_rollback["supplier"],
"sbom_uuid": sbom_rollback["uuid"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self._asset["identity"],
props=props,
attrs=attrs,
asset_attrs=asset_attrs,
confirm=True,
)
def rollback_plan(
self,
sbom_planned: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
if sbom_planned["environment"] is not None:
self._environment = sbom_planned["environment"]
else:
sbom_planned["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_planned["description"],
"arc_evidence": "Rollback Plan",
"arc_display_type": "Rollback Plan",
"sbom_planned_date": sbom_planned["date"],
"sbom_planned_captain": sbom_planned["captain"],
"sbom_planned_component": sbom_planned["name"],
"sbom_planned_version": sbom_planned["version"],
"sbom_planned_reference": sbom_planned["reference"],
"sbom_planned_environment": sbom_planned["environment"],
}
for i, attachment in enumerate(self._attachments):
attrs[f"attachment_attr_{i}"] = {
"arc_display_name": sbom_planned["description"],
"arc_attribute_type": "arc_attachment",
"arc_blob_identity": attachment["identity"],
"arc_blob_hash_alg": attachment["hash"]["alg"],
"arc_blob_hash_value": attachment["hash"]["value"],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def rollback_accepted(
self,
sbom_accepted: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
if sbom_accepted["environment"] is not None:
self._environment = sbom_accepted["environment"]
else:
sbom_accepted["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_accepted["description"],
"arc_evidence": "Rollback Accepted",
"arc_display_type": "Rollback Accepted",
"sbom_accepted_date": sbom_accepted["date"],
"sbom_accepted_captain": sbom_accepted["captain"],
"sbom_accepted_component": sbom_accepted["name"],
"sbom_accepted_version": sbom_accepted["version"],
"sbom_accepted_reference": sbom_accepted["reference"],
"sbom_accepted_environment": sbom_accepted["environment"],
}
for i, attachment in enumerate(self._attachments):
attrs[f"attachment_attr_{i}"] = {
"arc_display_name": sbom_accepted["description"],
"arc_attribute_type": "arc_attachment",
"arc_blob_identity": attachment["identity"],
"arc_blob_hash_alg": attachment["hash"]["alg"],
"arc_blob_hash_value": attachment["hash"]["value"],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
# Vulnerability Events
def vuln_disclosure(
self,
vuln: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": vuln["description"],
"arc_evidence": "Vulnerability Disclosure",
"arc_display_type": "Vulnerability Disclosure",
"vuln_name": vuln["name"],
"vuln_reference": vuln["reference"],
"vuln_id": vuln["id"],
"vuln_category": vuln["category"],
"vuln_severity": vuln["severity"],
"vuln_status": vuln["status"],
"vuln_author": vuln["author"],
"vuln_target_component": vuln["target_component"],
"vuln_target_version": vuln["target_version"],
}
for i, attachment in enumerate(self._attachments):
attrs[f"attachment_attr_{i}"] = {
"arc_display_name": vuln["description"],
"arc_attribute_type": "arc_attachment",
"arc_blob_identity": attachment["identity"],
"arc_blob_hash_alg": attachment["hash"]["alg"],
"arc_blob_hash_value": attachment["hash"]["value"],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def vuln_update(
self,
vuln: dict,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": vuln["description"],
"arc_evidence": "Vulnerability Update",
"arc_display_type": "Vulnerability Update",
"vuln_name": vuln["name"],
"vuln_reference": vuln["reference"],
"vuln_id": vuln["id"],
"vuln_category": vuln["category"],
"vuln_severity": vuln["severity"],
"vuln_status": vuln["status"],
"vuln_author": vuln["author"],
"vuln_target_component": vuln["target_component"],
"vuln_target_version": vuln["target_version"],
}
for i, attachment in enumerate(self._attachments):
attrs[f"attachment_attr_{i}"] = {
"arc_display_name": vuln["description"],
"arc_attribute_type": "arc_attachment",
"arc_blob_identity": attachment["identity"],
"arc_blob_hash_alg": attachment["hash"]["alg"],
"arc_blob_hash_value": attachment["hash"]["value"],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def _add_attachments(self, attachments: list):
self._attachments = []
for attachment in attachments:
with open(f"{attachment}", "rb") as fd:
self._attachments.append(self.arch.attachments.upload(fd)) | /rkvst-samples-0.12.1.tar.gz/rkvst-samples-0.12.1/archivist_samples/software_bill_of_materials/software_deployment.py | 0.88565 | 0.225054 | software_deployment.py | pypi |
from importlib import resources
import logging
from sys import exit as sys_exit
from typing import List, Optional
from archivist import archivist as type_helper
from ..testing.assets import make_assets_create, AttachmentDescription
from . import sbom_files
LOGGER = logging.getLogger(__name__)
def attachment_create(arch, attachment_description: AttachmentDescription):
    """Upload a packaged sample file and return its attachment attributes.

    The first parameter is the Archivist client (all callers in this file
    pass ``self.arch`` positionally); it was previously misnamed ``sboms``.
    Renamed to ``arch`` for consistency with the identical helper in the
    sbom_document sample.

    Args:
        arch: Archivist client used to upload the blob.
        attachment_description: (filename, attribute_name) pair naming a
            resource bundled in ``sbom_files``.

    Returns:
        dict of ``arc_*`` attachment attributes suitable for an event/asset
        attribute value.
    """
    LOGGER.info("sbom attachment creator: %s", attachment_description.filename)
    with resources.open_binary(sbom_files, attachment_description.filename) as fd:
        attachment = arch.attachments.upload(fd)
    result = {
        "arc_attribute_type": "arc_attachment",
        "arc_blob_identity": attachment["identity"],
        "arc_blob_hash_alg": attachment["hash"]["alg"],
        "arc_blob_hash_value": attachment["hash"]["value"],
        "arc_display_name": attachment_description.attribute_name,
        "arc_file_name": attachment_description.filename,
    }
    return result
# Asset factory wired with our uploader; creates (or finds) the SBOM asset
# and waits for confirmation before returning.
sboms_creator = make_assets_create(attachment_creator=attachment_create, confirm=True)
class SoftwarePackage:
    """Wrapper around an Archivist 'Software Package' asset and its events."""

    def __init__(self, arch: "type_helper.Archivist"):
        self._arch = arch  # Archivist client used for all API calls
        self._asset = None  # set by create()/read()/read_by_signature()
        self._existed = False  # True when create() found a pre-existing asset
    @property
    def arch(self):
        """The underlying Archivist client."""
        return self._arch
    @property
    def asset(self):
        """The wrapped asset record (None until created or read)."""
        return self._asset
    @property
    def existed(self):
        """Whether create() reused an existing asset."""
        return self._existed
# Asset Creation
def create(
self,
sbom_name: str,
sbom_description: str,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
attrs = {
"arc_description": sbom_description,
"arc_display_type": "Software Package",
}
if custom_attrs is not None:
attrs.update(custom_attrs)
self._asset, self._existed = sboms_creator(
self._arch,
sbom_name,
attrs,
attachments=attachments,
)
return self._asset
# Asset load by unique identity
    def read(
        self,
        identity: str,
    ):
        """Load the asset with the given Archivist identity string."""
        self._asset = self.arch.assets.read(identity)
# Asset load by attribute(s)
def read_by_signature(
self,
attributes: Optional[dict],
):
# Hard-wire the Asset type
newattrs = attributes.copy()
newattrs["arc_display_type"] = "Software Package"
# Note: underlying Archivist will raise ArchivistNotFoundError or
# ArchivistDuplicateError unless this set of attributes points to
# a single unique asset
self._asset = self.arch.assets.read_by_signature(attrs=newattrs)
# Release Events
def release(
self,
sbom: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
latest_sbom: Optional[dict] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
# sbom_name: str,
# sbom_description: str,
# sbom_hash: str,
# sbom_version: str,
# sbom_author: str,
# sbom_supplier: str,
# sbom_uuid: str,
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
if latest_sbom is None:
latest_sbom = sbom
attrs = {
"arc_description": sbom["description"],
"arc_evidence": "Release",
"arc_display_type": "Release",
"sbom_component": sbom["name"],
"sbom_hash": sbom["hash"],
"sbom_version": sbom["version"],
"sbom_author": sbom["author"],
"sbom_supplier": sbom["supplier"],
"sbom_uuid": sbom["uuid"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"arc_display_name": latest_sbom["name"],
"sbom_component": latest_sbom["name"],
"sbom_hash": latest_sbom["hash"],
"sbom_version": latest_sbom["version"],
"sbom_author": latest_sbom["author"],
"sbom_supplier": latest_sbom["supplier"],
"sbom_uuid": latest_sbom["uuid"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self.asset["identity"],
props=props,
attrs=attrs,
asset_attrs=asset_attrs,
confirm=True,
)
def release_plan(
self,
sbom_planned: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_planned["description"],
"arc_evidence": "Release Plan",
"arc_display_type": "Release Plan",
"sbom_planned_date": sbom_planned["date"],
"sbom_planned_captain": sbom_planned["captain"],
"sbom_planned_component": sbom_planned["name"],
"sbom_planned_version": sbom_planned["version"],
"sbom_planned_reference": sbom_planned["reference"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def release_accepted(
self,
sbom_accepted: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_accepted["description"],
"arc_evidence": "Release Accepted",
"arc_display_type": "Release Accepted",
"sbom_accepted_date": sbom_accepted["date"],
"sbom_accepted_captain": sbom_accepted["captain"],
"sbom_accepted_component": sbom_accepted["name"],
"sbom_accepted_version": sbom_accepted["version"],
"sbom_accepted_approver": sbom_accepted["approver"],
"sbom_accepted_vuln_reference": sbom_accepted["reference"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
# Patch Events
def patch(
self,
sbom_patch: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_patch["description"],
"arc_evidence": "Patch",
"arc_display_type": "Patch",
"sbom_patch_component": sbom_patch["target_component"],
"sbom_patch_hash": sbom_patch["hash"],
"sbom_patch_target_version": sbom_patch["target_version"],
"sbom_patch_author": sbom_patch["author"],
"sbom_patch_supplier": sbom_patch["supplier"],
"sbom_patch_uuid": sbom_patch["uuid"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def private_patch(
self,
sbom_patch: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_patch["description"],
"arc_evidence": sbom_patch["private_id"] + "_Patch",
"arc_display_type": sbom_patch["private_id"] + "_Patch",
"sbom_patch_component": sbom_patch["target_component"],
"sbom_patch_hash": sbom_patch["hash"],
"sbom_patch_version": sbom_patch["target_version"],
"sbom_patch_author": sbom_patch["author"],
"sbom_patch_supplier": sbom_patch["supplier"],
"sbom_patch_uuid": sbom_patch["uuid"],
"sbom_patch_vuln_reference": sbom_patch["reference"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
# Vulnerability Events
def vuln_disclosure(
self,
vuln: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict],
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": vuln["description"],
"arc_evidence": "Vulnerability Disclosure",
"arc_display_type": "Vulnerability Disclosure",
"vuln_name": vuln["name"],
"vuln_reference": vuln["reference"],
"vuln_id": vuln["id"],
"vuln_category": vuln["category"],
"vuln_severity": vuln["severity"],
"vuln_status": vuln["status"],
"vuln_author": vuln["author"],
"vuln_target_component": vuln["target_component"],
"vuln_target_version": vuln["target_version"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def vuln_update(
self,
vuln: dict,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": vuln["description"],
"arc_evidence": "Vulnerability Update",
"arc_display_type": "Vulnerability Update",
"vuln_name": vuln["name"],
"vuln_reference": vuln["reference"],
"vuln_id": vuln["id"],
"vuln_category": vuln["category"],
"vuln_severity": vuln["severity"],
"vuln_status": vuln["status"],
"vuln_author": vuln["author"],
"vuln_target_component": vuln["target_component"],
"vuln_target_version": vuln["target_version"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
# EOL/Deprecation
def deprecation(
self,
sbom_eol: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_eol["description"],
"arc_evidence": "Deprecation",
"arc_display_type": "Deprecation",
"sbom_eol_target_component": sbom_eol["target_component"],
"sbom_eol_target_version": sbom_eol["target_version"],
"sbom_eol_target_uuid": sbom_eol["target_uuid"],
"sbom_eol_target_date": sbom_eol["target_date"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
) | /rkvst-samples-0.12.1.tar.gz/rkvst-samples-0.12.1/archivist_samples/software_bill_of_materials/software_package.py | 0.696062 | 0.232931 | software_package.py | pypi |
from importlib import resources
import logging
from sys import exit as sys_exit
from typing import List, Optional
from archivist import archivist as type_helper
from ..testing.assets import make_assets_create, AttachmentDescription
from . import sbom_files
LOGGER = logging.getLogger(__name__)
def attachment_create(arch, attachment_description: AttachmentDescription):
    """Upload one packaged sample file and return its attachment attributes.

    Args:
        arch: Archivist client used to upload the blob.
        attachment_description: (filename, attribute_name) pair naming a
            resource bundled in ``sbom_files``.
    """
    LOGGER.info("sbom attachment creator: %s", attachment_description.filename)
    with resources.open_binary(sbom_files, attachment_description.filename) as fd:
        blob = arch.attachments.upload(fd)
    return {
        "arc_attribute_type": "arc_attachment",
        "arc_blob_identity": blob["identity"],
        "arc_blob_hash_alg": blob["hash"]["alg"],
        "arc_blob_hash_value": blob["hash"]["value"],
        "arc_display_name": attachment_description.attribute_name,
        "arc_file_name": attachment_description.filename,
    }
# Asset factory wired with our uploader; creates (or finds) the SBOM asset
# and waits for confirmation before returning.
sboms_creator = make_assets_create(attachment_creator=attachment_create, confirm=True)
class SoftwarePackageDocument:
    """Wrapper around a Document-profile 'Software Package' Archivist asset."""

    # Attribute key/value used by the asset factory to find a pre-existing
    # onboarding sample asset instead of creating a duplicate.
    selector_key = "OnboardingSampleID"
    selector_value = "SBOM"
    def __init__(self, arch: "type_helper.Archivist"):
        self._arch = arch  # Archivist client used for all API calls
        self._asset = None  # set by create()/read()/read_by_signature()
        self._existed = False  # True when create() found a pre-existing asset
    @property
    def arch(self):
        """The underlying Archivist client."""
        return self._arch
    @property
    def asset(self):
        """The wrapped asset record (None until created or read)."""
        return self._asset
    @property
    def existed(self):
        """Whether create() reused an existing asset."""
        return self._existed
# Asset Creation
def create(
self,
sbom_name: str,
sbom_description: str,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
attrs = {
"arc_description": sbom_description,
"arc_profile": "Document",
"arc_display_type": "Software Package",
}
if custom_attrs is not None:
attrs.update(custom_attrs)
self._asset, self._existed = sboms_creator(
self._arch,
sbom_name,
attrs,
attachments=attachments,
selector_key=self.selector_key,
selector_value=self.selector_value,
)
return self._asset
# Asset load by unique identity
    def read(
        self,
        identity: str,
    ):
        """Load the asset with the given Archivist identity string."""
        self._asset = self.arch.assets.read(identity)
# Asset load by attribute(s)
def read_by_signature(
self,
attributes: Optional[dict],
):
# Hard-wire the Asset type
newattrs = attributes.copy()
newattrs["arc_display_type"] = "Software Package"
# Note: underlying Archivist will raise ArchivistNotFoundError or
# ArchivistDuplicateError unless this set of attributes points to
# a single unique asset
self._asset = self.arch.assets.read_by_signature(attrs=newattrs)
# Release Events
def publish(
self,
document: dict,
sbom: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
latest_sbom: Optional[dict] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
# sbom_name: str,
# sbom_description: str,
# sbom_hash: str,
# sbom_version: str,
# sbom_author: str,
# sbom_supplier: str,
# sbom_uuid: str,
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
if latest_sbom is None:
latest_sbom = sbom
author_email = f"{sbom['author']}.com"
attrs = {
"arc_description": sbom["description"],
"arc_evidence": "Release",
"arc_display_type": "Publish",
"sbom_component": sbom["name"],
"sbom_hash": sbom["hash"],
"sbom_version": sbom["version"],
"sbom_author": sbom["author"],
"sbom_supplier": sbom["supplier"],
"sbom_uuid": sbom["uuid"],
"document_version_authors": [
{"display_name": sbom["author"], "email": author_email}
],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"arc_display_name": latest_sbom["name"],
"document_document": document,
"document_hash_value": latest_sbom["hash"],
"document_hash_alg": "sha256",
"document_version": latest_sbom["version"],
"document_status": "Published",
"sbom_component": latest_sbom["name"],
"sbom_version": latest_sbom["version"],
"sbom_author": latest_sbom["author"],
"sbom_hash": latest_sbom["hash"],
"sbom_supplier": latest_sbom["supplier"],
"sbom_uuid": latest_sbom["uuid"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self.asset["identity"],
props=props,
attrs=attrs,
asset_attrs=asset_attrs,
confirm=True,
)
def release_plan(
self,
sbom_planned: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_planned["description"],
"arc_evidence": "Release Plan",
"arc_display_type": "Release Plan",
"sbom_planned_date": sbom_planned["date"],
"sbom_planned_component": sbom_planned["name"],
"sbom_planned_version": sbom_planned["version"],
"sbom_planned_reference": sbom_planned["reference"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def release_accepted(
self,
sbom_accepted: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_accepted["description"],
"arc_evidence": "Release Accepted",
"arc_display_type": "Release Accepted",
"sbom_accepted_date": sbom_accepted["date"],
"sbom_accepted_component": sbom_accepted["name"],
"sbom_accepted_version": sbom_accepted["version"],
"sbom_accepted_vuln_reference": sbom_accepted["reference"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
# Patch Events
def patch(
self,
sbom_patch: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_patch["description"],
"arc_evidence": "Patch",
"arc_display_type": "Patch",
"sbom_patch_component": sbom_patch["target_component"],
"sbom_patch_hash": sbom_patch["hash"],
"sbom_patch_target_version": sbom_patch["target_version"],
"sbom_patch_author": sbom_patch["author"],
"sbom_patch_supplier": sbom_patch["supplier"],
"sbom_patch_uuid": sbom_patch["uuid"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def private_patch(
self,
sbom_patch: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_patch["description"],
"arc_evidence": sbom_patch["private_id"] + "_Patch",
"arc_display_type": sbom_patch["private_id"] + "_Patch",
"sbom_patch_component": sbom_patch["target_component"],
"sbom_patch_hash": sbom_patch["hash"],
"sbom_patch_version": sbom_patch["target_version"],
"sbom_patch_author": sbom_patch["author"],
"sbom_patch_supplier": sbom_patch["supplier"],
"sbom_patch_uuid": sbom_patch["uuid"],
"sbom_patch_vuln_reference": sbom_patch["reference"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
# Vulnerability Events
def vuln_disclosure(
self,
vuln: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict],
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": vuln["description"],
"arc_evidence": "Vulnerability Disclosure",
"arc_display_type": "Vulnerability Disclosure",
"vuln_name": vuln["name"],
"vuln_reference": vuln["reference"],
"vuln_id": vuln["id"],
"vuln_category": vuln["category"],
"vuln_severity": vuln["severity"],
"vuln_status": vuln["status"],
"vuln_author": vuln["author"],
"vuln_target_component": vuln["target_component"],
"vuln_target_version": vuln["target_version"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def vuln_update(
self,
vuln: dict,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": vuln["description"],
"arc_evidence": "Vulnerability Update",
"arc_display_type": "Vulnerability Update",
"vuln_name": vuln["name"],
"vuln_reference": vuln["reference"],
"vuln_id": vuln["id"],
"vuln_category": vuln["category"],
"vuln_severity": vuln["severity"],
"vuln_status": vuln["status"],
"vuln_author": vuln["author"],
"vuln_target_component": vuln["target_component"],
"vuln_target_version": vuln["target_version"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
# EOL/Deprecation
def deprecation(
self,
sbom_eol: dict,
*,
attachments: Optional[List[AttachmentDescription]] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_eol["description"],
"arc_evidence": "Deprecation",
"arc_display_type": "Deprecation",
"sbom_eol_target_component": sbom_eol["target_component"],
"sbom_eol_target_version": sbom_eol["target_version"],
"sbom_eol_target_uuid": sbom_eol["target_uuid"],
"sbom_eol_target_date": sbom_eol["target_date"],
}
if attachments:
for attachment in attachments:
attrs[attachment.attribute_name] = attachment_create(
self.arch, attachment
)
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
) | /rkvst-samples-0.12.1.tar.gz/rkvst-samples-0.12.1/archivist_samples/sbom_document/software_package.py | 0.673406 | 0.229557 | software_package.py | pypi |
# pylint: disable=missing-docstring
import logging
from ..testing.assets import assets_create_if_not_exists
from .util import asset_attachment_upload_from_file
LOGGER = logging.getLogger(__name__)
def initialise_asset_types(ac):
    """Upload one illustrative image per smart-city device type.

    Returns a dict mapping display type -> uploaded attachment record,
    suitable for passing to create_smartcity_device.
    """
    image_by_type = {
        "Outdoor security camera": "outdoor_cctv.jpg",
        "Traffic light with violation camera": (
            "traffic_light_with_violation_camera.jpg"
        ),
        "Traffic light": "traffic_light.jpg",
        "Street light controller": "street_light_controller.jpg",
        "Outdoor air quality meter": "outdoor_air_quality_meter.jpg",
    }
    type_map = {
        display_type: asset_attachment_upload_from_file(ac, filename, "image/jpg")
        for display_type, filename in image_by_type.items()
    }
    LOGGER.debug(type_map)
    return type_map
def create_smartcity_device(
    ac, locationid, displayname, displaytype, serial, description, image
):
    """Create (or fetch) one smart-city device asset tied to a location.

    ``image`` is an attachment record as returned by
    asset_attachment_upload_from_file; it becomes the primary image.
    """
    device_attrs = {
        "arc_firmware_version": "1.0",
        "arc_serial_number": serial,
        "arc_display_name": displayname,
        "arc_description": description,
        "arc_home_location_identity": locationid,
        "arc_display_type": displaytype,
        "arc_primary_image": {
            "arc_attribute_type": "arc_attachment",
            "arc_blob_identity": image["identity"],
            "arc_blob_hash_alg": image["hash"]["alg"],
            "arc_blob_hash_value": image["hash"]["value"],
        },
    }
    newasset = assets_create_if_not_exists(ac, device_attrs)
    LOGGER.debug(newasset)
    return newasset
def create_newmarketroad_roundabout(ac, asset_types):
    """Create the Newmarket Road Roundabout location and its devices.

    The roundabout has:
    - 4-way traffic lights with red light violation cameras
    - 2 general CCTV stands
    - 2 street light controllers

    Fixes: the header comment wrongly said "Parkside junction" (copy-paste),
    and eight near-identical call blocks are replaced by a device table.
    """
    props = {
        "display_name": "Newmarket Road Roundabout",
        "description": (
            "Circulatory intersection between Newmarket " "Road and East Road"
        ),
        "latitude": 52.208479,
        "longitude": 0.137648,
    }
    attrs = {
        "intersection_type": "roundabout",
    }
    newlocation = ac.locations.create(props, attrs=attrs)
    LOGGER.debug(newlocation)
    location_identity = newlocation["identity"]
    # (display name, display type, serial, description) per device
    devices = [
        ("tcl.nmr.n01", "Traffic light with violation camera", "vtl-x4-01",
         "Traffic flow control light at Newmarket Road East entrance"),
        ("tcl.nmr.002", "Traffic light with violation camera", "vtl-x4-02",
         "Traffic flow control light at A1134 West entrance"),
        ("tcl.nmr.003", "Traffic light with violation camera", "vtl-x4-03",
         "Traffic flow control light at A603 South entrance"),
        ("tcl.nmr.004", "Traffic light with violation camera", "vtl-x4-04",
         "Traffic flow control light at A1134 North entrance"),
        ("cctv-01-01", "Outdoor security camera", "gmr-123-01",
         "East-facing camera surveying Newmarket Road"),
        ("cctv-01-02", "Outdoor security camera", "gmr-123-02",
         "West-facing camera surveying East Road"),
        ("lighting.street.22c022", "Street light controller", "ssl-a4l-01",
         "Street light controller for column ID 22c022"),
        ("lighting.street.22c023", "Street light controller", "ssl-a4l-02",
         "Street light controller for column ID 22c023"),
    ]
    for displayname, displaytype, serial, description in devices:
        create_smartcity_device(
            ac,
            location_identity,
            displayname,
            displaytype,
            serial,
            description,
            asset_types[displaytype],
        )
def create_parkside_junction(ac, asset_types):
    """Create the Parkside Junction location and its devices.

    Parkside junction has:
    - 4-way traffic lights with red light violation cameras
    - 1 general CCTV stand
    - 1 street light controller

    Fix: six near-identical call blocks replaced by a device table.
    """
    props = {
        "display_name": "Parkside Junction",
        "description": "Box intersection between Mill Road and East Road",
        "latitude": 52.202502,
        "longitude": 0.131148,
    }
    attrs = {
        "intersection_type": "box",
    }
    newlocation = ac.locations.create(props, attrs=attrs)
    location_identity = newlocation["identity"]
    # (display name, display type, serial, description) per device
    devices = [
        ("tcl.ppj.n01", "Traffic light with violation camera", "vtl-x4-05",
         "Traffic flow control light at Mill Road South East"),
        ("tcl.ppj.002", "Traffic light with violation camera", "vtl-x4-06",
         "Traffic flow control light at Parkside North West"),
        ("tcl.ppj.003", "Traffic light with violation camera", "vtl-x4-07",
         "Traffic flow control light at A603 North East"),
        ("tcl.ppj.004", "Traffic light with violation camera", "vtl-x4-08",
         "Traffic flow control light at A603 South West"),
        ("cctv-02-01", "Outdoor security camera", "gmr-123-03",
         "Camera surveying the skate park"),
        ("lighting.street.22c010", "Street light controller", "ssl-a4l-03",
         "Street light controller for column ID 22c010"),
    ]
    for displayname, displaytype, serial, description in devices:
        create_smartcity_device(
            ac,
            location_identity,
            displayname,
            displaytype,
            serial,
            description,
            asset_types[displaytype],
        )
def create_drummerstreet_terminal(ac, asset_types):
    """Create the Drummer Street Bus Terminal location and its devices.

    Drummer Street Bus Terminal has:
    - 1 traffic light
    - 4 general CCTV stands
    - 4 street light controllers
    - 1 air quality meter

    Fix: ten near-identical call blocks replaced by a device table.
    """
    props = {
        "display_name": "Drummer Street Terminal",
        "description": "Drummer Street Bus Terminal",
        "latitude": 52.205345,
        "longitude": 0.123922,
    }
    attrs = {
        "intersection_type": "terminal",
    }
    newlocation = ac.locations.create(props, attrs=attrs)
    location_identity = newlocation["identity"]
    # (display name, display type, serial, description) per device
    devices = [
        ("tcl.dst.n01", "Traffic light", "tl-x1-01",
         "Traffic flow control light at terminal entrance"),
        ("cctv-03-01", "Outdoor security camera", "gmr-123-04",
         "South-facing shelter camera"),
        ("cctv-03-02", "Outdoor security camera", "gmr-123-05",
         "North-facing shelter camera"),
        ("cctv-03-03", "Outdoor security camera", "gmr-123-06",
         "Safety camera surveying turning area"),
        # NOTE(review): breaks the cctv-03-* naming pattern of this site;
        # confirm whether "cctv-03-04" was intended. Preserved as-is.
        ("cctv-04-04", "Outdoor security camera", "gmr-123-07",
         "Safety camera surveying public lavatories"),
        ("lighting.street.22c106", "Street light controller", "ssl-a4l-04",
         "Street light controller for column ID 22c106"),
        ("lighting.street.22c108", "Street light controller", "ssl-a4l-05",
         "Street light controller for column ID 22c108"),
        ("lighting.street.22c110", "Street light controller", "ssl-a4l-06",
         "Street light controller for column ID 22c110"),
        ("lighting.street.22c112", "Street light controller", "ssl-a4l-07",
         "Street light controller for column ID 22c112"),
        # NOTE(review): "Pedstrian" typo preserved from the original data
        # string -- fix at source if/when demo data can change.
        ("airqualmet00", "Outdoor air quality meter", "tm-1417-a61",
         "Pedstrian safety air quality meter at Drummer Street bus shelter"),
    ]
    for displayname, displaytype, serial, description in devices:
        create_smartcity_device(
            ac,
            location_identity,
            displayname,
            displaytype,
            serial,
            description,
            asset_types[displaytype],
        )
def create_catholicchurch_junction(ac, asset_types):
    """Create the Catholic Church Junction location and its devices.

    Catholic Church Junction has:
    - 4-way traffic light
    - 1 street light controller
    - 1 air quality monitor

    Fix: six near-identical call blocks replaced by a device table.
    NOTE(review): serials vtl-x4-05..08 duplicate the Parkside Junction
    traffic-light serials -- confirm whether that is intended demo data.
    """
    props = {
        "display_name": "Catholic Church Junction",
        "description": (
            "Junction of Lensfield Road and Hills Road at the "
            "Church of Our Lady and the English Martyrs"
        ),
        "latitude": 52.199308,
        "longitude": 0.127378,
    }
    attrs = {
        "intersection_type": "cross",
    }
    newlocation = ac.locations.create(props, attrs=attrs)
    location_identity = newlocation["identity"]
    # (display name, display type, serial, description) per device
    devices = [
        ("tcl.ccj.001", "Traffic light", "vtl-x4-05",
         "Traffic flow control light at Hills Road South East"),
        ("tcl.ccj.002", "Traffic light", "vtl-x4-06",
         "Traffic flow control light at Regent Street North West"),
        ("tcl.ccj.003", "Traffic light", "vtl-x4-07",
         "Traffic flow control light at A603 North East"),
        ("tcl.ccj.004", "Traffic light", "vtl-x4-08",
         "Traffic flow control light at A603 South West"),
        ("lighting.street.22c045", "Street light controller", "ssl-a4l-08",
         "Street light controller for column ID 22c045"),
        # NOTE(review): "Pedstrian" typo preserved from the original data.
        ("airqualmet01", "Outdoor air quality meter", "tm-1416-a61",
         (
             "Pedstrian safety air quality meter at the Church of "
             "Our Lady and the English Martyrs"
         )),
    ]
    for displayname, displaytype, serial, description in devices:
        create_smartcity_device(
            ac,
            location_identity,
            displayname,
            displaytype,
            serial,
            description,
            asset_types[displaytype],
        )
def initialise_all(ac):
    """Populate the fixed Synsation Services smart-city demo estate."""
    LOGGER.info("Creating data for Synsation Services Smart City...")
    # Unlike the other demos, the smartcity scenario is deterministic:
    # nothing is randomised or configurable; every location and device
    # is planned and fixed in place.
    asset_types = initialise_asset_types(ac)
    for create_site in (
        create_newmarketroad_roundabout,
        create_parkside_junction,
        create_drummerstreet_terminal,
        create_catholicchurch_junction,
    ):
        create_site(ac, asset_types)
    LOGGER.info("Smart City data initialized")
# pylint: disable=missing-docstring
import logging
import random
import string
from ..testing.assets import assets_create_if_not_exists
from .util import asset_attachment_upload_from_file
LOGGER = logging.getLogger(__name__)
def initialise_asset_types(ac):
    """Upload charger images; map display type -> attachment record."""
    image_by_type = {
        "Small EV Charger": "small_ev_charger.jpg",
        "Large EV Charger": "large_ev_charger.jpg",
    }
    type_map = {
        display_type: asset_attachment_upload_from_file(ac, filename, "image/jpg")
        for display_type, filename in image_by_type.items()
    }
    LOGGER.debug(type_map)
    return type_map
def make_charger_location(ac, displayname, description, plat, plong):
    """Create and return a location record for one EV charging point.

    plat/plong may be strings; they are coerced to float for the API.
    """
    return ac.locations.create(
        {
            "display_name": displayname,
            "description": description,
            "latitude": float(plat),
            "longitude": float(plong),
        }
    )
def make_charger_asset(
    ac, displayname, serial, description, image, loc_id, charger_type
):
    """Create (or fetch) one EV charger asset homed at the given location."""
    charger_attrs = {
        "arc_firmware_version": "1.0",
        "arc_serial_number": serial,
        "arc_display_name": displayname,
        "arc_description": description,
        "arc_home_location_identity": loc_id,
        "arc_display_type": "EV charging station",
        "synsation_ev_charger_type": charger_type,
        "arc_primary_image": {
            "arc_display_name": "arc_primary_image",
            "arc_attribute_type": "arc_attachment",
            "arc_blob_identity": image["identity"],
            "arc_blob_hash_alg": image["hash"]["alg"],
            "arc_blob_hash_value": image["hash"]["value"],
        },
    }
    return assets_create_if_not_exists(ac, charger_attrs)
def create_charging_stations(ac, stations, airport_code, charger_type, attachment):
    """Create a location plus a charger asset for each station coordinate."""
    # One random serial stem shared by every charger in this batch.
    serial_stem = "".join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(8)
    )
    for position, station in enumerate(stations):
        # NOTE(review): the display name repeats the airport code twice;
        # preserved as-is -- confirm whether "{code}-{position}" was intended.
        displayname = f"{airport_code}-{airport_code}-{position}"
        description = (
            f"{charger_type} charging station at {airport_code}, "
            f"position {position}"
        )
        newlocation = make_charger_location(
            ac, displayname, description, station[0], station[1]
        )
        make_charger_asset(
            ac,
            displayname,
            f"evc-{serial_stem}-{position}",
            description,
            attachment,
            newlocation["identity"],
            charger_type,
        )
def initialise_all(ac):
    """Create EV charger locations and assets at each demo airport.

    Fix: five repeated stations-then-create blocks replaced by a single
    data table iterated once; behaviour and ordering are unchanged.
    """
    asset_types = initialise_asset_types(ac)
    # (IATA code, charger type, [latitude, longitude] station coordinates)
    airports = [
        # San Francisco International
        ("SFO", "Large EV Charger", [
            ["37.635647", "-122.399518"],
            ["37.635536", "-122.399464"],
            ["37.635366", "-122.399389"],
            ["37.635230", "-122.399292"],
            ["37.635089", "-122.399215"],
            ["37.634936", "-122.399140"],
            ["37.634562", "-122.400299"],
            ["37.634825", "-122.400460"],
            ["37.634689", "-122.400374"],
        ]),
        # San Jose
        ("SJC", "Large EV Charger", [
            ["37.362388", "-121.922858"],
            ["37.362264", "-121.922705"],
            ["37.362128", "-121.922576"],
            ["37.362004", "-121.922431"],
            ["37.361873", "-121.922288"],
        ]),
        # JFK
        ("JFK", "Large EV Charger", [
            ["40.661593", "-73.793409"],
            ["40.661567", "-73.792591"],
            ["40.663480", "-73.791990"],
            ["40.663618", "-73.793353"],
        ]),
        # Chicago O'Hare
        ("ORD", "Large EV Charger", [
            ["41.990217", "-87.884960"],
            ["41.990527", "-87.884964"],
            ["41.990539", "-87.884505"],
            ["41.990220", "-87.884505"],
            ["41.990218", "-87.884266"],
            ["41.990527", "-87.884271"],
            ["41.990535", "-87.883828"],
            ["41.990220", "-87.883809"],
        ]),
        # Chicago Midway
        ("MDW", "Small EV Charger", [
            ["41.778129", "-87.749422"],
            ["41.777948", "-87.749397"],
        ]),
    ]
    for code, charger_type, stations in airports:
        create_charging_stations(
            ac, stations, code, charger_type, asset_types[charger_type]
        )
    LOGGER.info("Synsation Industries EV charger data initialized")
# pylint: disable=missing-docstring
# pylint: disable=too-many-arguments
import string
import random
from ..testing.assets import make_assets_create, AttachmentDescription
from .util import asset_attachment_upload_from_file
def attachment_create(arch, attachment_description: AttachmentDescription):
    """Upload an image and return its RKVST attachment attribute dict."""
    uploaded = asset_attachment_upload_from_file(
        arch, attachment_description.filename, "image/jpg"
    )
    return {
        "arc_attribute_type": "arc_attachment",
        "arc_blob_identity": uploaded["identity"],
        "arc_blob_hash_alg": uploaded["hash"]["alg"],
        "arc_blob_hash_value": uploaded["hash"]["value"],
        "arc_file_name": attachment_description.filename,
    }
# Factory bound once at import time: creates shipping-crate assets, using
# attachment_create for image uploads and waiting for confirmation.
crates_creator = make_assets_create(attachment_creator=attachment_create, confirm=True)
def initialise_asset_types():
    """Map each crate asset type to its illustrative image filename."""
    return {"Shipping Crate": "crate.jpg"}
def create_locations():
    """Return the fixed Synsation manufacturing sites, keyed by display name.

    Each value holds the ``props``/``attrs`` dicts expected by the
    Archivist locations API.
    """
    # According to wikipedia, the canonical location of
    # Flint, Michigan is 43° 1′ 8″ N, 83° 41′ 36″ W
    flint = "Flint Manufacturing Center"
    # According to wikipedia, the canonical location of
    # Stuttgart is 48° 47′ 0″ N, 9° 11′ 0″ E
    stuttgart = "Stuttgart Finishing Plant"
    return {
        flint: {
            "props": {
                "display_name": flint,
                "description": "Global Headquarters",
                "latitude": 43.018889,
                "longitude": -83.693333,
            },
            "attrs": {
                "address": "Flint 48501, Michigan",
                "Facility Type": "Manufacturing",
                "reception_email": "reception_FM@synsation.io",
                "reception_phone": "+1 (810) 123-4567",
            },
        },
        stuttgart: {
            "props": {
                "display_name": stuttgart,
                "description": "European Distribution Center",
                "latitude": 48.783333,
                "longitude": 9.183333,
            },
            "attrs": {
                "address": "70173 Stuttgart, Germany",
                "Facility Type": "Manufacturing",
                "reception_email": "reception_ST@synsation.io",
                "reception_phone": "+49 (711) 123-456",
            },
        },
    }
def create_shipping_crate(
    arch, name, serial, description, track_id, image, loc_id, capacity
):
    """Create (or fetch) a widget shipping-crate asset.

    Returns the (asset, existed) pair produced by crates_creator.
    """
    crate_attrs = {
        "arc_firmware_version": "1.0",
        "arc_serial_number": serial,
        "arc_description": description,
        "arc_display_type": "Widget shipping crate",
        "synsation_crate_tracking_id": track_id,
        "synsation_crate_capacity": capacity,
    }
    return crates_creator(
        arch,
        name,
        crate_attrs,
        location=loc_id,
        attachments=[
            AttachmentDescription(image, "arc_primary_image"),
        ],
    )
def initialise_all(arch):
    """Create the single demo shipping crate for the mobile-assets use case."""
    asset_types = initialise_asset_types()
    manufacturing_locations = create_locations()
    alphanum = string.ascii_lowercase + string.digits
    # Random tracking/batch identifiers; the random-call order matches the
    # original implementation exactly.
    tracking_id = "FLINT-" + "".join(random.choice(alphanum) for _ in range(12))
    batch_num = "2019x" + "".join(random.choice(string.digits) for _ in range(8))
    return create_shipping_crate(
        arch,
        "Flint-SMC Shipping Crate",
        batch_num,
        f"Small crate for batch {batch_num}, capacity 500",
        tracking_id,
        asset_types["Shipping Crate"],
        manufacturing_locations["Flint Manufacturing Center"],
        "500",
    )
# pylint: disable=missing-docstring
# pylint: disable=logging-fstring-interpolation
from datetime import datetime, timezone
import logging
from sys import exit as sys_exit
from sys import stdout as sys_stdout
from archivist import about
from archivist.timestamp import parse_timestamp
from ..testing.archivist_parser import common_parser
from ..testing.asset import (
MAINTENANCE_PERFORMED,
MAINTENANCE_REQUEST,
VULNERABILITY_ADDRESSED,
VULNERABILITY_REPORT,
)
from ..testing.parser import common_endpoint
LOGGER = logging.getLogger(__name__)
def analyze_matched_pairs(label, p1, p2, events):
    """Report completed vs outstanding request/response event pairs.

    ``events`` maps event type -> {correlation_value: event}.  Pairs are
    matched on correlation value between the ``p1`` (request) and ``p2``
    (response) buckets.
    """
    if p1 not in events or p2 not in events:
        LOGGER.info(f"There are NO {label} events to analyse")
        return

    requests = events[p1]
    responses = events[p2]
    matched = set(requests).intersection(responses)
    unmatched = set(requests).difference(responses)

    LOGGER.info(f"There are {len(matched)} completed {label} events")
    for cv in matched:
        # Response time = response timestamp - request timestamp
        time_req = parse_timestamp(requests[cv]["timestamp_declared"])
        time_resp = parse_timestamp(responses[cv]["timestamp_declared"])
        LOGGER.info(f" --> Response time: {time_resp - time_req}")

    LOGGER.info(
        f"There are {len(unmatched)} uncompleted {label} events outstanding"
    )
    now = datetime.now(timezone.utc)
    for cv in unmatched:
        # How long has this request been waiting for its response?
        time_req = parse_timestamp(requests[cv]["timestamp_declared"])
        LOGGER.info(f" --> Outstanding for {now - time_req}")
def analyze_asset(conn, asset):
    """Summarise maintenance/vulnerability responsiveness for one asset.

    Logs the asset's headline details, then buckets its events by
    arc_display_type and arc_correlation_value and reports matched and
    outstanding request/response pairs via analyze_matched_pairs.
    """
    # Fetch basic asset info. If any of these fields is missing it's fatal...
    try:
        aid = asset["identity"]
        attrs = asset["attributes"]
        aname = attrs["arc_display_name"]
        atype = attrs["arc_display_type"]
        aversion = attrs["arc_firmware_version"]
        aserial = attrs["arc_serial_number"]
        adesc = attrs["arc_description"]
    except KeyError:
        # Some devices won't have this property. Just ignore failures.
        LOGGER.error("Malformed Asset.")
        return
    LOGGER.info("<---------------------------------------->")
    LOGGER.info(f"Analyzing {atype} '{aname}' (serial # {aserial})")
    LOGGER.info(f'"{adesc}"')
    LOGGER.info(f"Current Firmware Version: {aversion}")
    # Get all the events for this device; bail early if there are none.
    number_of_events = conn.events.count(asset_id=aid)
    if number_of_events == 0:
        LOGGER.debug("No events found for asset")
        LOGGER.info("No events to analyse.")
        return
    allevents = conn.events.list(asset_id=aid)
    # Sort the events into paired buckets that we care about, keyed on
    # the events' "correlation_value". Only works for unique pairs of
    # correlation values, which is the suggested convention but not
    # enforced by Archivist services
    sortedevents = {}
    for event in allevents:
        try:
            etype = event["event_attributes"]["arc_display_type"]
            corval = event["event_attributes"]["arc_correlation_value"]
        except KeyError:
            # Events without a type/correlation value cannot be paired.
            LOGGER.debug("Couldn't get essential info for this event.")
            continue
        if etype not in sortedevents:
            sortedevents[etype] = {}
        sortedevents[etype][corval] = event
    # Now we've got them all we can do the analysis
    # + Which events weren't fixed at all?
    # + For events that were fixed, how long did it take?
    # maintenance events
    analyze_matched_pairs(
        "maintenance", MAINTENANCE_REQUEST, MAINTENANCE_PERFORMED, sortedevents
    )
    # vulnerability events
    analyze_matched_pairs(
        "firmware", VULNERABILITY_REPORT, VULNERABILITY_ADDRESSED, sortedevents
    )
    # Summarize TBD
    LOGGER.info("---")
def run(arch, args):
    """Analyse every asset visible on the connection, then exit(0)."""
    LOGGER.info("Using version %s of rkvst-archivist", about.__version__)
    LOGGER.info("Fetching use case test assets namespace %s", args.namespace)
    # NOTE(review): args.namespace is logged but not used to filter the
    # listing below -- confirm whether assets.list should be scoped to it.
    for asset in arch.assets.list():
        analyze_asset(arch, asset)
    LOGGER.info("Done.")
    sys_exit(0)
def entry():
    """Command-line entry point for the analyze sample."""
    parser = common_parser("Checks maintenance and update performance for assets")
    # NOTE(review): the help text is missing its closing parenthesis.
    parser.add_argument(
        "--namespace",
        type=str,
        dest="namespace",
        action="store",
        default="synsation",
        help="namespace of item population (to enable parallel demos",
    )
    args = parser.parse_args()
    arch = common_endpoint("synsation", args)
    run(arch, args)
    # NOTE(review): run() always calls sys_exit(0), so the two lines below
    # are unreachable dead code -- confirm and remove or restructure.
    parser.print_help(sys_stdout)
    sys_exit(1)
# pylint: disable=missing-docstring
import logging
import random
import time
from ..testing.assets import make_assets_create, AttachmentDescription
from .util import (
asset_attachment_upload_from_file,
locations_from_yaml_file,
)
LOGGER = logging.getLogger(__name__)
def attachment_create(arch, attachment_description: AttachmentDescription):
    """Upload an image and return its RKVST attachment attribute dict."""
    uploaded = asset_attachment_upload_from_file(
        arch, attachment_description.filename, "image/jpg"
    )
    return {
        "arc_attribute_type": "arc_attachment",
        "arc_blob_identity": uploaded["identity"],
        "arc_blob_hash_alg": uploaded["hash"]["alg"],
        "arc_blob_hash_value": uploaded["hash"]["value"],
        "arc_file_name": attachment_description.filename,
    }
# Factory bound once at import time: creates machine assets, using
# attachment_create for image uploads; does not wait for confirmation.
machines_creator = make_assets_create(
    attachment_creator=attachment_create, confirm=False
)
def initialise_asset_types():
    """Map each demo device type to its illustrative image filename."""
    type_map = {
        "Multifunction Printer": "multifunction_printer.jpg",
        "Connected Coffee Machine": "coffee_machine.jpg",
        "Security Camera": "black_cctv.jpg",
    }
    LOGGER.debug(type_map)
    return type_map
def create_locations():
    """Load the corporation's site definitions from bundled YAML files.

    Returns a dict keyed on each location's display name.
    """
    corporation_locations = {}
    for yaml_name in (
        "grayslake.yaml",
        "baltimore.yaml",
        "european.yaml",
        "asia.yaml",
        "za.yaml",
    ):
        newlocation = locations_from_yaml_file(yaml_name)
        corporation_locations[newlocation["props"]["display_name"]] = newlocation
    LOGGER.debug(corporation_locations)
    return corporation_locations
def create_assets(arch, asset_types, locations, num_assets, timedelay):
    """Create num_assets random devices, sleeping timedelay seconds between each.

    Returns a dict mapping display name -> asset identity.
    """
    corporation_assets = {}
    for i in range(num_assets):
        # Pick a random device type; derive a filesystem-safe display name.
        displaytype = random.choice(list(asset_types))
        safetype = displaytype.replace(" ", "").lower()
        displayname = f"synsation.assets.{safetype}_{i}"
        description = (
            f"This is my {displaytype}. There are many like it, "
            f"but this one is #{i}"
        )
        # Re-roll until we land on any location other than the reserved one.
        # NOTE(review): loops forever if Cape Town is the only location.
        location = "Cape Town"  # reserved location
        while location == "Cape Town":
            location = random.choice(list(locations))
        location = locations[location]
        newasset, _ = machines_creator(
            arch,
            displayname,
            {
                "arc_description": description,
                "arc_firmware_version": "1.0",
                "arc_serial_number": "f867662g.1",
                "arc_display_type": displaytype,
            },
            location=location,
            attachments=[
                AttachmentDescription(asset_types[displaytype], "arc_primary_image"),
            ],
        )
        corporation_assets[displayname] = newasset["identity"]
        time.sleep(timedelay)
    LOGGER.debug(corporation_assets)
    return corporation_assets
def initialise_all(ac, num_assets, timedelay):
    """Create demo asset types, locations and assets for Synsation Corp."""
    LOGGER.info("Creating data for Synsation Corporation...")
    type_map = initialise_asset_types()
    location_map = create_locations()
    created = create_assets(ac, type_map, location_map, num_assets, timedelay)
    LOGGER.info(
        "%d assets of %d different types created across %d locations.",
        len(created),
        len(type_map),
        len(location_map),
    )
from importlib import resources
import logging
from copy import copy
from typing import Optional
# pylint:disable=unused-import # To prevent cyclical import errors forward referencing is used
# pylint:disable=cyclic-import # but pylint doesn't understand this feature
from archivist import archivist as type_helper
from ..testing.assets import make_assets_create, AttachmentDescription
from . import wipp_files
LOGGER = logging.getLogger(__name__)
def upload_attachment(arch, attachment_description: AttachmentDescription):
    """Upload a wipp_files resource and describe it as an event attachment.

    NOTE(review): importlib.resources.open_binary is deprecated since
    Python 3.11 in favour of resources.files(...).open("rb"); kept here
    to preserve behaviour on older interpreters.
    """
    with resources.open_binary(wipp_files, attachment_description.filename) as fd:
        blob = arch.attachments.upload(fd)
    return {
        # sample-specific attr to relay attachment name
        "rkvst_samples_display_name": attachment_description.attribute_name,
        "arc_file_name": attachment_description.filename,
        "arc_attribute_type": "arc_attachment",
        "arc_blob_identity": blob["identity"],
        "arc_blob_hash_alg": blob["hash"]["alg"],
        "arc_blob_hash_value": blob["hash"]["value"],
    }
def attachment_create(arch, attachment_description: AttachmentDescription):
    """Upload a wipp_files resource and return asset-attachment attributes.

    Unlike upload_attachment above, the name is relayed via the standard
    arc_display_name attribute rather than the sample-specific key.
    """
    with resources.open_binary(wipp_files, attachment_description.filename) as fd:
        blob = arch.attachments.upload(fd)
    return {
        "arc_attribute_type": "arc_attachment",
        "arc_blob_identity": blob["identity"],
        "arc_blob_hash_alg": blob["hash"]["alg"],
        "arc_blob_hash_value": blob["hash"]["value"],
        "arc_display_name": attachment_description.attribute_name,
        "arc_file_name": attachment_description.filename,
    }
# Factory bound once at import time: creates WIPP drum assets, using
# attachment_create for image uploads and waiting for confirmation.
wipp_creator = make_assets_create(attachment_creator=attachment_create, confirm=True)
class Wipp:
def __init__(
self,
arch: "type_helper.Archivist",
display_type: str,
):
arch_ = copy(arch)
arch_.fixtures = {
"assets": {
"attributes": {
"arc_display_type": display_type,
},
},
}
self._arch = arch_
self._asset = None
self._existed = False
    @property
    def arch(self):
        """Archivist connection scoped with this drum's asset fixtures."""
        return self._arch

    @property
    def asset(self):
        """The underlying asset record, or None before create()/read()."""
        return self._asset

    @property
    def existed(self):
        """True if create() found a pre-existing asset rather than making one."""
        return self._existed
def create(
self,
name: str,
description: str,
serial: str,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
attrs = {
"arc_description": description,
"arc_serial_number": serial,
}
if custom_attrs is not None:
attrs.update(custom_attrs)
self._asset, self._existed = wipp_creator(
self.arch,
name,
attrs,
attachments=attachments,
)
return self._asset
    # Asset load by unique identity
    def read(
        self,
        identity: str,
    ):
        """Load and remember the asset record with the given identity."""
        self._asset = self._arch.assets.read(identity)
    # Asset load by attribute(s)
    def read_by_signature(
        self,
        attributes: Optional[dict],
    ):
        """Load the single asset uniquely matching the given attributes.

        NOTE(review): the type is hard-wired to "55 gallon drum" here even
        though the constructor accepts an arbitrary display_type -- confirm
        this is intended.
        """
        # Hard-wire the Asset type
        newattrs = attributes.copy()
        newattrs["arc_display_type"] = "55 gallon drum"
        # Note: underlying Archivist will raise ArchivistNotFoundError or
        # ArchivistDuplicateError unless this set of attributes points to
        # a single unique asset
        self._asset = self._arch.assets.read_by_signature(attrs=newattrs)
# Drum Characerize Events
def characterize(
self,
wipp: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_display_type": "WO Characterize",
"arc_description": wipp["description"],
"arc_evidence": "N/A",
}
safe_attachments = attachments or []
for attachment in safe_attachments:
attrs[attachment["rkvst_samples_display_name"]] = attachment
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"wipp_weight": wipp["weight"],
"wipp_a2fraction_characterized": wipp["a2fraction_characterized"],
"wipp_activity_characterized": wipp["activity_characterized"],
"wipp_total_characterized": wipp["total_characterized"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self.asset["identity"],
props=props,
attrs=attrs,
asset_attrs=asset_attrs,
confirm=True,
)
# Tomography Events
def tomography(
self,
wipp_tom: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_display_type": "WO Confirmation",
"arc_description": wipp_tom["description"],
"arc_evidence": "Radiograph attached",
}
safe_attachments = attachments or []
for attachment in safe_attachments:
attrs[attachment["rkvst_samples_display_name"]] = attachment
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"wipp_weight": wipp_tom["weight"],
"wipp_a2fraction_confirmed": wipp_tom["a2fraction_confirmed"],
"wipp_activity_confirmed": wipp_tom["activity_confirmed"],
"wipp_total_confirmed": wipp_tom["total_confirmed"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self.asset["identity"],
props=props,
attrs=attrs,
asset_attrs=asset_attrs,
confirm=True,
)
# Loading Events
def loading(
self,
wipp_load: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_display_type": "WO Loading",
"arc_description": wipp_load["description"],
"arc_evidence": "Loading placement image attached",
}
safe_attachments = attachments or []
for attachment in safe_attachments:
attrs[attachment["rkvst_samples_display_name"]] = attachment
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"wipp_container": wipp_load["container"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self.asset["identity"],
props=props,
attrs=attrs,
asset_attrs=asset_attrs,
confirm=True,
)
# Pre-Shipping Events
def preshipping(
self,
wipp_preship: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_display_type": "WO Preship Inspection",
"arc_description": wipp_preship["description"],
"arc_evidence": "Image attached",
}
safe_attachments = attachments or []
for attachment in safe_attachments:
attrs[attachment["rkvst_samples_display_name"]] = attachment
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self.asset["identity"],
props=props,
attrs=attrs,
confirm=True,
)
# Departure Events
def departure(
self,
wipp_dep: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_display_type": "WO Transit",
"arc_description": wipp_dep["description"],
"arc_evidence": "Routing instructions in attachments",
}
safe_attachments = attachments or []
for attachment in safe_attachments:
attrs[attachment["rkvst_samples_display_name"]] = attachment
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self.asset["identity"],
props=props,
attrs=attrs,
confirm=True,
)
# Waypoint Events
def waypoint(
self,
wipp_way: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_display_type": "WO Transit",
"arc_description": wipp_way["description"],
"arc_evidence": "Signature: 0x1234abcd",
"arc_gis_lat": wipp_way["latitude"],
"arc_gis_lng": wipp_way["longitude"],
}
safe_attachments = attachments or []
for attachment in safe_attachments:
attrs[attachment["rkvst_samples_display_name"]] = attachment
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self.asset["identity"],
props=props,
attrs=attrs,
confirm=True,
)
# Arrival Events
def arrival(
self,
wipp_arr: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_display_type": "WO Transit",
"arc_description": wipp_arr["description"],
"arc_evidence": "Routing instructions in attachments",
}
safe_attachments = attachments or []
for attachment in safe_attachments:
attrs[attachment["rkvst_samples_display_name"]] = attachment
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self.asset["identity"],
props=props,
attrs=attrs,
confirm=True,
)
# Unloading Events
def unloading(
self,
wipp_unload: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_display_type": "WO Unloading",
"arc_description": wipp_unload["description"],
"arc_evidence": "Packing image attached",
}
safe_attachments = attachments or []
for attachment in safe_attachments:
attrs[attachment["rkvst_samples_display_name"]] = attachment
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self.asset["identity"],
props=props,
attrs=attrs,
asset_attrs=asset_attrs,
confirm=True,
)
# Emplacement Events
def emplacement(
self,
wipp_emplace: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_display_type": "WO Emplacement",
"arc_description": wipp_emplace["description"],
"arc_evidence": "Packing image attached",
}
safe_attachments = attachments or []
for attachment in safe_attachments:
attrs[attachment["rkvst_samples_display_name"]] = attachment
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"wipp_emplacement_location": wipp_emplace["location"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self.asset["identity"],
props=props,
attrs=attrs,
asset_attrs=asset_attrs,
confirm=True,
) | /rkvst-samples-0.12.1.tar.gz/rkvst-samples-0.12.1/archivist_samples/wipp/wipp.py | 0.888221 | 0.202423 | wipp.py | pypi |
from abc import ABC, abstractmethod
import numpy as np
class BasePolicy(ABC):
"""
A basic policy for tabular agents.
The policy is a function that maps a state to an action.
"""
@abstractmethod
def __call__(self, q_values):
"""Select an action for the current timestep.
This method returns the action selected by the current policy
Parameters
----------
q_values : numpy.ndarray(float, ndim=1)
Q-value of each action.
Returns
-------
int
Index of chosen action.
"""
@abstractmethod
def update(self):
"""Update the policy.
"""
@abstractmethod
def get_values(self, q_values):
"""Return the probabilities associated with each action.
Parameters
----------
q_values : numpy.ndarray(float, ndim=1)
Q-value of each action.
Returns
-------
numpy.ndarray(float, ndim=1)
Probabilities associated with each action.
"""
class EGreedyPolicy(BasePolicy):
"""Epsilon-greedy policy for tabular agents.
Parameters
----------
epsilon : float
Epsilon parameter to control the exploration-exploitation trade-off.
Attributes
----------
epsilon : float
Exploration-exploitation parameter.
"""
def __init__(self, epsilon):
"""Instantiate a `EGreedyPolicy` object.
Parameters
----------
epsilon : float
Epsilon parameter to control the
exploration-exploitation trade-off.
"""
# Check for valid values:
if (epsilon >= 1) or (epsilon <= 0):
raise ValueError("Invalid value for epsilon, 0 <= epsilon <= 1")
self.epsilon = epsilon
def __call__(self, q_values):
r"""Select an action based on the :math:`\epsilon`-greedy policy.
Parameters
----------
q_values : numpy.ndarray(float, ndim=1)
Line from the Q-Table, corresponding to Q-values
for the chosen state.
Returns
-------
int
Chosen action index
"""
# Explorarion case:
if np.random.rand() < self.epsilon:
a_idx = np.random.randint(low=0, high=q_values.size)
# Exploitation case:
else:
a_idx = np.argmax(q_values)
return a_idx
def update(self):
pass
def get_values(self, q_values):
# Probababilites of exploration:
output = np.ones(q_values.size) * self.epsilon / q_values.size
# Add the exploitation probability:
output[q_values.argmax()] += 1 - self.epsilon
return output
class EDecreasePolicy(EGreedyPolicy):
"""Decreasing epsilon-greedy policy for tabular agents.
Parameters
----------
epsilon : float
Epsilon parameter to control the exploration-exploitation trade-off.
epsilon_min : float
Minimum epsilon acceptable
decay : float
Decay for epsilon: epsilon <- epsilon * decay
Attributes
----------
epsilon_min : float
decay : float
"""
def __init__(self, epsilon, epsilon_min, decay):
super().__init__(epsilon)
self.epsilon_min = epsilon_min
self.decay = decay
# The call method is the same as the epsilon-greedy
def update(self):
if self.epsilon > self.epsilon_min:
if self.epsilon * self.decay > self.epsilon_min:
self.epsilon = self.epsilon * self.decay
else:
self.epsilon = self.epsilon_min
# The get_values method is the same as the epsilon-greedy
class BoltzmanPolicy(BasePolicy):
r"""Boltzman(Softmax) policy for tabular agents.
.. math::
\begin{equation}
\pi\left(s, a\right) = \frac{e^{Q\left(s, a\right) / T}}
{\sum_{i=1}^{m} e^{Q\left(s, a_i\right)/ T}}
\end{equation}
Attributes
----------
temperature : float
Temperature parameter to control the
exploration-exploitation trade-off.
"""
def __init__(self, temperature):
"""Instantiate a `BoltzmanPolicy` object.
Parameters
----------
temperature : float
Temperature parameter to control the
exploration-exploitation trade-off.
"""
# Check for valid values:
if temperature < 0:
raise ValueError("Invalid temperature value, T >= 0")
self.temperature = temperature
def __call__(self, q_values):
r"""Select an action based on the Boltzman policy.
.. math::
\pi\left(s, a\right) = \frac{e^{Q\left(s, a\right) / T}}
{\sum_{i=1}^{m} e^{Q\left(s, a_i\right)/ T}}
Parameters
----------
q_values : numpy.ndarray(float, ndim=1)
Line from the Q-Table, corresponding to Q-values
for the chosen state.
Returns
-------
int
Chosen action index
"""
# Exponential:
e_x = np.exp(q_values / self.temperature)
# Probabilities
p_arms = e_x / e_x.sum()
return np.random.choice(range(q_values.size), p=p_arms)
def update(self):
pass
def get_values(self, q_values):
e_x = np.exp(q_values / self.temperature)
p_arms = e_x / e_x.sum()
return p_arms | /rl-agents-0.1.1.tar.gz/rl-agents-0.1.1/src/rl_agents/agents/policies/tabular_policies.py | 0.952253 | 0.802013 | tabular_policies.py | pypi |
import numpy as np
from rl_agents.agents.mab.base import BaseMAB
class UCB(BaseMAB):
r"""MAB Agent following a Upper Confidence Bound policy.
The UCB selects the action that maximizes the function given by:
.. math:: f(i) = \mu_i + U_i,
where :math:`\mu_i` is the average reward of arm :math:`i`, and
:math:`U_i` is given by:
.. math:: U_i = \sqrt{\frac{-\log{p}}{2 N_i} },
where :math:`N_i` is the number of pulls made to arm :math:`i`.
Parameters
----------
n_arms : int
Number of actions (arms) of the MAB.
p : float
Probability of the true value being above the estimate plus the bound.
Attributes
----------
means : numpy.array(float, ndim=1)
Vector containing the average reward of each arm.
trials : numpy.array(float, ndim=1)
Vector containing the number of trials made to each arm.
bounds : numpy.array(float, ndim=1)
Vector containing the upper bounds of each arm.
t : int
Total trial counter.
"""
def __init__(self, n_arms, p):
self.p = p
self.n_arms = n_arms
self.means = np.zeros(self.n_arms)
self.trials = np.zeros(self.n_arms)
self.bounds = np.zeros(self.n_arms)
self.t = 0
def learn(self, a_idx, reward):
"""Learn from the interaction.
Update the means, the bounds and the trials.
Parameters
----------
reward : float
Reward received from the system after taking action a_idx.
a_idx : int
Index of the arm pulled (action taken).
"""
self.means[a_idx] = (
(self.means[a_idx] * self.trials[a_idx]) + reward
) / (self.trials[a_idx] + 1)
self.trials[a_idx] += 1 # add trial
self.bounds[a_idx] = np.sqrt(-np.log(self.p) / 2 * self.trials[a_idx])
self.t += 1
def predict(self):
"""Predict next action.
Pulls each arm once, then chooses the arm that gives the best
mean + bound.
Returns
-------
int
Index of chosen action.
"""
if self.t < self.n_arms:
return self.t
return np.argmax(self.means + self.bounds)
class UCB1(BaseMAB):
"""Short summary.
Parameters
----------
n_arms : type
Description of parameter `n_arms`.
c : type
Description of parameter `c`.
Attributes
----------
means : type
Description of attribute `means`.
trials : type
Description of attribute `trials`.
bounds : type
Description of attribute `bounds`.
t : type
Description of attribute `t`.
n_arms
c
"""
def __init__(self, n_arms, c=4):
self.n_arms = n_arms
self.means = np.zeros(self.n_arms)
self.trials = np.zeros(self.n_arms)
self.bounds = np.zeros(self.n_arms)
self.c = c
self.t = 0
def learn(self, a_idx, reward):
"""Short summary.
Parameters
----------
a_idx : type
Description of parameter `a_idx`.
reward : type
Description of parameter `reward`.
Returns
-------
type
Description of returned object.
"""
self.means[a_idx] = (
(self.means[a_idx] * self.trials[a_idx]) + reward
) / (self.trials[a_idx] + 1)
self.trials[a_idx] += 1 # add trial
self.t += 1
self.bounds[a_idx] = self.c * np.sqrt(
np.log(self.t) / (self.trials[a_idx])
)
def predict(self):
"""Short summary.
Returns
-------
type
Description of returned object.
"""
if self.t < self.n_arms:
return self.t
return np.argmax(self.means + self.bounds)
class UCB2(BaseMAB):
"""Short summary.
Parameters
----------
n_arms : type
Description of parameter `n_arms`.
alpha : type
Description of parameter `alpha`.
Attributes
----------
means : type
Description of attribute `means`.
trials : type
Description of attribute `trials`.
bounds : type
Description of attribute `bounds`.
rj : type
Description of attribute `rj`.
t : type
Description of attribute `t`.
counter : type
Description of attribute `counter`.
current : type
Description of attribute `current`.
n_arms
alpha
"""
def __init__(self, n_arms, alpha):
self.n_arms = n_arms
self.means = np.zeros(self.n_arms)
self.trials = np.zeros(self.n_arms)
self.bounds = np.zeros(self.n_arms)
self.rj = np.zeros(self.n_arms)
self.alpha = alpha
self.t = 0
self.counter = 0
self.current = 0
def learn(self, a_idx, reward):
"""Short summary.
Parameters
----------
a_idx : type
Description of parameter `a_idx`.
reward : type
Description of parameter `reward`.
Returns
-------
type
Description of returned object.
"""
self.means[a_idx] = (
(self.means[a_idx] * self.trials[a_idx]) + reward
) / (self.trials[a_idx] + 1)
self.trials[a_idx] += 1 # add trial
self.t += 1
tau = self._tau(self.rj[a_idx])
self.bounds[a_idx] = np.sqrt(
(1 + self.alpha) * np.log(np.e * self.t / tau) / (2 * tau)
)
self.counter = self._tau(self.rj[a_idx] + 1) - tau
self.rj[a_idx] += 1
def predict(self):
"""Short summary.
Returns
-------
type
Description of returned object.
"""
if self.t < self.n_arms:
return self.t
if self.counter == 0:
action = np.argmax(self.means + self.bounds)
self.current = action
return action
else:
self.counter -= 1
return self.current
def _tau(self, rj):
return np.ceil((1 + self.alpha) ** rj) | /rl-agents-0.1.1.tar.gz/rl-agents-0.1.1/src/rl_agents/agents/mab/ucbs.py | 0.931416 | 0.621225 | ucbs.py | pypi |
import numpy as np
from rl_agents.agents.mab.base import BaseMAB
class EpsilonGreedy(BaseMAB):
r"""Epsilon-Greedy agent.
The agent uses the epsilon-greedy approach to solve the Multi-Armed
bandit problem.
The parameter :math:`\epsilon` is used for the
exploration-exploitation trade-off. With probability :math:`\epsilon`
the agent selects a random action, otherwise it selects the action
that has the best average reward.
Parameters
----------
n_arms : int
Number of actions (arms) of the MAB.
epsilon : float
Probability of selecting a random action.
Attributes
----------
means : numpy.array(float, ndim=1)
Vector containing the average reward of each arm.
trials : numpy.array(float, ndim=1)
Vector containing the number of trials made to each arm.
"""
def __init__(self, n_arms, epsilon):
self.epsilon = epsilon
self.n_arms = n_arms
self.means = np.zeros(self.n_arms)
self.trials = np.zeros(self.n_arms)
def learn(self, a_idx, reward):
"""Make `EpsilonGreedy` agent learn from the interaction.
The `EpsilonGreedy` agent learns from its previous choice
and the reward received from this action.
Updates the means and the trials.
Parameters
----------
reward : float
Reward received from the system after taking action a_idx.
a_idx : int
Index of the arm pulled (action taken).
"""
self.means[a_idx] = (
self.means[a_idx] * self.trials[a_idx] + reward
) / (self.trials[a_idx] + 1)
self.trials[a_idx] += 1 # add trial
def predict(self):
r"""Predict next action.
With probability :math:`\epsilon` the agent selects a random arm.
With probability :math:`1 - \epsilon` the agent selects the arm that
has the best average reward.
Returns
-------
int
Index of chosen action.
"""
if np.random.rand() < self.epsilon:
a = np.random.randint(low=0, high=self.n_arms)
else:
a = self.means.argmax()
return a
class DecayEpsilon(BaseMAB):
r"""Agent that follows an epsilon-decreasing policy.
The agent uses the epsilon-greedy approach to solve the Multi-Armed
bandit problem, but with a decay in the epsilon.
The parameter :math:`\epsilon` is used for the exploration-exploitation
trade-off. With probability :math:`\epsilon` the agent selects a
random action, otherwise it selects the action that has the
best average reward. After each interaction the epsilon is updated as
epsilon = epsilon * decay.
Parameters
----------
n_arms : int
Number of actions (arms) of the MAB.
max_epsilon : float
Initial epsilon.
decay : float
Decay of the epsilon.
Attributes
----------
epsilon : float
Epsilon of the agent. Constantly updated as epsilon = epsilon*decay
means : numpy.array(float, ndim=1)
Vector containing the average reward of each arm.
trials : numpy.array(float, ndim=1)
Vector containing the number of trials made to each arm.
"""
def __init__(self, n_arms, max_epsilon, decay):
self.epsilon = max_epsilon
self.n_arms = n_arms
self.means = np.zeros(self.n_arms)
self.trials = np.zeros(self.n_arms)
self.decay = decay
def learn(self, a_idx, reward):
"""Make the `DecayEpsilon` agent learn from the interaction.
The MAB agent learns from its previous choice and the reward received
from this action. Updates the means and the trials.
Parameters
----------
reward : float
Reward received from the system after taking action a_idx.
a_idx : int
Index of the arm pulled (action taken).
"""
self.means[a_idx] = (
self.means[a_idx] * self.trials[a_idx] + reward
) / (self.trials[a_idx] + 1)
self.trials[a_idx] += 1 # add trial
def predict(self):
r"""Predict next action and update epsilon.
With probability :math:`\epsilon` the agent selects a random arm.
With probability :math:`1 - \epsilon` the agent selects the arm that
has the best average reward.
Returns
-------
int
Index of chosen action.
"""
if np.random.rand() < self.epsilon:
a_idx = np.random.randint(low=0, high=self.n_arms)
else:
a_idx = self.means.argmax()
self.epsilon = self.epsilon * self.decay
return a_idx | /rl-agents-0.1.1.tar.gz/rl-agents-0.1.1/src/rl_agents/agents/mab/egreedy.py | 0.939865 | 0.851212 | egreedy.py | pypi |
import numpy as np
from rl_agents.agents.core import BaseAgent
from rl_agents.agents.functions import QMatrixFunction
from rl_agents.agents.policies import EGreedyPolicy
class TDAgent(BaseAgent):
"""A base Temporal-Difference Agent.
This agent is used to build the basic TD algorithms:
* Q-Learning
* SARSA
* Expected-SARSA
Parameters
----------
n_states : int
Number of states in the state space.
n_actions : int
Number of actions in the action space.
alpha : float
Learning rate.
gamma : float
Discount factor.
policy : rl_agents.agents.policies.base.BasePolicy
A policy object.
q_function : rl_agents.agents.functions.base.BaseQFunction
A Q-Function class.
Attributes
----------
n_states : int
Number of states.
n_actions : int
Number of actions.
alpha : float
Learning rate.
gamma : float
Discount factor.
policy : BasePolicy
Policy instance
q_function : BaseQFunction
Qfunction instance
"""
def __init__(
self,
n_states,
n_actions,
alpha,
gamma,
policy=EGreedyPolicy(0.1),
q_function=QMatrixFunction,
q_func_kwargs=None,
):
self.n_states = n_states
self.n_actions = n_actions
self.policy = policy
q_func_kwargs = {} if q_func_kwargs is None else q_func_kwargs.copy()
self.q_function = q_function(self.n_states, self.n_actions, **q_func_kwargs)
self.alpha = alpha
self.gamma = gamma
def predict(self, state, eval=False):
"""Predict the next action the agent should take.
Parameters
----------
state : int
Index of the state.
eval : bool
Flag to indicate if the agent is in a test setting (evaluation)
Returns
-------
int
Action index to be taken.
"""
# Get the Q-Values associated with that state:
q_values = self.q_function.get_values(state)
# Utilize the policy:
if eval:
action = np.argmax(q_values)
else:
action = self.policy(q_values)
return action
def learn(self, state, action, reward, next_state):
r"""Learn from the interaction.
TD update:
.. math::
Q(s,a) \leftarrow (1-\alpha) Q(s,a) + \alpha(r + \gamma F(s))
The difference between the TD algorithms
(Q-Learning, SARSA, Expected-SARSA) comes from the difference
in the :math:`F(s)` function.
Parameters
----------
state : type
State in which the action was taken.
action : type
Action taken
reward : float
Reward received by the transition.
next_state : type
Next state the environment transitions.
"""
# TD update Q(s,a) <- (1-alpha)Q(s,a) + alpha(r + gamma F(s))
update = self.alpha * (reward + self.gamma * self._next_value(next_state))
target_q = (1 - self.alpha) * self.q_function(state, action) + update
# Update the Q-Function with the target:
self.q_function.update(state, action, target_q)
def _next_value(self, next_state):
raise NotImplementedError
class QLearningAgent(TDAgent):
r"""A Simple Q-Learning Agent
Refer to `TDAgent` for reference on the parameters and methods.
The Q-Learning update is given as:
.. math::
Q(s, a) \leftarrow (1-\alpha) Q(s, a) +
\alpha \left[ r +
\gamma \max_{a' \in A} Q(s', a') \right]
"""
def __init__(
self,
n_states,
n_actions,
alpha,
gamma,
policy=EGreedyPolicy(0.1),
q_function=QMatrixFunction,
q_func_kwargs=None,
):
super().__init__(
n_states, n_actions, alpha, gamma, policy, q_function, q_func_kwargs,
)
def _next_value(self, next_state):
q_values = self.q_function.get_values(next_state)
return q_values.max()
class SarsaAgent(TDAgent):
r"""A Simple SARSA Agent
Refer to `TDAgent` for reference on the parameters and methods.
The SARSA update is given as:
.. math::
Q(s_t, a_t) \leftarrow (1-\alpha) Q(s, a) +
\alpha \left[ r +
\gamma Q(s',\pi(s') ) \right]
Where :math:`\pi(s)` returns the action chosen by the policy.
"""
def __init__(
self,
n_states,
n_actions,
alpha,
gamma,
policy=EGreedyPolicy(0.1),
q_function=QMatrixFunction,
q_func_kwargs=None,
):
super().__init__(
n_states, n_actions, alpha, gamma, policy, q_function, q_func_kwargs,
)
self.next_action = None
def predict(self, state, eval=False):
"""Predict the next action the SARSA agent should take.
Parameters
----------
state : int
Index of the state.
eval : bool
Flag to indicate if the agent is in a test setting (evaluation)
Returns
-------
int
Action index to be taken.
"""
# Get the Q-Values associated with that state:
q_values = self.q_function.get_values(state)
# Utilize the policy:
if eval:
action = np.argmax(q_values)
else:
if self.next_action == None:
action = self.policy(q_values)
else:
action = self.next_action
return action
def _next_value(self, next_state):
q_values = self.q_function.get_values(next_state)
action = self.policy(q_values)
self.next_action = action
return self.q_function(next_state, action)
class ExpectedSarsaAgent(TDAgent):
r"""A Simple expeted-SARSA Agent
The Expeted-SARSA update is given as:
.. math::
Q(s_t, a_t) \leftarrow (1-\alpha) Q(s, a) +
\alpha \left[ r +
\gamma \sum_{a' \in A} \pi(s',a') Q(s',a') \right]
Where :math:`\pi(s,a)` returns the probability of taking
action :math:`a` in state :math:`s`.
"""
def __init__(
self,
n_states,
n_actions,
alpha,
gamma,
policy=EGreedyPolicy(0.1),
q_function=QMatrixFunction,
q_func_kwargs=None,
):
super().__init__(
n_states, n_actions, alpha, gamma, policy, q_function, q_func_kwargs,
)
def _next_value(self, next_state):
q_values = self.q_function.get_values(next_state)
pi_values = self.policy.get_values(q_values)
return np.sum(q_values * pi_values) | /rl-agents-0.1.1.tar.gz/rl-agents-0.1.1/src/rl_agents/agents/tabular/td_learning.py | 0.893193 | 0.597872 | td_learning.py | pypi |
# rl-algo-impls
Implementations of reinforcement learning algorithms.
- [WandB benchmark reports](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/reportlist)
- [Basic, PyBullet, and Atari games
(v0.0.9)](https://api.wandb.ai/links/sgoodfriend/fdp5mg6h)
- [v0.0.8](https://api.wandb.ai/links/sgoodfriend/jh3cqbon)
- [v0.0.4](https://api.wandb.ai/links/sgoodfriend/09frjfcs)
- [procgen
(starpilot, hard)](https://api.wandb.ai/links/sgoodfriend/v1p4976e) and [procgen (easy)](https://api.wandb.ai/links/sgoodfriend/f3w1hwyb)
- [Gridnet MicroRTS](https://api.wandb.ai/links/sgoodfriend/zdee7ovm)
- [MicroRTS Selfplay](https://api.wandb.ai/links/sgoodfriend/5qjlr8ob)
- [Lux AI Season 2 Training](https://api.wandb.ai/links/sgoodfriend/0yrxywnd)
- [Huggingface models](https://huggingface.co/models?other=rl-algo-impls)
## Prerequisites: Weights & Biases (WandB)
Training and benchmarking assumes you have a Weights & Biases project to upload runs to.
By default training goes to a rl-algo-impls project while benchmarks go to
rl-algo-impls-benchmarks. During training and benchmarking runs, videos of the best
models and the model weights are uploaded to WandB.
Before doing anything below, you'll need to create a wandb account and run `wandb
login`.
## Setup and Usage
### Lambda Labs instance for benchmarking
Benchmark runs are uploaded to WandB, which can be made into reports ([for
example](https://api.wandb.ai/links/sgoodfriend/6p2sjqtn)). So far I've found Lambda
Labs A10 instances to be a good balance of performance (14 hours to train PPO in 14
environments [5 basic gym, 4 PyBullet, CarRacing-v0, and 4 Atari] across 3 seeds) vs
cost ($0.60/hr).
```
git clone https://github.com/sgoodfriend/rl-algo-impls.git
cd rl-algo-impls
# git checkout BRANCH_NAME if running on non-main branch
bash ./scripts/setup.sh
wandb login
bash ./scripts/benchmark.sh [-a {"ppo"}] [-e ENVS] [-j {6}] [-p {rl-algo-impls-benchmarks}] [-s {"1 2 3"}]
```
Benchmarking runs are by default upload to a rl-algo-impls-benchmarks project. Runs upload
videos of the running best model and the weights of the best and last model.
Benchmarking runs are tagged with a shorted commit hash (i.e., `benchmark_5598ebc`) and
hostname (i.e., `host_192-9-145-26`)
#### Publishing models to Huggingface
Publishing benchmarks to Huggingface requires logging into Huggingface with a
write-capable API token:
```
git config --global credential.helper store
huggingface-cli login
# For example: python benchmark_publish.py --wandb-tags host_192-9-147-166 benchmark_1d4094f --wandb-report-url https://api.wandb.ai/links/sgoodfriend/099h4lvj
# --virtual-display likely must be specified if running on a remote machine.
python benchmark_publish.py --wandb-tags HOST_TAG COMMIT_TAG --wandb-report-url WANDB_REPORT_URL [--virtual-display]
```
#### Hyperparameter tuning with Optuna
Hyperparameter tuning can be done with the `tuning/tuning.sh` script, which runs
multiple processes of optimize.py. Start by doing all the setup meant for training
before running `tuning/tuning.sh`:
```
# Setup similar to training above
wandb login
bash scripts/tuning.sh -a ALGO -e ENV -j N_JOBS -s NUM_SEEDS
```
### Google Colab Pro+
3 notebooks in the colab directory are setup to be used with Google Colab:
- [colab_benchmark.ipynb](https://github.com/sgoodfriend/rl-algo-impls/tree/main/benchmarks#:~:text=colab_benchmark.ipynb):
Even with a Google Colab Pro+ subscription you'd need to only run parts of the
benchmark. The file recommends 4 splits (basic+pybullet, carcarcing, atari1, atari2)
because it would otherwise exceed the 24-hour session limit. This mostly comes from
being unable to get pool_size above 1 because of WandB errors.
- [colab_train.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_train.ipynb):
Train models while being able to specify the env, seeds, and algo. By default training
runs are uploaded to the rl-algo-impls project.
- [colab_enjoy.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_enjoy.ipynb):
Download models from WandB and evaluate them. Training is likely to be more
interesting given videos are uploaded.
### macOS
#### Installation
My local development has been on an M1 Mac. These instructions might not be complete,
but these are the approximate setup and usage I've been using:
1. Install libraries with homebrew
```
brew install swig
brew install --cask xquartz
```
2. Download and install Miniconda for arm64
```
curl -O https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh
sh Miniconda3-latest-MacOSX-arm64.sh
```
3. Create a conda environment from this repo's
[environment.yml](https://github.com/sgoodfriend/rl-algo-impls/blob/main/environment.yml)
```
conda env create -f environment.yml -n rl_algo_impls
conda activate rl_algo_impls
```
4. Install other dependencies with poetry
```
poetry install
```
#### Usage
Training, benchmarking, and watching the agents playing the environments can be done
locally:
```
python train.py [-h] [--algo {ppo}] [--env ENV [ENV ...]] [--seed [SEED ...]] [--wandb-project-name WANDB_PROJECT_NAME] [--wandb-tags [WANDB_TAGS ...]] [--pool-size POOL_SIZE] [-virtual-display]
```
train.py by default uploads to the rl-algo-impls WandB project. Training creates videos
of the running best model, which will cause popups. Creating the first video requires a
display, so you shouldn't shutoff the display until the video of the initial model is
created (1-5 minutes depending on environment). The --virtual-display flag should allow
headless mode, but that hasn't been reliable on macOS.
```
python enjoy.py [-h] [--algo {ppo}] [--env ENV] [--seed SEED] [--render RENDER] [--best BEST] [--n_episodes N_EPISODES] [--deterministic-eval DETERMINISTIC_EVAL] [--no-print-returns]
# OR
python enjoy.py [--wandb-run-path WANDB_RUN_PATH]
```
The first enjoy.py where you specify algo, env, and seed loads a model you locally
trained with those parameters and renders the agent playing the environment.
The second enjoy.py downloads the model and hyperparameters from a WandB run. An
example run path is `sgoodfriend/rl-algo-impls-benchmarks/09gea50g`
## Hyperparameters
These are specified in yaml files in the hyperparams directory by game (`atari` is a
special case for all Atari games).
## procgen Setup
procgen envs use gym3, which don't expose a straightforward way to set seed to allow for
repeatable runs.
[openai/procgen](https://github.com/openai/procgen) doesn't support Apple Silicon, but [patch
instructions exist](https://github.com/openai/procgen/issues/69). The changes to the
repo are for now in a fork since the openai/procgen project is in maintenance mode:
```
brew install wget cmake glow qt5
git clone https://github.com/sgoodfriend/procgen.git
cd procgen
pip install -e .
python -c "from procgen import ProcgenGym3Env; ProcgenGym3Env(num=1, env_name='coinrun')"
python -m procgen.interactive
```
amd64 Linux machines (e.g., Lambda Labs and Google Colab) should install procgen with
`python -m pip install '.[procgen]'`
## gym-microrts Setup
```
python -m pip install -e '.[microrts]'
```
Requires Java SDK to also be installed.
| /rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/README.md | 0.52342 | 0.886076 | README.md | pypi |
import dataclasses
import gc
import inspect
import logging
import os
from dataclasses import asdict, dataclass
from typing import Callable, List, NamedTuple, Optional, Sequence, Union
import numpy as np
import optuna
import torch
from optuna.pruners import HyperbandPruner
from optuna.samplers import TPESampler
from optuna.visualization import plot_optimization_history, plot_param_importances
from torch.utils.tensorboard.writer import SummaryWriter
import wandb
from rl_algo_impls.a2c.optimize import sample_params as a2c_sample_params
from rl_algo_impls.runner.config import Config, EnvHyperparams, RunArgs
from rl_algo_impls.runner.running_utils import (
ALGOS,
base_parser,
get_device,
hparam_dict,
load_hyperparams,
make_policy,
set_seeds,
)
from rl_algo_impls.shared.callbacks import Callback
from rl_algo_impls.shared.callbacks.lux_hyperparam_transitions import (
LuxHyperparamTransitions,
)
from rl_algo_impls.shared.callbacks.optimize_callback import (
Evaluation,
OptimizeCallback,
evaluation,
)
from rl_algo_impls.shared.callbacks.reward_decay_callback import RewardDecayCallback
from rl_algo_impls.shared.callbacks.self_play_callback import SelfPlayCallback
from rl_algo_impls.shared.stats import EpisodesStats
from rl_algo_impls.shared.vec_env import make_env, make_eval_env
from rl_algo_impls.wrappers.self_play_wrapper import SelfPlayWrapper
from rl_algo_impls.wrappers.vectorable_wrapper import find_wrapper
@dataclass
class StudyArgs:
    """Arguments controlling the Optuna study itself (vs. per-run training args).

    parse_args splits argparse destinations into this class by matching them
    against its constructor parameters.
    """

    # Load a preexisting study instead of creating one (useful when several
    # workers run trials against the same storage in parallel).
    load_study: bool
    study_name: Optional[str] = None
    # Optuna storage URL (e.g. "sqlite:///runs/tuning.db"). Derived from the
    # run config in parse_args when not given.
    storage_path: Optional[str] = None
    n_trials: int = 100
    n_jobs: int = 1
    # Number of intermediate evaluations (pruner reports) per trial.
    n_evaluations: int = 4
    n_eval_envs: int = 8
    n_eval_episodes: int = 16
    # Seconds before study.optimize stops; None means no timeout.
    timeout: Union[int, float, None] = None
    # If None/empty, trial results are not uploaded to WandB.
    wandb_project_name: Optional[str] = None
    wandb_entity: Optional[str] = None
    wandb_tags: Sequence[str] = dataclasses.field(default_factory=list)
    # Defaults to the study name in parse_args when not provided.
    wandb_group: Optional[str] = None
    # Start a headless virtual display (for rendering on display-less servers).
    virtual_display: bool = False
class Args(NamedTuple):
    """Result of parse_args: per-run training args plus study-wide args."""

    # One RunArgs per (algo, env, seed) combination produced by
    # RunArgs.expand_from_dict.
    train_args: Sequence[RunArgs]
    study_args: StudyArgs
def parse_args() -> Args:
    """Parse CLI arguments and split them into training and study arguments.

    Destinations that match StudyArgs constructor parameters populate StudyArgs;
    everything else (from base_parser: algo, env, seed, ...) populates RunArgs.
    When study name/storage aren't given, they are derived from the run config.

    Returns:
        Args(train_args, study_args) where train_args has one entry per seed.
    """
    parser = base_parser()
    parser.add_argument(
        "--load-study",
        action="store_true",
        help="Load a preexisting study, useful for parallelization",
    )
    parser.add_argument("--study-name", type=str, help="Optuna study name")
    parser.add_argument(
        "--storage-path",
        type=str,
        help="Path of database for Optuna to persist to",
    )
    parser.add_argument(
        "--wandb-project-name",
        type=str,
        default="rl-algo-impls-tuning",
        help="WandB project name to upload tuning data to. If none, won't upload",
    )
    parser.add_argument(
        "--wandb-entity",
        type=str,
        help="WandB team. None uses the default entity",
    )
    parser.add_argument(
        "--wandb-tags", type=str, nargs="*", help="WandB tags to add to run"
    )
    parser.add_argument(
        "--wandb-group", type=str, help="WandB group to group trials under"
    )
    parser.add_argument(
        "--n-trials", type=int, default=100, help="Maximum number of trials"
    )
    parser.add_argument(
        "--n-jobs", type=int, default=1, help="Number of jobs to run in parallel"
    )
    parser.add_argument(
        "--n-evaluations",
        type=int,
        default=4,
        help="Number of evaluations during the training",
    )
    parser.add_argument(
        "--n-eval-envs",
        type=int,
        default=8,
        help="Number of envs in vectorized eval environment",
    )
    parser.add_argument(
        "--n-eval-episodes",
        type=int,
        default=16,
        help="Number of episodes to complete for evaluation",
    )
    parser.add_argument("--timeout", type=int, help="Seconds to timeout optimization")
    parser.add_argument(
        "--virtual-display", action="store_true", help="Use headless virtual display"
    )
    # parser.set_defaults(
    #     algo=["a2c"],
    #     env=["CartPole-v1"],
    #     seed=[100, 200, 300],
    #     n_trials=5,
    #     virtual_display=True,
    # )
    # Route each parsed argument into the dataclass whose signature accepts it.
    train_dict, study_dict = {}, {}
    for k, v in vars(parser.parse_args()).items():
        if k in inspect.signature(StudyArgs).parameters:
            study_dict[k] = v
        else:
            train_dict[k] = v

    study_args = StudyArgs(**study_dict)
    # Hyperparameter tuning across algos and envs not supported
    assert len(train_dict["algo"]) == 1
    assert len(train_dict["env"]) == 1
    train_args = RunArgs.expand_from_dict(train_dict)

    # Derive study name/storage from the (single algo, env) run config when
    # either wasn't specified on the command line.
    if not all((study_args.study_name, study_args.storage_path)):
        hyperparams = load_hyperparams(train_args[0].algo, train_args[0].env)
        config = Config(train_args[0], hyperparams, os.getcwd())
        if study_args.study_name is None:
            study_args.study_name = config.run_name(include_seed=False)
        if study_args.storage_path is None:
            study_args.storage_path = (
                f"sqlite:///{os.path.join(config.runs_dir, 'tuning.db')}"
            )
    # Default set group name to study name
    study_args.wandb_group = study_args.wandb_group or study_args.study_name

    return Args(train_args, study_args)
def objective_fn(
    args: Sequence[RunArgs], study_args: StudyArgs
) -> Callable[[optuna.Trial], float]:
    """Build the Optuna objective, closing over run and study arguments.

    A single RunArgs trains once per trial; multiple RunArgs (multiple seeds)
    are optimized stepwise so the pruner sees cross-seed aggregate scores.
    """

    def objective(trial: optuna.Trial) -> float:
        return (
            simple_optimize(trial, args[0], study_args)
            if len(args) == 1
            else stepwise_optimize(trial, args, study_args)
        )

    return objective
def simple_optimize(trial: optuna.Trial, args: RunArgs, study_args: StudyArgs) -> float:
    """Train a single run with trial-sampled hyperparameters and return its score.

    Reports intermediate scores to Optuna via OptimizeCallback; raises
    TrialPruned when the pruner stops the trial. Returns NaN on AssertionError
    (treated as a failed-but-not-fatal trial).

    Fix: the final stats were read from an undefined name `callback`; they now
    come from `optimize_callback` (previously a NameError on any completed trial).
    """
    base_hyperparams = load_hyperparams(args.algo, args.env)
    base_config = Config(args, base_hyperparams, os.getcwd())
    if args.algo == "a2c":
        hyperparams = a2c_sample_params(trial, base_hyperparams, base_config)
    else:
        raise ValueError(f"Optimizing {args.algo} isn't supported")
    config = Config(args, hyperparams, os.getcwd())

    wandb_enabled = bool(study_args.wandb_project_name)
    if wandb_enabled:
        wandb.init(
            project=study_args.wandb_project_name,
            entity=study_args.wandb_entity,
            config=asdict(hyperparams),
            name=f"{config.model_name()}-{str(trial.number)}",
            tags=study_args.wandb_tags,
            group=study_args.wandb_group,
            sync_tensorboard=True,
            monitor_gym=True,
            save_code=True,
            reinit=True,
        )
        wandb.config.update(args)

    tb_writer = SummaryWriter(config.tensorboard_summary_path)
    set_seeds(args.seed, args.use_deterministic_algorithms)

    env = make_env(
        config, EnvHyperparams(**config.env_hyperparams), tb_writer=tb_writer
    )
    device = get_device(config, env)
    policy_factory = lambda: make_policy(
        config, env, device, **config.policy_hyperparams
    )
    policy = policy_factory()
    algo = ALGOS[args.algo](policy, env, device, tb_writer, **config.algo_hyperparams)

    self_play_wrapper = find_wrapper(env, SelfPlayWrapper)
    eval_env = make_eval_env(
        config,
        EnvHyperparams(**config.env_hyperparams),
        override_hparams={"n_envs": study_args.n_eval_envs},
        self_play_wrapper=self_play_wrapper,
    )
    optimize_callback = OptimizeCallback(
        policy,
        eval_env,
        trial,
        tb_writer,
        step_freq=config.n_timesteps // study_args.n_evaluations,
        n_episodes=study_args.n_eval_episodes,
        deterministic=config.eval_hyperparams.get("deterministic", True),
    )
    callbacks: List[Callback] = [optimize_callback]
    if config.hyperparams.reward_decay_callback:
        callbacks.append(
            RewardDecayCallback(
                config,
                env,
                **(config.hyperparams.reward_decay_callback_kwargs or {}),
            )
        )
    if config.hyperparams.lux_hyperparam_transitions_kwargs:
        callbacks.append(
            LuxHyperparamTransitions(
                config,
                env,
                algo,
                **config.hyperparams.lux_hyperparam_transitions_kwargs,
            )
        )
    if self_play_wrapper:
        callbacks.append(SelfPlayCallback(policy, policy_factory, self_play_wrapper))
    try:
        algo.learn(config.n_timesteps, callbacks=callbacks)

        # Run a final evaluation (and save) only if the pruner hasn't stopped us.
        if not optimize_callback.is_pruned:
            optimize_callback.evaluate()
            if not optimize_callback.is_pruned:
                policy.save(config.model_dir_path(best=False))

        # Fixed: these previously referenced an undefined `callback` variable.
        eval_stat: EpisodesStats = optimize_callback.last_eval_stat  # type: ignore
        train_stat: EpisodesStats = optimize_callback.last_train_stat  # type: ignore

        tb_writer.add_hparams(
            hparam_dict(hyperparams, vars(args)),
            {
                "hparam/last_mean": eval_stat.score.mean,
                "hparam/last_result": eval_stat.score.mean - eval_stat.score.std,
                "hparam/train_mean": train_stat.score.mean,
                "hparam/train_result": train_stat.score.mean - train_stat.score.std,
                "hparam/score": optimize_callback.last_score,
                "hparam/is_pruned": optimize_callback.is_pruned,
            },
            None,
            config.run_name(),
        )
        tb_writer.close()

        if wandb_enabled:
            wandb.run.summary["state"] = (  # type: ignore
                "Pruned" if optimize_callback.is_pruned else "Complete"
            )
            wandb.finish(quiet=True)

        if optimize_callback.is_pruned:
            raise optuna.exceptions.TrialPruned()

        return optimize_callback.last_score
    except AssertionError as e:
        logging.warning(e)
        return np.nan
    finally:
        # Always release env/GPU resources, even on pruning or failure.
        env.close()
        eval_env.close()
        gc.collect()
        torch.cuda.empty_cache()
def stepwise_optimize(
    trial: optuna.Trial, args: Sequence[RunArgs], study_args: StudyArgs
) -> float:
    """Optimize across multiple seeds, interleaving training and evaluation.

    Each of study_args.n_evaluations steps trains every seed for an equal slice
    of config.n_timesteps (reloading model/normalization state after step 0),
    then reports the mean cross-seed score to Optuna for pruning.

    Fixes: `wandb.log` is now only called when a WandB run was initialized
    (previously it was called unconditionally and would fail with WandB
    disabled); the module-level name `algo` is no longer shadowed by the
    learner object (renamed to `algo_name`).
    """
    algo_name = args[0].algo
    env_id = args[0].env
    base_hyperparams = load_hyperparams(algo_name, env_id)
    base_config = Config(args[0], base_hyperparams, os.getcwd())
    if algo_name == "a2c":
        hyperparams = a2c_sample_params(trial, base_hyperparams, base_config)
    else:
        raise ValueError(f"Optimizing {algo_name} isn't supported")

    wandb_enabled = bool(study_args.wandb_project_name)
    if wandb_enabled:
        wandb.init(
            project=study_args.wandb_project_name,
            entity=study_args.wandb_entity,
            config=asdict(hyperparams),
            name=f"{str(trial.number)}-S{base_config.seed()}",
            tags=study_args.wandb_tags,
            group=study_args.wandb_group,
            save_code=True,
            reinit=True,
        )

    score = -np.inf

    for i in range(study_args.n_evaluations):
        evaluations: List[Evaluation] = []

        for arg in args:
            config = Config(arg, hyperparams, os.getcwd())

            tb_writer = SummaryWriter(config.tensorboard_summary_path)
            set_seeds(arg.seed, arg.use_deterministic_algorithms)

            env = make_env(
                config,
                EnvHyperparams(**config.env_hyperparams),
                normalize_load_path=config.model_dir_path() if i > 0 else None,
                tb_writer=tb_writer,
            )
            device = get_device(config, env)
            policy_factory = lambda: make_policy(
                config, env, device, **config.policy_hyperparams
            )
            policy = policy_factory()
            if i > 0:
                # Resume from the snapshot written at the end of the prior step.
                policy.load(config.model_dir_path())
            algo = ALGOS[arg.algo](
                policy, env, device, tb_writer, **config.algo_hyperparams
            )

            self_play_wrapper = find_wrapper(env, SelfPlayWrapper)
            eval_env = make_eval_env(
                config,
                EnvHyperparams(**config.env_hyperparams),
                normalize_load_path=config.model_dir_path() if i > 0 else None,
                override_hparams={"n_envs": study_args.n_eval_envs},
                self_play_wrapper=self_play_wrapper,
            )

            start_timesteps = int(i * config.n_timesteps / study_args.n_evaluations)
            train_timesteps = (
                int((i + 1) * config.n_timesteps / study_args.n_evaluations)
                - start_timesteps
            )

            callbacks = []
            if config.hyperparams.reward_decay_callback:
                callbacks.append(
                    RewardDecayCallback(
                        config,
                        env,
                        start_timesteps=start_timesteps,
                        **(config.hyperparams.reward_decay_callback_kwargs or {}),
                    )
                )
            if config.hyperparams.lux_hyperparam_transitions_kwargs:
                callbacks.append(
                    LuxHyperparamTransitions(
                        config,
                        env,
                        algo,
                        start_timesteps=start_timesteps,
                        **config.hyperparams.lux_hyperparam_transitions_kwargs,
                    )
                )
            if self_play_wrapper:
                callbacks.append(
                    SelfPlayCallback(policy, policy_factory, self_play_wrapper)
                )
            try:
                algo.learn(
                    train_timesteps,
                    callbacks=callbacks,
                    total_timesteps=config.n_timesteps,
                    start_timesteps=start_timesteps,
                )

                evaluations.append(
                    evaluation(
                        policy,
                        eval_env,
                        tb_writer,
                        study_args.n_eval_episodes,
                        config.eval_hyperparams.get("deterministic", True),
                        start_timesteps + train_timesteps,
                    )
                )

                policy.save(config.model_dir_path())

                tb_writer.close()
            except AssertionError as e:
                logging.warning(e)
                if wandb_enabled:
                    wandb_finish("Error")
                return np.nan
            finally:
                env.close()
                eval_env.close()
                gc.collect()
                torch.cuda.empty_cache()

        d = {}
        for idx, e in enumerate(evaluations):
            d[f"{idx}/eval_mean"] = e.eval_stat.score.mean
            d[f"{idx}/train_mean"] = e.train_stat.score.mean
            d[f"{idx}/score"] = e.score
        d["eval"] = np.mean([e.eval_stat.score.mean for e in evaluations]).item()
        d["train"] = np.mean([e.train_stat.score.mean for e in evaluations]).item()
        score = np.mean([e.score for e in evaluations]).item()
        d["score"] = score

        step = i + 1
        if wandb_enabled:
            # Fixed: previously logged unconditionally, which fails when no
            # WandB run was initialized.
            wandb.log(d, step=step)
        print(f"Trial #{trial.number} Step {step} Score: {round(score, 2)}")
        trial.report(score, step)
        if trial.should_prune():
            if wandb_enabled:
                wandb_finish("Pruned")
            raise optuna.exceptions.TrialPruned()

    if wandb_enabled:
        wandb_finish("Complete")

    return score
def wandb_finish(state: str) -> None:
    """Record the trial's terminal state on the active WandB run and close it.

    Assumes wandb.init has already been called (i.e. wandb.run is not None).
    """
    wandb.run.summary["state"] = state  # type: ignore
    wandb.finish(quiet=True)
def optimize() -> None:
    """Entry point: create/load an Optuna study, run trials, and report results.

    Prints the best trial, a markdown table of completed trials, and writes
    optimization-history and param-importance plots to the working directory.

    Fix: pyvirtualdisplay is now imported lazily, only when --virtual-display
    is set, so the dependency isn't required for headless-free runs.
    """
    train_args, study_args = parse_args()

    if study_args.virtual_display:
        # Imported lazily so pyvirtualdisplay is only needed when actually used.
        from pyvirtualdisplay.display import Display

        virtual_display = Display(visible=False, size=(1400, 900))
        virtual_display.start()

    sampler = TPESampler(**TPESampler.hyperopt_parameters())
    pruner = HyperbandPruner()
    if study_args.load_study:
        assert study_args.study_name
        assert study_args.storage_path
        study = optuna.load_study(
            study_name=study_args.study_name,
            storage=study_args.storage_path,
            sampler=sampler,
            pruner=pruner,
        )
    else:
        study = optuna.create_study(
            study_name=study_args.study_name,
            storage=study_args.storage_path,
            sampler=sampler,
            pruner=pruner,
            direction="maximize",
        )

    try:
        study.optimize(
            objective_fn(train_args, study_args),
            n_trials=study_args.n_trials,
            n_jobs=study_args.n_jobs,
            timeout=study_args.timeout,
        )
    except KeyboardInterrupt:
        # Allow Ctrl-C to stop optimization but still report results below.
        pass

    best = study.best_trial
    print(f"Best Trial Value: {best.value}")
    print("Attributes:")
    for key, value in list(best.params.items()) + list(best.user_attrs.items()):
        print(f"  {key}: {value}")

    df = study.trials_dataframe()
    df = df[df.state == "COMPLETE"].sort_values(by=["value"], ascending=False)
    print(df.to_markdown(index=False))

    fig1 = plot_optimization_history(study)
    fig1.write_image("opt_history.png")
    fig2 = plot_param_importances(study)
    fig2.write_image("param_importances.png")
if __name__ == "__main__":
optimize() | /rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/optimize.py | 0.828939 | 0.184308 | optimize.py | pypi |
import argparse
import itertools
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, Iterable, List, Optional, TypeVar

import numpy as np
import pandas as pd
import wandb
import wandb.apis.public

from rl_algo_impls.benchmark_publish import RunGroup
@dataclass
class Comparison:
    """Paired control/experiment metric samples and their percent differences."""

    control_values: List[float]
    experiment_values: List[float]

    def mean_diff_percentage(self) -> float:
        """Percent change of the experiment mean relative to the control mean."""
        control = np.mean(self.control_values).item()
        experiment = np.mean(self.experiment_values).item()
        return self._diff_percentage(control, experiment)

    def median_diff_percentage(self) -> float:
        """Percent change of the experiment median relative to the control median."""
        control = np.median(self.control_values).item()
        experiment = np.median(self.experiment_values).item()
        return self._diff_percentage(control, experiment)

    def _diff_percentage(self, c: float, e: float) -> float:
        # Equal values (including both zero) are a 0% difference; a zero
        # control with a nonzero experiment is an infinite relative change.
        if c == e:
            return 0
        if c == 0:
            return float("inf") if e > 0 else float("-inf")
        return 100 * (e - c) / c

    def score(self) -> float:
        """Agreement of mean and median direction: +1, +0.5, 0, -0.5, or -1."""
        directions = np.sign(
            (self.mean_diff_percentage(), self.median_diff_percentage())
        )
        return np.sum(directions).item() / 2
RunGroupRunsSelf = TypeVar("RunGroupRunsSelf", bound="RunGroupRuns")


class RunGroupRuns:
    """Buckets one (algo, env) group's WandB runs into control vs. experiment
    sets by tag, and summarizes metric differences between the two.

    Fixes: mutable list literals are no longer used as default arguments
    (shared-default pitfall); the data_frame column previously misspelled
    "expierment" is now "experiment".
    """

    def __init__(
        self,
        run_group: "RunGroup",
        control: List[str],
        experiment: List[str],
        summary_stats: Optional[List[str]] = None,
        summary_metrics: Optional[List[str]] = None,
    ) -> None:
        """Args:
        run_group: the (algo, env_id) group these runs belong to.
        control/experiment: WandB tags identifying each arm.
        summary_stats/summary_metrics: summary keys compared as
            "{stat}/{metric}"; None selects the standard defaults.
        """
        self.algo = run_group.algo
        self.env = run_group.env_id
        self.control = set(control)
        self.experiment = set(experiment)
        self.summary_stats = (
            summary_stats
            if summary_stats is not None
            else ["best_eval", "eval", "train_rolling"]
        )
        self.summary_metrics = (
            summary_metrics if summary_metrics is not None else ["mean", "result"]
        )
        self.control_runs = []
        self.experiment_runs = []

    def add_run(self, run: "wandb.apis.public.Run") -> None:
        """Assign a run to control or experiment by its tags (control wins ties)."""
        wandb_tags = set(run.config.get("wandb_tags", []))
        if self.control & wandb_tags:
            self.control_runs.append(run)
        elif self.experiment & wandb_tags:
            self.experiment_runs.append(run)

    def comparisons_by_metric(self) -> Dict[str, "Comparison"]:
        """Build a Comparison for each "{stat}/{metric}" summary key."""
        c_by_m = {}
        for metric in (
            f"{s}/{m}"
            for s, m in itertools.product(self.summary_stats, self.summary_metrics)
        ):
            c_by_m[metric] = Comparison(
                [c.summary[metric] for c in self.control_runs],
                [e.summary[metric] for e in self.experiment_runs],
            )
        return c_by_m

    @staticmethod
    def data_frame(rows: Iterable[RunGroupRunsSelf]) -> pd.DataFrame:
        """Tabulate per-metric diffs for groups having both control and
        experiment runs; groups missing either arm are skipped."""
        results = defaultdict(list)
        for r in rows:
            if not r.control_runs or not r.experiment_runs:
                continue
            results["algo"].append(r.algo)
            results["env"].append(r.env)
            results["control"].append(r.control)
            # Fixed column name (was misspelled "expierment").
            results["experiment"].append(r.experiment)
            c_by_m = r.comparisons_by_metric()
            results["score"].append(
                sum(m.score() for m in c_by_m.values()) / len(c_by_m)
            )
            for m, c in c_by_m.items():
                results[f"{m}_mean"].append(c.mean_diff_percentage())
                results[f"{m}_median"].append(c.median_diff_percentage())
        return pd.DataFrame(results)
def compare_runs() -> None:
    """CLI entry point: compare control vs. experiment benchmark runs on WandB.

    Groups finished runs by (algo, env), buckets them by control/experiment
    tags, and prints a markdown table of per-metric percentage differences.

    Fix: omitting -n/--wandb-hostname-tag previously crashed with
    `set(None)`; an absent hostname filter now means "include all hosts".
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-p",
        "--wandb-project-name",
        type=str,
        default="rl-algo-impls-benchmarks",
        help="WandB project name to load runs from",
    )
    parser.add_argument(
        "--wandb-entity",
        type=str,
        default=None,
        help="WandB team. None uses default entity",
    )
    parser.add_argument(
        "-n",
        "--wandb-hostname-tag",
        type=str,
        nargs="*",
        help="WandB tags for hostname (i.e. host_192-9-145-26)",
    )
    parser.add_argument(
        "-c",
        "--wandb-control-tag",
        type=str,
        nargs="+",
        help="WandB tag for control commit (i.e. benchmark_5598ebc)",
    )
    parser.add_argument(
        "-e",
        "--wandb-experiment-tag",
        type=str,
        nargs="+",
        help="WandB tag for experiment commit (i.e. benchmark_5540e1f)",
    )
    parser.add_argument(
        "--envs",
        type=str,
        nargs="*",
        help="If specified, only compare these envs",
    )
    parser.add_argument(
        "--exclude-envs",
        type=str,
        nargs="*",
        help="Environments to exclude from comparison",
    )
    # parser.set_defaults(
    #     wandb_hostname_tag=["host_150-230-44-105", "host_155-248-214-128"],
    #     wandb_control_tag=["benchmark_fbc943f"],
    #     wandb_experiment_tag=["benchmark_f59bf74"],
    #     exclude_envs=[],
    # )
    args = parser.parse_args()
    print(args)

    api = wandb.Api()
    all_runs = api.runs(
        path=f"{args.wandb_entity or api.default_entity}/{args.wandb_project_name}",
        order="+created_at",
    )

    runs_by_run_group: Dict[RunGroup, RunGroupRuns] = {}
    # nargs="*" yields None when the flag is omitted entirely; treat that as
    # "no hostname filter" instead of crashing on set(None).
    wandb_hostname_tags = set(args.wandb_hostname_tag or [])
    for r in all_runs:
        if r.state != "finished":
            continue
        wandb_tags = set(r.config.get("wandb_tags", []))
        if not wandb_tags:
            continue
        # Only apply the hostname filter when hostname tags were provided.
        if wandb_hostname_tags and not wandb_hostname_tags & wandb_tags:
            continue
        rg = RunGroup(r.config["algo"], r.config.get("env_id") or r.config["env"])
        if args.exclude_envs and rg.env_id in args.exclude_envs:
            continue
        if args.envs and rg.env_id not in args.envs:
            continue
        if rg not in runs_by_run_group:
            runs_by_run_group[rg] = RunGroupRuns(
                rg,
                args.wandb_control_tag,
                args.wandb_experiment_tag,
            )
        runs_by_run_group[rg].add_run(r)
    df = RunGroupRuns.data_frame(runs_by_run_group.values()).round(decimals=2)
    print(f"**Total Score: {sum(df.score)}**")
    df.loc["mean"] = df.mean(numeric_only=True)
    print(df.to_markdown())
if __name__ == "__main__":
compare_runs() | /rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/compare_runs.py | 0.741768 | 0.356615 | compare_runs.py | pypi |
import os
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
import argparse
import shutil
import subprocess
import tempfile
from typing import List, Optional
import requests
import wandb.apis.public
from huggingface_hub.hf_api import HfApi, upload_folder
from huggingface_hub.repocard import metadata_save
from pyvirtualdisplay.display import Display
import wandb
from rl_algo_impls.publish.markdown_format import EvalTableData, model_card_text
from rl_algo_impls.runner.config import EnvHyperparams
from rl_algo_impls.runner.evaluate import EvalArgs, evaluate_model
from rl_algo_impls.shared.callbacks.eval_callback import evaluate
from rl_algo_impls.shared.vec_env import make_eval_env
from rl_algo_impls.wrappers.vec_episode_recorder import VecEpisodeRecorder
def publish(
    wandb_run_paths: List[str],
    wandb_report_url: str,
    huggingface_user: Optional[str] = None,
    huggingface_token: Optional[str] = None,
    virtual_display: bool = False,
) -> None:
    """Evaluate the given WandB runs and publish the best one to HuggingFace Hub.

    Downloads each run's best model, evaluates it locally, builds a model card
    with an evaluation table, records a replay video, and uploads a repo named
    after the model.

    Args:
        wandb_run_paths: runs of the form "entity/project/run_id"; all must
            share the same algo and hyperparameter id (taken from the first).
        wandb_report_url: link embedded in the generated model card.
        huggingface_user: target user/org; defaults to the authenticated user.
        huggingface_token: HF auth token; None uses the ambient login.
        virtual_display: start a headless display (needed for video rendering
            on display-less servers).
    """
    if virtual_display:
        # NOTE(review): the display is started but never stopped; it lives for
        # the remainder of the process.
        display = Display(visible=False, size=(1400, 900))
        display.start()

    api = wandb.Api()
    runs = [api.run(rp) for rp in wandb_run_paths]
    algo = runs[0].config["algo"]
    hyperparam_id = runs[0].config["env"]
    # Evaluate each run's best saved model for 10 episodes (no rendering).
    evaluations = [
        evaluate_model(
            EvalArgs(
                algo,
                hyperparam_id,
                seed=r.config.get("seed", None),
                render=False,
                best=True,
                n_envs=None,
                n_episodes=10,
                no_print_returns=True,
                wandb_run_path="/".join(r.path),
            ),
            os.getcwd(),
        )
        for r in runs
    ]
    run_metadata = requests.get(runs[0].file("wandb-metadata.json").url).json()
    table_data = list(EvalTableData(r, e) for r, e in zip(runs, evaluations))
    # The highest-scoring run becomes the published model.
    best_eval = sorted(
        table_data, key=lambda d: d.evaluation.stats.score, reverse=True
    )[0]

    with tempfile.TemporaryDirectory() as tmpdirname:
        _, (policy, stats, config) = best_eval

        repo_name = config.model_name(include_seed=False)
        repo_dir_path = os.path.join(tmpdirname, repo_name)
        # Locally clone this repo to a temp directory
        subprocess.run(["git", "clone", ".", repo_dir_path])
        shutil.rmtree(os.path.join(repo_dir_path, ".git"))
        model_path = config.model_dir_path(best=True, downloaded=True)
        shutil.copytree(
            model_path,
            os.path.join(
                repo_dir_path, "saved_models", config.model_dir_name(best=True)
            ),
        )

        github_url = "https://github.com/sgoodfriend/rl-algo-impls"
        commit_hash = run_metadata.get("git", {}).get("commit", None)
        env_id = runs[0].config.get("env_id") or runs[0].config["env"]
        card_text = model_card_text(
            algo,
            env_id,
            github_url,
            commit_hash,
            wandb_report_url,
            table_data,
            best_eval,
        )
        readme_filepath = os.path.join(repo_dir_path, "README.md")
        # Replace the cloned repo's README with the generated model card.
        os.remove(readme_filepath)
        with open(readme_filepath, "w") as f:
            f.write(card_text)

        # HuggingFace model-index metadata appended to the card front matter.
        metadata = {
            "library_name": "rl-algo-impls",
            "tags": [
                env_id,
                algo,
                "deep-reinforcement-learning",
                "reinforcement-learning",
            ],
            "model-index": [
                {
                    "name": algo,
                    "results": [
                        {
                            "metrics": [
                                {
                                    "type": "mean_reward",
                                    "value": str(stats.score),
                                    "name": "mean_reward",
                                }
                            ],
                            "task": {
                                "type": "reinforcement-learning",
                                "name": "reinforcement-learning",
                            },
                            "dataset": {
                                "name": env_id,
                                "type": env_id,
                            },
                        }
                    ],
                }
            ],
        }
        metadata_save(readme_filepath, metadata)

        # Record a single-episode replay video into the repo directory.
        video_env = VecEpisodeRecorder(
            make_eval_env(
                config,
                EnvHyperparams(**config.env_hyperparams),
                override_hparams={"n_envs": 1},
                normalize_load_path=model_path,
            ),
            os.path.join(repo_dir_path, "replay"),
            max_video_length=3600,
        )
        evaluate(
            video_env,
            policy,
            1,
            deterministic=config.eval_hyperparams.get("deterministic", True),
        )

        api = HfApi()
        huggingface_user = huggingface_user or api.whoami()["name"]
        huggingface_repo = f"{huggingface_user}/{repo_name}"
        api.create_repo(
            token=huggingface_token,
            repo_id=huggingface_repo,
            private=False,
            exist_ok=True,
        )
        repo_url = upload_folder(
            repo_id=huggingface_repo,
            folder_path=repo_dir_path,
            path_in_repo="",
            commit_message=f"{algo.upper()} playing {env_id} from {github_url}/tree/{commit_hash}",
            token=huggingface_token,
            delete_patterns="*",
        )
        print(f"Pushed model to the hub: {repo_url}")
def huggingface_publish():
    """CLI entry point: parse arguments and publish the selected runs to HF Hub."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--wandb-run-paths",
        type=str,
        nargs="+",
        help="Run paths of the form entity/project/run_id",
    )
    arg_parser.add_argument("--wandb-report-url", type=str, help="Link to WandB report")
    arg_parser.add_argument(
        "--huggingface-user",
        type=str,
        help="Huggingface user or team to upload model cards",
        default=None,
    )
    arg_parser.add_argument(
        "--virtual-display", action="store_true", help="Use headless virtual display"
    )
    parsed = arg_parser.parse_args()
    print(parsed)
    # Argument destinations line up one-to-one with publish()'s parameters.
    publish(**vars(parsed))
if __name__ == "__main__":
huggingface_publish() | /rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/huggingface_publish.py | 0.57344 | 0.162679 | huggingface_publish.py | pypi |
import dataclasses
from dataclasses import dataclass
from typing import List, Optional
import numpy as np
import wandb
from rl_algo_impls.runner.config import Config, EnvHyperparams, RunArgs
from rl_algo_impls.runner.evaluate import Evaluation
from rl_algo_impls.runner.running_utils import (
get_device,
load_hyperparams,
make_policy,
set_seeds,
)
from rl_algo_impls.runner.wandb_load import load_player
from rl_algo_impls.shared.callbacks.eval_callback import evaluate
from rl_algo_impls.shared.vec_env import make_eval_env
from rl_algo_impls.wrappers.vec_episode_recorder import VecEpisodeRecorder
@dataclass
class SelfplayEvalArgs(RunArgs):
    """Arguments for pitting two trained policies against each other."""

    # Either wandb_run_paths or model_file_paths must have 2 elements in it.
    wandb_run_paths: List[str] = dataclasses.field(default_factory=list)
    model_file_paths: List[str] = dataclasses.field(default_factory=list)
    render: bool = False
    # Use each run's "best" checkpoint rather than the latest.
    best: bool = True
    n_envs: int = 1
    n_episodes: int = 1
    # None defers to the config's eval_hyperparams "deterministic" setting.
    deterministic_eval: Optional[bool] = None
    no_print_returns: bool = False
    # If set, record episodes to this path.
    video_path: Optional[str] = None
def selfplay_evaluate(args: SelfplayEvalArgs, root_dir: str) -> Evaluation:
    """Evaluate player 1's policy against player 2 in a selfplay environment.

    Player 1 is loaded as the acting policy; player 2 is installed as a
    selfplay bot inside the environment (always assigned the player-2 seat).

    Args:
        args: must provide exactly two wandb_run_paths or two model_file_paths.
        root_dir: project root used to resolve hyperparameter/model paths.

    Raises:
        ValueError: if neither pair of paths is provided.
    """
    if args.wandb_run_paths:
        api = wandb.Api()
        # load_player downloads the model and rebuilds args/config from the run.
        args, config, player_1_model_path = load_player(
            api, args.wandb_run_paths[0], args, root_dir, args.best
        )
        _, _, player_2_model_path = load_player(
            api, args.wandb_run_paths[1], args, root_dir, args.best
        )
    elif args.model_file_paths:
        hyperparams = load_hyperparams(args.algo, args.env)
        config = Config(args, hyperparams, root_dir)
        player_1_model_path, player_2_model_path = args.model_file_paths
    else:
        raise ValueError("Must specify 2 wandb_run_paths or 2 model_file_paths")
    print(args)

    set_seeds(args.seed, args.use_deterministic_algorithms)

    env_make_kwargs = (
        config.eval_hyperparams.get("env_overrides", {}).get("make_kwargs", {}).copy()
    )
    # Two selfplay sub-envs per visible env (one seat per player).
    env_make_kwargs["num_selfplay_envs"] = args.n_envs * 2
    env = make_eval_env(
        config,
        EnvHyperparams(**config.env_hyperparams),
        override_hparams={
            "n_envs": args.n_envs,
            # Player 2's model plays as a fixed bot in every env.
            "selfplay_bots": {
                player_2_model_path: args.n_envs,
            },
            # Disable checkpoint rotation/swapping: this is a fixed matchup.
            "self_play_kwargs": {
                "num_old_policies": 0,
                "save_steps": np.inf,
                "swap_steps": np.inf,
                "bot_always_player_2": True,
            },
            "bots": None,
            "make_kwargs": env_make_kwargs,
        },
        render=args.render,
        normalize_load_path=player_1_model_path,
    )
    if args.video_path:
        env = VecEpisodeRecorder(
            env, args.video_path, max_video_length=18000, num_episodes=args.n_episodes
        )
    device = get_device(config, env)
    policy = make_policy(
        config,
        env,
        device,
        load_path=player_1_model_path,
        **config.policy_hyperparams,
    ).eval()

    deterministic = (
        args.deterministic_eval
        if args.deterministic_eval is not None
        else config.eval_hyperparams.get("deterministic", True)
    )
    return Evaluation(
        policy,
        evaluate(
            env,
            policy,
            args.n_episodes,
            render=args.render,
            deterministic=deterministic,
            print_returns=not args.no_print_returns,
        ),
        config,
) | /rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/runner/selfplay_evaluate.py | 0.726134 | 0.21566 | selfplay_evaluate.py | pypi |
import os
import shutil
from dataclasses import dataclass
from typing import NamedTuple, Optional
from rl_algo_impls.runner.config import Config, EnvHyperparams, Hyperparams, RunArgs
from rl_algo_impls.runner.running_utils import (
get_device,
load_hyperparams,
make_policy,
set_seeds,
)
from rl_algo_impls.shared.callbacks.eval_callback import evaluate
from rl_algo_impls.shared.policy.policy import Policy
from rl_algo_impls.shared.stats import EpisodesStats
from rl_algo_impls.shared.vec_env import make_eval_env
@dataclass
class EvalArgs(RunArgs):
    """Arguments for evaluating a single trained model."""

    render: bool = True
    # Use the "best" checkpoint rather than the latest.
    best: bool = True
    # None keeps the env count from the config's env_hyperparams.
    n_envs: Optional[int] = 1
    n_episodes: int = 3
    # None defers to the config's eval_hyperparams "deterministic" setting.
    deterministic_eval: Optional[bool] = None
    no_print_returns: bool = False
    # If set ("entity/project/run_id"), download model + config from WandB.
    wandb_run_path: Optional[str] = None
class Evaluation(NamedTuple):
    """Result of evaluating a policy: the policy, its episode stats, and config."""

    policy: Policy
    stats: EpisodesStats
    config: Config
def evaluate_model(args: EvalArgs, root_dir: str) -> Evaluation:
    """Load a trained model (locally or from WandB) and evaluate it.

    When args.wandb_run_path is set, the run's config overrides args.algo,
    args.env, and seed, and the model archive is downloaded and unpacked into
    the downloaded-models directory. Otherwise hyperparameters and the model
    are loaded from the local project layout under root_dir.
    """
    if args.wandb_run_path:
        import wandb

        api = wandb.Api()
        run = api.run(args.wandb_run_path)
        params = run.config

        # The run's recorded training settings take precedence over CLI args.
        args.algo = params["algo"]
        args.env = params["env"]
        args.seed = params.get("seed", None)
        args.use_deterministic_algorithms = params.get(
            "use_deterministic_algorithms", True
        )

        config = Config(args, Hyperparams.from_dict_with_extra_fields(params), root_dir)
        model_path = config.model_dir_path(best=args.best, downloaded=True)

        model_archive_name = config.model_dir_name(best=args.best, extension=".zip")
        run.file(model_archive_name).download()
        if os.path.isdir(model_path):
            # Replace any previously downloaded copy.
            shutil.rmtree(model_path)
        shutil.unpack_archive(model_archive_name, model_path)
        os.remove(model_archive_name)
    else:
        hyperparams = load_hyperparams(args.algo, args.env)

        config = Config(args, hyperparams, root_dir)
        model_path = config.model_dir_path(best=args.best)

    print(args)

    set_seeds(args.seed, args.use_deterministic_algorithms)

    env = make_eval_env(
        config,
        EnvHyperparams(**config.env_hyperparams),
        override_hparams={"n_envs": args.n_envs} if args.n_envs else None,
        render=args.render,
        normalize_load_path=model_path,
    )
    device = get_device(config, env)
    policy = make_policy(
        config,
        env,
        device,
        load_path=model_path,
        **config.policy_hyperparams,
    ).eval()

    deterministic = (
        args.deterministic_eval
        if args.deterministic_eval is not None
        else config.eval_hyperparams.get("deterministic", True)
    )
    return Evaluation(
        policy,
        evaluate(
            env,
            policy,
            args.n_episodes,
            render=args.render,
            deterministic=deterministic,
            print_returns=not args.no_print_returns,
        ),
        config,
) | /rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/runner/evaluate.py | 0.680666 | 0.183832 | evaluate.py | pypi |
import dataclasses
import inspect
import itertools
import os
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List, Optional, Type, TypeVar, Union
RunArgsSelf = TypeVar("RunArgsSelf", bound="RunArgs")


@dataclass
class RunArgs:
    """Common command-line arguments identifying one training/eval run."""

    algo: str
    env: str
    seed: Optional[int] = None
    use_deterministic_algorithms: bool = True

    @classmethod
    def expand_from_dict(
        cls: Type[RunArgsSelf], d: Dict[str, Any]
    ) -> List[RunArgsSelf]:
        """Expand a dict whose algo/env/seed values may be scalars or lists
        into the cross-product of RunArgs.

        Generalized: a missing or None seed now yields a single run with
        seed=None instead of crashing in itertools.product. Extra keys in d
        are forwarded to the constructor unchanged.
        """

        def listify(v: Any) -> list:
            # Wrap scalars (str/int — bool included — and a missing/None seed)
            # so itertools.product always receives an iterable.
            if v is None or isinstance(v, (str, int)):
                return [v]
            return list(v)

        algos = listify(d["algo"])
        envs = listify(d["env"])
        seeds = listify(d.get("seed"))
        args = []
        for algo, env, seed in itertools.product(algos, envs, seeds):
            _d = d.copy()
            _d.update({"algo": algo, "env": env, "seed": seed})
            args.append(cls(**_d))
        return args
@dataclass
class EnvHyperparams:
    """Environment construction options, splatted from config env_hyperparams."""

    # Environment family (e.g. "gymvec"); interpreted by make_env — confirm
    # accepted values against the vec_env factory.
    env_type: str = "gymvec"
    n_envs: int = 1
    frame_stack: int = 1
    # Extra kwargs forwarded to the underlying env constructor.
    make_kwargs: Optional[Dict[str, Any]] = None
    no_reward_timeout_steps: Optional[int] = None
    no_reward_fire_steps: Optional[int] = None
    vec_env_class: str = "sync"
    normalize: bool = False
    normalize_kwargs: Optional[Dict[str, Any]] = None
    # Window length for rolling episode statistics.
    rolling_length: int = 100
    train_record_video: bool = False
    video_step_interval: Union[int, float] = 1_000_000
    initial_steps_to_truncate: Optional[int] = None
    clip_atari_rewards: bool = True
    normalize_type: Optional[str] = None
    mask_actions: bool = False
    # Scripted opponents: mapping of bot name -> count (presumably; confirm
    # against the env factory).
    bots: Optional[Dict[str, int]] = None
    self_play_kwargs: Optional[Dict[str, Any]] = None
    # Selfplay opponents: mapping of model path -> env count.
    selfplay_bots: Optional[Dict[str, int]] = None
HyperparamsSelf = TypeVar("HyperparamsSelf", bound="Hyperparams")


@dataclass
class Hyperparams:
    """Top-level hyperparameters for a run, grouped by subsystem."""

    device: str = "auto"
    n_timesteps: Union[int, float] = 100_000
    env_hyperparams: Dict[str, Any] = dataclasses.field(default_factory=dict)
    policy_hyperparams: Dict[str, Any] = dataclasses.field(default_factory=dict)
    algo_hyperparams: Dict[str, Any] = dataclasses.field(default_factory=dict)
    eval_hyperparams: Dict[str, Any] = dataclasses.field(default_factory=dict)
    env_id: Optional[str] = None
    additional_keys_to_log: List[str] = dataclasses.field(default_factory=list)
    reward_decay_callback: bool = False
    reward_decay_callback_kwargs: Dict[str, Any] = dataclasses.field(
        default_factory=dict
    )
    lux_hyperparam_transitions_kwargs: Dict[str, Any] = dataclasses.field(
        default_factory=dict
    )

    @classmethod
    def from_dict_with_extra_fields(
        cls: Type[HyperparamsSelf], d: Dict[str, Any]
    ) -> HyperparamsSelf:
        """Construct from a dict, silently dropping keys that aren't
        constructor parameters (e.g. extra WandB config entries)."""
        accepted = inspect.signature(cls).parameters
        kwargs = {key: value for key, value in d.items() if key in accepted}
        return cls(**kwargs)
@dataclass
class Config:
    """Derived configuration for one run: args + hyperparams + path layout."""

    args: RunArgs
    hyperparams: Hyperparams
    root_dir: str
    # NOTE(review): this default is evaluated once at class-definition time,
    # so every Config created in the same process shares the same run_id
    # unless one is passed explicitly — confirm this is intended.
    run_id: str = datetime.now().isoformat()

    def seed(self, training: bool = True) -> Optional[int]:
        """Training seed, or an offset seed (by n_envs) for evaluation."""
        seed = self.args.seed
        if training or seed is None:
            return seed
        return seed + self.env_hyperparams.get("n_envs", 1)

    @property
    def device(self) -> str:
        return self.hyperparams.device

    @property
    def n_timesteps(self) -> int:
        # Hyperparams allows float (e.g. 1e6 in YAML); coerce to int here.
        return int(self.hyperparams.n_timesteps)

    @property
    def env_hyperparams(self) -> Dict[str, Any]:
        return self.hyperparams.env_hyperparams

    @property
    def policy_hyperparams(self) -> Dict[str, Any]:
        return self.hyperparams.policy_hyperparams

    @property
    def algo_hyperparams(self) -> Dict[str, Any]:
        return self.hyperparams.algo_hyperparams

    @property
    def eval_hyperparams(self) -> Dict[str, Any]:
        return self.hyperparams.eval_hyperparams

    def eval_callback_params(self) -> Dict[str, Any]:
        """Eval hyperparams minus env_overrides (which only affect env setup)."""
        eval_hyperparams = self.eval_hyperparams.copy()
        if "env_overrides" in eval_hyperparams:
            del eval_hyperparams["env_overrides"]
        return eval_hyperparams

    @property
    def algo(self) -> str:
        return self.args.algo

    @property
    def env_id(self) -> str:
        # hyperparams.env_id overrides the CLI env name when present.
        return self.hyperparams.env_id or self.args.env

    @property
    def additional_keys_to_log(self) -> List[str]:
        return self.hyperparams.additional_keys_to_log

    def model_name(self, include_seed: bool = True) -> str:
        """Readable model identifier: algo-env[-S<seed>][-make_kwargs parts]."""
        # Use arg env name instead of environment name
        parts = [self.algo, self.args.env]
        if include_seed and self.args.seed is not None:
            parts.append(f"S{self.args.seed}")

        # Assume that the custom arg name already has the necessary information
        if not self.hyperparams.env_id:
            make_kwargs = self.env_hyperparams.get("make_kwargs", {})
            if make_kwargs:
                for k, v in make_kwargs.items():
                    if type(v) == bool and v:
                        parts.append(k)
                    elif type(v) == int and v:
                        parts.append(f"{k}{v}")
                    else:
                        parts.append(str(v))

        return "-".join(parts)

    def run_name(self, include_seed: bool = True) -> str:
        """Model name plus run_id, used for TensorBoard/WandB run naming."""
        parts = [self.model_name(include_seed=include_seed), self.run_id]
        return "-".join(parts)

    @property
    def saved_models_dir(self) -> str:
        return os.path.join(self.root_dir, "saved_models")

    @property
    def downloaded_models_dir(self) -> str:
        return os.path.join(self.root_dir, "downloaded_models")

    def model_dir_name(
        self,
        best: bool = False,
        extension: str = "",
    ) -> str:
        return self.model_name() + ("-best" if best else "") + extension

    def model_dir_path(self, best: bool = False, downloaded: bool = False) -> str:
        """Path to the saved (or WandB-downloaded) model directory."""
        return os.path.join(
            self.saved_models_dir if not downloaded else self.downloaded_models_dir,
            self.model_dir_name(best=best),
        )

    @property
    def runs_dir(self) -> str:
        return os.path.join(self.root_dir, "runs")

    @property
    def tensorboard_summary_path(self) -> str:
        return os.path.join(self.runs_dir, self.run_name())

    @property
    def logs_path(self) -> str:
        return os.path.join(self.runs_dir, f"log.yml")

    @property
    def videos_dir(self) -> str:
        return os.path.join(self.root_dir, "videos")

    @property
    def video_prefix(self) -> str:
        return os.path.join(self.videos_dir, self.model_name())
@property
def videos_path(self) -> str:
return os.path.join(self.videos_dir, self.model_name()) | /rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/runner/config.py | 0.856362 | 0.219066 | config.py | pypi |
import argparse
import json
import logging
import os
import random
from dataclasses import asdict
from pathlib import Path
from typing import Dict, Optional, Type, Union
import gym
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn
import yaml
from gym.spaces import Box, Discrete
from torch.utils.tensorboard.writer import SummaryWriter
from rl_algo_impls.a2c.a2c import A2C
from rl_algo_impls.dqn.dqn import DQN
from rl_algo_impls.dqn.policy import DQNPolicy
from rl_algo_impls.ppo.ppo import PPO
from rl_algo_impls.runner.config import Config, Hyperparams
from rl_algo_impls.runner.wandb_load import load_player
from rl_algo_impls.shared.algorithm import Algorithm
from rl_algo_impls.shared.callbacks.eval_callback import EvalCallback
from rl_algo_impls.shared.policy.actor_critic import ActorCritic
from rl_algo_impls.shared.policy.policy import Policy
from rl_algo_impls.shared.vec_env.utils import import_for_env_id, is_microrts
from rl_algo_impls.vpg.policy import VPGActorCritic
from rl_algo_impls.vpg.vpg import VanillaPolicyGradient
from rl_algo_impls.wrappers.vectorable_wrapper import VecEnv, single_observation_space
# Maps CLI algorithm abbreviation to its Algorithm implementation
ALGOS: Dict[str, Type[Algorithm]] = {
    "dqn": DQN,
    "vpg": VanillaPolicyGradient,
    "ppo": PPO,
    "a2c": A2C,
}
# Maps the same abbreviations to the Policy class each algorithm trains
POLICIES: Dict[str, Type[Policy]] = {
    "dqn": DQNPolicy,
    "vpg": VPGActorCritic,
    "ppo": ActorCritic,
    "a2c": ActorCritic,
}
# Directory (relative to the package root) containing per-algorithm YAML files
HYPERPARAMS_PATH = "hyperparams"
def base_parser(multiple: bool = True) -> argparse.ArgumentParser:
    """Build the shared CLI parser for algo/env/seed selection.

    When `multiple` is True each flag accepts several values so a single
    invocation can sweep over algorithms, environments, and seeds.
    """
    parser = argparse.ArgumentParser()
    multi_nargs = "+" if multiple else 1
    parser.add_argument(
        "--algo",
        default=["dqn"],
        type=str,
        choices=list(ALGOS.keys()),
        nargs=multi_nargs,
        help="Abbreviation(s) of algorithm(s)",
    )
    parser.add_argument(
        "--env",
        default=["CartPole-v1"],
        type=str,
        nargs=multi_nargs,
        help="Name of environment(s) in gym",
    )
    parser.add_argument(
        "--seed",
        default=[1],
        type=int,
        nargs="*" if multiple else "?",
        help="Seeds to run experiment. Unset will do one run with no set seed",
    )
    return parser
def load_hyperparams(algo: str, env_id: str) -> Hyperparams:
    """Load hyperparameters for `algo`/`env_id` from the bundled YAML file.

    Falls back to the `_atari` / `_microrts` defaults based on the env's
    entry point; raises ValueError when nothing matches.
    """
    root_path = Path(__file__).parent.parent
    with open(os.path.join(root_path, HYPERPARAMS_PATH, f"{algo}.yml"), "r") as f:
        hyperparams_dict = yaml.safe_load(f)

    if env_id in hyperparams_dict:
        return Hyperparams(**hyperparams_dict[env_id])

    # No exact entry: inspect the env's entry point to pick a family default
    import_for_env_id(env_id)
    entry_point_name = str(gym.spec(env_id).entry_point)  # type: ignore
    if "AtariEnv" in entry_point_name and "_atari" in hyperparams_dict:
        return Hyperparams(**hyperparams_dict["_atari"])
    if "gym_microrts" in entry_point_name and "_microrts" in hyperparams_dict:
        return Hyperparams(**hyperparams_dict["_microrts"])
    raise ValueError(f"{env_id} not specified in {algo} hyperparameters file")
def get_device(config: Config, env: VecEnv) -> torch.device:
    """Resolve the torch device for this run, downgrading when unavailable.

    Preference order is cuda -> mps -> cpu; simple observation spaces and
    MicroRTS are forced onto the CPU even when MPS exists.
    """
    device = config.device
    # "auto" prefers cuda
    if device == "auto":
        device = "cuda"
    # Apple MPS is the second choice when CUDA isn't available
    if device == "cuda" and not torch.cuda.is_available():
        device = "mps"
    # ...and CPU is the final fallback
    if device == "mps" and not torch.backends.mps.is_available():
        device = "cpu"
    if device == "mps":
        # Simple environments (Discrete or 1-D Box observations) might be
        # better served by the CPU than by MPS
        obs_space = single_observation_space(env)
        if isinstance(obs_space, Discrete) or (
            isinstance(obs_space, Box) and len(obs_space.shape) == 1
        ):
            device = "cpu"
        if is_microrts(config):
            device = "cpu"
    logging.info(f"Device: {device}")
    return torch.device(device)
def set_seeds(seed: Optional[int], use_deterministic_algorithms: bool) -> None:
    """Seed Python, NumPy, and torch RNGs; no-op when `seed` is None."""
    if seed is None:
        return
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    torch.backends.cudnn.benchmark = False
    torch.use_deterministic_algorithms(use_deterministic_algorithms)
    # Required by cuBLAS for deterministic behavior on CUDA
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
    # Stop warning and it would introduce stochasticity if I was using TF
    os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
def make_policy(
    config: Config,
    env: VecEnv,
    device: torch.device,
    load_path: Optional[str] = None,
    load_run_path: Optional[str] = None,
    load_run_path_best: bool = True,
    **kwargs,
) -> Policy:
    """Instantiate the policy for config.algo, optionally restoring weights.

    When `load_run_path` is set, the checkpoint is fetched from the matching
    Weights & Biases run (overriding any `load_path` passed in).
    """
    policy = POLICIES[config.algo](env, **kwargs).to(device)
    if load_run_path:
        # Imported lazily so wandb is only required when loading from a run
        import wandb

        _, _, load_path = load_player(
            wandb.Api(),
            load_run_path,
            config.args,
            config.root_dir,
            load_run_path_best,
        )
        assert load_path
    if load_path:
        policy.load(load_path)
    return policy
def plot_eval_callback(callback: EvalCallback, tb_writer: SummaryWriter, run_name: str):
    """Plot eval score mean/std/min-max over training steps into TensorBoard."""
    stats = callback.stats
    cumulative_steps = [(idx + 1) * callback.step_freq for idx in range(len(stats))]
    means = [s.score.mean for s in stats]

    figure = plt.figure()
    plt.plot(cumulative_steps, means, "b-", label="mean")
    plt.plot(
        cumulative_steps,
        [mean - s.score.std for mean, s in zip(means, stats)],
        "g--",
        label="mean-std",
    )
    # Shade between per-evaluation min and max scores
    plt.fill_between(
        cumulative_steps,
        [s.score.min for s in stats],  # type: ignore
        [s.score.max for s in stats],  # type: ignore
        facecolor="cyan",
        label="range",
    )
    plt.xlabel("Steps")
    plt.ylabel("Score")
    plt.legend()
    plt.title(f"Eval {run_name}")
    tb_writer.add_figure("eval", figure)
Scalar = Union[bool, str, float, int, None]


def hparam_dict(
    hyperparams: Hyperparams, args: Dict[str, Union[Scalar, list]]
) -> Dict[str, Scalar]:
    """Flatten hyperparams and CLI args into a TensorBoard-friendly dict.

    Nested dicts become "parent/child" keys; lists are JSON-encoded and any
    deeper nesting is stringified, since hparam values must be scalars.
    """
    flattened = {
        k: json.dumps(v) if isinstance(v, list) else v for k, v in args.items()
    }
    for k, v in asdict(hyperparams).items():
        if isinstance(v, dict):
            for sk, sv in v.items():
                flattened[f"{k}/{sk}"] = (
                    str(sv) if isinstance(sv, (dict, list)) else sv
                )
        elif isinstance(v, list):
            flattened[k] = json.dumps(v)
        else:
            flattened[k] = v  # type: ignore
    return flattened  # type: ignore
import copy
import logging
import random
from collections import deque
from typing import List, NamedTuple, Optional, TypeVar
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.tensorboard.writer import SummaryWriter
from rl_algo_impls.dqn.policy import DQNPolicy
from rl_algo_impls.shared.algorithm import Algorithm
from rl_algo_impls.shared.callbacks import Callback
from rl_algo_impls.shared.schedule import linear_schedule
from rl_algo_impls.wrappers.vectorable_wrapper import VecEnv, VecEnvObs
class Transition(NamedTuple):
    """One environment step, as stored in the replay buffer."""
    obs: np.ndarray  # observation the action was chosen from
    action: np.ndarray  # action taken
    reward: float  # reward received for the action
    done: bool  # True when this step ended the episode
    next_obs: np.ndarray  # observation after the step
class Batch(NamedTuple):
    """A minibatch of transitions, stacked field-wise along axis 0."""
    obs: np.ndarray
    actions: np.ndarray
    rewards: np.ndarray
    dones: np.ndarray
    next_obs: np.ndarray
class ReplayBuffer:
    """Fixed-capacity FIFO store of environment transitions."""

    def __init__(self, num_envs: int, maxlen: int) -> None:
        self.num_envs = num_envs
        # deque silently drops the oldest transitions once maxlen is reached
        self.buffer = deque(maxlen=maxlen)

    def add(
        self,
        obs: VecEnvObs,
        action: np.ndarray,
        reward: np.ndarray,
        done: np.ndarray,
        next_obs: VecEnvObs,
    ) -> None:
        """Append one transition per parallel environment."""
        assert isinstance(obs, np.ndarray)
        assert isinstance(next_obs, np.ndarray)
        for env_idx in range(self.num_envs):
            self.buffer.append(
                Transition(
                    obs[env_idx],
                    action[env_idx],
                    reward[env_idx],
                    done[env_idx],
                    next_obs[env_idx],
                )
            )

    def sample(self, batch_size: int) -> Batch:
        """Uniformly sample `batch_size` transitions, stacked column-wise."""
        transitions = random.sample(self.buffer, batch_size)
        # zip(*...) transposes the list of Transitions into per-field tuples,
        # in the same field order that Batch expects
        return Batch(*(np.array(column) for column in zip(*transitions)))

    def __len__(self) -> int:
        return len(self.buffer)
DQNSelf = TypeVar("DQNSelf", bound="DQN")


class DQN(Algorithm):
    """Deep Q-Network with experience replay and a separate target network."""

    def __init__(
        self,
        policy: DQNPolicy,
        env: VecEnv,
        device: torch.device,
        tb_writer: SummaryWriter,
        learning_rate: float = 1e-4,
        buffer_size: int = 1_000_000,
        learning_starts: int = 50_000,
        batch_size: int = 32,
        tau: float = 1.0,
        gamma: float = 0.99,
        train_freq: int = 4,
        gradient_steps: int = 1,
        target_update_interval: int = 10_000,
        exploration_fraction: float = 0.1,
        exploration_initial_eps: float = 1.0,
        exploration_final_eps: float = 0.05,
        max_grad_norm: float = 10.0,
    ) -> None:
        super().__init__(policy, env, device, tb_writer)
        self.policy = policy
        self.optimizer = Adam(self.policy.q_net.parameters(), lr=learning_rate)
        # Frozen copy of the online Q-network; refreshed only in _update_target
        self.target_q_net = copy.deepcopy(self.policy.q_net).to(self.device)
        self.target_q_net.train(False)
        # tau = 1.0 is a hard copy; tau < 1 gives a Polyak (soft) update
        self.tau = tau
        self.target_update_interval = target_update_interval
        self.replay_buffer = ReplayBuffer(self.env.num_envs, buffer_size)
        self.batch_size = batch_size
        self.learning_starts = learning_starts
        self.train_freq = train_freq
        self.gradient_steps = gradient_steps
        self.gamma = gamma
        # Epsilon decays linearly over the first `exploration_fraction` of training
        self.exploration_eps_schedule = linear_schedule(
            exploration_initial_eps,
            exploration_final_eps,
            end_fraction=exploration_fraction,
        )
        self.max_grad_norm = max_grad_norm

    def learn(
        self: DQNSelf, total_timesteps: int, callbacks: Optional[List[Callback]] = None
    ) -> DQNSelf:
        """Train for `total_timesteps` env steps (including the warm-up)."""
        self.policy.train(True)
        obs = self.env.reset()
        # Warm up the replay buffer with fully random (eps=1) behavior
        obs = self._collect_rollout(self.learning_starts, obs, 1)
        learning_steps = total_timesteps - self.learning_starts
        timesteps_elapsed = 0
        steps_since_target_update = 0
        while timesteps_elapsed < learning_steps:
            progress = timesteps_elapsed / learning_steps
            eps = self.exploration_eps_schedule(progress)
            obs = self._collect_rollout(self.train_freq, obs, eps)
            rollout_steps = self.train_freq
            timesteps_elapsed += rollout_steps
            # gradient_steps <= 0 means one gradient step per collected step
            for _ in range(
                self.gradient_steps if self.gradient_steps > 0 else self.train_freq
            ):
                self.train()
            steps_since_target_update += rollout_steps
            if steps_since_target_update >= self.target_update_interval:
                self._update_target()
                steps_since_target_update = 0
            if callbacks:
                if not all(
                    c.on_step(timesteps_elapsed=rollout_steps) for c in callbacks
                ):
                    logging.info(
                        f"Callback terminated training at {timesteps_elapsed} timesteps"
                    )
                    break
        return self

    def train(self) -> None:
        """Perform one TD-learning gradient step on a sampled minibatch."""
        if len(self.replay_buffer) < self.batch_size:
            return
        o, a, r, d, next_o = self.replay_buffer.sample(self.batch_size)
        o = torch.as_tensor(o, device=self.device)
        a = torch.as_tensor(a, device=self.device).unsqueeze(1)
        r = torch.as_tensor(r, dtype=torch.float32, device=self.device)
        d = torch.as_tensor(d, dtype=torch.long, device=self.device)
        next_o = torch.as_tensor(next_o, device=self.device)
        with torch.no_grad():
            # Bootstrapped TD target; (1 - d) zeroes the terminal-state value
            target = r + (1 - d) * self.gamma * self.target_q_net(next_o).max(1).values
        current = self.policy.q_net(o).gather(dim=1, index=a).squeeze(1)
        # Huber loss is less sensitive to outlier TD errors than MSE
        loss = F.smooth_l1_loss(current, target)
        self.optimizer.zero_grad()
        loss.backward()
        if self.max_grad_norm:
            nn.utils.clip_grad_norm_(self.policy.q_net.parameters(), self.max_grad_norm)
        self.optimizer.step()

    def _collect_rollout(self, timesteps: int, obs: VecEnvObs, eps: float) -> VecEnvObs:
        """Step the env ~`timesteps` times with eps-greedy actions, filling the buffer."""
        for _ in range(0, timesteps, self.env.num_envs):
            action = self.policy.act(obs, eps, deterministic=False)
            next_obs, reward, done, _ = self.env.step(action)
            self.replay_buffer.add(obs, action, reward, done, next_obs)
            obs = next_obs
        return obs

    def _update_target(self) -> None:
        """Polyak-average the online network weights into the target network."""
        for target_param, param in zip(
            self.target_q_net.parameters(), self.policy.q_net.parameters()
        ):
            target_param.data.copy_(
                self.tau * param.data + (1 - self.tau) * target_param.data
            )
import numpy as np
from gym.wrappers.monitoring.video_recorder import VideoRecorder
from rl_algo_impls.wrappers.vectorable_wrapper import (
VecEnvObs,
VecEnvStepReturn,
VectorableWrapper,
)
class VecEpisodeRecorder(VectorableWrapper):
    """Records video of a VecEnv's first env until enough episodes complete."""

    def __init__(
        self, env, base_path: str, max_video_length: int = 3600, num_episodes: int = 1
    ):
        super().__init__(env)
        self.base_path = base_path
        self.max_video_length = max_video_length
        self.num_episodes = num_episodes
        self.video_recorder = None
        self.recorded_frames = 0
        self.num_completed = 0

    def step(self, actions: np.ndarray) -> VecEnvStepReturn:
        obs, rew, dones, infos = self.env.step(actions)
        # Only the first env is recorded
        if self.video_recorder:
            self._record_step(bool(dones[0]), infos[0])
        return obs, rew, dones, infos

    def _record_step(self, first_done: bool, first_info: dict) -> None:
        """Capture one frame and update episode bookkeeping for env 0."""
        self.video_recorder.capture_frame()
        self.recorded_frames += 1
        if first_done:
            self.num_completed += 1
            ep_info = first_info.get("episode")
            if ep_info:
                # Convert numpy scalars to plain values for the metadata JSON
                episode_info = {
                    k: v.item() if hasattr(v, "item") else v
                    for k, v in ep_info.items()
                }
                self.video_recorder.metadata.setdefault("episodes", []).append(
                    episode_info
                )
        if (
            self.num_completed == self.num_episodes
            or self.recorded_frames > self.max_video_length
        ):
            self._close_video_recorder()

    def reset(self) -> VecEnvObs:
        obs = self.env.reset()
        self._start_video_recorder()
        return obs

    def _start_video_recorder(self) -> None:
        self._close_video_recorder()
        self.video_recorder = VideoRecorder(
            self.env,
            base_path=self.base_path,
        )
        self.video_recorder.capture_frame()
        self.recorded_frames = 1

    def _close_video_recorder(self) -> None:
        if self.video_recorder:
            self.video_recorder.close()
            self.video_recorder = None
from typing import Tuple, TypeVar
import gym
import numpy as np
from numpy.typing import NDArray
from rl_algo_impls.wrappers.vectorable_wrapper import (
VectorableWrapper,
single_observation_space,
)
RunningMeanStdSelf = TypeVar("RunningMeanStdSelf", bound="RunningMeanStd")


class RunningMeanStd:
    """Tracks a running mean and variance using a parallel-update rule."""

    def __init__(self, episilon: float = 1e-4, shape: Tuple[int, ...] = ()) -> None:
        # `episilon` (sic; name kept for interface compatibility) seeds the
        # count so the first update never divides by zero
        self.mean = np.zeros(shape, np.float64)
        self.var = np.ones(shape, np.float64)
        self.count = episilon

    def update(self, x: NDArray) -> None:
        """Fold a batch (stacked along axis 0) into the running statistics."""
        batch_count = x.shape[0]
        batch_mean = np.mean(x, axis=0)
        batch_var = np.var(x, axis=0)
        new_count = self.count + batch_count
        mean_shift = batch_mean - self.mean
        # Combine the two (mean, var, count) summaries into one (Chan et al.)
        combined_m2 = (
            self.var * self.count
            + batch_var * batch_count
            + np.square(mean_shift) * self.count * batch_count / new_count
        )
        self.mean = self.mean + mean_shift * batch_count / new_count
        self.var = combined_m2 / new_count
        self.count = new_count

    def save(self, path: str) -> None:
        """Persist the statistics to a compressed .npz archive at `path`."""
        np.savez_compressed(path, mean=self.mean, var=self.var, count=self.count)

    def load(self, path: str) -> None:
        """Restore statistics previously written by `save`."""
        data = np.load(path)
        self.mean = data["mean"]
        self.var = data["var"]
        self.count = data["count"]

    def load_from(self: RunningMeanStdSelf, existing: RunningMeanStdSelf) -> None:
        """Copy another tracker's statistics into this one."""
        self.mean = np.copy(existing.mean)
        self.var = np.copy(existing.var)
        self.count = np.copy(existing.count)
NormalizeObservationSelf = TypeVar(
    "NormalizeObservationSelf", bound="NormalizeObservation"
)


class NormalizeObservation(VectorableWrapper):
    """Standardizes observations with running mean/std statistics."""

    def __init__(
        self,
        env: gym.Env,
        training: bool = True,
        epsilon: float = 1e-8,
        clip: float = 10.0,
    ) -> None:
        super().__init__(env)
        self.rms = RunningMeanStd(shape=single_observation_space(env).shape)
        self.training = training
        self.epsilon = epsilon
        self.clip = clip

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        return self.normalize(obs), reward, done, info

    def reset(self, **kwargs):
        return self.normalize(self.env.reset(**kwargs))

    def normalize(self, obs: NDArray) -> NDArray:
        """Z-score and clip `obs`; updates running stats while training."""
        # Non-vector envs are temporarily batched so the stats update is uniform
        batched = obs if self.is_vector_env else np.array([obs])
        if self.training:
            self.rms.update(batched)
        scaled = (batched - self.rms.mean) / np.sqrt(self.rms.var + self.epsilon)
        clipped = np.clip(scaled, -self.clip, self.clip)
        return clipped if self.is_vector_env else clipped[0]

    def save(self, path: str) -> None:
        self.rms.save(path)

    def load(self, path: str) -> None:
        self.rms.load(path)

    def load_from(
        self: NormalizeObservationSelf, existing: NormalizeObservationSelf
    ) -> None:
        self.rms.load_from(existing.rms)
NormalizeRewardSelf = TypeVar("NormalizeRewardSelf", bound="NormalizeReward")


class NormalizeReward(VectorableWrapper):
    """Scales rewards by the running std of the discounted return."""

    def __init__(
        self,
        env: gym.Env,
        training: bool = True,
        gamma: float = 0.99,
        epsilon: float = 1e-8,
        clip: float = 10.0,
    ) -> None:
        super().__init__(env)
        self.rms = RunningMeanStd(shape=())
        self.training = training
        self.gamma = gamma
        self.epsilon = epsilon
        self.clip = clip
        self.returns = np.zeros(self.num_envs)

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        if self.is_vector_env:
            reward = self.normalize(reward)
            dones = done
        else:
            # Batch the scalar reward so normalize sees a uniform shape
            reward = self.normalize(np.array([reward]))[0]
            dones = np.array([done])
        # Restart the discounted-return accumulator for finished episodes
        self.returns[dones] = 0
        return obs, reward, done, info

    def reset(self, **kwargs):
        self.returns = np.zeros(self.num_envs)
        return self.env.reset(**kwargs)

    def normalize(self, rewards):
        """Divide rewards by the running std of discounted returns, clipped."""
        if self.training:
            self.returns = self.returns * self.gamma + rewards
            self.rms.update(self.returns)
        return np.clip(
            rewards / np.sqrt(self.rms.var + self.epsilon), -self.clip, self.clip
        )

    def save(self, path: str) -> None:
        self.rms.save(path)

    def load(self, path: str) -> None:
        self.rms.load(path)

    def load_from(self: NormalizeRewardSelf, existing: NormalizeRewardSelf) -> None:
        self.rms.load_from(existing.rms)
from typing import Tuple, Union
import gym
import numpy as np
from gym.wrappers.monitoring.video_recorder import VideoRecorder
from rl_algo_impls.wrappers.vectorable_wrapper import VectorableWrapper
ObsType = Union[np.ndarray, dict]
ActType = Union[int, float, np.ndarray, dict]


class EpisodeRecordVideo(VectorableWrapper):
    """Periodically records videos: one recording every `video_step_interval` steps.

    Recording begins at an episode boundary (reset) once the step budget has
    passed, and stops at the next reset or at `max_video_length` frames.
    """

    def __init__(
        self,
        env: gym.Env,
        video_path_prefix: str,
        # step_increment lets a caller that advances several env-steps per
        # step() call keep total_steps accurate
        step_increment: int = 1,
        video_step_interval: int = 1_000_000,
        max_video_length: int = 3600,
    ) -> None:
        super().__init__(env)
        self.video_path_prefix = video_path_prefix
        self.step_increment = step_increment
        self.video_step_interval = video_step_interval
        self.max_video_length = max_video_length
        self.total_steps = 0
        self.next_record_video_step = 0
        self.video_recorder = None
        self.recorded_frames = 0

    def step(self, action: ActType) -> Tuple[ObsType, float, bool, dict]:
        obs, rew, done, info = self.env.step(action)
        self.total_steps += self.step_increment
        # Using first env to record episodes
        if self.video_recorder:
            self.video_recorder.capture_frame()
            self.recorded_frames += 1
            if info.get("episode"):
                # Convert numpy scalars to plain Python values for metadata
                episode_info = {
                    k: v.item() if hasattr(v, "item") else v
                    for k, v in info["episode"].items()
                }
                self.video_recorder.metadata["episode"] = episode_info
            # Safety cap: stop a runaway recording even mid-episode
            if self.recorded_frames > self.max_video_length:
                self._close_video_recorder()
        return obs, rew, done, info

    def reset(self, **kwargs) -> ObsType:
        obs = self.env.reset(**kwargs)
        # An active recording always ends at the episode boundary; a new one
        # only starts on a reset after total_steps passes the scheduled step
        if self.video_recorder:
            self._close_video_recorder()
        elif self.total_steps >= self.next_record_video_step:
            self._start_video_recorder()
        return obs

    def _start_video_recorder(self) -> None:
        self._close_video_recorder()
        video_path = f"{self.video_path_prefix}-{self.next_record_video_step}"
        self.video_recorder = VideoRecorder(
            self.env,
            base_path=video_path,
            metadata={"step": self.total_steps},
        )
        self.video_recorder.capture_frame()
        self.recorded_frames = 1
        # Schedule the following recording window
        self.next_record_video_step += self.video_step_interval

    def _close_video_recorder(self) -> None:
        if self.video_recorder:
            self.video_recorder.close()
        self.video_recorder = None
        self.recorded_frames = 0
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Type, TypeVar
import numpy as np
from gym import Wrapper
from gym.spaces import Box, MultiDiscrete
from gym.spaces import Tuple as TupleSpace
from gym.vector.utils import batch_space
from luxai_s2.env import LuxAI_S2
from luxai_s2.state import ObservationStateDict
from rl_algo_impls.shared.lux.actions import (
ACTION_SIZES,
enqueued_action_from_obs,
to_lux_actions,
)
from rl_algo_impls.shared.lux.early import bid_action, place_factory_action
from rl_algo_impls.shared.lux.observation import observation_and_action_mask
from rl_algo_impls.shared.lux.stats import StatsTracking
from rl_algo_impls.shared.schedule import lerp
LuxRewardWeightsSelf = TypeVar("LuxRewardWeightsSelf", bound="LuxRewardWeights")


class LuxRewardWeights(NamedTuple):
    """Weights applied element-wise to the raw reward vector.

    Field order matters: LuxEnvGridnet._from_lux_rewards multiplies
    np.array(self) against [win_loss, score_delta] followed by the stats
    delta columns, so fields must stay aligned with that vector.
    """
    # End-game rewards
    win_loss: float = 0
    score_vs_opponent: float = 0  # clipped between +/- 1
    # Change in value stats
    ice_generation: float = 0
    ore_generation: float = 0
    water_generation: float = 0
    metal_generation: float = 0
    power_generation: float = 0
    lichen_delta: float = 0
    built_light: float = 0
    built_heavy: float = 0
    lost_factory: float = 0
    # Current value stats
    factories_alive: float = 0
    heavies_alive: float = 0
    lights_alive: float = 0
    # Change in value stats vs opponent
    lichen_delta_vs_opponent: float = 0

    @classmethod
    def sparse(cls: Type[LuxRewardWeightsSelf]) -> LuxRewardWeightsSelf:
        """Pure win/loss signal with no shaping."""
        return cls(win_loss=1)

    @classmethod
    def default_start(cls: Type[LuxRewardWeightsSelf]) -> LuxRewardWeightsSelf:
        """Shaped weights intended for the start of training."""
        return cls(
            ice_generation=0.01,  # 2 for a day of water for factory, 0.2 for a heavy dig action
            ore_generation=2e-3,  # 1 for building a heavy robot, 0.04 for a heavy dig action
            water_generation=0.04,  # 2 for a day of water for factory
            metal_generation=0.01,  # 1 for building a heavy robot, 0.04 for a heavy dig action
            power_generation=0.0004,  # factory 1/day, heavy 0.12/day, light 0.012/day, lichen 0.02/day
            lost_factory=-1,
        )

    @classmethod
    def lerp(
        cls: Type[LuxRewardWeightsSelf],
        start: Dict[str, float],
        end: Dict[str, float],
        progress: float,
    ) -> LuxRewardWeightsSelf:
        """Linearly interpolate between two weight dicts at `progress` in [0, 1]."""
        return cls(*lerp(np.array(cls(**start)), np.array(cls(**end)), progress))
class LuxEnvGridnet(Wrapper):
    """Exposes a 2-player LuxAI_S2 match as a 2-slot Gridnet vector env.

    Each player occupies one env slot with a per-map-tile MultiDiscrete
    action space and a per-tile action mask. Rewards are the dot product of
    a raw reward vector (end-game results + stat deltas) with the
    LuxRewardWeights tuple.
    """

    def __init__(
        self,
        env,
        bid_std_dev: float = 5,
        reward_weights: Optional[Dict[str, float]] = None,
    ) -> None:
        super().__init__(env)
        # Std dev of the random opening bids made on reset
        self.bid_std_dev = bid_std_dev
        if reward_weights is None:
            self.reward_weights = LuxRewardWeights.default_start()
        else:
            self.reward_weights = LuxRewardWeights(**reward_weights)
        self.map_size = self.unwrapped.env_cfg.map_size
        self.stats = StatsTracking()
        self.num_map_tiles = self.map_size * self.map_size
        # Action components for one tile...
        self.action_plane_space = MultiDiscrete(ACTION_SIZES)
        # ...flattened across every tile for a single player's action space
        self.single_action_space = MultiDiscrete(
            np.array(ACTION_SIZES * self.num_map_tiles).flatten().tolist()
        )
        self.action_space = TupleSpace((self.single_action_space,) * 2)
        self.action_mask_shape = (
            self.num_map_tiles,
            self.action_plane_space.nvec.sum(),
        )
        # A reset is needed to discover the observation shape
        observation_sample = self.reset()
        single_obs_shape = observation_sample.shape[1:]
        self.single_observation_space = Box(
            low=0,
            high=1,
            shape=single_obs_shape,
            dtype=np.float32,
        )
        self.observation_space = batch_space(self.single_observation_space, n=2)
        # Last enqueued action per unit id (None when nothing is queued)
        self._enqueued_actions: Dict[str, Optional[np.ndarray]] = {}
        self._action_mask: Optional[np.ndarray] = None

    @property
    def unwrapped(self) -> LuxAI_S2:
        unwrapped = super().unwrapped
        assert isinstance(unwrapped, LuxAI_S2)
        return unwrapped

    def step(
        self, action: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[Dict[str, Any]],]:
        """Step both players at once; auto-resets when the match ends."""
        env = self.unwrapped
        lux_actions = self._to_lux_actions(action)
        lux_obs, lux_rewards, done, info = env.step(lux_actions)
        all_done = all(done.values())
        rewards = self._from_lux_rewards(lux_rewards, all_done, info)
        if all_done:
            obs = self.reset()
        else:
            assert not any(done.values()), "All or none should be done"
            # Remember each unit's queued action for masking on the next turn
            self._enqueued_actions = {
                u_id: enqueued_action_from_obs(u["action_queue"])
                for p in self.agents
                for u_id, u in lux_obs[p]["units"][p].items()
            }
            obs = self._from_lux_observation(lux_obs)
        return (
            obs,
            rewards,
            np.array([done[p] for p in self.agents]),
            [info[p] for p in self.agents],
        )

    def reset(self) -> np.ndarray:
        """Reset and play bidding/placement before handing back control."""
        lux_obs, self.agents = reset_and_early_phase(self.unwrapped, self.bid_std_dev)
        self._enqueued_actions = {}
        self.stats.reset(self.unwrapped)
        return self._from_lux_observation(lux_obs)

    def _from_lux_observation(
        self, lux_obs: Dict[str, ObservationStateDict]
    ) -> np.ndarray:
        """Build stacked per-player observations; caches the action masks."""
        observations = []
        action_masks = []
        for player in self.agents:
            obs, action_mask = observation_and_action_mask(
                player,
                lux_obs[player],
                self.env.state,
                self.action_mask_shape,
                self._enqueued_actions,
            )
            observations.append(obs)
            action_masks.append(action_mask)
        self._action_mask = np.stack(action_masks)
        return np.stack(observations)

    def get_action_mask(self) -> np.ndarray:
        # Populated by _from_lux_observation; valid for the current turn only
        assert self._action_mask is not None
        return self._action_mask

    def _to_lux_actions(self, actions: np.ndarray) -> Dict[str, Any]:
        """Convert per-tile gridnet actions into LuxAI_S2 action dicts."""
        action_mask = self._action_mask
        assert action_mask is not None
        return {
            p: to_lux_actions(
                p,
                self.env.state,
                actions[p_idx],
                action_mask[p_idx],
                self._enqueued_actions,
                self.stats.action_stats[p_idx],
            )
            for p_idx, p in enumerate(self.agents)
        }

    def _from_lux_rewards(
        self, lux_rewards: Dict[str, float], done: bool, info: Dict[str, Any]
    ) -> np.ndarray:
        """Weighted sum of end-game results and per-turn stat deltas.

        The raw reward columns are [win_loss, score_delta] followed by the
        StatsTracking delta columns — the same order as LuxRewardWeights.
        """
        agents = self.agents
        player_opponent = tuple((p, opp) for p, opp in zip(agents, reversed(agents)))
        if done:
            _win_loss = np.array(
                [
                    1
                    if lux_rewards[p] > lux_rewards[opp]
                    else (-1 if lux_rewards[p] < lux_rewards[opp] else 0)
                    for p, opp in player_opponent
                ]
            )
            # Clip the score margin so its weighted contribution is at most 1
            _max_score_delta = (
                1 / self.reward_weights.score_vs_opponent
                if not np.isclose(self.reward_weights.score_vs_opponent, 0)
                else np.inf
            )
            _score_delta = np.clip(
                np.array(
                    [lux_rewards[p] - lux_rewards[opp] for p, opp in player_opponent]
                ),
                -_max_score_delta,
                _max_score_delta,
            )
            _done_rewards = np.concatenate(
                [
                    np.expand_dims(_win_loss, axis=-1),
                    np.expand_dims(_score_delta, axis=-1),
                ],
                axis=-1,
            )
            # Surface per-agent stats and match results for loggers
            for idx, agent in enumerate(self.agents):
                agent_stats = self.stats.agent_stats[idx]
                info[agent]["stats"] = dict(
                    zip(agent_stats.NAMES, agent_stats.stats.tolist())
                )
                state_agent_stats = self.unwrapped.state.stats[agent]
                actions_success = state_agent_stats["action_queue_updates_success"]
                actions_total = state_agent_stats["action_queue_updates_total"]
                info[agent]["stats"]["actions_success"] = actions_success
                info[agent]["stats"]["actions_failed"] = actions_total - actions_success
                info[agent]["stats"].update(
                    self.stats.action_stats[idx].stats_dict(prefix="actions_")
                )
                info[agent]["results"] = {
                    "WinLoss": _win_loss[idx],
                    "win": int(_win_loss[idx] == 1),
                    "loss": int(_win_loss[idx] == -1),
                    "score": lux_rewards[agent],
                    "score_delta": lux_rewards[agent]
                    - lux_rewards[player_opponent[idx][1]],
                }
        else:
            # No end-game columns mid-match
            _done_rewards = np.zeros((2, 2))
        _stats_delta = self.stats.update()
        raw_rewards = np.concatenate(
            [
                _done_rewards,
                _stats_delta,
            ],
            axis=-1,
        )
        reward_weights = np.array(self.reward_weights)
        return np.sum(raw_rewards * reward_weights, axis=-1)
def bid_actions(agents: List[str], bid_std_dev: float) -> Dict[str, Any]:
    """Generate an opening bid (with a fixed faction choice) per player."""
    factions = ("AlphaStrike", "MotherMars")
    return {
        player: bid_action(bid_std_dev, faction)
        for player, faction in zip(agents, factions)
    }
def place_factory_actions(env: LuxAI_S2) -> Dict[str, Any]:
    """Collect a factory-placement action from every player for this turn."""
    actions = {
        p: place_factory_action(env.state, env.agents, p_idx)
        for p_idx, p in enumerate(env.agents)
    }
    # NOTE(review): this filters on the truthiness of the player name key,
    # which is always truthy, so nothing is ever dropped; it looks like it was
    # meant to drop empty/None actions (`if v`) — confirm the intent.
    actions = {k: v for k, v in actions.items() if k}
    return actions
def reset_and_early_phase(
    env: LuxAI_S2, bid_std_dev: float
) -> Tuple[Dict[str, ObservationStateDict], List[str]]:
    """Reset `env` and play out the bidding and factory-placement phase.

    Returns the first regular-phase observations plus the agent ids so the
    gridnet wrapper can take over from the first normal turn.
    """
    env.reset()
    agents = env.agents
    env.step(bid_actions(env.agents, bid_std_dev))
    # Setup phase: players place factories until real env steps begin
    while env.state.real_env_steps < 0:
        env.step(place_factory_actions(env))
    # Queue action 1 for every factory (initial robot, per the helper's name)
    lux_obs, _, _, _ = env.step(place_initial_robot_action(env))
    return lux_obs, agents
def place_initial_robot_action(env: LuxAI_S2) -> Dict[str, Any]:
    """For every player, map each of their factory ids to action 1."""
    actions: Dict[str, Any] = {}
    for player in env.agents:
        actions[player] = {factory_id: 1 for factory_id in env.state.factories[player]}
    return actions
from typing import Optional, Tuple, Union
import gym
import numpy as np
from rl_algo_impls.wrappers.vectorable_wrapper import VectorableWrapper
ObsType = Union[np.ndarray, dict]
ActType = Union[int, float, np.ndarray, dict]


class NoRewardTimeout(VectorableWrapper):
    """Intervenes in episodes that go too long without any reward.

    After `n_fire_steps` rewardless steps the FIRE action is forced (for
    games whose action set includes FIRE); after `n_timeout_steps` the
    episode is terminated early.
    """

    def __init__(
        self, env: gym.Env, n_timeout_steps: int, n_fire_steps: Optional[int] = None
    ) -> None:
        super().__init__(env)
        self.n_timeout_steps = n_timeout_steps
        self.n_fire_steps = n_fire_steps
        self.fire_act = None
        if n_fire_steps is not None:
            action_meanings = env.unwrapped.get_action_meanings()
            assert "FIRE" in action_meanings
            self.fire_act = action_meanings.index("FIRE")
        self._reset_state()

    def step(self, action: ActType) -> Tuple[ObsType, float, bool, dict]:
        # Force FIRE exactly when the rewardless streak hits n_fire_steps
        if self.steps_since_reward == self.n_fire_steps:
            assert self.fire_act is not None
            self.print_intervention("Force fire action")
            action = self.fire_act
        obs, rew, done, info = self.env.step(action)
        self.episode_score += rew
        self.episode_step_idx += 1
        if rew != 0 or done:
            self.steps_since_reward = 0
        else:
            self.steps_since_reward += 1
        if self.steps_since_reward >= self.n_timeout_steps:
            self.print_intervention("Early terminate")
            done = True
        return obs, rew, done, info

    def reset(self, **kwargs) -> ObsType:
        self._reset_state()
        return self.env.reset(**kwargs)

    def _reset_state(self) -> None:
        self.steps_since_reward = 0
        self.episode_score = 0
        self.episode_step_idx = 0

    def print_intervention(self, tag: str) -> None:
        print(
            f"{self.__class__.__name__}: {tag} | "
            f"Score: {self.episode_score} | "
            f"Length: {self.episode_step_idx}"
        )
from typing import Any, Dict, List, Optional
import numpy as np
from rl_algo_impls.wrappers.vectorable_wrapper import (
VecEnvObs,
VecEnvStepReturn,
VectorableWrapper,
)
class MicrortsStatsRecorder(VectorableWrapper):
    """Accumulates MicroRTS raw reward components and per-episode results."""

    def __init__(
        self, env, gamma: float, bots: Optional[Dict[str, int]] = None
    ) -> None:
        super().__init__(env)
        self.gamma = gamma
        self.raw_rewards = [[] for _ in range(self.num_envs)]
        self.bots = bots
        if self.bots:
            # Bot-controlled envs occupy the tail indices; selfplay slots get None
            num_selfplay = env.num_envs - sum(self.bots.values())
            self.bot_at_index = [None] * num_selfplay
            for bot_name, count in self.bots.items():
                self.bot_at_index.extend([bot_name] * count)
        else:
            self.bot_at_index = [None] * env.num_envs

    def reset(self) -> VecEnvObs:
        obs = super().reset()
        self.raw_rewards = [[] for _ in range(self.num_envs)]
        return obs

    def step(self, actions: np.ndarray) -> VecEnvStepReturn:
        obs, rews, dones, infos = self.env.step(actions)
        self._update_infos(infos, dones)
        return obs, rews, dones, infos

    def _update_infos(self, infos: List[Dict[str, Any]], dones: np.ndarray) -> None:
        """Record raw rewards; on episode end, attach summed stats to info."""
        for idx, (info, done) in enumerate(zip(infos, dones)):
            self.raw_rewards[idx].append(info["raw_rewards"])
            if not done:
                continue
            raw_rewards = np.array(self.raw_rewards[idx]).sum(0)
            raw_names = [str(rf) for rf in self.env.unwrapped.rfs]
            info["microrts_stats"] = dict(zip(raw_names, raw_rewards))
            winloss = raw_rewards[raw_names.index("WinLossRewardFunction")]
            microrts_results = {
                "win": int(winloss == 1),
                "draw": int(winloss == 0),
                "loss": int(winloss == -1),
            }
            bot = self.bot_at_index[idx]
            if bot:
                # Duplicate results under bot-suffixed keys for per-bot logging
                microrts_results.update(
                    {f"{k}_{bot}": v for k, v in microrts_results.items()}
                )
            info["microrts_results"] = microrts_results
            self.raw_rewards[idx] = []
from collections import deque
from typing import Any, Dict, List, Optional
import numpy as np
from torch.utils.tensorboard.writer import SummaryWriter
from rl_algo_impls.shared.stats import Episode, EpisodesStats
from rl_algo_impls.wrappers.vectorable_wrapper import (
VecEnvObs,
VecEnvStepReturn,
VectorableWrapper,
)
class EpisodeStatsWriter(VectorableWrapper):
    """Logs per-episode stats to TensorBoard: both the episodes finishing this
    step and a rolling window of the most recent `rolling_length` episodes.

    Relies on an upstream wrapper putting an "episode" dict (with "r" reward
    and "l" length) into info on episode end — the gym RecordEpisodeStatistics
    convention.
    """

    def __init__(
        self,
        env,
        tb_writer: SummaryWriter,
        training: bool = True,
        rolling_length: int = 100,
        additional_keys_to_log: Optional[List[str]] = None,
    ):
        super().__init__(env)
        self.training = training  # selects the "train" vs "eval" tb tag
        self.tb_writer = tb_writer
        self.rolling_length = rolling_length
        # deque(maxlen=...) keeps only the newest rolling_length episodes.
        self.episodes = deque(maxlen=rolling_length)
        self.total_steps = 0
        self.episode_cnt = 0
        self.last_episode_cnt_print = 0
        # Extra info keys copied verbatim into each Episode's info dict.
        self.additional_keys_to_log = (
            additional_keys_to_log if additional_keys_to_log is not None else []
        )

    def step(self, actions: np.ndarray) -> VecEnvStepReturn:
        obs, rews, dones, infos = self.env.step(actions)
        self._record_stats(infos)
        return obs, rews, dones, infos

    # Support for stable_baselines3.common.vec_env.VecEnvWrapper
    def step_wait(self) -> VecEnvStepReturn:
        obs, rews, dones, infos = self.env.step_wait()
        self._record_stats(infos)
        return obs, rews, dones, infos

    def _record_stats(self, infos: List[Dict[str, Any]]) -> None:
        """Collect finished episodes from infos and write step + rolling stats."""
        # Count one step per sub-env (falls back to 1 for non-vector envs).
        self.total_steps += getattr(self.env, "num_envs", 1)
        step_episodes = []
        for info in infos:
            ep_info = info.get("episode")
            if ep_info:
                additional_info = {k: info[k] for k in self.additional_keys_to_log}
                episode = Episode(ep_info["r"], ep_info["l"], info=additional_info)
                step_episodes.append(episode)
                self.episodes.append(episode)
        if step_episodes:
            tag = "train" if self.training else "eval"
            step_stats = EpisodesStats(step_episodes, simple=True)
            step_stats.write_to_tensorboard(self.tb_writer, tag, self.total_steps)
            rolling_stats = EpisodesStats(self.episodes)
            rolling_stats.write_to_tensorboard(
                self.tb_writer, f"{tag}_rolling", self.total_steps
            )
            self.episode_cnt += len(step_episodes)
            # Print a progress line roughly every rolling_length episodes.
            if self.episode_cnt >= self.last_episode_cnt_print + self.rolling_length:
                print(
                    f"Episode: {self.episode_cnt} | "
                    f"Steps: {self.total_steps} | "
                    f"{rolling_stats}"
                )
                self.last_episode_cnt_print += self.rolling_length

    def reset(self) -> VecEnvObs:
        return self.env.reset()
from typing import Any, Dict, Tuple, Union
import gym
import numpy as np
from rl_algo_impls.wrappers.vectorable_wrapper import VectorableWrapper
# Single-env (non-vectorized) gym observation / action payload types used by
# the wrappers below.
ObsType = Union[np.ndarray, dict]
ActType = Union[int, float, np.ndarray, dict]
class EpisodicLifeEnv(VectorableWrapper):
    """Atari wrapper: during training, treat losing a life as episode end
    (helps value estimation), while reset() only truly restarts the game when
    all lives are gone.

    Requires an ALE-based env (reads `env.unwrapped.ale.lives()`).
    """

    def __init__(self, env: gym.Env, training: bool = True, noop_act: int = 0) -> None:
        super().__init__(env)
        self.training = training
        self.noop_act = noop_act  # action used to advance past a life loss on reset
        # True when a life was lost but the underlying game is not over.
        self.life_done_continue = False
        self.lives = 0

    def step(self, action: ActType) -> Tuple[ObsType, float, bool, Dict[str, Any]]:
        obs, rew, done, info = self.env.step(action)
        new_lives = self.env.unwrapped.ale.lives()
        self.life_done_continue = new_lives != self.lives and not done
        # Only if training should life-end be marked as done
        if self.training and 0 < new_lives < self.lives:
            done = True
        self.lives = new_lives
        return obs, rew, done, info

    def reset(self, **kwargs) -> ObsType:
        # If life_done_continue (but not game over), then a reset should just allow the
        # game to progress to the next life.
        if self.training and self.life_done_continue:
            obs, _, _, _ = self.env.step(self.noop_act)
        else:
            obs = self.env.reset(**kwargs)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
# NOTE(review): class name appears misspelled ("Startt"); left unchanged
# because external callers may reference it by this name.
class FireOnLifeStarttEnv(VectorableWrapper):
    """Atari wrapper that presses FIRE whenever a new life starts — on reset
    and after each life loss — matching the common FIRE-on-reset pattern for
    games that need FIRE to begin play. Requires an ALE-based env."""

    def __init__(self, env: gym.Env, fire_act: int = 1) -> None:
        super().__init__(env)
        self.fire_act = fire_act
        action_meanings = env.unwrapped.get_action_meanings()
        # Guard against envs where the chosen action isn't actually FIRE.
        assert action_meanings[fire_act] == "FIRE"
        assert len(action_meanings) >= 3
        self.lives = 0
        self.fire_on_next_step = True

    def step(self, action: ActType) -> Tuple[ObsType, float, bool, Dict[str, Any]]:
        # Override the agent's action with FIRE when a new life just started.
        if self.fire_on_next_step:
            action = self.fire_act
            self.fire_on_next_step = False
        obs, rew, done, info = self.env.step(action)
        new_lives = self.env.unwrapped.ale.lives()
        if 0 < new_lives < self.lives and not done:
            self.fire_on_next_step = True
        self.lives = new_lives
        return obs, rew, done, info

    def reset(self, **kwargs) -> ObsType:
        # Press FIRE (and action 2 — presumably a movement action; same shape
        # as SB3's FireResetEnv) to kick the game into a playable state,
        # re-resetting if either press somehow ends the episode.
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(self.fire_act)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        self.fire_on_next_step = False
        return obs
class ClipRewardEnv(VectorableWrapper):
    """During training, replaces each reward with its sign (-1/0/+1) and keeps
    the raw value in info["unclipped_reward"]; outside training, rewards pass
    through unchanged."""

    def __init__(self, env: gym.Env, training: bool = True) -> None:
        super().__init__(env)
        self.training = training

    def step(self, action: ActType) -> Tuple[ObsType, float, bool, Dict[str, Any]]:
        obs, rew, done, info = self.env.step(action)
        if not self.training:
            return obs, rew, done, info
        info["unclipped_reward"] = rew
        return obs, np.sign(rew), done, info
import random
from collections import deque
from typing import Any, Deque, Dict, List, Optional
import numpy as np
from rl_algo_impls.runner.config import Config
from rl_algo_impls.shared.policy.policy import Policy
from rl_algo_impls.wrappers.action_mask_wrapper import find_action_masker
from rl_algo_impls.wrappers.vectorable_wrapper import (
VecEnvObs,
VecEnvStepReturn,
VectorableWrapper,
)
class SelfPlayWrapper(VectorableWrapper):
    """Plays the learner against checkpointed copies of itself and optional
    fixed "selfplay bot" policies.

    Env indexes are paired — (2*i, 2*i+1) are the two players of one match.
    Opponent policies occupy one slot of their pair; the learner controls the
    remaining slots, and only those are exposed through `num_envs`, `step`,
    `reset`, and `get_action_mask`.
    """

    next_obs: VecEnvObs
    next_action_masks: Optional[np.ndarray]

    def __init__(
        self,
        env,
        config: Config,
        num_old_policies: int = 0,
        save_steps: int = 20_000,
        swap_steps: int = 10_000,
        window: int = 10,
        swap_window_size: int = 2,
        selfplay_bots: Optional[Dict[str, Any]] = None,
        bot_always_player_2: bool = False,
    ) -> None:
        """
        :param num_old_policies: env slots played by old checkpoints (even).
        :param save_steps: checkpoint interval; stored here but read by the
            external checkpointing logic — not used within this wrapper.
        :param swap_steps: steps before an old-policy slot is re-randomized.
        :param window: how many recent checkpoints to keep for sampling.
        :param swap_window_size: old-policy matches swapped together as a unit.
        :param selfplay_bots: {model_path: num_matches} fixed opponents.
        :param bot_always_player_2: pin non-learner policies to player 2.
        """
        super().__init__(env)
        assert num_old_policies % 2 == 0, "num_old_policies must be even"
        assert (
            num_old_policies % swap_window_size == 0
        ), "num_old_policies must be a multiple of swap_window_size"
        self.config = config
        self.num_old_policies = num_old_policies
        self.save_steps = save_steps
        self.swap_steps = swap_steps
        self.swap_window_size = swap_window_size
        self.selfplay_bots = selfplay_bots
        self.bot_always_player_2 = bot_always_player_2

        self.policies: Deque[Policy] = deque(maxlen=window)
        # policy_assignments[i] is None where the learner acts, else the
        # opponent policy controlling env slot i.
        self.policy_assignments: List[Optional[Policy]] = [None] * env.num_envs
        self.steps_since_swap = np.zeros(env.num_envs)

        self.selfplay_policies: Dict[str, Policy] = {}

        # Learner-visible env count excludes opponent-controlled slots.
        self.num_envs = env.num_envs - num_old_policies
        if self.selfplay_bots:
            self.num_envs -= sum(self.selfplay_bots.values())
            self.initialize_selfplay_bots()

    def get_action_mask(self) -> Optional[np.ndarray]:
        assert self.next_action_masks is not None
        return self.next_action_masks[self.learner_indexes()]

    def learner_indexes(self) -> List[int]:
        """Boolean mask (per underlying env slot) of learner-controlled slots."""
        return [p is None for p in self.policy_assignments]

    def checkpoint_policy(self, copied_policy: Policy) -> None:
        """Add a frozen checkpoint; on the first checkpoint, seed all
        old-policy slots with it (alternating player 1/2 unless pinned)."""
        copied_policy.train(False)
        self.policies.append(copied_policy)

        if all(p is None for p in self.policy_assignments[: 2 * self.num_old_policies]):
            for i in range(self.num_old_policies):
                # Switch between player 1 and 2
                self.policy_assignments[
                    2 * i + (i % 2 if not self.bot_always_player_2 else 1)
                ] = copied_policy

    def swap_policy(self, idx: int, swap_window_size: int = 1) -> None:
        """Replace the old policies in the swap window starting at idx with a
        random recent checkpoint and reset their swap timers."""
        policy = random.choice(self.policies)
        idx = idx // 2 * 2  # align to the start of a match pair
        for j in range(swap_window_size * 2):
            if self.policy_assignments[idx + j]:
                self.policy_assignments[idx + j] = policy
        self.steps_since_swap[idx : idx + swap_window_size * 2] = 0

    def initialize_selfplay_bots(self) -> None:
        if not self.selfplay_bots:
            return
        from rl_algo_impls.runner.running_utils import get_device, make_policy

        env = self.env  # Type: ignore
        device = get_device(self.config, env)
        start_idx = 2 * self.num_old_policies
        for model_path, n in self.selfplay_bots.items():
            policy = make_policy(
                self.config,
                env,
                device,
                load_path=model_path,
                **self.config.policy_hyperparams,
            ).eval()
            # FIX: was self.selfplay_policies["model_path"] — the literal
            # string clobbered every prior entry; key by the actual path.
            self.selfplay_policies[model_path] = policy
            for idx in range(start_idx, start_idx + 2 * n, 2):
                bot_idx = (
                    (idx + 1) if self.bot_always_player_2 else (idx + idx // 2 % 2)
                )
                self.policy_assignments[bot_idx] = policy
            start_idx += 2 * n

    def step(self, actions: np.ndarray) -> VecEnvStepReturn:
        env = self.env  # type: ignore
        all_actions = np.zeros((env.num_envs,) + actions.shape[1:], dtype=actions.dtype)
        orig_learner_indexes = self.learner_indexes()

        # Learner actions go into their slots; each opponent policy fills its own.
        all_actions[orig_learner_indexes] = actions
        for policy in set(p for p in self.policy_assignments if p):
            policy_indexes = [policy == p for p in self.policy_assignments]
            if any(policy_indexes):
                all_actions[policy_indexes] = policy.act(
                    self.next_obs[policy_indexes],
                    deterministic=False,
                    action_masks=self.next_action_masks[policy_indexes]
                    if self.next_action_masks is not None
                    else None,
                )
        self.next_obs, rew, done, info = env.step(all_actions)
        self.next_action_masks = env.get_action_mask()

        # Expose only learner results.
        rew = rew[orig_learner_indexes]
        info = [i for i, b in zip(info, orig_learner_indexes) if b]

        self.steps_since_swap += 1
        for idx in range(0, self.num_old_policies * 2, 2 * self.swap_window_size):
            if self.steps_since_swap[idx] > self.swap_steps:
                self.swap_policy(idx, self.swap_window_size)

        new_learner_indexes = self.learner_indexes()
        return self.next_obs[new_learner_indexes], rew, done[new_learner_indexes], info

    def reset(self) -> VecEnvObs:
        self.next_obs = super().reset()
        self.next_action_masks = self.env.get_action_mask()
        return self.next_obs[self.learner_indexes()]
import random
from collections import deque
from typing import Any, Deque, Dict, List, Optional, Tuple
import numpy as np
from rl_algo_impls.runner.config import Config
from rl_algo_impls.shared.policy.policy import Policy
from rl_algo_impls.wrappers.action_mask_wrapper import find_action_masker
from rl_algo_impls.wrappers.self_play_wrapper import SelfPlayWrapper
from rl_algo_impls.wrappers.vectorable_wrapper import (
VecEnvObs,
VecEnvStepReturn,
VectorableWrapper,
)
class SelfPlayEvalWrapper(VectorableWrapper):
    """Evaluation counterpart of SelfPlayWrapper: pits the learner against the
    most recent checkpoints held by an assigned training SelfPlayWrapper.

    Env slots are paired (2*i, 2*i+1) as the two players of a match; the
    learner takes one slot per pair (alternating sides), so the exposed
    `num_envs` is half the underlying env's.
    """

    # Must be assigned externally before step() is called.
    train_wrapper: Optional[SelfPlayWrapper]
    next_obs: VecEnvObs
    next_action_masks: Optional[np.ndarray]

    def __init__(self, env: VectorableWrapper) -> None:
        super().__init__(env)
        assert env.num_envs % 2 == 0
        self.num_envs = env.num_envs // 2

    def step(self, actions: np.ndarray) -> VecEnvStepReturn:
        assert self.train_wrapper, "Must have assigned train_wrapper"
        env = self.env  # type: ignore
        all_actions = np.zeros((env.num_envs,) + actions.shape[1:], dtype=actions.dtype)
        policy_assignments, learner_indexes = self._assignment_and_indexes()

        # Learner actions fill the learner slots; each checkpoint policy acts
        # for all slots assigned to it in one batched call.
        all_actions[learner_indexes] = actions
        for policy in set(p for p in policy_assignments if p):
            policy_indexes = [policy == p for p in policy_assignments]
            all_actions[policy_indexes] = policy.act(
                self.next_obs[policy_indexes],  # type: ignore
                deterministic=False,
                action_masks=self.next_action_masks[policy_indexes]
                if self.next_action_masks is not None
                else None,
            )
        self.next_obs, rew, done, info = env.step(all_actions)
        self.next_action_masks = env.get_action_mask()

        # Only learner results are surfaced to the caller.
        rew = rew[learner_indexes]
        info = [_info for _info, is_learner in zip(info, learner_indexes) if is_learner]

        return self.next_obs[learner_indexes], rew, done[learner_indexes], info

    def reset(self) -> VecEnvObs:
        self.next_obs = super().reset()
        self.next_action_masks = self.env.get_action_mask()  # type: ignore
        _, indexes = self._assignment_and_indexes()
        return self.next_obs[indexes]  # type: ignore

    def get_action_mask(self) -> Optional[np.ndarray]:
        _, indexes = self._assignment_and_indexes()
        return (
            self.next_action_masks[indexes]
            if self.next_action_masks is not None
            else None
        )

    def _assignment_and_indexes(self) -> Tuple[List[Optional[Policy]], List[bool]]:
        """Return (per-slot opponent assignment, learner boolean mask).

        Opponents cycle through checkpoints newest-first; the opponent slot
        alternates between the two sides of each pair via 2*i + (i % 2).
        """
        assignments: List[Optional[Policy]] = [None] * self.env.num_envs  # type: ignore
        policies = list(reversed(self.train_wrapper.policies))
        for i in range(self.num_envs):
            policy = policies[i % len(policies)]
            assignments[2 * i + (i % 2)] = policy
        return assignments, [p is None for p in assignments]
import logging
from dataclasses import asdict, dataclass
from time import perf_counter
from typing import List, NamedTuple, Optional, TypeVar
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.tensorboard.writer import SummaryWriter
from rl_algo_impls.shared.algorithm import Algorithm
from rl_algo_impls.shared.callbacks import Callback
from rl_algo_impls.shared.gae import compute_advantages
from rl_algo_impls.shared.policy.actor_critic import ActorCritic
from rl_algo_impls.shared.schedule import (
constant_schedule,
linear_schedule,
schedule,
update_learning_rate,
)
from rl_algo_impls.shared.stats import log_scalars
from rl_algo_impls.wrappers.vectorable_wrapper import (
VecEnv,
single_action_space,
single_observation_space,
)
class TrainStepStats(NamedTuple):
    """Losses and diagnostics from a single PPO minibatch gradient step."""

    loss: float  # total: pi_loss + ent_coef * entropy_loss + vf_coef * v_loss
    pi_loss: float  # clipped policy-gradient surrogate loss
    v_loss: float  # value loss (MSE, optionally clipped)
    entropy_loss: float  # negative mean entropy
    approx_kl: float  # ((ratio - 1) - log ratio) estimator of the KL divergence
    clipped_frac: float  # fraction of samples where the policy ratio was clipped
    val_clipped_frac: float  # fraction of samples where the value clip was active
@dataclass
class TrainStats:
    """Means of TrainStepStats over an epoch, plus explained variance of the
    value function, with TensorBoard logging under the losses/ namespace."""

    loss: float
    pi_loss: float
    v_loss: float
    entropy_loss: float
    approx_kl: float
    clipped_frac: float
    val_clipped_frac: float
    explained_var: float

    def __init__(self, step_stats: List[TrainStepStats], explained_var: float) -> None:
        def mean_of(attr_name: str) -> float:
            return np.mean([getattr(stats, attr_name) for stats in step_stats]).item()

        self.loss = mean_of("loss")
        self.pi_loss = mean_of("pi_loss")
        self.v_loss = mean_of("v_loss")
        self.entropy_loss = mean_of("entropy_loss")
        self.approx_kl = mean_of("approx_kl")
        self.clipped_frac = mean_of("clipped_frac")
        self.val_clipped_frac = mean_of("val_clipped_frac")
        self.explained_var = explained_var

    def write_to_tensorboard(self, tb_writer: SummaryWriter, global_step: int) -> None:
        """Write every field as a scalar under losses/<name>."""
        for name, value in asdict(self).items():
            tb_writer.add_scalar(f"losses/{name}", value, global_step=global_step)

    def __repr__(self) -> str:
        parts = [
            f"Loss: {round(self.loss, 2)}",
            f"Pi L: {round(self.pi_loss, 2)}",
            f"V L: {round(self.v_loss, 2)}",
            f"E L: {round(self.entropy_loss, 2)}",
            f"Apx KL Div: {round(self.approx_kl, 2)}",
            f"Clip Frac: {round(self.clipped_frac, 2)}",
            f"Val Clip Frac: {round(self.val_clipped_frac, 2)}",
        ]
        return " | ".join(parts)
# Self-type for PPO.learn so subclasses' learn() returns their own type.
PPOSelf = TypeVar("PPOSelf", bound="PPO")
class PPO(Algorithm):
    """Proximal Policy Optimization (clipped-surrogate variant).

    Each iteration collects `n_steps` per env, computes GAE advantages, then
    runs `n_epochs` of minibatch updates on the clipped policy/value
    objective. Learning rate, clip ranges, entropy coefficient, and gamma can
    each follow a decay schedule over overall training progress.
    """

    def __init__(
        self,
        policy: ActorCritic,
        env: VecEnv,
        device: torch.device,
        tb_writer: SummaryWriter,
        learning_rate: float = 3e-4,
        learning_rate_decay: str = "none",
        n_steps: int = 2048,
        batch_size: int = 64,
        n_epochs: int = 10,
        gamma: float = 0.99,
        gae_lambda: float = 0.95,
        clip_range: float = 0.2,
        clip_range_decay: str = "none",
        clip_range_vf: Optional[float] = None,
        clip_range_vf_decay: str = "none",
        normalize_advantage: bool = True,
        ent_coef: float = 0.0,
        ent_coef_decay: str = "none",
        vf_coef: float = 0.5,
        ppo2_vf_coef_halving: bool = False,
        max_grad_norm: float = 0.5,
        sde_sample_freq: int = -1,
        update_advantage_between_epochs: bool = True,
        update_returns_between_epochs: bool = False,
        gamma_end: Optional[float] = None,
    ) -> None:
        super().__init__(policy, env, device, tb_writer)
        self.policy = policy
        # Optional invalid-action masking: present only when env supports it.
        self.get_action_mask = getattr(env, "get_action_mask", None)

        # gamma anneals linearly toward gamma_end if given; else stays fixed.
        self.gamma_schedule = (
            linear_schedule(gamma, gamma_end)
            if gamma_end is not None
            else constant_schedule(gamma)
        )
        self.gae_lambda = gae_lambda
        self.optimizer = Adam(self.policy.parameters(), lr=learning_rate, eps=1e-7)
        self.lr_schedule = schedule(learning_rate_decay, learning_rate)
        self.max_grad_norm = max_grad_norm
        self.clip_range_schedule = schedule(clip_range_decay, clip_range)
        self.clip_range_vf_schedule = None
        if clip_range_vf:
            self.clip_range_vf_schedule = schedule(clip_range_vf_decay, clip_range_vf)

        if normalize_advantage:
            assert (
                env.num_envs * n_steps > 1 and batch_size > 1
            ), f"Each minibatch must be larger than 1 to support normalization"
        self.normalize_advantage = normalize_advantage

        self.ent_coef_schedule = schedule(ent_coef_decay, ent_coef)
        self.vf_coef = vf_coef
        # OpenAI baselines PPO2 compatibility: halve the value loss.
        self.ppo2_vf_coef_halving = ppo2_vf_coef_halving

        self.n_steps = n_steps
        self.batch_size = batch_size
        self.n_epochs = n_epochs
        self.sde_sample_freq = sde_sample_freq

        self.update_advantage_between_epochs = update_advantage_between_epochs
        self.update_returns_between_epochs = update_returns_between_epochs

    def learn(
        self: PPOSelf,
        train_timesteps: int,
        callbacks: Optional[List[Callback]] = None,
        total_timesteps: Optional[int] = None,
        start_timesteps: int = 0,
    ) -> PPOSelf:
        """Train for train_timesteps env steps; returns self for chaining.

        total_timesteps is the horizon the decay schedules are computed
        against (defaults to train_timesteps); start_timesteps is how many
        steps were already taken before this call (for resumed training).
        Callbacks run after each rollout+update; any returning False stops
        training early.
        """
        if total_timesteps is None:
            total_timesteps = train_timesteps
        assert start_timesteps + train_timesteps <= total_timesteps

        # Rollout buffers are time-major: (n_steps, num_envs, ...).
        epoch_dim = (self.n_steps, self.env.num_envs)
        step_dim = (self.env.num_envs,)
        obs_space = single_observation_space(self.env)
        act_space = single_action_space(self.env)
        act_shape = self.policy.action_shape

        next_obs = self.env.reset()
        next_action_masks = self.get_action_mask() if self.get_action_mask else None
        next_episode_starts = np.full(step_dim, True, dtype=np.bool_)

        obs = np.zeros(epoch_dim + obs_space.shape, dtype=obs_space.dtype)  # type: ignore
        actions = np.zeros(epoch_dim + act_shape, dtype=act_space.dtype)  # type: ignore
        rewards = np.zeros(epoch_dim, dtype=np.float32)
        episode_starts = np.zeros(epoch_dim, dtype=np.bool_)
        values = np.zeros(epoch_dim, dtype=np.float32)
        logprobs = np.zeros(epoch_dim, dtype=np.float32)
        action_masks = (
            np.zeros(
                (self.n_steps,) + next_action_masks.shape, dtype=next_action_masks.dtype
            )
            if next_action_masks is not None
            else None
        )

        timesteps_elapsed = start_timesteps
        while timesteps_elapsed < start_timesteps + train_timesteps:
            start_time = perf_counter()

            # Evaluate all schedules at current overall training progress.
            progress = timesteps_elapsed / total_timesteps
            ent_coef = self.ent_coef_schedule(progress)
            learning_rate = self.lr_schedule(progress)
            update_learning_rate(self.optimizer, learning_rate)
            pi_clip = self.clip_range_schedule(progress)
            gamma = self.gamma_schedule(progress)
            chart_scalars = {
                "learning_rate": self.optimizer.param_groups[0]["lr"],
                "ent_coef": ent_coef,
                "pi_clip": pi_clip,
                "gamma": gamma,
                "gae_lambda": self.gae_lambda,
            }
            if self.clip_range_vf_schedule:
                v_clip = self.clip_range_vf_schedule(progress)
                chart_scalars["v_clip"] = v_clip
            else:
                v_clip = None
            if hasattr(self.env, "reward_weights"):
                chart_scalars["first_reward_weight"] = getattr(
                    self.env, "reward_weights"
                )[0]
            log_scalars(self.tb_writer, "charts", chart_scalars, timesteps_elapsed)

            # --- Rollout collection (policy in eval mode) ---
            self.policy.eval()
            self.policy.reset_noise()
            for s in range(self.n_steps):
                timesteps_elapsed += self.env.num_envs
                # gSDE: periodically resample exploration noise mid-rollout.
                if self.sde_sample_freq > 0 and s > 0 and s % self.sde_sample_freq == 0:
                    self.policy.reset_noise()

                obs[s] = next_obs
                episode_starts[s] = next_episode_starts
                if action_masks is not None:
                    action_masks[s] = next_action_masks

                (
                    actions[s],
                    values[s],
                    logprobs[s],
                    clamped_action,
                ) = self.policy.step(next_obs, action_masks=next_action_masks)
                next_obs, rewards[s], next_episode_starts, _ = self.env.step(
                    clamped_action
                )
                next_action_masks = (
                    self.get_action_mask() if self.get_action_mask else None
                )

            # --- Flatten buffers to (n_steps * num_envs, ...) tensors ---
            self.policy.train()

            b_obs = torch.tensor(obs.reshape((-1,) + obs_space.shape)).to(self.device)  # type: ignore
            b_actions = torch.tensor(actions.reshape((-1,) + act_shape)).to(  # type: ignore
                self.device
            )
            b_logprobs = torch.tensor(logprobs.reshape(-1)).to(self.device)
            b_action_masks = (
                torch.tensor(action_masks.reshape((-1,) + next_action_masks.shape[1:])).to(  # type: ignore
                    self.device
                )
                if action_masks is not None
                else None
            )

            # Rollout-time value predictions, kept for explained variance.
            y_pred = values.reshape(-1)
            b_values = torch.tensor(y_pred).to(self.device)

            step_stats = []
            # Define variables that will definitely be set through the first epoch
            advantages: np.ndarray = None  # type: ignore
            b_advantages: torch.Tensor = None  # type: ignore
            y_true: np.ndarray = None  # type: ignore
            b_returns: torch.Tensor = None  # type: ignore
            for e in range(self.n_epochs):
                # GAE recomputed each epoch if configured (fresher advantages
                # as the value function changes), else only on epoch 0.
                if e == 0 or self.update_advantage_between_epochs:
                    advantages = compute_advantages(
                        rewards,
                        values,
                        episode_starts,
                        next_episode_starts,
                        next_obs,
                        self.policy,
                        gamma,
                        self.gae_lambda,
                    )
                    b_advantages = torch.tensor(advantages.reshape(-1)).to(self.device)
                if e == 0 or self.update_returns_between_epochs:
                    returns = advantages + values
                    y_true = returns.reshape(-1)
                    b_returns = torch.tensor(y_true).to(self.device)

                b_idxs = torch.randperm(len(b_obs))
                # Only record last epoch's stats
                step_stats.clear()
                for i in range(0, len(b_obs), self.batch_size):
                    self.policy.reset_noise(self.batch_size)

                    mb_idxs = b_idxs[i : i + self.batch_size]

                    mb_obs = b_obs[mb_idxs]
                    mb_actions = b_actions[mb_idxs]
                    mb_values = b_values[mb_idxs]
                    mb_logprobs = b_logprobs[mb_idxs]
                    mb_action_masks = (
                        b_action_masks[mb_idxs] if b_action_masks is not None else None
                    )

                    mb_adv = b_advantages[mb_idxs]
                    if self.normalize_advantage:
                        mb_adv = (mb_adv - mb_adv.mean()) / (mb_adv.std() + 1e-8)
                    mb_returns = b_returns[mb_idxs]

                    new_logprobs, entropy, new_values = self.policy(
                        mb_obs, mb_actions, action_masks=mb_action_masks
                    )

                    # Clipped-surrogate policy loss.
                    logratio = new_logprobs - mb_logprobs
                    ratio = torch.exp(logratio)
                    clipped_ratio = torch.clamp(ratio, min=1 - pi_clip, max=1 + pi_clip)
                    pi_loss = torch.max(-ratio * mb_adv, -clipped_ratio * mb_adv).mean()

                    # Value loss, optionally clipped around rollout values.
                    v_loss_unclipped = (new_values - mb_returns) ** 2
                    if v_clip:
                        v_loss_clipped = (
                            mb_values
                            + torch.clamp(new_values - mb_values, -v_clip, v_clip)
                            - mb_returns
                        ) ** 2
                        v_loss = torch.max(v_loss_unclipped, v_loss_clipped).mean()
                    else:
                        v_loss = v_loss_unclipped.mean()

                    if self.ppo2_vf_coef_halving:
                        v_loss *= 0.5

                    entropy_loss = -entropy.mean()

                    loss = pi_loss + ent_coef * entropy_loss + self.vf_coef * v_loss

                    self.optimizer.zero_grad()
                    loss.backward()
                    nn.utils.clip_grad_norm_(
                        self.policy.parameters(), self.max_grad_norm
                    )
                    self.optimizer.step()

                    # Diagnostics (no gradients needed).
                    with torch.no_grad():
                        approx_kl = ((ratio - 1) - logratio).mean().cpu().numpy().item()
                        clipped_frac = (
                            ((ratio - 1).abs() > pi_clip)
                            .float()
                            .mean()
                            .cpu()
                            .numpy()
                            .item()
                        )
                        val_clipped_frac = (
                            ((new_values - mb_values).abs() > v_clip)
                            .float()
                            .mean()
                            .cpu()
                            .numpy()
                            .item()
                            if v_clip
                            else 0
                        )

                    step_stats.append(
                        TrainStepStats(
                            loss.item(),
                            pi_loss.item(),
                            v_loss.item(),
                            entropy_loss.item(),
                            approx_kl,
                            clipped_frac,
                            val_clipped_frac,
                        )
                    )

            # Explained variance of rollout-time value predictions vs returns.
            var_y = np.var(y_true).item()
            explained_var = (
                np.nan if var_y == 0 else 1 - np.var(y_true - y_pred).item() / var_y
            )
            TrainStats(step_stats, explained_var).write_to_tensorboard(
                self.tb_writer, timesteps_elapsed
            )

            end_time = perf_counter()
            rollout_steps = self.n_steps * self.env.num_envs
            self.tb_writer.add_scalar(
                "train/steps_per_second",
                rollout_steps / (end_time - start_time),
                timesteps_elapsed,
            )

            if callbacks:
                if not all(
                    c.on_step(timesteps_elapsed=rollout_steps) for c in callbacks
                ):
                    logging.info(
                        f"Callback terminated training at {timesteps_elapsed} timesteps"
                    )
                    break

        return self
import logging
import os
import os.path
from pathlib import Path
from typing import Any, Dict
import numpy as np
from gym.spaces import MultiDiscrete
from luxai_s2.state import ObservationStateDict
from rl_algo_impls.lux.kit.config import EnvConfig
from rl_algo_impls.lux.kit.kit import obs_to_game_state
from rl_algo_impls.runner.config import Config, EnvHyperparams, RunArgs
from rl_algo_impls.runner.running_utils import get_device, load_hyperparams, make_policy
from rl_algo_impls.shared.lux.actions import (
ACTION_SIZES,
enqueued_action_from_obs,
to_lux_actions,
)
from rl_algo_impls.shared.lux.early import bid_action, place_factory_action
from rl_algo_impls.shared.lux.observation import observation_and_action_mask
from rl_algo_impls.shared.lux.stats import ActionStats
from rl_algo_impls.shared.vec_env.make_env import make_eval_env
from rl_algo_impls.wrappers.hwc_to_chw_observation import HwcToChwObservation
from rl_algo_impls.wrappers.vectorable_wrapper import find_wrapper
# Checkpoint path (relative to the repo root) loaded by Agent for evaluation.
MODEL_LOAD_PATH = "saved_models/ppo-LuxAI_S2-v0-A10-S1"
class Agent:
    """Lux AI Season 2 competition agent that wraps a trained PPO policy.

    Builds a single-env eval environment (only to construct the matching
    policy and observation transpose wrapper), then serves the Lux kit's
    bid / factory-placement / act callbacks from the loaded policy.
    """

    def __init__(self, player: str, env_cfg: EnvConfig) -> None:
        # Repo root, resolved relative to this file's location.
        root_dir = Path(__file__).parent.parent.parent.absolute()
        self.player = player
        self.agents = ["player_0", "player_1"]
        self.player_idx = self.agents.index(player)
        # Faction choice is keyed to which player we are.
        self.faction = ["AlphaStrike", "MotherMars"][self.player_idx]
        self.env_cfg = env_cfg

        run_args = RunArgs(algo="ppo", env="LuxAI_S2-v0-eval", seed=1)
        hyperparams = load_hyperparams(run_args.algo, run_args.env)
        config = Config(
            run_args,
            hyperparams,
            str(root_dir),
        )
        env = make_eval_env(
            config,
            EnvHyperparams(**config.env_hyperparams),
            override_hparams={"n_envs": 1},
        )
        device = get_device(config, env)
        self.policy = make_policy(
            config,
            env,
            device,
            load_path=os.path.join(root_dir, MODEL_LOAD_PATH),
            **config.policy_hyperparams,
        ).eval()

        # Reuse the env's HWC->CHW transform so raw observations match the
        # layout the policy was trained on.
        transpose_wrapper = find_wrapper(env, HwcToChwObservation)
        assert transpose_wrapper
        self.transpose_wrapper = transpose_wrapper

        self.map_size = env_cfg.map_size
        self.num_map_tiles = self.map_size * self.map_size
        self.action_plane_space = MultiDiscrete(ACTION_SIZES)
        # Per-tile action mask: one row per map tile, one column per action.
        self.action_mask_shape = (
            self.num_map_tiles,
            self.action_plane_space.nvec.sum(),
        )

    def act(
        self, step: int, lux_obs: ObservationStateDict, remainingOverageTime: int = 60
    ) -> Dict[str, Any]:
        """Translate a raw Lux observation into per-unit Lux actions."""
        state = obs_to_game_state(step, self.env_cfg, lux_obs)
        # Current action queues (both players) inform masking and queue reuse.
        enqueued_actions = {
            u_id: enqueued_action_from_obs(u["action_queue"])
            for p in self.agents
            for u_id, u in lux_obs["units"][p].items()
        }
        obs, action_mask = observation_and_action_mask(
            self.player, lux_obs, state, self.action_mask_shape, enqueued_actions
        )
        # Policy expects a batch dimension and CHW layout.
        obs = np.expand_dims(obs, axis=0)
        obs = self.transpose_wrapper.observation(obs)
        action_mask = np.expand_dims(action_mask, axis=0)

        actions = self.policy.act(obs, deterministic=False, action_masks=action_mask)
        action_stats = ActionStats()
        lux_action = to_lux_actions(
            self.player,
            state,
            actions[0],
            action_mask[0],
            enqueued_actions,
            action_stats,
        )
        return lux_action

    def bid_policy(
        self, step: int, lux_obs: ObservationStateDict, remainingOverageTime: int = 60
    ) -> Dict[str, Any]:
        # Fixed bid of 5 with our assigned faction.
        return bid_action(5, self.faction)

    def factory_placement_policy(
        self, step: int, lux_obs: ObservationStateDict, remainingOverageTime: int = 60
    ) -> Dict[str, Any]:
        state = obs_to_game_state(step, self.env_cfg, lux_obs)
        return place_factory_action(state, self.agents, self.player_idx)

    def place_initial_robot_action(
        self, step: int, lux_obs: ObservationStateDict, remainingOverageTime: int = 60
    ) -> Dict[str, Any]:
        # Action 1 for every owned factory — presumably "build robot";
        # confirm against the Lux factory action encoding.
        state = obs_to_game_state(step, self.env_cfg, lux_obs)
        return {f: 1 for f in state.factories[self.player]}
from dataclasses import dataclass, field
from typing import Dict
import numpy as np
from rl_algo_impls.lux.kit.cargo import UnitCargo
from rl_algo_impls.lux.kit.config import EnvConfig
from rl_algo_impls.lux.kit.factory import Factory
from rl_algo_impls.lux.kit.team import FactionTypes, Team
from rl_algo_impls.lux.kit.unit import Unit
def process_action(action):
    """Convert an action (possibly containing numpy types) into plain
    JSON-serializable Python values."""
    return to_json(action)
def to_json(obj):
    """Recursively convert numpy arrays/scalars (and tuples) into plain,
    JSON-serializable Python values; anything else passes through unchanged."""
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    if isinstance(obj, (list, tuple)):
        return [to_json(item) for item in obj]
    if isinstance(obj, dict):
        return {key: to_json(value) for key, value in obj.items()}
    return obj
def from_json(state):
    """Inverse of to_json for observation payloads: lists become numpy arrays,
    dicts are converted recursively, everything else passes through."""
    if isinstance(state, list):
        return np.array(state)
    if isinstance(state, dict):
        return {key: from_json(value) for key, value in state.items()}
    return state
def process_obs(player, game_state, step, obs):
    """Build or refresh the cached game-state dict from a raw observation.

    Step 0 delivers the full map; later steps deliver deltas applied onto
    `game_state` in place. `player` is unused here (kept for the kit's
    callback signature). Returns the updated game_state.
    """
    if step == 0:
        # at step 0 we get the entire map information
        game_state = from_json(obs)
    else:
        # use delta changes to board to update game state
        obs = from_json(obs)
        for k in obs:
            if k != "board":
                game_state[k] = obs[k]
            else:
                # valid_spawns_mask is the only board layer sent whole.
                if "valid_spawns_mask" in obs[k]:
                    game_state["board"]["valid_spawns_mask"] = obs[k][
                        "valid_spawns_mask"
                    ]
        # Board deltas are keyed by "x,y" strings mapping to new cell values.
        # (The inner `k` intentionally shadows the loop variable above; the
        # outer loop has already completed by this point.)
        for item in ["rubble", "lichen", "lichen_strains"]:
            for k, v in obs["board"][item].items():
                k = k.split(",")
                x, y = int(k[0]), int(k[1])
                game_state["board"][item][x, y] = v
    return game_state
def obs_to_game_state(step, env_cfg: EnvConfig, obs):
    """Construct a typed GameState from a raw observation dict.

    Builds Unit/Factory/Team objects per agent and derives the
    factory_occupancy_map (strain id per tile, -1 where no factory).
    """
    units = dict()
    for agent in obs["units"]:
        units[agent] = dict()
        for unit_id in obs["units"][agent]:
            unit_data = obs["units"][agent][unit_id]
            cargo = UnitCargo(**unit_data["cargo"])
            unit = Unit(
                **unit_data,
                unit_cfg=env_cfg.ROBOTS[unit_data["unit_type"]],
                env_cfg=env_cfg,
            )
            # Replace the raw cargo dict (passed via **unit_data) with the
            # typed UnitCargo built above.
            unit.cargo = cargo
            units[agent][unit_id] = unit

    # -1 marks tiles not covered by any factory.
    factory_occupancy_map = np.ones_like(obs["board"]["rubble"], dtype=int) * -1
    factories = dict()
    for agent in obs["factories"]:
        factories[agent] = dict()
        for unit_id in obs["factories"][agent]:
            f_data = obs["factories"][agent][unit_id]
            cargo = UnitCargo(**f_data["cargo"])
            factory = Factory(**f_data, env_cfg=env_cfg)
            factory.cargo = cargo
            factories[agent][unit_id] = factory
            # Stamp the factory's footprint with its lichen strain id.
            factory_occupancy_map[factory.pos_slice] = factory.strain_id
    teams = dict()
    for agent in obs["teams"]:
        team_data = obs["teams"][agent]
        # NOTE(review): `faction` is unused below; the lookup presumably
        # serves to raise KeyError on an unknown faction string — confirm.
        faction = FactionTypes[team_data["faction"]]
        teams[agent] = Team(**team_data, agent=agent)

    return GameState(
        env_cfg=env_cfg,
        env_steps=step,
        board=Board(
            rubble=obs["board"]["rubble"],
            ice=obs["board"]["ice"],
            ore=obs["board"]["ore"],
            lichen=obs["board"]["lichen"],
            lichen_strains=obs["board"]["lichen_strains"],
            factory_occupancy_map=factory_occupancy_map,
            factories_per_team=obs["board"]["factories_per_team"],
            valid_spawns_mask=obs["board"]["valid_spawns_mask"],
        ),
        units=units,
        factories=factories,
        teams=teams,
    )
@dataclass
class Board:
    """Per-tile map layers plus factory-placement metadata."""

    rubble: np.ndarray  # rubble amount per tile
    ice: np.ndarray  # ice layer (presumably a binary presence mask — confirm)
    ore: np.ndarray  # ore layer (presumably a binary presence mask — confirm)
    lichen: np.ndarray  # lichen amount per tile
    lichen_strains: np.ndarray  # lichen strain id per tile
    factory_occupancy_map: np.ndarray  # strain id of factory covering each tile, -1 if none
    factories_per_team: int  # factories each team places during setup
    valid_spawns_mask: np.ndarray  # tiles where a factory may currently be placed
@dataclass
class GameState:
    """
    A GameState object at step env_steps. Copied from luxai_s2/state/state.py
    """

    env_steps: int
    env_cfg: EnvConfig
    board: Board
    # Mappings are keyed first by agent ("player_0"/"player_1"), then by id.
    units: Dict[str, Dict[str, Unit]] = field(default_factory=dict)
    factories: Dict[str, Dict[str, Factory]] = field(default_factory=dict)
    teams: Dict[str, Team] = field(default_factory=dict)

    @property
    def real_env_steps(self):
        """
        the actual env step in the environment, which subtracts the time spent bidding and placing factories
        """
        if self.env_cfg.BIDDING_SYSTEM:
            # + 1 for extra factory placement and + 1 for bidding step
            return self.env_steps - (self.board.factories_per_team * 2 + 1)
        else:
            return self.env_steps

    # various utility functions
    def is_day(self):
        """Whether the current real step falls in the day part of the cycle."""
        return self.real_env_steps % self.env_cfg.CYCLE_LENGTH < self.env_cfg.DAY_LENGTH
import math
from dataclasses import dataclass
from typing import List
import numpy as np
from rl_algo_impls.lux.kit.cargo import UnitCargo
from rl_algo_impls.lux.kit.config import EnvConfig, UnitConfig
# Row i is the (dx, dy) map offset for move direction i:
# a[1] = direction (0 = center, 1 = up, 2 = right, 3 = down, 4 = left)
move_deltas = np.array([[0, 0], [0, -1], [1, 0], [0, 1], [-1, 0]])
@dataclass
class Unit:
    """A single robot and helpers for constructing its raw action arrays.

    Raw actions are np.ndarrays of the form
    [action_type, direction, resource, amount, repeat, n].
    """

    team_id: int
    unit_id: str
    unit_type: str  # "LIGHT" or "HEAVY"
    pos: np.ndarray
    power: int
    cargo: UnitCargo
    env_cfg: EnvConfig
    unit_cfg: UnitConfig
    action_queue: List

    @property
    def agent_id(self):
        """Player id string ("player_0" or "player_1") owning this unit."""
        if self.team_id == 0:
            return "player_0"
        return "player_1"

    @property
    def cargo_space(self):
        return self.unit_cfg.CARGO_SPACE

    @property
    def battery_capacity(self):
        return self.unit_cfg.BATTERY_CAPACITY

    def action_queue_cost(self, game_state):
        """Power cost of overwriting this unit's action queue."""
        cost = self.env_cfg.ROBOTS[self.unit_type].ACTION_QUEUE_POWER_COST
        return cost

    def move_cost(self, game_state, direction):
        """Power cost of moving one tile in `direction`, or None if illegal.

        Returns None when the target tile is off the map or occupied by an
        opposing team's factory.
        """
        board = game_state.board
        target_pos = self.pos + move_deltas[direction]
        # Bounds check matches the rubble[x][y] indexing below: axis 0 is
        # checked against len(board.rubble). (The original kit had the two
        # axes swapped, which was masked by square maps.)
        if (
            target_pos[0] < 0
            or target_pos[1] < 0
            or target_pos[0] >= len(board.rubble)
            or target_pos[1] >= len(board.rubble[0])
        ):
            # print("Warning, tried to get move cost for going off the map", file=sys.stderr)
            return None
        factory_there = board.factory_occupancy_map[target_pos[0], target_pos[1]]
        # -1 marks "no factory"; any other strain we don't own is an
        # opposing factory and cannot be entered.
        if (
            factory_there not in game_state.teams[self.agent_id].factory_strains
            and factory_there != -1
        ):
            # print("Warning, tried to get move cost for going onto a opposition factory", file=sys.stderr)
            return None
        rubble_at_target = board.rubble[target_pos[0]][target_pos[1]]
        return math.floor(
            self.unit_cfg.MOVE_COST
            + self.unit_cfg.RUBBLE_MOVEMENT_COST * rubble_at_target
        )

    def move(self, direction, repeat=0, n=1):
        """Raw move action (0 = center, 1 = up, 2 = right, 3 = down, 4 = left)."""
        # (Removed a dead `if isinstance(direction, int): direction = direction`
        # branch from the original — it had no effect.)
        return np.array([0, direction, 0, 0, repeat, n])

    def transfer(
        self, transfer_direction, transfer_resource, transfer_amount, repeat=0, n=1
    ):
        """Raw transfer action; resource and direction must be in [0, 5)."""
        assert transfer_resource < 5 and transfer_resource >= 0
        assert transfer_direction < 5 and transfer_direction >= 0
        return np.array(
            [1, transfer_direction, transfer_resource, transfer_amount, repeat, n]
        )

    def pickup(self, pickup_resource, pickup_amount, repeat=0, n=1):
        """Raw pickup action; resource must be in [0, 5)."""
        assert pickup_resource < 5 and pickup_resource >= 0
        return np.array([2, 0, pickup_resource, pickup_amount, repeat, n])

    def dig_cost(self, game_state):
        """Power cost of a dig action."""
        return self.unit_cfg.DIG_COST

    def dig(self, repeat=0, n=1):
        """Raw dig action."""
        return np.array([3, 0, 0, 0, repeat, n])

    def self_destruct_cost(self, game_state):
        """Power cost of a self-destruct action."""
        return self.unit_cfg.SELF_DESTRUCT_COST

    def self_destruct(self, repeat=0, n=1):
        """Raw self-destruct action."""
        return np.array([4, 0, 0, 0, repeat, n])

    def recharge(self, x, repeat=0, n=1):
        """Raw recharge action with target amount x."""
        return np.array([5, 0, 0, x, repeat, n])

    def __str__(self) -> str:
        out = f"[{self.team_id}] {self.unit_id} {self.unit_type} at {self.pos}"
        return out
import math
from dataclasses import dataclass
from sys import stderr
import numpy as np
from rl_algo_impls.lux.kit.cargo import UnitCargo
from rl_algo_impls.lux.kit.config import EnvConfig
@dataclass
class Factory:
    """A factory building and helpers for its build/water actions."""

    team_id: int
    unit_id: str
    strain_id: int
    power: int
    cargo: UnitCargo
    pos: np.ndarray
    # lichen tiles connected to this factory
    # lichen_tiles: np.ndarray
    env_cfg: EnvConfig

    def build_heavy_metal_cost(self, game_state):
        """Metal required to build a HEAVY robot."""
        return self.env_cfg.ROBOTS["HEAVY"].METAL_COST

    def build_heavy_power_cost(self, game_state):
        """Power required to build a HEAVY robot."""
        return self.env_cfg.ROBOTS["HEAVY"].POWER_COST

    def can_build_heavy(self, game_state):
        """Whether this factory has the power and metal for a HEAVY robot."""
        enough_power = self.power >= self.build_heavy_power_cost(game_state)
        enough_metal = self.cargo.metal >= self.build_heavy_metal_cost(game_state)
        return enough_power and enough_metal

    def build_heavy(self):
        """Action value for building a HEAVY robot."""
        return 1

    def build_light_metal_cost(self, game_state):
        """Metal required to build a LIGHT robot."""
        return self.env_cfg.ROBOTS["LIGHT"].METAL_COST

    def build_light_power_cost(self, game_state):
        """Power required to build a LIGHT robot."""
        return self.env_cfg.ROBOTS["LIGHT"].POWER_COST

    def can_build_light(self, game_state):
        """Whether this factory has the power and metal for a LIGHT robot."""
        enough_power = self.power >= self.build_light_power_cost(game_state)
        enough_metal = self.cargo.metal >= self.build_light_metal_cost(game_state)
        return enough_power and enough_metal

    def build_light(self):
        """Action value for building a LIGHT robot."""
        return 0

    def water_cost(self, game_state):
        """
        Water required to perform water action
        """
        owned_tiles = (game_state.board.lichen_strains == self.strain_id).sum()
        return np.ceil(owned_tiles / self.env_cfg.LICHEN_WATERING_COST_FACTOR)

    def can_water(self, game_state):
        """Whether this factory has enough water to perform the water action."""
        return self.cargo.water >= self.water_cost(game_state)

    def water(self):
        """Action value for watering lichen."""
        return 2

    @property
    def pos_slice(self):
        """Pair of slices covering the 3x3 footprint centered on pos."""
        x, y = self.pos[0], self.pos[1]
        return slice(x - 1, x + 2), slice(y - 1, y + 2)
import logging
from time import perf_counter
from typing import List, Optional, TypeVar
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard.writer import SummaryWriter
from rl_algo_impls.shared.algorithm import Algorithm
from rl_algo_impls.shared.callbacks import Callback
from rl_algo_impls.shared.gae import compute_advantages
from rl_algo_impls.shared.policy.actor_critic import ActorCritic
from rl_algo_impls.shared.schedule import schedule, update_learning_rate
from rl_algo_impls.shared.stats import log_scalars
from rl_algo_impls.wrappers.vectorable_wrapper import (
VecEnv,
single_action_space,
single_observation_space,
)
# TypeVar so learn() returns the concrete (sub)class type for fluent chaining.
A2CSelf = TypeVar("A2CSelf", bound="A2C")
class A2C(Algorithm):
    """Synchronous Advantage Actor-Critic (A2C).

    Each iteration collects `n_steps` of experience from a vectorized env,
    computes GAE advantages, and performs a single gradient update over the
    whole rollout batch.
    """

    def __init__(
        self,
        policy: ActorCritic,
        env: VecEnv,
        device: torch.device,
        tb_writer: SummaryWriter,
        learning_rate: float = 7e-4,
        learning_rate_decay: str = "none",
        n_steps: int = 5,
        gamma: float = 0.99,
        gae_lambda: float = 1.0,
        ent_coef: float = 0.0,
        ent_coef_decay: str = "none",
        vf_coef: float = 0.5,
        max_grad_norm: float = 0.5,
        rms_prop_eps: float = 1e-5,
        use_rms_prop: bool = True,
        sde_sample_freq: int = -1,
        normalize_advantage: bool = False,
    ) -> None:
        """
        :param learning_rate: initial optimizer learning rate
        :param learning_rate_decay: schedule name ("none", "linear", ...)
        :param n_steps: rollout length per environment per update
        :param gamma: discount factor
        :param gae_lambda: GAE lambda (1.0 == Monte-Carlo advantages)
        :param ent_coef: entropy bonus coefficient (with its own decay schedule)
        :param vf_coef: value-loss coefficient
        :param max_grad_norm: gradient clipping threshold
        :param use_rms_prop: RMSprop (classic A2C) if True, else Adam
        :param sde_sample_freq: resample SDE noise every this many rollout
            steps; <= 0 resamples only once per rollout
        :param normalize_advantage: standardize advantages per batch
        """
        super().__init__(policy, env, device, tb_writer)
        self.policy = policy

        self.lr_schedule = schedule(learning_rate_decay, learning_rate)
        if use_rms_prop:
            self.optimizer = torch.optim.RMSprop(
                policy.parameters(), lr=learning_rate, eps=rms_prop_eps
            )
        else:
            self.optimizer = torch.optim.Adam(policy.parameters(), lr=learning_rate)
        self.n_steps = n_steps
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        self.vf_coef = vf_coef
        self.ent_coef_schedule = schedule(ent_coef_decay, ent_coef)
        self.max_grad_norm = max_grad_norm
        self.sde_sample_freq = sde_sample_freq
        self.normalize_advantage = normalize_advantage

    def learn(
        self: A2CSelf,
        train_timesteps: int,
        callbacks: Optional[List[Callback]] = None,
        total_timesteps: Optional[int] = None,
        start_timesteps: int = 0,
    ) -> A2CSelf:
        """Train for `train_timesteps` environment steps and return self.

        `total_timesteps` (default `train_timesteps`) is the denominator used
        for schedule progress; `start_timesteps` offsets progress so resumed
        runs continue their schedules where they left off.
        """
        if total_timesteps is None:
            total_timesteps = train_timesteps
        assert start_timesteps + train_timesteps <= total_timesteps

        epoch_dim = (self.n_steps, self.env.num_envs)
        step_dim = (self.env.num_envs,)
        obs_space = single_observation_space(self.env)
        act_space = single_action_space(self.env)

        # Rollout buffers, allocated once and reused every iteration.
        # np.bool_ replaces np.bool8, which was deprecated in NumPy 1.24 and
        # removed in NumPy 2.0 (same dtype).
        obs = np.zeros(epoch_dim + obs_space.shape, dtype=obs_space.dtype)
        actions = np.zeros(epoch_dim + act_space.shape, dtype=act_space.dtype)
        rewards = np.zeros(epoch_dim, dtype=np.float32)
        episode_starts = np.zeros(epoch_dim, dtype=np.bool_)
        values = np.zeros(epoch_dim, dtype=np.float32)
        logprobs = np.zeros(epoch_dim, dtype=np.float32)

        next_obs = self.env.reset()
        next_episode_starts = np.full(step_dim, True, dtype=np.bool_)

        timesteps_elapsed = start_timesteps
        while timesteps_elapsed < start_timesteps + train_timesteps:
            start_time = perf_counter()

            # Advance the learning-rate and entropy-coefficient schedules.
            progress = timesteps_elapsed / total_timesteps
            ent_coef = self.ent_coef_schedule(progress)
            learning_rate = self.lr_schedule(progress)
            update_learning_rate(self.optimizer, learning_rate)
            log_scalars(
                self.tb_writer,
                "charts",
                {
                    "ent_coef": ent_coef,
                    "learning_rate": learning_rate,
                },
                timesteps_elapsed,
            )

            # Rollout collection (no gradients; eval mode for e.g. norm layers).
            self.policy.eval()
            self.policy.reset_noise()
            for s in range(self.n_steps):
                timesteps_elapsed += self.env.num_envs
                if self.sde_sample_freq > 0 and s > 0 and s % self.sde_sample_freq == 0:
                    self.policy.reset_noise()
                obs[s] = next_obs
                episode_starts[s] = next_episode_starts
                actions[s], values[s], logprobs[s], clamped_action = self.policy.step(
                    next_obs
                )
                next_obs, rewards[s], next_episode_starts, _ = self.env.step(
                    clamped_action
                )

            advantages = compute_advantages(
                rewards,
                values,
                episode_starts,
                next_episode_starts,
                next_obs,
                self.policy,
                self.gamma,
                self.gae_lambda,
            )
            returns = advantages + values

            # Flatten (n_steps, n_envs, ...) buffers into one training batch.
            b_obs = torch.tensor(obs.reshape((-1,) + obs_space.shape)).to(self.device)
            b_actions = torch.tensor(actions.reshape((-1,) + act_space.shape)).to(
                self.device
            )
            b_advantages = torch.tensor(advantages.reshape(-1)).to(self.device)
            b_returns = torch.tensor(returns.reshape(-1)).to(self.device)

            if self.normalize_advantage:
                b_advantages = (b_advantages - b_advantages.mean()) / (
                    b_advantages.std() + 1e-8
                )

            # Single gradient step over the whole rollout (A2C, not PPO epochs).
            self.policy.train()
            logp_a, entropy, v = self.policy(b_obs, b_actions)

            pi_loss = -(b_advantages * logp_a).mean()
            value_loss = F.mse_loss(b_returns, v)
            entropy_loss = -entropy.mean()

            loss = pi_loss + self.vf_coef * value_loss + ent_coef * entropy_loss

            self.optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
            self.optimizer.step()

            # Explained variance of the value head over this rollout
            # (NaN when the return variance is zero).
            y_pred = values.reshape(-1)
            y_true = returns.reshape(-1)
            var_y = np.var(y_true).item()
            explained_var = (
                np.nan if var_y == 0 else 1 - np.var(y_true - y_pred).item() / var_y
            )

            end_time = perf_counter()
            rollout_steps = self.n_steps * self.env.num_envs
            self.tb_writer.add_scalar(
                "train/steps_per_second",
                (rollout_steps) / (end_time - start_time),
                timesteps_elapsed,
            )

            log_scalars(
                self.tb_writer,
                "losses",
                {
                    "loss": loss.item(),
                    "pi_loss": pi_loss.item(),
                    "v_loss": value_loss.item(),
                    "entropy_loss": entropy_loss.item(),
                    "explained_var": explained_var,
                },
                timesteps_elapsed,
            )

            if callbacks:
                if not all(
                    c.on_step(timesteps_elapsed=rollout_steps) for c in callbacks
                ):
                    logging.info(
                        f"Callback terminated training at {timesteps_elapsed} timesteps"
                    )
                    break

        return self
from copy import deepcopy
import optuna
from rl_algo_impls.runner.config import Config, EnvHyperparams, Hyperparams
from rl_algo_impls.shared.policy.optimize_on_policy import sample_on_policy_hyperparams
from rl_algo_impls.shared.vec_env import make_eval_env
from rl_algo_impls.tuning.optimize_env import sample_env_hyperparams
def sample_params(
    trial: optuna.Trial,
    base_hyperparams: Hyperparams,
    base_config: Config,
) -> Hyperparams:
    """Sample A2C hyperparameters for an Optuna trial.

    Returns a deep copy of `base_hyperparams` whose env, policy, and algo
    hyperparameters are overwritten with sampled values. Derived values
    (n_steps, gamma, gae_lambda, sde_sample_freq) are also recorded as trial
    user attrs so they appear alongside the raw sampled parameters.
    """
    hyperparams = deepcopy(base_hyperparams)

    base_env_hyperparams = EnvHyperparams(**hyperparams.env_hyperparams)
    env = make_eval_env(
        base_config,
        base_env_hyperparams,
        override_hparams={"n_envs": 1},
    )
    # try/finally ensures the probe env is closed even if a sampler raises
    # (the original leaked it on any exception).
    try:
        # env_hyperparams — presumably updates hyperparams.env_hyperparams in
        # place; the return value was unused in the original. TODO confirm.
        sample_env_hyperparams(trial, hyperparams.env_hyperparams, env)

        # policy_hyperparams
        policy_hyperparams = sample_on_policy_hyperparams(
            trial, hyperparams.policy_hyperparams, env
        )

        # algo_hyperparams (mutated in place on the copied hyperparams)
        algo_hyperparams = hyperparams.algo_hyperparams

        learning_rate = trial.suggest_float("learning_rate", 1e-5, 2e-3, log=True)
        learning_rate_decay = trial.suggest_categorical(
            "learning_rate_decay", ["none", "linear"]
        )
        # Sample the exponent so n_steps is always a power of two.
        n_steps_exp = trial.suggest_int("n_steps_exp", 1, 10)
        n_steps = 2**n_steps_exp
        trial.set_user_attr("n_steps", n_steps)
        # gamma/gae_lambda are sampled as "one minus" values so the search
        # concentrates near 1.
        gamma = 1.0 - trial.suggest_float("gamma_om", 1e-4, 1e-1, log=True)
        trial.set_user_attr("gamma", gamma)
        # NOTE(review): unlike gamma_om, gae_lambda_om is sampled uniformly
        # (no log=True) — confirm whether that asymmetry is intentional.
        gae_lambda = 1 - trial.suggest_float("gae_lambda_om", 1e-4, 1e-1)
        trial.set_user_attr("gae_lambda", gae_lambda)
        ent_coef = trial.suggest_float("ent_coef", 1e-8, 2.5e-2, log=True)
        ent_coef_decay = trial.suggest_categorical(
            "ent_coef_decay", ["none", "linear"]
        )
        vf_coef = trial.suggest_float("vf_coef", 0.1, 0.7)
        max_grad_norm = trial.suggest_float("max_grad_norm", 1e-1, 1e1, log=True)
        use_rms_prop = trial.suggest_categorical("use_rms_prop", [True, False])
        normalize_advantage = trial.suggest_categorical(
            "normalize_advantage", [True, False]
        )

        algo_hyperparams.update(
            {
                "learning_rate": learning_rate,
                "learning_rate_decay": learning_rate_decay,
                "n_steps": n_steps,
                "gamma": gamma,
                "gae_lambda": gae_lambda,
                "ent_coef": ent_coef,
                "ent_coef_decay": ent_coef_decay,
                "vf_coef": vf_coef,
                "max_grad_norm": max_grad_norm,
                "use_rms_prop": use_rms_prop,
                "normalize_advantage": normalize_advantage,
            }
        )

        if policy_hyperparams.get("use_sde", False):
            # Power of two bounded by n_steps (shares the exponent upper bound).
            sde_sample_freq = 2 ** trial.suggest_int(
                "sde_sample_freq_exp", 0, n_steps_exp
            )
            trial.set_user_attr("sde_sample_freq", sde_sample_freq)
            algo_hyperparams["sde_sample_freq"] = sde_sample_freq
        elif "sde_sample_freq" in algo_hyperparams:
            del algo_hyperparams["sde_sample_freq"]
    finally:
        env.close()

    return hyperparams
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.