code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import struct
import hmac
import hashlib
import sys
import ecdsa
from ecdsa.util import string_to_number, number_to_string
from ecdsa.curves import SECP256k1
from ecdsa.ellipticcurve import Point, INFINITY
from . import tools
from . import messages as proto
# High bit of a BIP32 child index marks hardened ("prime") derivation.
PRIME_DERIVATION_FLAG = 0x80000000
# ``byteindex(data, i)`` -> integer value of byte ``i`` of ``data``,
# bridging the Python 2/3 difference in how bytes indexing behaves.
if sys.version_info >= (3,):
    # Python 3: indexing bytes already yields an int.
    def byteindex(data, index):
        return data[index]
else:
    # Python 2: bytes is str, so indexing yields a one-character string.
    def byteindex(data, index):
        return ord(data[index])
def point_to_pubkey(point):
    """Serialize an EC point as a 33-byte compressed SEC public key."""
    order = SECP256k1.order
    x_bytes = number_to_string(point.x(), order)
    y_bytes = number_to_string(point.y(), order)
    raw = x_bytes + y_bytes
    # Prefix is 0x02 for an even y coordinate, 0x03 for odd; byte 63 of the
    # concatenation is the least significant byte of y.
    prefix = struct.pack('B', (byteindex(raw, 63) & 1) + 2)
    return prefix + raw[0:32]
def sec_to_public_pair(pubkey):
    """Convert a public key in sec binary format to a public pair."""
    # Bytes 1..32 hold the big-endian x coordinate.
    x = string_to_number(pubkey[1:33])
    sec0 = pubkey[:1]
    # Only compressed keys (prefix 0x02 = even y, 0x03 = odd y) are accepted.
    if sec0 not in (b'\2', b'\3'):
        raise ValueError("Compressed pubkey expected")

    def public_pair_for_x(generator, x, is_even):
        # Solve y^2 = x^3 + a*x + b (mod p) and pick the root whose parity
        # matches the SEC prefix.
        curve = generator.curve()
        p = curve.p()
        alpha = (pow(x, 3, p) + curve.a() * x + curve.b()) % p
        beta = ecdsa.numbertheory.square_root_mod_prime(alpha, p)
        # ``is_even == bool(beta & 1)`` means beta has the wrong parity
        # (odd beta when an even y was requested, or vice versa), so the
        # other root p - beta is the one we want.
        if is_even == bool(beta & 1):
            return (x, p - beta)
        return (x, beta)
    return public_pair_for_x(ecdsa.ecdsa.generator_secp256k1, x, is_even=(sec0 == b'\2'))
def is_prime(n):
    """Return True if child index ``n`` has the hardened-derivation bit set.

    BIP32 calls indices >= 0x80000000 "hardened" (historically "prime");
    such children cannot be derived from a public parent key.
    """
    # Idiom fix: the original used a C-style cast ``(bool)(...)``, which in
    # Python is just a parenthesized name; call bool() directly instead.
    return bool(n & PRIME_DERIVATION_FLAG)
def fingerprint(pubkey):
    """Return the BIP32 fingerprint of ``pubkey``: the first four bytes of
    its HASH160, interpreted as an integer."""
    digest = tools.hash_160(pubkey)
    return string_to_number(digest[:4])
def get_address(public_node, address_type):
    """Compute the address for a node's public key.

    ``address_type`` is forwarded to ``tools.public_key_to_bc_address``
    (presumably the address version byte -- confirm in tools).
    """
    return tools.public_key_to_bc_address(public_node.public_key, address_type)
def public_ckd(public_node, n):
    """Derive a child node by walking the path ``n`` (a list of child
    indices) from ``public_node`` using public CKD."""
    if not isinstance(n, list):
        raise ValueError('Parameter must be a list')
    node = proto.HDNodeType()
    node.CopyFrom(public_node)
    for child_index in n:
        subnode = get_subnode(node, child_index)
        node.CopyFrom(subnode)
    return node
def get_subnode(node, i):
    # Public Child key derivation (CKD) algorithm of BIP32
    """Derive the public child ``i`` of ``node`` (non-hardened only)."""
    i_as_bytes = struct.pack(">L", i)
    if is_prime(i):
        # Hardened derivation requires the private key, which we don't have.
        raise ValueError("Prime derivation not supported")
    # Public derivation: I = HMAC-SHA512(chain_code, pubkey || index)
    data = node.public_key + i_as_bytes
    I64 = hmac.HMAC(key=node.chain_code, msg=data, digestmod=hashlib.sha512).digest()
    # Left half is the scalar added to the parent key; right half becomes
    # the child chain code.
    I_left_as_exponent = string_to_number(I64[:32])
    node_out = proto.HDNodeType()
    node_out.depth = node.depth + 1
    node_out.child_num = i
    node_out.chain_code = I64[32:]
    node_out.fingerprint = fingerprint(node.public_key)
    # BIP32 magic converts old public key to new public point
    x, y = sec_to_public_pair(node.public_key)
    point = I_left_as_exponent * SECP256k1.generator + Point(SECP256k1.curve, x, y, SECP256k1.order)
    # BIP32 declares such children invalid; callers should try the next index.
    if point == INFINITY:
        raise ValueError("Point cannot be INFINITY")
    # Convert public point to compressed public key
    node_out.public_key = point_to_pubkey(point)
    return node_out
def serialize(node, version=0x0488B21E):
    """Serialize an HD node as a base58check string (BIP32 xpub/xprv).

    Layout: version(4) | depth(1) | fingerprint(4) | child index(4)
    | chain code(32) | key material(33) | checksum(4).
    """
    payload = struct.pack('>IBII', version, node.depth, node.fingerprint, node.child_num)
    payload += node.chain_code
    if node.private_key:
        # Private key material is prefixed with a zero byte to reach 33 bytes.
        payload += b'\x00' + node.private_key
    else:
        payload += node.public_key
    checksum = tools.Hash(payload)[:4]
    return tools.b58encode(payload + checksum)
def deserialize(xpub):
    """Parse a base58check-encoded BIP32 node (xpub/xprv) into an HDNodeType.

    Raises ValueError when the trailing 4-byte checksum does not match.
    """
    data = tools.b58decode(xpub, None)
    if tools.Hash(data[:-4])[:4] != data[-4:]:
        raise ValueError("Checksum failed")
    node = proto.HDNodeType()
    # Skip the 4-byte version prefix, then read depth/fingerprint/child_num.
    node.depth, node.fingerprint, node.child_num = struct.unpack('>BII', data[4:13])
    node.chain_code = data[13:45]
    key = data[45:-4]
    if byteindex(key, 0) == 0:
        # A leading zero byte marks private key material.
        node.private_key = key[1:]
    else:
        node.public_key = key
    return node
from __future__ import print_function
from . import messages as proto
def pin_info(pin):
    """Default PIN callback: print the PIN requested by the device."""
    message = "Device asks for PIN %s" % pin
    print(message)
def button_press(yes_no):
    """Default button callback: print which button the user pressed."""
    label = '"y"' if yes_no else '"n"'
    print("User pressed", label)
def pprint(msg):
    """Render a protobuf message as '<ClassName> (N bytes):\\n<body>'."""
    type_name = msg.__class__.__name__
    return "<%s> (%d bytes):\n%s" % (type_name, msg.ByteSize(), msg)
class DebugLink(object):
    """Debug-protocol client for a device/emulator.

    Wraps a transport and exposes helpers to read internal device state
    (PIN, matrix, mnemonic, layout, ...) and to simulate user input.
    A transport session is opened on construction; release it with close().
    """
    def __init__(self, transport, pin_func=pin_info, button_func=button_press):
        self.transport = transport
        self.transport.session_begin()
        # Callbacks invoked with the encoded PIN / the pressed button,
        # mainly useful for logging in tests.
        self.pin_func = pin_func
        self.button_func = button_func
    def close(self):
        """End the transport session opened in __init__."""
        self.transport.session_end()
    def _call(self, msg, nowait=False):
        """Send a debug message; return the response unless ``nowait``."""
        print("DEBUGLINK SEND", pprint(msg))
        self.transport.write(msg)
        if nowait:
            return
        ret = self.transport.read()
        print("DEBUGLINK RECV", pprint(ret))
        return ret
    def read_pin(self):
        """Return (pin, matrix) from the device's debug state."""
        obj = self._call(proto.DebugLinkGetState())
        print("Read PIN:", obj.pin)
        print("Read matrix:", obj.matrix)
        return (obj.pin, obj.matrix)
    def read_pin_encoded(self):
        """Read the real PIN and return it encoded as keypad positions."""
        # NOTE(review): this issues two DebugLinkGetState round-trips (one
        # here, one inside encode_pin) -- presumably harmless; confirm.
        pin, _ = self.read_pin()
        pin_encoded = self.encode_pin(pin)
        self.pin_func(pin_encoded)
        return pin_encoded
    def encode_pin(self, pin):
        """Translate a real PIN into keypad positions via the PIN matrix."""
        _, matrix = self.read_pin()
        # Now we have real PIN and PIN matrix.
        # We have to encode that into encoded pin,
        # because application must send back positions
        # on keypad, not a real PIN.
        pin_encoded = ''.join([str(matrix.index(p) + 1) for p in pin])
        print("Encoded PIN:", pin_encoded)
        return pin_encoded
    def read_layout(self):
        """Return the device's current display layout."""
        obj = self._call(proto.DebugLinkGetState())
        return obj.layout
    def read_mnemonic(self):
        """Return the recovery mnemonic stored on the device."""
        obj = self._call(proto.DebugLinkGetState())
        return obj.mnemonic
    def read_node(self):
        """Return the device's HD node."""
        obj = self._call(proto.DebugLinkGetState())
        return obj.node
    def read_recovery_word(self):
        """Return (fake_word, word_position) during the recovery workflow."""
        obj = self._call(proto.DebugLinkGetState())
        return (obj.recovery_fake_word, obj.recovery_word_pos)
    def read_reset_word(self):
        """Return the word currently shown during device reset."""
        obj = self._call(proto.DebugLinkGetState())
        return obj.reset_word
    def read_reset_entropy(self):
        """Return the internal entropy used during device reset."""
        obj = self._call(proto.DebugLinkGetState())
        return obj.reset_entropy
    def read_passphrase_protection(self):
        """Return whether passphrase protection is enabled."""
        obj = self._call(proto.DebugLinkGetState())
        return obj.passphrase_protection
    def press_button(self, yes_no):
        """Simulate a button press (True = confirm, False = cancel)."""
        print("Pressing", yes_no)
        self.button_func(yes_no)
        self._call(proto.DebugLinkDecision(yes_no=yes_no), nowait=True)
    def press_yes(self):
        """Simulate pressing the confirm button."""
        self.press_button(True)
    def press_no(self):
        """Simulate pressing the cancel button."""
        self.press_button(False)
    def stop(self):
        """Stop the debug link (no response expected)."""
        self._call(proto.DebugLinkStop(), nowait=True)
    def memory_read(self, address, length):
        """Read ``length`` bytes of device memory at ``address``."""
        obj = self._call(proto.DebugLinkMemoryRead(address=address, length=length))
        return obj.memory
    def memory_write(self, address, memory, flash=False):
        """Write ``memory`` to device memory at ``address`` (optionally to flash)."""
        self._call(proto.DebugLinkMemoryWrite(address=address, memory=memory, flash=flash), nowait=True)
    def flash_erase(self, sector):
        """Erase the given flash sector (no response expected)."""
        self._call(proto.DebugLinkFlashErase(sector=sector), nowait=True)
import sys
import hashlib
b = 256  # bit length of encoded integers and points
q = 2 ** 255 - 19  # prime of the underlying field
l = 2 ** 252 + 27742317777372353535851937790883648493  # order of the base point
def H(m):
    """Return the raw SHA-512 digest of ``m``."""
    digest = hashlib.sha512(m)
    return digest.digest()
def expmod(b, e, m):
    """Return ``b ** e mod m`` for a non-negative exponent ``e``.

    Delegates to the built-in three-argument ``pow`` (C implementation)
    instead of the original recursive square-and-multiply, which recursed
    once per bit of ``e``. The explicit ``e == 0`` guard preserves the
    historical behaviour of returning 1 even when ``m == 1``.
    """
    if e < 0:
        raise ValueError('negative exponent')
    if e == 0:
        return 1
    return pow(b, e, m)
def inv(x):
    """Multiplicative inverse of x modulo q, via Fermat's little theorem."""
    return expmod(x, q - 2, q)
# Curve constant of the twisted Edwards curve -x^2 + y^2 = 1 + d*x^2*y^2.
d = -121665 * inv(121666)
# I = 2^((q-1)/4) mod q, a square root of -1; used by xrecover when the
# first candidate root fails.
I = expmod(2, (q - 1) >> 2, q)
def xrecover(y):
    """Recover the even x coordinate for a given y on the curve.

    Solves x^2 = (y^2 - 1) / (d*y^2 + 1) mod q. A candidate root is
    xx^((q+3)/8); if it does not square back to xx, multiply by I.
    """
    xx = (y * y - 1) * inv(d * y * y + 1)
    x = expmod(xx, (q + 3) >> 3, q)
    if (x * x - xx) % q != 0:
        x = (x * I) % q
    # Canonicalize to the even root.
    if x % 2 != 0:
        x = q - x
    return x
# Base point B: y = 4/5 mod q, with x the even root recovered from y.
By = 4 * inv(5)
Bx = xrecover(By)
B = [Bx % q, By % q]
def edwards(P, Q):
    """Add two points on the twisted Edwards curve."""
    x1, y1 = P
    x2, y2 = Q
    # Common term d*x1*x2*y1*y2 of the addition-law denominators.
    t = d * x1 * x2 * y1 * y2
    x3 = (x1 * y2 + x2 * y1) * inv(1 + t)
    y3 = (y1 * y2 + x1 * x2) * inv(1 - t)
    return [x3 % q, y3 % q]
def scalarmult(P, e):
    """Multiply point ``P`` by a non-negative scalar ``e`` (double-and-add)."""
    result = [0, 1]  # neutral element of the group
    addend = P
    # Process e LSB-first, doubling the addend each step; equivalent to the
    # original recursive square-and-multiply.
    while e:
        if e & 1:
            result = edwards(result, addend)
        addend = edwards(addend, addend)
        e >>= 1
    return result
def encodeint(y):
    """Encode the low b bits of integer ``y`` as b/8 little-endian bytes."""
    return bytes([(y >> (8 * i)) & 0xFF for i in range(b >> 3)])
def encodepoint(P):
    """Encode a point as b/8 bytes: b-1 low bits of y, top bit = parity of x."""
    x, y = P
    packed = (y & ((1 << (b - 1)) - 1)) | ((x & 1) << (b - 1))
    return bytes([(packed >> (8 * i)) & 0xFF for i in range(b >> 3)])
def bit(h, i):
    """Return bit ``i`` of byte string ``h`` (LSB-first within each byte)."""
    byte = h[i >> 3]
    return (byte >> (i & 7)) & 1
def publickey(sk):
    """Derive the encoded public key for secret key ``sk``.

    The secret scalar ``a`` is "clamped": bits 0-2 and 255 are cleared and
    bit 254 is set, with bits 3..253 taken from SHA-512(sk).
    """
    h = H(sk)
    a = 2 ** (b - 2) + sum(2 ** i * bit(h, i) for i in range(3, b - 2))
    A = scalarmult(B, a)
    return encodepoint(A)
def Hint(m):
    """SHA-512 of ``m`` interpreted as a 512-bit little-endian integer."""
    return int.from_bytes(H(m), 'little')
def signature(m, sk, pk):
    """Sign message ``m`` with secret key ``sk`` and public key ``pk``.

    Returns the 64-byte signature R || S where r = Hint(h[32:64] || m),
    R = r*B and S = r + Hint(R || pk || m) * a (mod l).
    """
    h = H(sk)
    # Clamped secret scalar, same construction as in publickey().
    a = 2 ** (b - 2) + sum(2 ** i * bit(h, i) for i in range(3, b - 2))
    # Deterministic nonce from the second half of H(sk) and the message.
    r = Hint(bytes([h[i] for i in range(b >> 3, b >> 2)]) + m)
    R = scalarmult(B, r)
    S = (r + Hint(encodepoint(R) + pk + m) * a) % l
    return encodepoint(R) + encodeint(S)
def isoncurve(P):
    """Test the curve equation -x^2 + y^2 - 1 - d*x^2*y^2 == 0 (mod q)."""
    x, y = P
    return (-x * x + y * y - 1 - d * x * x * y * y) % q == 0
def decodeint(s):
    """Decode byte string ``s`` as a little-endian integer (bits 0..b-1)."""
    return sum(bit(s, i) << i for i in range(0, b))
def decodepoint(s):
    """Decode an encoded point into [x, y].

    The low b-1 bits are y; the top bit selects the parity of x.
    Raises ValueError if the decoded point is not on the curve.
    """
    y = sum(2 ** i * bit(s, i) for i in range(0, b - 1))
    x = xrecover(y)
    # xrecover returns the even root; flip if the sign bit asks for odd x.
    if x & 1 != bit(s, b - 1):
        x = q - x
    P = [x, y]
    if not isoncurve(P):
        raise ValueError('decoding point that is not on curve')
    return P
def checkvalid(s, m, pk):
    """Verify signature ``s`` on message ``m`` under public key ``pk``.

    Checks S*B == R + Hint(R || pk || m)*A and raises ValueError on any
    failure (wrong lengths, bad point encoding, or failed equation).
    """
    if len(s) != b >> 2:  # 64-byte signature
        raise ValueError('signature length is wrong')
    if len(pk) != b >> 3:  # 32-byte public key
        raise ValueError('public-key length is wrong')
    R = decodepoint(s[0:b >> 3])
    A = decodepoint(pk)
    S = decodeint(s[b >> 3:b >> 2])
    h = Hint(encodepoint(R) + pk + m)
    if scalarmult(B, S) != edwards(R, scalarmult(A, h)):
        raise ValueError('signature does not pass verification')
"""World."""
from __future__ import annotations
import os
from collections import OrderedDict
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, ClassVar
import mujoco
import numpy as np
import xmltodict
import yaml
import safety_gymnasium
from safety_gymnasium.utils.common_utils import build_xml_from_dict, convert, rot2quat
from safety_gymnasium.utils.task_utils import get_body_xvelp
# Default location to look for xmls folder:
# (the installed package root; 'agent_base' and asset paths are resolved
# relative to it)
BASE_DIR = os.path.dirname(safety_gymnasium.__file__)
@dataclass
class Engine:
    """Holder for the live MuJoCo simulation handles (model + data)."""

    # pylint: disable=no-member
    model: mujoco.MjModel = None
    data: mujoco.MjData = None

    def update(self, model, data):
        """Swap in freshly built MuJoCo model/data handles."""
        self.model, self.data = model, data
class World:  # pylint: disable=too-many-instance-attributes
    """This class starts mujoco simulation.

    It assembles the MJCF XML for a task (agent + obstacles + scene),
    instantiates ``mujoco.MjModel``/``MjData`` from it, and exposes small
    helpers for reading and writing simulator state.
    """
    # Default configuration (this should not be nested since it gets copied)
    # *NOTE:* Changes to this configuration should also be reflected in `Builder` configuration
    DEFAULT: ClassVar[dict[str, Any]] = {
        'agent_base': 'assets/xmls/car.xml',  # Which agent XML to use as the base
        'agent_xy': np.zeros(2),  # agent XY location
        'agent_rot': 0,  # agent rotation about Z axis
        'floor_size': [3.5, 3.5, 0.1],  # Used for displaying the floor
        # FreeGeoms -- this is processed and added by the Builder class
        'free_geoms': {},  # map from name -> object dict
        # Geoms -- similar to objects, but they are immovable and fixed in the scene.
        'geoms': {},  # map from name -> geom dict
        # Mocaps -- mocap objects which are used to control other objects
        'mocaps': {},
        'floor_type': 'mat',
        'task_name': None,
    }
    def __init__(self, agent, obstacles, config=None) -> None:
        """config - JSON string or dict of configuration. See self.parse()"""
        if config:
            self.parse(config)  # Parse configuration
        self.first_reset = True
        self._agent = agent  # pylint: disable=no-member
        self._obstacles = obstacles
        # The fields below are populated by build().
        self.agent_base_path = None
        self.agent_base_xml = None
        self.xml = None
        self.xml_string = None
        self.engine = Engine()
        self.bind_engine()
    def parse(self, config):
        """Parse a config dict - see self.DEFAULT for description."""
        self.config = deepcopy(self.DEFAULT)
        self.config.update(deepcopy(config))
        for key, value in self.config.items():
            assert key in self.DEFAULT, f'Bad key {key}'
            # Each config key is mirrored as an instance attribute, which is
            # why this class carries pylint no-member suppressions.
            setattr(self, key, value)
    def bind_engine(self):
        """Send the new engine instance to the agent and obstacles."""
        self._agent.set_engine(self.engine)
        for obstacle in self._obstacles:
            obstacle.set_engine(self.engine)
    def build(self):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        """Build a world, including generating XML and moving objects.

        The MJCF document is manipulated as nested (Ordered)dicts via
        xmltodict, then unparsed to a string and compiled into
        mujoco.MjModel/MjData, which are published through self.engine.
        """
        # Read in the base XML (contains agent, camera, floor, etc)
        self.agent_base_path = os.path.join(BASE_DIR, self.agent_base)  # pylint: disable=no-member
        with open(self.agent_base_path, encoding='utf-8') as f:  # pylint: disable=invalid-name
            self.agent_base_xml = f.read()
        self.xml = xmltodict.parse(self.agent_base_xml)  # Nested OrderedDict objects
        # FormulaOne uses a more accurate integrator and smaller timestep.
        if self.task_name in ['FormulaOne']:  # pylint: disable=no-member
            self.xml['mujoco']['option']['@integrator'] = 'RK4'
            self.xml['mujoco']['option']['@timestep'] = '0.004'
        if 'compiler' not in self.xml['mujoco']:
            compiler = xmltodict.parse(
                f"""<compiler
                angle="radian"
                meshdir="{BASE_DIR}/assets/meshes"
                texturedir="{BASE_DIR}/assets/textures"
                />""",
            )
            self.xml['mujoco']['compiler'] = compiler['compiler']
        else:
            self.xml['mujoco']['compiler'].update(
                {
                    '@angle': 'radian',
                    '@meshdir': os.path.join(BASE_DIR, 'assets', 'meshes'),
                    '@texturedir': os.path.join(BASE_DIR, 'assets', 'textures'),
                },
            )
        # Convenience accessor for xml dictionary
        worldbody = self.xml['mujoco']['worldbody']
        # Move agent position to starting position
        worldbody['body']['@pos'] = convert(
            # pylint: disable-next=no-member
            np.r_[self.agent_xy, self._agent.z_height],
        )
        worldbody['body']['@quat'] = convert(rot2quat(self.agent_rot))  # pylint: disable=no-member
        # We need this because xmltodict skips over single-item lists in the tree
        worldbody['body'] = [worldbody['body']]
        if 'geom' in worldbody:
            worldbody['geom'] = [worldbody['geom']]
        else:
            worldbody['geom'] = []
        # Add equality section if missing
        if 'equality' not in self.xml['mujoco']:
            self.xml['mujoco']['equality'] = OrderedDict()
        equality = self.xml['mujoco']['equality']
        if 'weld' not in equality:
            equality['weld'] = []
        # Add asset section if missing
        if 'asset' not in self.xml['mujoco']:
            self.xml['mujoco']['asset'] = {}
        if 'texture' not in self.xml['mujoco']['asset']:
            self.xml['mujoco']['asset']['texture'] = []
        if 'material' not in self.xml['mujoco']['asset']:
            self.xml['mujoco']['asset']['material'] = []
        if 'mesh' not in self.xml['mujoco']['asset']:
            self.xml['mujoco']['asset']['mesh'] = []
        material = self.xml['mujoco']['asset']['material']
        texture = self.xml['mujoco']['asset']['texture']
        mesh = self.xml['mujoco']['asset']['mesh']
        # load all assets config from .yaml file
        with open(os.path.join(BASE_DIR, 'configs/assets.yaml'), encoding='utf-8') as file:
            assets_config = yaml.load(file, Loader=yaml.FullLoader)  # noqa: S506
        texture.append(assets_config['textures']['skybox'])
        if self.floor_type == 'mat':  # pylint: disable=no-member
            texture.append(assets_config['textures']['matplane'])
            material.append(assets_config['materials']['matplane'])
        elif self.floor_type == 'village':  # pylint: disable=no-member
            texture.append(assets_config['textures']['village_floor'])
            material.append(assets_config['materials']['village_floor'])
        elif self.floor_type == 'mud':  # pylint: disable=no-member
            texture.append(assets_config['textures']['mud_floor'])
            material.append(assets_config['materials']['mud_floor'])
        elif self.floor_type == 'none':  # pylint: disable=no-member
            # Shrink the floor to (effectively) nothing instead of removing it.
            self.floor_size = [1e-9, 1e-9, 0.1]  # pylint: disable=attribute-defined-outside-init
        else:
            raise NotImplementedError
        # Only assets referenced by mesh-typed geoms are pulled into the model.
        selected_textures = {}
        selected_materials = {}
        selected_meshes = {}
        for config in (
            # pylint: disable=no-member
            list(self.geoms.values())
            + list(self.free_geoms.values())
            + list(self.mocaps.values())
            # pylint: enable=no-member
        ):
            if 'type' not in config:
                for geom in config['geoms']:
                    if geom['type'] != 'mesh':
                        continue
                    mesh_name = geom['mesh']
                    if mesh_name in assets_config['textures']:
                        selected_textures[mesh_name] = assets_config['textures'][mesh_name]
                        selected_materials[mesh_name] = assets_config['materials'][mesh_name]
                        selected_meshes[mesh_name] = assets_config['meshes'][mesh_name]
            elif config['type'] == 'mesh':
                mesh_name = config['mesh']
                if mesh_name in assets_config['textures']:
                    selected_textures[mesh_name] = assets_config['textures'][mesh_name]
                    selected_materials[mesh_name] = assets_config['materials'][mesh_name]
                    selected_meshes[mesh_name] = assets_config['meshes'][mesh_name]
        texture += selected_textures.values()
        material += selected_materials.values()
        mesh += selected_meshes.values()
        # Add light to the XML dictionary
        light = xmltodict.parse(
            """<b>
            <light cutoff="100" diffuse="1 1 1" dir="0 0 -1" directional="true"
            exponent="1" pos="0 0 0.5" specular="0 0 0" castshadow="false"/>
            </b>""",
        )
        worldbody['light'] = light['b']['light']
        # Add floor to the XML dictionary if missing
        if not any(g.get('@name') == 'floor' for g in worldbody['geom']):
            floor = xmltodict.parse(
                """
                <geom name="floor" type="plane" condim="6"/>
                """,
            )
            worldbody['geom'].append(floor['geom'])
        # Make sure floor renders the same for every world
        for g in worldbody['geom']:  # pylint: disable=invalid-name
            if g['@name'] == 'floor':
                g.update(
                    {
                        '@size': convert(self.floor_size),  # pylint: disable=no-member
                        '@rgba': '1 1 1 1',
                    },
                )
                if self.floor_type == 'mat':  # pylint: disable=no-member
                    g.update({'@material': 'matplane'})
                elif self.floor_type == 'village':  # pylint: disable=no-member
                    g.update({'@material': 'village_floor'})
                elif self.floor_type == 'mud':  # pylint: disable=no-member
                    g.update({'@material': 'mud_floor'})
                elif self.floor_type == 'none':  # pylint: disable=no-member
                    pass
                else:
                    raise NotImplementedError
        # Add cameras to the XML dictionary
        cameras = xmltodict.parse(
            """<b>
            <camera name="fixednear" pos="0 -2 2" zaxis="0 -1 1"/>
            <camera name="fixedfar" pos="0 -5 5" zaxis="0 -1 1"/>
            <camera name="fixedfar++" pos="0 -10 10" zaxis="0 -1 1"/>
            </b>""",
        )
        worldbody['camera'] = cameras['b']['camera']
        # Build and add a tracking camera (logic needed to ensure orientation correct)
        theta = self.agent_rot + np.pi  # pylint: disable=no-member
        # Camera frame axes / position rotated by theta so the camera sits
        # behind the agent regardless of its initial heading.
        xyaxes = {
            'x1': np.cos(theta),
            'x2': -np.sin(theta),
            'x3': 0,
            'y1': np.sin(theta),
            'y2': np.cos(theta),
            'y3': 1,
        }
        pos = {
            'xp': 0 * np.cos(theta) + (-2) * np.sin(theta),
            'yp': 0 * (-np.sin(theta)) + (-2) * np.cos(theta),
            'zp': 2,
        }
        track_camera = xmltodict.parse(
            """<b>
            <camera name="track" mode="track" pos="{xp} {yp} {zp}"
            xyaxes="{x1} {x2} {x3} {y1} {y2} {y3}"/>
            </b>""".format(
                **pos,
                **xyaxes,
            ),
        )
        # Attach the tracking camera to the agent body, normalizing the
        # xmltodict single-item-vs-list representation.
        if 'camera' in worldbody['body'][0]:
            if isinstance(worldbody['body'][0]['camera'], list):
                worldbody['body'][0]['camera'] = worldbody['body'][0]['camera'] + [
                    track_camera['b']['camera'],
                ]
            else:
                worldbody['body'][0]['camera'] = [
                    worldbody['body'][0]['camera'],
                    track_camera['b']['camera'],
                ]
        else:
            worldbody['body'][0]['camera'] = [
                track_camera['b']['camera'],
            ]
        # Add free_geoms to the XML dictionary
        for name, object in self.free_geoms.items():  # pylint: disable=redefined-builtin, no-member
            assert object['name'] == name, f'Inconsistent {name} {object}'
            object = object.copy()  # don't modify original object
            object['freejoint'] = object['name']
            if name == 'push_box':
                # The push box gets a hand-written body: a central cube plus
                # four corner pillars (col1..col4) for pushable contacts.
                object['quat'] = rot2quat(object.pop('rot'))
                dim = object['geoms'][0]['size'][0]
                object['geoms'][0]['dim'] = dim
                object['geoms'][0]['width'] = dim / 2
                object['geoms'][0]['x'] = dim
                object['geoms'][0]['y'] = dim
                # pylint: disable-next=consider-using-f-string
                collision_xml = """
                <freejoint name="{name}"/>
                <geom name="{name}" type="{type}" size="{size}" density="{density}"
                rgba="{rgba}" group="{group}"/>
                <geom name="col1" type="{type}" size="{width} {width} {dim}" density="{density}"
                rgba="{rgba}" group="{group}" pos="{x} {y} 0"/>
                <geom name="col2" type="{type}" size="{width} {width} {dim}" density="{density}"
                rgba="{rgba}" group="{group}" pos="-{x} {y} 0"/>
                <geom name="col3" type="{type}" size="{width} {width} {dim}" density="{density}"
                rgba="{rgba}" group="{group}" pos="{x} -{y} 0"/>
                <geom name="col4" type="{type}" size="{width} {width} {dim}" density="{density}"
                rgba="{rgba}" group="{group}" pos="-{x} -{y} 0"/>
                """.format(
                    **{k: convert(v) for k, v in object['geoms'][0].items()},
                )
                if len(object['geoms']) == 2:
                    # Optional second geom is a purely visual mesh.
                    # pylint: disable-next=consider-using-f-string
                    visual_xml = """
                    <geom name="{name}" type="mesh" mesh="{mesh}" material="{material}" pos="{pos}"
                    rgba="1 1 1 1" group="{group}" contype="{contype}" conaffinity="{conaffinity}" density="{density}"
                    euler="{euler}"/>
                    """.format(
                        **{k: convert(v) for k, v in object['geoms'][1].items()},
                    )
                else:
                    visual_xml = """"""
                body = xmltodict.parse(
                    # pylint: disable-next=consider-using-f-string
                    f"""
                    <body name="{object['name']}" pos="{convert(object['pos'])}" quat="{convert(object['quat'])}">
                    {collision_xml}
                    {visual_xml}
                    </body>
                    """,
                )
            else:
                if object['geoms'][0]['type'] == 'mesh':
                    # Mesh free geoms use condim=6 contacts.
                    object['geoms'][0]['condim'] = 6
                object['quat'] = rot2quat(object.pop('rot'))
                body = build_xml_from_dict(object)
            # Append new body to world, making it a list optionally
            # Add the object to the world
            worldbody['body'].append(body['body'])
        # Add mocaps to the XML dictionary
        for name, mocap in self.mocaps.items():  # pylint: disable=no-member
            # Mocap names are suffixed with 'mocap'
            assert mocap['name'] == name, f'Inconsistent {name}'
            assert (
                name.replace('mocap', 'obj') in self.free_geoms  # pylint: disable=no-member
            ), f'missing object for {name}'  # pylint: disable=no-member
            # Add the object to the world
            mocap = mocap.copy()  # don't modify original object
            mocap['quat'] = rot2quat(mocap.pop('rot'))
            mocap['mocap'] = 'true'
            # Mocap bodies are kinematic handles and must never collide.
            mocap['geoms'][0]['contype'] = 0
            mocap['geoms'][0]['conaffinity'] = 0
            mocap['geoms'][0]['pos'] = mocap.pop('pos')
            body = build_xml_from_dict(mocap)
            worldbody['body'].append(body['body'])
            # Add weld to equality list: the mocap drags its '...obj' twin.
            mocap['body1'] = name
            mocap['body2'] = name.replace('mocap', 'obj')
            weld = xmltodict.parse(
                # pylint: disable-next=consider-using-f-string
                """
                <weld name="{name}" body1="{body1}" body2="{body2}" solref=".02 1.5"/>
                """.format(
                    **{k: convert(v) for k, v in mocap.items()},
                ),
            )
            equality['weld'].append(weld['weld'])
        # Add geoms to XML dictionary
        for name, geom in self.geoms.items():  # pylint: disable=no-member
            assert geom['name'] == name, f'Inconsistent {name} {geom}'
            geom = geom.copy()  # don't modify original object
            for item in geom['geoms']:
                # Default contype/conaffinity to 1 (collidable) if unset.
                if 'contype' not in item:
                    item['contype'] = item.get('contype', 1)
                if 'conaffinity' not in item:
                    item['conaffinity'] = item.get('conaffinity', 1)
            if 'rot' in geom:
                geom['quat'] = rot2quat(geom.pop('rot'))
            body = build_xml_from_dict(geom)
            # Append new body to world, making it a list optionally
            # Add the object to the world
            worldbody['body'].append(body['body'])
        # Instantiate simulator
        # print(xmltodict.unparse(self.xml, pretty=True))
        self.xml_string = xmltodict.unparse(self.xml)
        model = mujoco.MjModel.from_xml_string(self.xml_string)  # pylint: disable=no-member
        data = mujoco.MjData(model)  # pylint: disable=no-member
        # Recompute simulation intrinsics from new position
        mujoco.mj_forward(model, data)  # pylint: disable=no-member
        self.engine.update(model, data)
    def rebuild(self, config=None, state=True):
        """Build a new sim from a model if the model changed.

        If ``state`` is True, the current time/qpos/qvel/act are captured
        before rebuilding and restored afterwards.
        """
        if state:
            old_state = self.get_state()
        if config:
            self.parse(config)
        self.build()
        if state:
            self.set_state(old_state)
        mujoco.mj_forward(self.model, self.data)  # pylint: disable=no-member
    def reset(self, build=True):
        """Reset the world, optionally rebuilding the simulation from XML."""
        if build:
            self.build()
    def body_com(self, name):
        """Get the center of mass of a named body in the simulator world reference frame."""
        return self.data.body(name).subtree_com.copy()
    def body_pos(self, name):
        """Get the position of a named body in the simulator world reference frame."""
        return self.data.body(name).xpos.copy()
    def body_mat(self, name):
        """Get the rotation matrix of a named body in the simulator world reference frame."""
        return self.data.body(name).xmat.copy().reshape(3, -1)
    def body_vel(self, name):
        """Get the velocity of a named body in the simulator world reference frame."""
        return get_body_xvelp(self.model, self.data, name).copy()
    def get_state(self):
        """Returns a copy of the simulator state (time, qpos, qvel, act)."""
        state = {
            'time': np.copy(self.data.time),
            'qpos': np.copy(self.data.qpos),
            'qvel': np.copy(self.data.qvel),
        }
        if self.model.na == 0:
            # Model has no activation states.
            state['act'] = None
        else:
            state['act'] = np.copy(self.data.act)
        return state
    def set_state(self, value):
        """
        Sets the state from an dict.
        Args:
            - value (dict): the desired state, as produced by get_state();
              must contain 'time', 'qpos', 'qvel' and (when the model has
              activation states) 'act'.
        """
        self.data.time = value['time']
        self.data.qpos[:] = np.copy(value['qpos'])
        self.data.qvel[:] = np.copy(value['qvel'])
        if self.model.na != 0:
            self.data.act[:] = np.copy(value['act'])
    @property
    def model(self):
        """Access model easily."""
        return self.engine.model
    @property
    def data(self):
        """Access data easily."""
        return self.engine.data
"""Safety-Gymnasium Environments."""
import copy
from gymnasium import make as gymnasium_make
from gymnasium import register as gymnasium_register
from safety_gymnasium import vector, wrappers
from safety_gymnasium.tasks.safe_multi_agent.tasks.velocity.safe_mujoco_multi import make_ma
from safety_gymnasium.utils.registration import make, register
from safety_gymnasium.version import __version__
__all__ = [
    'register',
    'make',
    'gymnasium_make',
    'gymnasium_register',
]
# Version suffix appended to every registered environment id.
VERSION = 'v0'
ROBOT_NAMES = ('Point', 'Car', 'Doggo', 'Racecar', 'Ant')
# Also register '...Vision-v0' variants (image observations).
MAKE_VISION_ENVIRONMENTS = True
# Also register '...Debug-v0' variants (keyboard-controllable agents).
MAKE_DEBUG_ENVIRONMENTS = True
# ========================================
# Helper Methods for Easy Registration
# ========================================
# Every environment id is '<PREFIX><Robot><Task>-<VERSION>'.
PREFIX = 'Safety'
robots = ROBOT_NAMES
def __register_helper(env_id, entry_point, spec_kwargs=None, **kwargs):
    """Register an environment to both Safety-Gymnasium and Gymnasium registries.

    The plain-Gymnasium twin is registered as '<Name>Gymnasium-<version>'
    and routed through the gymnasium_conversion wrapper module.
    """
    env_name, dash, version = env_id.partition('-')
    if spec_kwargs is None:
        spec_kwargs = {}
    register(
        id=env_id,
        entry_point=entry_point,
        kwargs=spec_kwargs,
        **kwargs,
    )
    # spec_kwargs is deep-copied so the two registries don't share config dicts.
    gymnasium_register(
        id=f'{env_name}Gymnasium{dash}{version}',
        entry_point='safety_gymnasium.wrappers.gymnasium_conversion:make_gymnasium_environment',
        kwargs={'env_id': f'{env_name}Gymnasium{dash}{version}', **copy.deepcopy(spec_kwargs)},
        **kwargs,
    )
def __combine(tasks, agents, max_episode_steps):
    """Combine tasks and agents together to register environment tasks.

    For every (task, agent) pair this registers the base environment and,
    depending on the module flags, a Vision variant (image observations) and
    -- for keyboard-controllable agents -- a Debug variant.
    """
    for task_name, task_config in tasks.items():
        # Vector inputs
        for robot_name in agents:
            env_id = f'{PREFIX}{robot_name}{task_name}-{VERSION}'
            combined_config = copy.deepcopy(task_config)
            combined_config.update({'agent_name': robot_name})
            __register_helper(
                env_id=env_id,
                entry_point='safety_gymnasium.builder:Builder',
                spec_kwargs={'config': combined_config, 'task_id': env_id},
                max_episode_steps=max_episode_steps,
            )
            if MAKE_VISION_ENVIRONMENTS:
                # Vision inputs
                vision_env_name = f'{PREFIX}{robot_name}{task_name}Vision-{VERSION}'
                vision_config = {
                    'observe_vision': True,
                    'observation_flatten': False,
                }
                vision_config.update(combined_config)
                # NOTE(review): 'task_id' is the base env_id here, not
                # vision_env_name -- presumably intentional (config is keyed
                # by the base task); confirm before changing.
                __register_helper(
                    env_id=vision_env_name,
                    entry_point='safety_gymnasium.builder:Builder',
                    spec_kwargs={'config': vision_config, 'task_id': env_id},
                    max_episode_steps=max_episode_steps,
                )
            if MAKE_DEBUG_ENVIRONMENTS and robot_name in ['Point', 'Car', 'Racecar']:
                # Keyboard inputs for debugging
                debug_env_name = f'{PREFIX}{robot_name}{task_name}Debug-{VERSION}'
                debug_config = {'debug': True}
                debug_config.update(combined_config)
                __register_helper(
                    env_id=debug_env_name,
                    entry_point='safety_gymnasium.builder:Builder',
                    spec_kwargs={'config': debug_config, 'task_id': env_id},
                    max_episode_steps=max_episode_steps,
                )
# ----------------------------------------
# Safety Navigation
# ----------------------------------------
# Button Environments
# ----------------------------------------
button_tasks = {'Button0': {}, 'Button1': {}, 'Button2': {}}
__combine(button_tasks, robots, max_episode_steps=1000)
# Push Environments
# ----------------------------------------
push_tasks = {'Push0': {}, 'Push1': {}, 'Push2': {}}
__combine(push_tasks, robots, max_episode_steps=1000)
# Goal Environments
# ----------------------------------------
goal_tasks = {'Goal0': {}, 'Goal1': {}, 'Goal2': {}}
__combine(goal_tasks, robots, max_episode_steps=1000)
# Circle Environments
# ----------------------------------------
circle_tasks = {'Circle0': {}, 'Circle1': {}, 'Circle2': {}}
__combine(circle_tasks, robots, max_episode_steps=500)
# Run Environments
# ----------------------------------------
run_tasks = {'Run0': {}}
__combine(run_tasks, robots, max_episode_steps=500)
# ----------------------------------------
# Safety Vision
# ----------------------------------------
# Building Button Environments
# ----------------------------------------
building_button_tasks = {
    'BuildingButton0': {'floor_conf.type': 'mud'},
    'BuildingButton1': {'floor_conf.type': 'mud'},
    'BuildingButton2': {'floor_conf.type': 'mud'},
}
__combine(building_button_tasks, robots, max_episode_steps=1000)
# Building Push Environments
# ----------------------------------------
building_push_tasks = {
    'BuildingPush0': {'floor_conf.type': 'mud'},
    'BuildingPush1': {'floor_conf.type': 'mud'},
    'BuildingPush2': {'floor_conf.type': 'mud'},
}
__combine(building_push_tasks, robots, max_episode_steps=1000)
# Building Goal Environments
# ----------------------------------------
building_goal_tasks = {
    'BuildingGoal0': {'floor_conf.type': 'mud'},
    'BuildingGoal1': {'floor_conf.type': 'mud'},
    'BuildingGoal2': {'floor_conf.type': 'mud'},
}
__combine(building_goal_tasks, robots, max_episode_steps=1000)
# Race Environments
# ----------------------------------------
race_tasks = {
    'Race0': {'floor_conf.type': 'village'},
    'Race1': {'floor_conf.type': 'village'},
    'Race2': {'floor_conf.type': 'village'},
}
__combine(race_tasks, robots, max_episode_steps=500)
# Racing Environments
# ----------------------------------------
# NOTE: the name 'race_tasks' is rebound here for the FormulaOne task set.
race_tasks = {
    'FormulaOne0': {'floor_conf.type': 'none'},
    'FormulaOne1': {'floor_conf.type': 'none'},
    'FormulaOne2': {'floor_conf.type': 'none'},
}
__combine(race_tasks, robots, max_episode_steps=50000000000000)  # effectively no time limit
# Fading Environments
# ----------------------------------------
fading_tasks = {'FadingEasy0': {}, 'FadingEasy1': {}, 'FadingEasy2': {}}
__combine(fading_tasks, robots, max_episode_steps=1000)
fading_tasks = {'FadingHard0': {}, 'FadingHard1': {}, 'FadingHard2': {}}
__combine(fading_tasks, robots, max_episode_steps=1000)
# ----------------------------------------
# Safety Velocity
# ----------------------------------------
# Velocity-constrained MuJoCo locomotion tasks. The -v1 ids below point at
# separate task modules (revised versions of the -v0 tasks).
__register_helper(
    env_id='SafetyHalfCheetahVelocity-v0',
    entry_point='safety_gymnasium.tasks.safe_velocity.safety_half_cheetah_velocity_v0:SafetyHalfCheetahVelocityEnv',
    max_episode_steps=1000,
    reward_threshold=4800.0,
)
__register_helper(
    env_id='SafetyHopperVelocity-v0',
    entry_point='safety_gymnasium.tasks.safe_velocity.safety_hopper_velocity_v0:SafetyHopperVelocityEnv',
    max_episode_steps=1000,
    reward_threshold=3800.0,
)
__register_helper(
    env_id='SafetySwimmerVelocity-v0',
    entry_point='safety_gymnasium.tasks.safe_velocity.safety_swimmer_velocity_v0:SafetySwimmerVelocityEnv',
    max_episode_steps=1000,
    reward_threshold=360.0,
)
# Walker2d and Humanoid define no reward_threshold.
__register_helper(
    env_id='SafetyWalker2dVelocity-v0',
    max_episode_steps=1000,
    entry_point='safety_gymnasium.tasks.safe_velocity.safety_walker2d_velocity_v0:SafetyWalker2dVelocityEnv',
)
__register_helper(
    env_id='SafetyAntVelocity-v0',
    entry_point='safety_gymnasium.tasks.safe_velocity.safety_ant_velocity_v0:SafetyAntVelocityEnv',
    max_episode_steps=1000,
    reward_threshold=6000.0,
)
__register_helper(
    env_id='SafetyHumanoidVelocity-v0',
    entry_point='safety_gymnasium.tasks.safe_velocity.safety_humanoid_velocity_v0:SafetyHumanoidVelocityEnv',
    max_episode_steps=1000,
)
__register_helper(
    env_id='SafetyHalfCheetahVelocity-v1',
    entry_point='safety_gymnasium.tasks.safe_velocity.safety_half_cheetah_velocity_v1:SafetyHalfCheetahVelocityEnv',
    max_episode_steps=1000,
    reward_threshold=4800.0,
)
__register_helper(
    env_id='SafetyHopperVelocity-v1',
    entry_point='safety_gymnasium.tasks.safe_velocity.safety_hopper_velocity_v1:SafetyHopperVelocityEnv',
    max_episode_steps=1000,
    reward_threshold=3800.0,
)
__register_helper(
    env_id='SafetySwimmerVelocity-v1',
    entry_point='safety_gymnasium.tasks.safe_velocity.safety_swimmer_velocity_v1:SafetySwimmerVelocityEnv',
    max_episode_steps=1000,
    reward_threshold=360.0,
)
__register_helper(
    env_id='SafetyWalker2dVelocity-v1',
    max_episode_steps=1000,
    entry_point='safety_gymnasium.tasks.safe_velocity.safety_walker2d_velocity_v1:SafetyWalker2dVelocityEnv',
)
__register_helper(
    env_id='SafetyAntVelocity-v1',
    entry_point='safety_gymnasium.tasks.safe_velocity.safety_ant_velocity_v1:SafetyAntVelocityEnv',
    max_episode_steps=1000,
    reward_threshold=6000.0,
)
__register_helper(
    env_id='SafetyHumanoidVelocity-v1',
    entry_point='safety_gymnasium.tasks.safe_velocity.safety_humanoid_velocity_v1:SafetyHumanoidVelocityEnv',
    max_episode_steps=1000,
)
def __combine_multi(tasks, agents, max_episode_steps):
    """Register one multi-agent environment per (agent, task) pairing.

    For every task/agent combination a vector-observation environment is
    registered; optional vision and keyboard-debug variants are added when
    the corresponding module flags are enabled.
    """
    builder_entry = 'safety_gymnasium.tasks.safe_multi_agent.builder:Builder'
    for task_name, task_config in tasks.items():
        for robot_name in agents:
            # Standard (vector-observation) variant.
            env_id = f'{PREFIX}{robot_name}{task_name}-{VERSION}'
            base_config = copy.deepcopy(task_config)
            base_config['agent_name'] = robot_name
            __register_helper(
                env_id=env_id,
                entry_point=builder_entry,
                spec_kwargs={'config': base_config, 'task_id': env_id},
                max_episode_steps=max_episode_steps,
                disable_env_checker=True,
            )
            if MAKE_VISION_ENVIRONMENTS:
                # Vision variant: pixel observations, left unflattened.
                vision_env_name = f'{PREFIX}{robot_name}{task_name}Vision-{VERSION}'
                vision_config = {'observe_vision': True, 'observation_flatten': False}
                vision_config.update(base_config)
                # NOTE(review): spec_kwargs reuses the base env_id as task_id here,
                # mirroring the original registration — confirm this is intended
                # rather than vision_env_name.
                __register_helper(
                    env_id=vision_env_name,
                    entry_point=builder_entry,
                    spec_kwargs={'config': vision_config, 'task_id': env_id},
                    max_episode_steps=max_episode_steps,
                    disable_env_checker=True,
                )
            if MAKE_DEBUG_ENVIRONMENTS and robot_name in ('Point', 'Car', 'Racecar'):
                # Debug variant: agent driven by keyboard input.
                debug_env_name = f'{PREFIX}{robot_name}{task_name}Debug-{VERSION}'
                debug_config = {'debug': True}
                debug_config.update(base_config)
                __register_helper(
                    env_id=debug_env_name,
                    entry_point=builder_entry,
                    spec_kwargs={'config': debug_config, 'task_id': env_id},
                    max_episode_steps=max_episode_steps,
                    disable_env_checker=True,
                )
# ----------------------------------------
# Safety Multi-Agent
# ----------------------------------------
# Multi Goal Environments
# ----------------------------------------
# Register the MultiGoal task family for every available robot.
# (Fix: stray non-Python extraction residue trailing this statement was removed.)
fading_tasks = {'MultiGoal0': {}, 'MultiGoal1': {}, 'MultiGoal2': {}}
__combine_multi(fading_tasks, robots, max_episode_steps=1000)
"""Env builder."""
from __future__ import annotations
from dataclasses import asdict, dataclass
from typing import Any, ClassVar
import gymnasium
import numpy as np
from safety_gymnasium import tasks
from safety_gymnasium.bases.base_task import BaseTask
from safety_gymnasium.utils.common_utils import ResamplingError, quat2zalign
from safety_gymnasium.utils.task_utils import get_task_class_name
@dataclass
class RenderConf:
    r"""Render options.
    Attributes:
        mode (str): render mode, can be 'human', 'rgb_array', 'depth_array'.
        width (int): width of the rendered image.
        height (int): height of the rendered image.
        camera_id (int): camera id to render.
        camera_name (str): camera name to render.
    Note:
        ``camera_id`` and ``camera_name`` can only be set one of them.
    """
    # ``None`` for mode means no rendering was requested at construction time.
    mode: str | None = None
    width: int = 256
    height: int = 256
    camera_id: int | None = None
    camera_name: str | None = None
# pylint: disable-next=too-many-instance-attributes
class Builder(gymnasium.Env, gymnasium.utils.EzPickle):
    r"""An entry point to organize different environments, while showing unified API for users.

    The Builder class constructs the basic control framework of environments, while
    the details were hidden. There is another important parts, which is **task module**
    including all task specific operation.

    Methods:

    - :meth:`_setup_simulation`: Set up mujoco the simulation instance.
    - :meth:`_get_task`: Instantiate a task object.
    - :meth:`set_seed`: Set the seed for the environment.
    - :meth:`reset`: Reset the environment.
    - :meth:`step`: Step the environment.
    - :meth:`_reward`: Calculate the reward.
    - :meth:`_cost`: Calculate the cost.
    - :meth:`render`: Render the environment.

    Attributes:

    - :attr:`task_id` (str): Task id.
    - :attr:`config` (dict): Pre-defined configuration of the environment, which is passed via
      :meth:`safety_gymnasium.register()`.
    - :attr:`render_parameters` (RenderConf): Render parameters.
    - :attr:`action_space` (gymnasium.spaces.Box): Action space.
    - :attr:`observation_space` (gymnasium.spaces.Dict): Observation space.
    - :attr:`obs_space_dict` (dict): Observation space dictionary.
    - :attr:`done` (bool): Whether the episode is done.
    """

    metadata: ClassVar[dict[str, Any]] = {
        'render_modes': [
            'human',
            'rgb_array',
            'depth_array',
        ],
        'render_fps': 30,
    }

    def __init__(  # pylint: disable=too-many-arguments
        self,
        task_id: str,
        config: dict | None = None,
        render_mode: str | None = None,
        width: int = 256,
        height: int = 256,
        camera_id: int | None = None,
        camera_name: str | None = None,
    ) -> None:
        """Initialize the builder.

        Note:
            The ``camera_name`` parameter can be chosen from:

            - **human**: The camera used for freely moving around and can get input
              from keyboard real time.
            - **vision**: The camera used for vision observation, which is fixed in front of the
              agent's head.
            - **track**: The camera used for tracking the agent.
            - **fixednear**: The camera used for top-down observation.
            - **fixedfar**: The camera used for top-down observation, but is further than **fixednear**.

        Args:
            task_id (str): Task id.
            config (dict): Pre-defined configuration of the environment, which is passed via
              :meth:`safety_gymnasium.register`.
            render_mode (str): Render mode, can be 'human', 'rgb_array', 'depth_array'.
            width (int): Width of the rendered image.
            height (int): Height of the rendered image.
            camera_id (int): Camera id to render.
            camera_name (str): Camera name to render.
        """
        gymnasium.utils.EzPickle.__init__(self, config=config)
        self.task_id: str = task_id
        self.config: dict = config
        self._seed: int | None = None
        self._setup_simulation()
        self.first_reset: bool | None = None
        self.steps: int | None = None
        # NOTE: despite the name, this holds the dict produced by :meth:`_cost`
        # (keyed cost terms plus 'cost_sum'), not a scalar.
        self.cost: dict | None = None
        self.terminated: bool = True
        self.truncated: bool = False
        self.render_parameters = RenderConf(render_mode, width, height, camera_id, camera_name)

    def _setup_simulation(self) -> None:
        """Set up mujoco the simulation instance."""
        self.task = self._get_task()
        self.set_seed()

    def _get_task(self) -> BaseTask:
        """Instantiate a task object resolved from :attr:`task_id`."""
        class_name = get_task_class_name(self.task_id)
        assert hasattr(tasks, class_name), f'Task={class_name} not implemented.'
        task_class = getattr(tasks, class_name)
        task = task_class(config=self.config)
        task.build_observation_space()
        return task

    def set_seed(self, seed: int | None = None) -> None:
        """Set internal random state seeds; draws a random seed when none is given."""
        self._seed = np.random.randint(2**32, dtype='int64') if seed is None else seed
        self.task.random_generator.set_random_seed(self._seed)

    def reset(
        self,
        *,
        seed: int | None = None,
        options: dict | None = None,
    ) -> tuple[np.ndarray, dict]:  # pylint: disable=arguments-differ
        """Reset the environment and return observations."""
        info = {}
        if not self.task.mechanism_conf.randomize_layout:
            # A fixed layout requires a fixed seed so every episode is identical.
            assert seed is None, 'Cannot set seed if randomize_layout=False'
            self.set_seed(0)
        elif seed is not None:
            self.set_seed(seed)
        self.terminated = False
        self.truncated = False
        self.steps = 0  # Count of steps taken in this episode
        self.task.reset()
        self.task.specific_reset()
        self.task.update_world()  # refresh specific settings
        self.task.agent.reset()
        cost = self._cost()
        assert cost['cost_sum'] == 0, f'World has starting cost! {cost}'
        # Reset stateful parts of the environment
        self.first_reset = False  # Built our first world successfully
        # Return an observation
        return (self.task.obs(), info)

    def step(self, action: np.ndarray) -> tuple[np.ndarray, float, float, bool, bool, dict]:
        """Take a step and return observation, reward, cost, terminated, truncated, info."""
        assert not self.done, 'Environment must be reset before stepping.'
        # np.asarray replaces np.array(..., copy=False), whose semantics changed
        # (and may raise) under NumPy 2.x; behavior is identical on older NumPy.
        action = np.asarray(action)  # cast to ndarray
        if action.shape != self.action_space.shape:  # check action dimension
            raise ValueError('Action dimension mismatch')

        info = {}

        exception = self.task.simulation_forward(action)
        if exception:
            self.truncated = True

            reward = self.task.reward_conf.reward_exception
            info['cost_exception'] = 1.0
            # Fix: ``cost`` was previously unbound on this branch, raising a
            # NameError at the return statement whenever the physics diverged.
            cost = info['cost_exception']
        else:
            # Reward processing
            reward = self._reward()
            # Constraint violations
            info.update(self._cost())
            cost = info['cost_sum']
            self.task.specific_step()
            # Goal processing
            if self.task.goal_achieved:
                info['goal_met'] = True
                if self.task.mechanism_conf.continue_goal:
                    # Update the internal layout
                    # so we can correctly resample (given objects have moved)
                    self.task.update_layout()
                    # Try to build a new goal, end if we fail
                    if self.task.mechanism_conf.terminate_resample_failure:
                        try:
                            self.task.update_world()
                        except ResamplingError:
                            # Normal end of episode
                            self.terminated = True
                    else:
                        # Try to make a goal, which could raise a ResamplingError exception
                        self.task.update_world()
                else:
                    self.terminated = True

        # termination of death processing
        if not self.task.agent.is_alive():
            self.terminated = True

        # Timeout
        self.steps += 1
        if self.steps >= self.task.num_steps:
            self.truncated = True  # Maximum number of steps in an episode reached

        if self.render_parameters.mode == 'human':
            self.render()
        return self.task.obs(), reward, cost, self.terminated, self.truncated, info

    def _reward(self) -> float:
        """Calculate the current rewards.

        Call exactly once per step.
        """
        reward = self.task.calculate_reward()

        # Intrinsic reward for uprightness
        if self.task.reward_conf.reward_orientation:
            zalign = quat2zalign(
                self.task.data.get_body_xquat(self.task.reward_conf.reward_orientation_body),
            )
            reward += self.task.reward_conf.reward_orientation_scale * zalign

        # Clip reward
        reward_clip = self.task.reward_conf.reward_clip
        if reward_clip:
            in_range = -reward_clip < reward < reward_clip
            if not in_range:
                reward = np.clip(reward, -reward_clip, reward_clip)
                print('Warning: reward was outside of range!')

        return reward

    def _cost(self) -> dict:
        """Calculate the current costs and return a dict.

        Call exactly once per step.
        """
        cost = self.task.calculate_cost()

        # Optionally remove shaping from reward functions.
        if self.task.cost_conf.constrain_indicator:
            for k in list(cost.keys()):
                cost[k] = float(cost[k] > 0.0)  # Indicator function

        self.cost = cost

        return cost

    def render(self) -> np.ndarray | None:
        """Call underlying :meth:`safety_gymnasium.bases.underlying.Underlying.render` directly.

        Width and height in parameters are constant defaults for rendering
        frames for humans. (not used for vision)

        The set of supported modes varies per environment. (And some
        third-party environments may not support rendering at all.)
        By convention, if render_mode is:

        - None (default): no render is computed.
        - human: render return None.
          The environment is continuously rendered in the current display or terminal. Usually for human consumption.
        - rgb_array: return a single frame representing the current state of the environment.
          A frame is a numpy.ndarray with shape (x, y, 3) representing RGB values for an x-by-y pixel image.
        - rgb_array_list: return a list of frames representing the states of the environment since the last reset.
          Each frame is a numpy.ndarray with shape (x, y, 3), as with `rgb_array`.
        - depth_array: return a single frame representing the current state of the environment.
          A frame is a numpy.ndarray with shape (x, y) representing depth values for an x-by-y pixel image.
        - depth_array_list: return a list of frames representing the states of the environment since the last reset.
          Each frame is a numpy.ndarray with shape (x, y), as with `depth_array`.
        """
        assert self.render_parameters.mode, 'Please specify the render mode when you make env.'
        assert (
            not self.task.observe_vision
        ), 'When you use vision envs, you should not call this function explicitly.'
        return self.task.render(cost=self.cost, **asdict(self.render_parameters))

    @property
    def action_space(self) -> gymnasium.spaces.Box:
        """Helper to get action space."""
        return self.task.action_space

    @property
    def observation_space(self) -> gymnasium.spaces.Box | gymnasium.spaces.Dict:
        """Helper to get observation space."""
        return self.task.observation_space

    @property
    def obs_space_dict(self) -> dict[str, gymnasium.spaces.Box]:
        """Helper to get observation space dictionary."""
        return self.task.obs_info.obs_space_dict

    @property
    def done(self) -> bool:
        """Whether this episode is ended."""
        return self.terminated or self.truncated

    @property
    def render_mode(self) -> str:
        """The render mode."""
        return self.render_parameters.mode
"""Base mujoco task."""
from __future__ import annotations
import abc
from copy import deepcopy
from dataclasses import dataclass
import gymnasium
import mujoco
import numpy as np
from gymnasium.envs.mujoco.mujoco_rendering import OffScreenViewer
import safety_gymnasium
from safety_gymnasium import agents
from safety_gymnasium.assets.color import COLOR
from safety_gymnasium.assets.free_geoms import FREE_GEOMS_REGISTER
from safety_gymnasium.assets.geoms import GEOMS_REGISTER
from safety_gymnasium.assets.mocaps import MOCAPS_REGISTER
from safety_gymnasium.bases.base_object import FreeGeom, Geom, Mocap
from safety_gymnasium.utils.common_utils import MujocoException
from safety_gymnasium.utils.keyboard_viewer import KeyboardViewer
from safety_gymnasium.utils.random_generator import RandomGenerator
from safety_gymnasium.world import World
@dataclass
class RenderConf:
    r"""Render options.
    Attributes:
        labels (bool): Whether to render labels.
        lidar_markers (bool): Whether to render lidar markers.
        lidar_radius (float): Radius of the lidar markers.
        lidar_size (float): Size of the lidar markers.
        lidar_offset_init (float): Initial offset of the lidar markers.
        lidar_offset_delta (float): Delta offset of the lidar markers.
    """
    labels: bool = False
    lidar_markers: bool = True
    lidar_radius: float = 0.15
    lidar_size: float = 0.025
    # Successive lidar rings start at lidar_offset_init and are stacked
    # lidar_offset_delta apart (see Underlying.render).
    lidar_offset_init: float = 0.5
    lidar_offset_delta: float = 0.06
@dataclass
class PlacementsConf:
    r"""Placement options.
    Attributes:
        placements (dict): Generated during running.
        extents (list): Placement limits (min X, min Y, max X, max Y).
        margin (float): Additional margin added to keepout when placing objects.
    """
    # NOTE(review): these attributes carry no type annotations, so @dataclass
    # treats them as plain class attributes (shared defaults), not dataclass
    # fields — confirm that is intentional before adding annotations, since
    # annotating them would change __init__/__eq__/__repr__ semantics.
    placements = None
    # FIXME: fix mutable default arguments # pylint: disable=fixme
    extents = (-2, -2, 2, 2)
    margin = 0.0
@dataclass
class SimulationConf:
    r"""Simulation options.
    Note:
        Frameskip is the number of physics simulation steps per environment step and is sampled
        as a binomial distribution.
        For deterministic steps, set frameskip_binom_p = 1.0 (always take max frameskip).
    Attributes:
        frameskip_binom_n (int): Number of draws trials in binomial distribution (max frameskip).
        frameskip_binom_p (float): Probability of trial return (controls distribution).
    """
    frameskip_binom_n: int = 10
    # With the default p = 1.0 the binomial draw is deterministic: every env
    # step advances physics exactly frameskip_binom_n times.
    frameskip_binom_p: float = 1.0
@dataclass
class VisionEnvConf:
    r"""Vision observation parameters.
    Attributes:
        vision_size (tuple): Size (width, height) of vision observation.
    """
    # NOTE(review): unannotated, so @dataclass treats this as a shared class
    # attribute rather than an instance field — confirm intent.
    vision_size = (256, 256)
@dataclass
class FloorConf:
    r"""Floor options.
    Attributes:
        type (str): Type of floor.
        size (tuple): Size of floor in environments.
    """
    type: str = 'mat'  # choose from 'mat' and 'village'
    # Presumably (x, y, z) extents of the floor geom — TODO confirm against
    # the world XML that consumes this config.
    size: tuple = (3.5, 3.5, 0.1)
@dataclass
class WorldInfo:
    r"""World information generated in running.
    Attributes:
        layout (dict): Layout of the world.
        reset_layout (dict): Saved layout of the world after reset.
        world_config_dict (dict): World configuration dictionary.
    """
    # All three are populated at runtime (see Underlying._build / reset);
    # they stay None until the first world build.
    layout: dict | None = None
    reset_layout: dict | None = None
    world_config_dict: dict | None = None
class Underlying(abc.ABC):  # pylint: disable=too-many-instance-attributes
    r"""Base class which is in charge of mujoco and underlying process.

    Methods:

    - :meth:`_parse`: Parse the configuration from dictionary.
    - :meth:`_build_agent`: Build the agent instance.
    - :meth:`_add_geoms`: Add geoms into current environment.
    - :meth:`_add_free_geoms`: Add free geoms into current environment.
    - :meth:`_add_mocaps`: Add mocaps into current environment.
    - :meth:`reset`: Reset the environment, it is dependent on :meth:`_build`.
    - :meth:`_build`: Build the mujoco instance of environment from configurations.
    - :meth:`simulation_forward`: Forward the simulation.
    - :meth:`update_layout`: Update the layout dictionary of the world to update states of some objects.
    - :meth:`_set_goal`: Set the goal position in physical simulator.
    - :meth:`_render_lidar`: Render the lidar.
    - :meth:`_render_compass`: Render the compass.
    - :meth:`_render_area`: Render the area.
    - :meth:`_render_sphere`: Render the sphere.
    - :meth:`render`: Render the environment, it may call :meth:`_render_lidar`, :meth:`_render_compass`
      :meth:`_render_area`, :meth:`_render_sphere`.
    - :meth:`_get_viewer`: Get the viewer instance according to render_mode.
    - :meth:`_update_viewer`: Update the viewer when world is updated.
    - :meth:`_obs_lidar`: Get observations from the lidar.
    - :meth:`_obs_compass`: Get observations from the compass.
    - :meth:`_build_placements_dict`: Build the placements dictionary for different types of object.
    - :meth:`_build_world_config`: Build the world configuration, combine separate configurations from
      different types of objects together as world configuration.

    Attributes:

    - :attr:`sim_conf` (SimulationConf): Simulation options.
    - :attr:`placements_conf` (PlacementsConf): Placement options.
    - :attr:`render_conf` (RenderConf): Render options.
    - :attr:`vision_env_conf` (VisionEnvConf): Vision observation parameters.
    - :attr:`floor_conf` (FloorConf): Floor options.
    - :attr:`random_generator` (RandomGenerator): Random generator instance.
    - :attr:`world` (World): World, which is in charge of mujoco.
    - :attr:`world_info` (WorldInfo): World information generated according to environment in running.
    - :attr:`viewer` (Union[KeyboardViewer, OffScreenViewer]): Viewer for environment.
    - :attr:`_viewers` (dict): Viewers.
    - :attr:`_geoms` (dict): Geoms which are added into current environment.
    - :attr:`_free_geoms` (dict): FreeGeoms which are added into current environment.
    - :attr:`_mocaps` (dict): Mocaps which are added into current environment.
    - :attr:`agent_name` (str): Name of the agent in current environment.
    - :attr:`observe_vision` (bool): Whether to observe vision from the agent.
    - :attr:`debug` (bool): Whether to enable debug mode, which is pre-config during registration.
    - :attr:`observation_flatten` (bool): Whether to flatten the observation.
    - :attr:`agent` (Agent): Agent instance added into current environment.
    - :attr:`action_noise` (float): Magnitude of independent per-component gaussian action noise.
    - :attr:`model`: mjModel.
    - :attr:`data`: mjData.
    - :attr:`_obstacles` (list): All types of object in current environment.
    """

    def __init__(self, config: dict | None = None) -> None:
        """Initialize the engine.

        Args:
            config (dict): Configuration dictionary, used to pre-config some attributes
              according to tasks via :meth:`safety_gymnasium.register`.
        """
        self.sim_conf = SimulationConf()
        self.placements_conf = PlacementsConf()
        self.render_conf = RenderConf()
        self.vision_env_conf = VisionEnvConf()
        self.floor_conf = FloorConf()

        self.random_generator = RandomGenerator()

        self.world = None
        self.world_info = WorldInfo()

        self.viewer = None
        self._viewers = {}

        # Obstacles which are added in environments.
        self._geoms = {}
        self._free_geoms = {}
        self._mocaps = {}

        # something are parsed from pre-defined configs
        self.agent_name = None
        self.observe_vision = False  # Observe vision from the agent
        self.debug = False
        self.observation_flatten = True  # Flatten observation into a vector
        self._parse(config)
        self.agent = None
        self.action_noise: float = (
            0.0  # Magnitude of independent per-component gaussian action noise
        )
        self._build_agent(self.agent_name)

    def _parse(self, config: dict) -> None:
        """Parse a config dict.

        Modify some attributes according to config.
        So that easily adapt to different environment settings.

        Args:
            config (dict): Configuration dictionary.
        """
        for key, value in config.items():
            if '.' in key:
                # Dotted keys address nested config objects, e.g. 'sim_conf.frameskip_binom_n'.
                obj, key = key.split('.')
                assert hasattr(self, obj) and hasattr(getattr(self, obj), key), f'Bad key {key}'
                setattr(getattr(self, obj), key, value)
            else:
                assert hasattr(self, key), f'Bad key {key}'
                setattr(self, key, value)

    def _build_agent(self, agent_name: str) -> None:
        """Build the agent in the world."""
        assert hasattr(agents, agent_name), 'agent not found'
        agent_cls = getattr(agents, agent_name)
        self.agent = agent_cls(random_generator=self.random_generator)

    def _add_geoms(self, *geoms: Geom) -> None:
        """Register geom type objects into environments and set corresponding attributes."""
        for geom in geoms:
            assert (
                type(geom) in GEOMS_REGISTER
            ), 'Please figure out the type of object before you add it into envs.'
            self._geoms[geom.name] = geom
            setattr(self, geom.name, geom)
            geom.set_agent(self.agent)

    def _add_free_geoms(self, *free_geoms: FreeGeom) -> None:
        """Register FreeGeom type objects into environments and set corresponding attributes."""
        for obj in free_geoms:
            assert (
                type(obj) in FREE_GEOMS_REGISTER
            ), 'Please figure out the type of object before you add it into envs.'
            self._free_geoms[obj.name] = obj
            setattr(self, obj.name, obj)
            obj.set_agent(self.agent)

    def _add_mocaps(self, *mocaps: Mocap) -> None:
        """Register mocap type objects into environments and set corresponding attributes."""
        for mocap in mocaps:
            assert (
                type(mocap) in MOCAPS_REGISTER
            ), 'Please figure out the type of object before you add it into envs.'
            self._mocaps[mocap.name] = mocap
            setattr(self, mocap.name, mocap)
            mocap.set_agent(self.agent)

    def reset(self) -> None:
        """Reset the environment."""
        self._build()
        # Save the layout at reset
        self.world_info.reset_layout = deepcopy(self.world_info.layout)

    def _build(self) -> None:
        """Build the mujoco instance of environment from configurations."""
        if self.placements_conf.placements is None:
            self._build_placements_dict()
            self.random_generator.set_placements_info(
                self.placements_conf.placements,
                self.placements_conf.extents,
                self.placements_conf.margin,
            )
        # Sample object positions
        self.world_info.layout = self.random_generator.build_layout()

        # Build the underlying physics world
        self.world_info.world_config_dict = self._build_world_config(self.world_info.layout)

        if self.world is None:
            self.world = World(self.agent, self._obstacles, self.world_info.world_config_dict)
            self.world.reset()
            self.world.build()
        else:
            self.world.reset(build=False)
            self.world.rebuild(self.world_info.world_config_dict, state=False)
            if self.viewer:
                self._update_viewer(self.model, self.data)

    def simulation_forward(self, action: np.ndarray) -> bool:
        """Take a step in the physics simulation.

        Returns:
            bool: True if a :class:`MujocoException` interrupted the simulation,
            False otherwise.  (Fix: the previous ``-> None`` annotation
            contradicted the actual bool return value consumed by callers.)

        Note:
            - The **step** mentioned above is not the same as the **step** in Mujoco sense.
            - The **step** here is the step in episode sense.
        """
        # Simulate physics forward
        if self.debug:
            self.agent.debug()
        else:
            noise = (
                self.action_noise * self.random_generator.randn(self.agent.body_info.nu)
                if self.action_noise
                else None
            )
            self.agent.apply_action(action, noise)

        exception = False
        for _ in range(
            self.random_generator.binomial(
                self.sim_conf.frameskip_binom_n,
                self.sim_conf.frameskip_binom_p,
            ),
        ):
            try:
                for mocap in self._mocaps.values():
                    mocap.move()
                # pylint: disable-next=no-member
                mujoco.mj_step(self.model, self.data)  # Physics simulation step
            except MujocoException as me:  # pylint: disable=invalid-name
                print('MujocoException', me)
                exception = True
                break
        if exception:
            return exception

        # pylint: disable-next=no-member
        mujoco.mj_forward(self.model, self.data)  # Needed to get sensor readings correct!
        return exception

    def update_layout(self) -> None:
        """Update layout dictionary with new places of objects from Mujoco instance.

        When the objects moves, and if we want to update locations of some objects in environment,
        then the layout dictionary needs to be updated to make sure that we won't wrongly change
        the locations of other objects because we build world according to layout dictionary.
        """
        mujoco.mj_forward(self.model, self.data)  # pylint: disable=no-member
        for k in list(self.world_info.layout.keys()):
            # Mocap objects have to be handled separately
            if 'gremlin' in k:
                continue
            self.world_info.layout[k] = self.data.body(k).xpos[:2].copy()

    def _set_goal(self, pos: np.ndarray, name: str = 'goal') -> None:
        """Set position of goal object in Mujoco instance.

        Note:
            This method is used to make sure the position of goal object in Mujoco instance
            is the same as the position of goal object in layout dictionary or in attributes
            of task instance.
        """
        if pos.shape == (2,):
            self.model.body(name).pos[:2] = pos[:2]
        elif pos.shape == (3,):
            self.model.body(name).pos[:3] = pos[:3]
        else:
            raise NotImplementedError

    def _render_lidar(
        self,
        poses: np.ndarray,
        color: np.ndarray,
        offset: float,
        group: int,
    ) -> None:
        """Render the lidar observation."""
        agent_pos = self.agent.pos
        agent_mat = self.agent.mat
        lidar = self._obs_lidar(poses, group)
        for i, sensor in enumerate(lidar):
            if self.lidar_conf.type == 'pseudo':  # pylint: disable=no-member
                i += 0.5  # Offset to center of bin
            theta = 2 * np.pi * i / self.lidar_conf.num_bins  # pylint: disable=no-member
            rad = self.render_conf.lidar_radius
            binpos = np.array([np.cos(theta) * rad, np.sin(theta) * rad, offset])
            pos = agent_pos + np.matmul(binpos, agent_mat.transpose())
            alpha = min(1, sensor + 0.1)
            self.viewer.add_marker(
                pos=pos,
                size=self.render_conf.lidar_size * np.ones(3),
                type=mujoco.mjtGeom.mjGEOM_SPHERE,  # pylint: disable=no-member
                rgba=np.array(color) * alpha,
                label='',
            )

    def _render_compass(self, pose: np.ndarray, color: np.ndarray, offset: float) -> None:
        """Render a compass observation."""
        agent_pos = self.agent.pos
        agent_mat = self.agent.mat
        # Truncate the compass to only visualize XY component
        compass = np.concatenate([self._obs_compass(pose)[:2] * 0.15, [offset]])
        pos = agent_pos + np.matmul(compass, agent_mat.transpose())
        self.viewer.add_marker(
            pos=pos,
            size=0.05 * np.ones(3),
            type=mujoco.mjtGeom.mjGEOM_SPHERE,  # pylint: disable=no-member
            rgba=np.array(color) * 0.5,
            label='',
        )

    # pylint: disable-next=too-many-arguments
    def _render_area(
        self,
        pos: np.ndarray,
        size: float,
        color: np.ndarray,
        label: str = '',
        alpha: float = 0.1,
    ) -> None:
        """Render a radial area in the environment."""
        z_size = min(size, 0.3)
        pos = np.asarray(pos)
        if pos.shape == (2,):
            pos = np.r_[pos, 0]  # Z coordinate 0
        self.viewer.add_marker(
            pos=pos,
            size=[size, size, z_size],
            type=mujoco.mjtGeom.mjGEOM_CYLINDER,  # pylint: disable=no-member
            rgba=np.array(color) * alpha,
            label=label if self.render_conf.labels else '',
        )

    # pylint: disable-next=too-many-arguments
    def _render_sphere(
        self,
        pos: np.ndarray,
        size: float,
        color: np.ndarray,
        label: str = '',
        alpha: float = 0.1,
    ) -> None:
        """Render a radial area in the environment."""
        pos = np.asarray(pos)
        if pos.shape == (2,):
            pos = np.r_[pos, 0]  # Z coordinate 0
        self.viewer.add_marker(
            pos=pos,
            size=size * np.ones(3),
            type=mujoco.mjtGeom.mjGEOM_SPHERE,  # pylint: disable=no-member
            rgba=np.array(color) * alpha,
            label=label if self.render_conf.labels else '',
        )

    # pylint: disable-next=too-many-arguments,too-many-branches,too-many-statements
    def render(
        self,
        width: int,
        height: int,
        mode: str,
        camera_id: int | None = None,
        camera_name: str | None = None,
        cost: dict | None = None,
    ) -> None:
        """Render the environment to somewhere.

        Note:
            The camera_name parameter can be chosen from:

            - **human**: the camera used for freely moving around and can get input
              from keyboard real time.
            - **vision**: the camera used for vision observation, which is fixed in front of the
              agent's head.
            - **track**: The camera used for tracking the agent.
            - **fixednear**: the camera used for top-down observation.
            - **fixedfar**: the camera used for top-down observation, but is further than **fixednear**.
        """
        self.model.vis.global_.offwidth = width
        self.model.vis.global_.offheight = height

        if mode in {
            'rgb_array',
            'depth_array',
        }:
            if camera_id is not None and camera_name is not None:
                raise ValueError(
                    'Both `camera_id` and `camera_name` cannot be specified at the same time.',
                )

            no_camera_specified = camera_name is None and camera_id is None
            if no_camera_specified:
                camera_name = 'vision'

            if camera_id is None:
                # pylint: disable-next=no-member
                camera_id = mujoco.mj_name2id(
                    self.model,
                    mujoco.mjtObj.mjOBJ_CAMERA,  # pylint: disable=no-member
                    camera_name,
                )

        self._get_viewer(mode)

        # Turn all the geom groups on
        self.viewer.vopt.geomgroup[:] = 1

        # Lidar and Compass markers
        if self.render_conf.lidar_markers:
            offset = (
                self.render_conf.lidar_offset_init
            )  # Height offset for successive lidar indicators
            for obstacle in self._obstacles:
                if obstacle.is_lidar_observed:
                    self._render_lidar(obstacle.pos, obstacle.color, offset, obstacle.group)
                if hasattr(obstacle, 'is_comp_observed') and obstacle.is_comp_observed:
                    self._render_compass(
                        getattr(self, obstacle.name + '_pos'),
                        obstacle.color,
                        offset,
                    )
                offset += self.render_conf.lidar_offset_delta

        # Add indicator for nonzero cost
        # Fix: ``cost`` defaults to None and was dereferenced unconditionally,
        # raising AttributeError when rendering before any cost was computed.
        # The annotation is also corrected to ``dict`` to match the .get() usage.
        if cost is not None and cost.get('cost_sum', 0) > 0:
            self._render_sphere(self.agent.pos, 0.25, COLOR['red'], alpha=0.5)

        # Draw vision pixels
        if mode in {'rgb_array', 'depth_array'}:
            # Extract depth part of the read_pixels() tuple
            data = self._get_viewer(mode).render(render_mode=mode, camera_id=camera_id)
            self.viewer._markers[:] = []  # pylint: disable=protected-access
            self.viewer._overlays.clear()  # pylint: disable=protected-access
            return data

        if mode == 'human':
            self._get_viewer(mode).render()
            return None

        raise NotImplementedError(f'Render mode {mode} is not implemented.')

    def _get_viewer(
        self,
        mode: str,
    ) -> KeyboardViewer | OffScreenViewer:
        """Return (creating and caching on first use) the viewer for ``mode``.

        Fix: the return annotation previously named
        ``gymnasium...RenderContextOffscreen`` although the code constructs
        :class:`OffScreenViewer`; the annotation now matches the code.
        """
        self.viewer = self._viewers.get(mode)
        if self.viewer is None:
            if mode == 'human':
                self.viewer = KeyboardViewer(
                    self.model,
                    self.data,
                    self.agent.keyboard_control_callback,
                )
            elif mode in {'rgb_array', 'depth_array'}:
                self.viewer = OffScreenViewer(self.model, self.data)
            else:
                raise AttributeError(f'Unexpected mode: {mode}')
            # self.viewer_setup()
            self._viewers[mode] = self.viewer
        return self.viewer

    def _update_viewer(self, model, data) -> None:
        """update the viewer with new model and data"""
        assert self.viewer, 'Call before self.viewer existing.'
        self.viewer.model = model
        self.viewer.data = data

    @abc.abstractmethod
    def _obs_lidar(self, positions: np.ndarray, group: int) -> np.ndarray:
        """Calculate and return a lidar observation. See sub methods for implementation."""

    @abc.abstractmethod
    def _obs_compass(self, pos: np.ndarray) -> np.ndarray:
        """Return an agent-centric compass observation of a list of positions.

        Compass is a normalized (unit-length) egocentric XY vector,
        from the agent to the object.

        This is equivalent to observing the egocentric XY angle to the target,
        projected into the sin/cos space we use for joints.
        (See comment on joint observation for why we do this.)
        """

    @abc.abstractmethod
    def _build_placements_dict(self) -> dict:
        """Build a dict of placements. Happens only once."""

    @abc.abstractmethod
    def _build_world_config(self, layout: dict) -> dict:
        """Create a world_config from our own config."""

    @property
    def model(self):
        """Helper to get the world's model instance."""
        return self.world.model

    @property
    def data(self):
        """Helper to get the world's simulation data instance."""
        return self.world.data

    @property
    def _obstacles(self) -> list[Geom | FreeGeom | Mocap]:
        """Get the obstacles in the task.

        Combine all types of object in current environment together into single list
        in order to easily iterate them.
        """
        return (
            list(self._geoms.values())
            + list(self._free_geoms.values())
            + list(self._mocaps.values())
        )
"""Base class for agents."""
from __future__ import annotations
import abc
import os
from dataclasses import dataclass, field
import glfw
import gymnasium
import mujoco
import numpy as np
from gymnasium import spaces
import safety_gymnasium
from safety_gymnasium.utils.random_generator import RandomGenerator
from safety_gymnasium.utils.task_utils import get_body_xvelp, quat2mat
from safety_gymnasium.world import Engine
BASE_DIR = os.path.dirname(safety_gymnasium.__file__)
@dataclass
class SensorConf:
    r"""Sensor observations configuration.
    Attributes:
        sensors (tuple): Specify which sensors to add to observation space.
        sensors_hinge_joints (bool): Observe named joint position / velocity sensors.
        sensors_ball_joints (bool): Observe named ball joint position / velocity sensors.
        sensors_angle_components (bool): Observe sin/cos theta instead of theta.
    """
    # Sensor type names included in the observation by default.
    sensors: tuple[str, ...] = ('accelerometer', 'velocimeter', 'gyro', 'magnetometer')
    sensors_hinge_joints: bool = True
    sensors_ball_joints: bool = True
    sensors_angle_components: bool = True
@dataclass
class SensorInfo:
    r"""Sensor information generated in running.

    Needed to figure out observation space.
    All fields are populated by :meth:`BaseAgent._init_jnt_sensors`.

    Attributes:
        hinge_pos_names (list): List of hinge joint position sensor names.
        hinge_vel_names (list): List of hinge joint velocity sensor names.
        freejoint_pos_name (str): Name of free joint position sensor.
        freejoint_qvel_name (str): Name of free joint velocity sensor.
        ballquat_names (list): List of ball joint quaternion sensor names.
        ballangvel_names (list): List of ball joint angular velocity sensor names.
        sensor_dim (dict): Mapping from sensor name to sensor dimension.
    """

    hinge_pos_names: list = field(default_factory=list)
    hinge_vel_names: list = field(default_factory=list)
    freejoint_pos_name: str | None = None
    freejoint_qvel_name: str | None = None
    ballquat_names: list = field(default_factory=list)
    ballangvel_names: list = field(default_factory=list)
    # Annotated ``dict`` to match the ``default_factory`` and its use as a
    # name -> dimension mapping in ``BaseAgent._init_jnt_sensors``.
    sensor_dim: dict = field(default_factory=dict)
@dataclass
class BodyInfo:
    r"""Body information generated in running.

    Needed to figure out the observation spaces.
    All fields are filled in by :meth:`BaseAgent._init_body_info`.

    Attributes:
        nq (int): Number of generalized coordinates in agent = dim(qpos).
        nv (int): Number of degrees of freedom in agent = dim(qvel).
        nu (int): Number of actuators/controls in agent = dim(ctrl),
            needed to figure out action space.
        nbody (int): Number of bodies in agent.
        geom_names (list): List of geom names in agent.
    """

    nq: int | None = None
    nv: int | None = None
    nu: int | None = None
    nbody: int | None = None
    geom_names: list = field(default_factory=list)
@dataclass
class DebugInfo:
    r"""Debug information generated in running.

    Attributes:
        keys (set): Set of keyboard keys currently pressed in debug mode
            (maintained by :meth:`BaseAgent.keyboard_control_callback`).
    """

    keys: set = field(default_factory=set)
class BaseAgent(abc.ABC):  # pylint: disable=too-many-instance-attributes
    r"""Base class for agent.

    Get mujoco-specific info about agent and control agent in environments.

    Methods:

    - :meth:`_load_model`: Load agent model from xml file.
    - :meth:`_init_body_info`: Initialize body information.
    - :meth:`_build_action_space`: Build action space for agent.
    - :meth:`_init_jnt_sensors`: Initialize information of joint sensors in current agent.
    - :meth:`set_engine`: Set physical engine instance.
    - :meth:`apply_action`: Agent in physical simulator take specific action.
    - :meth:`build_sensor_observation_space`: Build agent specific observation space according to sensors.
    - :meth:`obs_sensor`: Get agent specific observations according to sensors.
    - :meth:`get_sensor`: Get specific sensor observations in agent.
    - :meth:`dist_xy`: Get distance between agent and target in XY plane.
    - :meth:`world_xy`: Get agent XY coordinate in world frame.
    - :meth:`keyboard_control_callback`: Keyboard control callback designed for debug mode for keyboard controlling.
    - :meth:`debug`: Implement specific action debug mode which maps keyboard input into action of agent.
    - :meth:`is_alive`: Check if agent is alive.
    - :meth:`reset`: Reset agent to specific initial internal state, eg.joints angles.

    Attributes:

    - :attr:`base` (str): Path to agent XML.
    - :attr:`random_generator` (RandomGenerator): Random generator.
    - :attr:`placements` (list): Agent placements list (defaults to full extents).
    - :attr:`locations` (list): Explicitly place agent XY coordinate.
    - :attr:`keepout` (float): Needs to be set to match the agent XML used.
    - :attr:`rot` (float): Override agent starting angle.
    - :attr:`engine` (:class:`Engine`): Physical engine instance.
    - :attr:`sensor_conf` (:class:`SensorConf`): Sensor observations configuration.
    - :attr:`sensor_info` (:class:`SensorInfo`): Sensor information.
    - :attr:`body_info` (:class:`BodyInfo`): Body information.
    - :attr:`debug_info` (:class:`DebugInfo`): Debug information.
    - :attr:`z_height` (float): Initial height of agent in environments.
    - :attr:`action_space` (:class:`gymnasium.spaces.Box`): Action space.
    - :attr:`com` (np.ndarray): The Cartesian coordinate of agent center of mass.
    - :attr:`mat` (np.ndarray): The Cartesian rotation matrix of agent.
    - :attr:`vel` (np.ndarray): The Cartesian velocity of agent.
    - :attr:`pos` (np.ndarray): The Cartesian position of agent.
    """

    def __init__(  # pylint: disable=too-many-arguments
        self,
        name: str,
        random_generator: RandomGenerator,
        placements: list | None = None,
        locations: list | None = None,
        keepout: float = 0.4,
        rot: float | None = None,
    ) -> None:
        """Initialize the agent.

        Args:
            name (str): Name of agent.
            random_generator (RandomGenerator): Random generator.
            placements (list): Agent placements list (defaults to full extents).
            locations (list): Explicitly place agent XY coordinate.
            keepout (float): Needs to be set to match the agent XML used.
            rot (float): Override agent starting angle.
        """
        self.base: str = f'assets/xmls/{name.lower()}.xml'
        self.random_generator: RandomGenerator = random_generator
        self.placements: list | None = placements
        self.locations: list = [] if locations is None else locations
        self.keepout: float = keepout
        self.rot: float | None = rot

        self.engine: Engine | None = None
        self._load_model()

        self.sensor_conf = SensorConf()
        self.sensor_info = SensorInfo()
        self.body_info = BodyInfo()
        self._init_body_info()
        self.debug_info = DebugInfo()

        # Needed to figure out z-height of free joint of offset body
        self.z_height: float = self.engine.data.body('agent').xpos[2]

        self.action_space: gymnasium.spaces.Box = self._build_action_space()
        self._init_jnt_sensors()

    def _load_model(self) -> None:
        """Load the agent model from the xml file.

        Note:
            The physical engine instance which is created here is just used to figure out the dynamics
            of agent and save some useful information, when the environment is actually created, the
            physical engine instance will be replaced by the new instance which is created in
            :class:`safety_gymnasium.World` via :meth:`set_engine`.
        """
        base_path = os.path.join(BASE_DIR, self.base)
        model = mujoco.MjModel.from_xml_path(base_path)  # pylint: disable=no-member
        data = mujoco.MjData(model)  # pylint: disable=no-member
        mujoco.mj_forward(model, data)  # pylint: disable=no-member
        self.set_engine(Engine(model, data))

    def _init_body_info(self) -> None:
        """Initialize body information.

        Access directly from mujoco instance created on agent xml model.
        """
        self.body_info.nq = self.engine.model.nq
        self.body_info.nv = self.engine.model.nv
        self.body_info.nu = self.engine.model.nu
        self.body_info.nbody = self.engine.model.nbody
        # The floor geom belongs to the scene, not the agent body.
        self.body_info.geom_names = [
            self.engine.model.geom(i).name
            for i in range(self.engine.model.ngeom)
            if self.engine.model.geom(i).name != 'floor'
        ]

    def _build_action_space(self) -> gymnasium.spaces.Box:
        """Build the action space for this agent.

        Access directly from mujoco instance created on agent xml model.
        """
        # Stay in float64 end-to-end: the Box below is float64, and casting the
        # mujoco control ranges through float32 first would distort the bounds.
        bounds = self.engine.model.actuator_ctrlrange.copy().astype(np.float64)
        low, high = bounds.T
        return spaces.Box(low=low, high=high, dtype=np.float64)

    def _init_jnt_sensors(self) -> None:  # pylint: disable=too-many-branches
        """Initialize joint sensors.

        Access directly from mujoco instance created on agent xml model and save different
        joint names into different lists.
        """
        for i in range(self.engine.model.nsensor):
            name = self.engine.model.sensor(i).name
            sensor_id = self.engine.model.sensor(name).id
            self.sensor_info.sensor_dim[name] = self.engine.model.sensor(sensor_id).dim[0]
            sensor_type = self.engine.model.sensor(sensor_id).type
            if (
                # pylint: disable-next=no-member
                self.engine.model.sensor(sensor_id).objtype
                == mujoco.mjtObj.mjOBJ_JOINT  # pylint: disable=no-member
            ):
                joint_id = self.engine.model.sensor(sensor_id).objid
                joint_type = self.engine.model.jnt(joint_id).type
                if joint_type == mujoco.mjtJoint.mjJNT_HINGE:  # pylint: disable=no-member
                    if sensor_type == mujoco.mjtSensor.mjSENS_JOINTPOS:  # pylint: disable=no-member
                        self.sensor_info.hinge_pos_names.append(name)
                    elif (
                        sensor_type == mujoco.mjtSensor.mjSENS_JOINTVEL
                    ):  # pylint: disable=no-member
                        self.sensor_info.hinge_vel_names.append(name)
                    else:
                        raise ValueError(f'Unrecognized sensor type {sensor_type} for joint')
                elif joint_type == mujoco.mjtJoint.mjJNT_BALL:  # pylint: disable=no-member
                    if sensor_type == mujoco.mjtSensor.mjSENS_BALLQUAT:  # pylint: disable=no-member
                        self.sensor_info.ballquat_names.append(name)
                    elif (
                        sensor_type == mujoco.mjtSensor.mjSENS_BALLANGVEL
                    ):  # pylint: disable=no-member
                        self.sensor_info.ballangvel_names.append(name)
                elif joint_type == mujoco.mjtJoint.mjJNT_SLIDE:  # pylint: disable=no-member
                    # Adding slide joints is trivially easy in code,
                    # but this removes one of the good properties about our observations.
                    # (That we are invariant to relative whole-world transforms)
                    # If slide joints are added we should ensure this stays true!
                    raise ValueError('Slide joints in agents not currently supported')
            elif (
                # pylint: disable-next=no-member
                self.engine.model.sensor(sensor_id).objtype
                == mujoco.mjtObj.mjOBJ_SITE  # pylint: disable=no-member
            ):
                if name == 'agent_pos':
                    self.sensor_info.freejoint_pos_name = name
                elif name == 'agent_qvel':
                    self.sensor_info.freejoint_qvel_name = name

    def set_engine(self, engine: Engine) -> None:
        """Set the engine instance.

        Args:
            engine (Engine): The engine instance.

        Note:
            This method will be called twice in one single environment.

            1. When the agent is initialized, used to get and save useful information.
            2. When the environment is created, used to update the engine instance.
        """
        self.engine = engine

    def apply_action(self, action: np.ndarray, noise: np.ndarray | None = None) -> None:
        """Apply an action to the agent.

        Just fill up the control array in the engine data.

        Args:
            action (np.ndarray): The action to apply.
            noise (np.ndarray): The noise to add to the action.
        """
        # ``np.asarray`` instead of ``np.array(..., copy=False)``: NumPy 2
        # changed ``copy=False`` to raise whenever a copy is actually required.
        action = np.asarray(action)
        # Set action, clipped to the actuator control ranges.
        action_range = self.engine.model.actuator_ctrlrange
        self.engine.data.ctrl[:] = np.clip(action, action_range[:, 0], action_range[:, 1])
        # Fixed: ``if noise:`` raises "truth value of an array is ambiguous"
        # for multi-element noise arrays; compare against None explicitly.
        if noise is not None:
            self.engine.data.ctrl[:] += noise

    def build_sensor_observation_space(self) -> gymnasium.spaces.Dict:
        """Build observation space for all sensor types.

        Returns:
            gymnasium.spaces.Dict: The observation space generated by sensors bound with agent.
        """
        obs_space_dict = {}

        for sensor in self.sensor_conf.sensors:  # Explicitly listed sensors
            dim = self.sensor_info.sensor_dim[sensor]
            obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (dim,), dtype=np.float64)
        # Velocities don't have wraparound effects that rotational positions do
        # Wraparounds are not kind to neural networks
        # Whereas the angle 2*pi is very close to 0, this isn't true in the network
        # In theory the network could learn this, but in practice we simplify it
        # when the sensors_angle_components switch is enabled.
        for sensor in self.sensor_info.hinge_vel_names:
            obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (1,), dtype=np.float64)
        for sensor in self.sensor_info.ballangvel_names:
            obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float64)
        if self.sensor_info.freejoint_pos_name:
            # Only the z coordinate of the free joint position is observed
            # (see ``obs_sensor``), hence shape (1,).
            sensor = self.sensor_info.freejoint_pos_name
            obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (1,), dtype=np.float64)
        if self.sensor_info.freejoint_qvel_name:
            sensor = self.sensor_info.freejoint_qvel_name
            obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float64)
        # Angular positions have wraparound effects, so output something more friendly
        if self.sensor_conf.sensors_angle_components:
            # Single joints are turned into sin(x), cos(x) pairs
            # These should be easier to learn for neural networks,
            # Since for angles, small perturbations in angle give small differences in sin/cos
            for sensor in self.sensor_info.hinge_pos_names:
                obs_space_dict[sensor] = gymnasium.spaces.Box(
                    -np.inf,
                    np.inf,
                    (2,),
                    dtype=np.float64,
                )
            # Quaternions are turned into 3x3 rotation matrices
            # Quaternions have a wraparound issue in how they are normalized,
            # where the convention is to change the sign so the first element to be positive.
            # If the first element is close to 0, this can mean small differences in rotation
            # lead to large differences in value as the latter elements change sign.
            # This also means that the first element of the quaternion is not expectation zero.
            # The SO(3) rotation representation would be a good replacement here,
            # since it smoothly varies between values in all directions (the property we want),
            # but right now we have very little code to support SO(3) rotations.
            # Instead we use a 3x3 rotation matrix, which if normalized, smoothly varies as well.
            for sensor in self.sensor_info.ballquat_names:
                obs_space_dict[sensor] = gymnasium.spaces.Box(
                    -np.inf,
                    np.inf,
                    (3, 3),
                    dtype=np.float64,
                )
        else:
            # Otherwise include the sensor without any processing
            for sensor in self.sensor_info.hinge_pos_names:
                obs_space_dict[sensor] = gymnasium.spaces.Box(
                    -np.inf,
                    np.inf,
                    (1,),
                    dtype=np.float64,
                )
            for sensor in self.sensor_info.ballquat_names:
                obs_space_dict[sensor] = gymnasium.spaces.Box(
                    -np.inf,
                    np.inf,
                    (4,),
                    dtype=np.float64,
                )

        return obs_space_dict

    def obs_sensor(self) -> dict[str, np.ndarray]:
        """Get observations of all sensor types.

        Returns:
            Dict[str, np.ndarray]: The observations generated by sensors bound with agent.
        """
        obs = {}

        # Sensors which can be read directly, without processing
        for sensor in self.sensor_conf.sensors:  # Explicitly listed sensors
            obs[sensor] = self.get_sensor(sensor)
        for sensor in self.sensor_info.hinge_vel_names:
            obs[sensor] = self.get_sensor(sensor)
        for sensor in self.sensor_info.ballangvel_names:
            obs[sensor] = self.get_sensor(sensor)
        if self.sensor_info.freejoint_pos_name:
            sensor = self.sensor_info.freejoint_pos_name
            # Only the z coordinate: XY would break translation invariance.
            obs[sensor] = self.get_sensor(sensor)[2:]
        if self.sensor_info.freejoint_qvel_name:
            sensor = self.sensor_info.freejoint_qvel_name
            obs[sensor] = self.get_sensor(sensor)
        # Process angular position sensors
        if self.sensor_conf.sensors_angle_components:
            for sensor in self.sensor_info.hinge_pos_names:
                theta = float(self.get_sensor(sensor))  # Ensure not 1D, 1-element array
                obs[sensor] = np.array([np.sin(theta), np.cos(theta)])
            for sensor in self.sensor_info.ballquat_names:
                quat = self.get_sensor(sensor)
                obs[sensor] = quat2mat(quat)
        else:  # Otherwise read sensors directly
            for sensor in self.sensor_info.hinge_pos_names:
                obs[sensor] = self.get_sensor(sensor)
            for sensor in self.sensor_info.ballquat_names:
                obs[sensor] = self.get_sensor(sensor)

        return obs

    def get_sensor(self, name: str) -> np.ndarray:
        """Get the value of one sensor.

        Args:
            name (str): The name of the sensor to checkout.

        Returns:
            np.ndarray: The observation value of the sensor.
        """
        sensor_id = self.engine.model.sensor(name).id
        adr = self.engine.model.sensor_adr[sensor_id]
        dim = self.engine.model.sensor_dim[sensor_id]
        return self.engine.data.sensordata[adr : adr + dim].copy()

    def dist_xy(self, pos: np.ndarray) -> float:
        """Return the distance from the agent to an XY position.

        Args:
            pos (np.ndarray): The position to measure the distance to.

        Returns:
            float: The distance from the agent to the position.
        """
        pos = np.asarray(pos)
        if pos.shape == (3,):
            pos = pos[:2]
        agent_pos = self.pos
        return np.sqrt(np.sum(np.square(pos - agent_pos[:2])))

    def world_xy(self, pos: np.ndarray) -> np.ndarray:
        """Return the world XY vector to a position from the agent.

        Args:
            pos (np.ndarray): The position to measure the vector to.

        Returns:
            np.ndarray: The world XY vector to the position.
        """
        assert pos.shape == (2,)
        # Fixed: previously read ``self.agent.agent_pos()``, but agents have no
        # ``agent`` attribute — the agent's own position is ``self.pos``.
        return pos - self.pos[:2]

    def keyboard_control_callback(self, key: int, action: int) -> None:
        """Callback for keyboard control.

        Collect keys which are pressed.

        Args:
            key (int): The key code inputted by user.
            action (int): The action of the key in glfw.
        """
        if action == glfw.PRESS:
            self.debug_info.keys.add(key)
        elif action == glfw.RELEASE:
            # ``discard`` instead of ``remove``: a RELEASE can arrive for a key
            # whose PRESS was never tracked (e.g. pressed before window focus).
            self.debug_info.keys.discard(key)

    def debug(self) -> None:
        """Debug mode.

        Apply action which is inputted from keyboard.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def is_alive(self) -> bool:
        """Returns True if the agent is healthy.

        Returns:
            bool: True if the agent is healthy,
                False if the agent is unhealthy.
        """

    @abc.abstractmethod
    def reset(self) -> None:
        """Called when the environment is reset."""

    @property
    def com(self) -> np.ndarray:
        """Get the position of the agent center of mass in the simulator world reference frame.

        Returns:
            np.ndarray: The Cartesian position of the agent center of mass.
        """
        return self.engine.data.body('agent').subtree_com.copy()

    @property
    def mat(self) -> np.ndarray:
        """Get the rotation matrix of the agent in the simulator world reference frame.

        Returns:
            np.ndarray: The Cartesian rotation matrix of the agent.
        """
        return self.engine.data.body('agent').xmat.copy().reshape(3, -1)

    @property
    def vel(self) -> np.ndarray:
        """Get the velocity of the agent in the simulator world reference frame.

        Returns:
            np.ndarray: The velocity of the agent.
        """
        return get_body_xvelp(self.engine.model, self.engine.data, 'agent').copy()

    @property
    def pos(self) -> np.ndarray:
        """Get the position of the agent in the simulator world reference frame.

        Returns:
            np.ndarray: The Cartesian position of the agent.
        """
        return self.engine.data.body('agent').xpos.copy()
"""Base task."""
from __future__ import annotations
import abc
import os
from collections import OrderedDict
from dataclasses import dataclass
import gymnasium
import mujoco
import numpy as np
import yaml
import safety_gymnasium
from safety_gymnasium.bases.underlying import Underlying
from safety_gymnasium.utils.common_utils import ResamplingError, camel_to_snake
from safety_gymnasium.utils.task_utils import theta2vec
@dataclass
class LidarConf:
    r"""Lidar observation parameters.

    Attributes:
        num_bins (int): Bins (around a full circle) for lidar sensing.
        max_dist (float): Maximum distance for lidar sensitivity (if None, exponential distance).
        exp_gain (float): Scaling factor for distance in exponential distance lidar.
        type (str): 'pseudo', 'natural', see self._obs_lidar().
        alias (bool): Lidar bins alias into each other.
    """

    num_bins: int = 16
    # ``None`` switches to exponential distance lidar (see docstring above).
    max_dist: float | None = 3
    exp_gain: float = 1.0
    type: str = 'pseudo'
    alias: bool = True
@dataclass
class CompassConf:
    r"""Compass observation parameters.

    Attributes:
        shape (int): 2 for XY unit vector, 3 for XYZ unit vector.
    """

    # Read by ``BaseTask.build_observation_space`` to size the compass Box.
    shape: int = 2
@dataclass
class RewardConf:
    r"""Reward options.

    Attributes:
        reward_orientation (bool): Reward for being upright.
        reward_orientation_scale (float): Scale for uprightness reward.
        reward_orientation_body (str): What body to get orientation from.
        reward_exception (float): Reward when encountering a mujoco exception.
        reward_clip (float): Clip reward, last resort against physics errors causing magnitude spikes.
    """

    reward_orientation: bool = False
    reward_orientation_scale: float = 0.002
    reward_orientation_body: str = 'agent'
    # Deliberately negative: an exception during stepping is a failure signal.
    reward_exception: float = -10.0
    reward_clip: float = 10
@dataclass
class CostConf:
    r"""Cost options.

    Attributes:
        constrain_indicator (bool): If true, all costs are either 1 or 0 for a given step.
            When false, presumably continuous cost magnitudes are reported instead —
            verify against the concrete obstacle cost implementations.
    """

    constrain_indicator: bool = True
@dataclass
class MechanismConf:
    r"""Mechanism options.

    Options governing episode mechanics, such as the starting position
    distribution and goal resampling behavior.

    Attributes:
        randomize_layout (bool): If false, set the random seed before layout to constant.
        continue_goal (bool): If true, draw a new goal after achievement.
        terminate_resample_failure (bool): If true, end episode when resampling fails,
            otherwise, raise a python exception.
    """

    randomize_layout: bool = True
    continue_goal: bool = True
    terminate_resample_failure: bool = True
@dataclass
class ObservationInfo:
    r"""Observation information generated in running.

    Attributes:
        obs_space_dict (:class:`gymnasium.spaces.Dict`): Observation space dictionary,
            populated by :meth:`BaseTask.build_observation_space`.
    """

    obs_space_dict: gymnasium.spaces.Dict | None = None
class BaseTask(Underlying): # pylint: disable=too-many-instance-attributes,too-many-public-methods
r"""Base task class for defining some common characteristic and mechanism.
Methods:
- :meth:`dist_goal`: Return the distance from the agent to the goal XY position.
- :meth:`calculate_cost`: Determine costs depending on the agent and obstacles, actually all
cost calculation is done in different :meth:`safety_gymnasium.bases.base_obstacle.BaseObject.cal_cost`
which implemented in different types of object, We just combine all results of them here.
- :meth:`build_observation_space`: Build observation space, combine agent specific observation space
and task specific observation space together.
- :meth:`_build_placements_dict`: Build placement dictionary for all types of object.
- :meth:`toggle_observation_space`: Toggle observation space.
- :meth:`_build_world_config`: Create a world_config from all separate configs of different types of object.
- :meth:`_build_static_geoms_config`: Build static geoms config from yaml files.
- :meth:`build_goal_position`: Build goal position, it will be called when the task is initialized or
when the goal is achieved.
- :meth:`_placements_dict_from_object`: Build placement dictionary for a specific type of object.
- :meth:`obs`: Combine and return all separate observations of different types of object.
- :meth:`_obs_lidar`: Return lidar observation, unify natural lidar and pseudo lidar in API.
- :meth:`_obs_lidar_natural`: Return natural lidar observation.
- :meth:`_obs_lidar_pseudo`: Return pseudo lidar observation.
- :meth:`_obs_compass`: Return compass observation.
- :meth:`_obs_vision`: Return vision observation, that is RGB image captured by camera
fixed in front of agent.
- :meth:`_ego_xy`: Return the egocentric XY vector to a position from the agent.
- :meth:`calculate_reward`: Calculate reward, it will be called in every timestep, and it is
implemented in different task.
- :meth:`specific_reset`: Reset task specific parameters, it will be called in every reset.
- :meth:`specific_step`: Step task specific parameters, it will be called in every timestep.
- :meth:`update_world`: Update world, it will be called when ``env.reset()`` or :meth:`goal_achieved` == True.
Attributes:
- :attr:`num_steps` (int): Maximum number of environment steps in an episode.
- :attr:`lidar_conf` (:class:`LidarConf`): Lidar observation parameters.
- :attr:`reward_conf` (:class:`RewardConf`): Reward options.
- :attr:`cost_conf` (:class:`CostConf`): Cost options.
- :attr:`mechanism_conf` (:class:`MechanismConf`): Mechanism options.
- :attr:`action_space` (gymnasium.spaces.Box): Action space.
- :attr:`observation_space` (gymnasium.spaces.Dict): Observation space.
- :attr:`obs_info` (:class:`ObservationInfo`): Observation information generated in running.
- :attr:`_is_load_static_geoms` (bool): Whether to load static geoms in current task which is mean
some geoms that has no randomness.
- :attr:`goal_achieved` (bool): Determine whether the goal is achieved, it will be called in every timestep
and it is implemented in different task.
"""
    def __init__(self, config: dict) -> None: # pylint: disable-next=too-many-statements
        """Initialize the task.

        Args:
            config (dict): Configuration dictionary, used to pre-config some attributes
                according to tasks via :meth:`safety_gymnasium.register`.
        """
        super().__init__(config=config)

        self.task_name: str
        self.num_steps = 1000  # Maximum number of environment steps in an episode

        # Grouped option dataclasses; see their definitions for field meanings.
        self.lidar_conf = LidarConf()
        self.compass_conf = CompassConf()
        self.reward_conf = RewardConf()
        self.cost_conf = CostConf()
        self.mechanism_conf = MechanismConf()

        # The task's action space is exactly the agent's action space.
        self.action_space = self.agent.action_space
        self.observation_space = None  # Built later by ``build_observation_space``.
        self.obs_info = ObservationInfo()

        self._is_load_static_geoms = False  # Whether to load static geoms in current task.
        self.static_geoms_names: dict
        self.static_geoms_contact_cost: float | None = None
def dist_goal(self) -> float:
"""Return the distance from the agent to the goal XY position."""
assert hasattr(self, 'goal'), 'Please make sure you have added goal into env.'
return self.agent.dist_xy(self.goal.pos) # pylint: disable=no-member
    def dist_staged_goal(self) -> float:
        """Return the distance from the agent to the staged goal XY position."""
        assert hasattr(self, 'staged_goal'), 'Please make sure you have added goal into env.'
        return self.agent.dist_xy(self.staged_goal.pos)  # pylint: disable=no-member
def calculate_cost(self) -> dict:
"""Determine costs depending on the agent and obstacles."""
# pylint: disable-next=no-member
mujoco.mj_forward(self.model, self.data) # Ensure positions and contacts are correct
cost = {}
# Calculate constraint violations
for obstacle in self._obstacles:
cost.update(obstacle.cal_cost())
if self._is_load_static_geoms and self.static_geoms_contact_cost:
cost['cost_static_geoms_contact'] = 0.0
for contact in self.data.contact[: self.data.ncon]:
geom_ids = [contact.geom1, contact.geom2]
geom_names = sorted([self.model.geom(g).name for g in geom_ids])
if any(n in self.static_geoms_names for n in geom_names) and any(
n in self.agent.body_info.geom_names for n in geom_names
):
# pylint: disable-next=no-member
cost['cost_static_geoms_contact'] += self.static_geoms_contact_cost
# Sum all costs into single total cost
cost['cost_sum'] = sum(v for k, v in cost.items() if k.startswith('cost_'))
return cost
    def build_observation_space(self) -> gymnasium.spaces.Dict:
        """Construct observation space. Happens only once during __init__ in Builder."""
        obs_space_dict = OrderedDict()  # See self.obs()

        # Agent sensors first, then one lidar/compass entry per observed obstacle.
        obs_space_dict.update(self.agent.build_sensor_observation_space())
        for obstacle in self._obstacles:
            if obstacle.is_lidar_observed:
                name = obstacle.name + '_' + 'lidar'
                obs_space_dict[name] = gymnasium.spaces.Box(
                    0.0,
                    1.0,
                    (self.lidar_conf.num_bins,),
                    dtype=np.float64,
                )
            if hasattr(obstacle, 'is_comp_observed') and obstacle.is_comp_observed:
                name = obstacle.name + '_' + 'comp'
                obs_space_dict[name] = gymnasium.spaces.Box(
                    -1.0,
                    1.0,
                    (self.compass_conf.shape,),
                    dtype=np.float64,
                )

        if self.observe_vision:
            width, height = self.vision_env_conf.vision_size
            rows, cols = height, width
            # NOTE(review): this overwrites ``vision_size`` with (rows, cols); a
            # second call (e.g. via ``toggle_observation_space``) swaps it back
            # to (width, height) — confirm repeated rebuilds are intended.
            self.vision_env_conf.vision_size = (rows, cols)
            obs_space_dict['vision'] = gymnasium.spaces.Box(
                0,
                255,
                (*self.vision_env_conf.vision_size, 3),
                dtype=np.uint8,
            )

        self.obs_info.obs_space_dict = gymnasium.spaces.Dict(obs_space_dict)

        if self.observation_flatten:
            self.observation_space = gymnasium.spaces.utils.flatten_space(
                self.obs_info.obs_space_dict,
            )
        else:
            self.observation_space = self.obs_info.obs_space_dict
def _build_placements_dict(self) -> None:
"""Build a dict of placements.
Happens only once.
"""
placements = {}
placements.update(self._placements_dict_from_object('agent'))
for obstacle in self._obstacles:
placements.update(self._placements_dict_from_object(obstacle.name))
self.placements_conf.placements = placements
    def toggle_observation_space(self) -> None:
        """Toggle observation space.

        Flips :attr:`observation_flatten` and rebuilds the observation space so
        the flattened/dict form stays in sync.
        """
        self.observation_flatten = not self.observation_flatten
        self.build_observation_space()
    def _build_world_config(self, layout: dict) -> dict:  # pylint: disable=too-many-branches
        """Create a world_config from our own config.

        Args:
            layout (dict): Mapping from object name to its sampled XY position.

        Returns:
            dict: World configuration consumed by the world builder.
        """
        world_config = {
            'floor_type': self.floor_conf.type,
            'floor_size': self.floor_conf.size,
            'agent_base': self.agent.base,
            'agent_xy': layout['agent'],
        }
        # Honour an explicitly configured starting angle, else sample one.
        if self.agent.rot is None:
            world_config['agent_rot'] = self.random_generator.random_rot()
        else:
            world_config['agent_rot'] = float(self.agent.rot)

        # E.g. class ``GoalLevel1`` yields task name ``Goal``.
        self.task_name = self.__class__.__name__.split('Level', maxsplit=1)[0]
        world_config['task_name'] = self.task_name

        # process world config via different objects.
        world_config.update(
            {
                'geoms': {},
                'free_geoms': {},
                'mocaps': {},
            },
        )
        for obstacle in self._obstacles:
            num = obstacle.num if hasattr(obstacle, 'num') else 1
            obstacle.process_config(world_config, layout, self.random_generator.generate_rots(num))
        if self._is_load_static_geoms:
            self._build_static_geoms_config(world_config['geoms'])

        return world_config
    def _build_static_geoms_config(self, geoms_config: dict) -> None:
        """Load static geoms from .yaml file.

        Static geoms are geoms which won't be considered when calculate reward and cost in general.
        And have no randomness.
        Some tasks may generate cost when contacting static geoms.

        Args:
            geoms_config (dict): Geoms section of the world config; updated in place.
        """
        config_name = camel_to_snake(self.task_name)
        level = int(self.__class__.__name__.split('Level')[1])
        # load all config of meshes in specific environment from .yaml file
        base_dir = os.path.dirname(safety_gymnasium.__file__)
        with open(os.path.join(base_dir, f'configs/{config_name}.yaml'), encoding='utf-8') as file:
            # NOTE(review): FullLoader can construct arbitrary python-tagged objects;
            # fine for the bundled configs, but don't point this at untrusted files.
            meshes_config = yaml.load(file, Loader=yaml.FullLoader)  # noqa: S506

        self.static_geoms_names = set()
        # Levels are cumulative: a level-N task includes meshes of levels 0..N.
        for idx in range(level + 1):
            for group in meshes_config[idx].values():
                geoms_config.update(group)
                for item in group.values():
                    if 'geoms' in item:
                        for geom in item['geoms']:
                            self.static_geoms_names.add(geom['name'])
                    else:
                        self.static_geoms_names.add(item['name'])
    def build_goal_position(self) -> None:
        """Build a new goal position, maybe with resampling due to hazards."""
        # Resample until goal is compatible with layout
        if 'goal' in self.world_info.layout:
            del self.world_info.layout['goal']
        for _ in range(10000):  # Retries
            # The sampler records the accepted position in ``layout['goal']``
            # (read below) when it succeeds.
            if self.random_generator.sample_goal_position():
                break
        else:
            raise ResamplingError('Failed to generate goal')
        # Move goal geom to new layout position
        self.world_info.world_config_dict['geoms']['goal']['pos'][:2] = self.world_info.layout[
            'goal'
        ]
        self._set_goal(self.world_info.layout['goal'])
        mujoco.mj_forward(self.model, self.data)  # pylint: disable=no-member
    def build_staged_goal_position(self) -> None:
        """Build a new staged goal position.

        Unlike :meth:`build_goal_position` there is no random resampling here:
        the next XY coordinate is queried directly from the staged goal object.
        """
        if 'staged_goal' in self.world_info.layout:
            del self.world_info.layout['staged_goal']
        self.world_info.layout['staged_goal'] = np.array(
            self.staged_goal.get_next_goal_xy(),  # pylint: disable=no-member
        )
        # Move goal geom to new layout position
        self.world_info.world_config_dict['geoms']['staged_goal']['pos'][
            :2
        ] = self.world_info.layout['staged_goal']
        self._set_goal(self.world_info.layout['staged_goal'], 'staged_goal')
        mujoco.mj_forward(self.model, self.data)  # pylint: disable=no-member
def _placements_dict_from_object(self, object_name: dict) -> dict:
"""Get the placements dict subset just for a given object name."""
placements_dict = {}
assert hasattr(self, object_name), f'object{object_name} does not exist, but you use it!'
data_obj = getattr(self, object_name)
if hasattr(data_obj, 'num'): # Objects with multiplicity
object_fmt = object_name[:-1] + '{i}'
object_num = getattr(data_obj, 'num', None)
object_locations = getattr(data_obj, 'locations', [])
object_placements = getattr(data_obj, 'placements', None)
object_keepout = data_obj.keepout
else: # Unique objects
object_fmt = object_name
object_num = 1
object_locations = getattr(data_obj, 'locations', [])
object_placements = getattr(data_obj, 'placements', None)
object_keepout = data_obj.keepout
for i in range(object_num):
if i < len(object_locations):
x, y = object_locations[i] # pylint: disable=invalid-name
k = object_keepout + 1e-9 # Epsilon to account for numerical issues
placements = [(x - k, y - k, x + k, y + k)]
else:
placements = object_placements
placements_dict[object_fmt.format(i=i)] = (placements, object_keepout)
return placements_dict
def obs(self) -> dict | np.ndarray:
    """Return the observation of our agent.

    Returns:
        dict | np.ndarray: Raw observation dict, or a flattened array when
        ``observation_flatten`` is enabled.
    """
    # pylint: disable-next=no-member
    mujoco.mj_forward(self.model, self.data)  # Needed to get sensor's data correct
    obs = {}
    obs.update(self.agent.obs_sensor())
    # Each obstacle contributes a lidar channel and, optionally, a compass channel.
    for obstacle in self._obstacles:
        if obstacle.is_lidar_observed:
            obs[obstacle.name + '_lidar'] = self._obs_lidar(obstacle.pos, obstacle.group)
        if hasattr(obstacle, 'is_comp_observed') and obstacle.is_comp_observed:
            obs[obstacle.name + '_comp'] = self._obs_compass(obstacle.pos)
    if self.observe_vision:
        obs['vision'] = self._obs_vision()
    # Guard against observations drifting outside the declared space.
    assert self.obs_info.obs_space_dict.contains(
        obs,
    ), f'Bad obs {obs} {self.obs_info.obs_space_dict}'
    if self.observation_flatten:
        obs = gymnasium.spaces.utils.flatten(self.obs_info.obs_space_dict, obs)
    return obs
def _obs_lidar(self, positions: np.ndarray | list, group: int) -> np.ndarray:
"""Calculate and return a lidar observation.
See sub methods for implementation.
"""
if self.lidar_conf.type == 'pseudo':
return self._obs_lidar_pseudo(positions)
if self.lidar_conf.type == 'natural':
return self._obs_lidar_natural(group)
raise ValueError(f'Invalid lidar_type {self.lidar_conf.type}')
def _obs_lidar_natural(self, group: int) -> np.ndarray:
    """Natural lidar casts rays based on the ego-frame of the agent.

    Rays are circularly projected from the agent body origin around the agent z axis.

    Args:
        group (int): Geom group the rays should report hits for.

    Returns:
        np.ndarray: One reading per lidar bin; 0 for a miss, ``exp(-dist)`` for a hit.
    """
    body = self.model.body('agent').id
    # Group mask: only geoms in the requested group are visible to the rays.
    # pylint: disable-next=no-member
    grp = np.asarray([i == group for i in range(int(mujoco.mjNGROUP))], dtype='uint8')
    pos = np.asarray(self.agent.pos, dtype='float64')
    mat_t = self.agent.mat
    obs = np.zeros(self.lidar_conf.num_bins)
    for i in range(self.lidar_conf.num_bins):
        # Evenly spaced bearings covering the full circle.
        theta = (i / self.lidar_conf.num_bins) * np.pi * 2
        vec = np.matmul(mat_t, theta2vec(theta))  # Rotate from ego to world frame
        vec = np.asarray(vec, dtype='float64')
        geom_id = np.array([0], dtype='int32')  # Out-param: id of the geom hit by the ray
        dist = mujoco.mj_ray(  # pylint: disable=no-member
            self.model,
            self.data,
            pos,
            vec,
            grp,
            1,
            body,
            geom_id,
        )
        if dist >= 0:  # mj_ray reports a negative distance when nothing is hit
            obs[i] = np.exp(-dist)  # Encode distance as "closeness" in (0, 1]
    return obs
def _obs_lidar_pseudo(self, positions: np.ndarray) -> np.ndarray:
"""Return an agent-centric lidar observation of a list of positions.
Lidar is a set of bins around the agent (divided evenly in a circle).
The detection directions are exclusive and exhaustive for a full 360 view.
Each bin reads 0 if there are no objects in that direction.
If there are multiple objects, the distance to the closest one is used.
Otherwise the bin reads the fraction of the distance towards the agent.
E.g. if the object is 90% of lidar_max_dist away, the bin will read 0.1,
and if the object is 10% of lidar_max_dist away, the bin will read 0.9.
(The reading can be thought of as "closeness" or inverse distance)
This encoding has some desirable properties:
- bins read 0 when empty
- bins smoothly increase as objects get close
- maximum reading is 1.0 (where the object overlaps the agent)
- close objects occlude far objects
- constant size observation with variable numbers of objects
"""
positions = np.array(positions, ndmin=2)
obs = np.zeros(self.lidar_conf.num_bins)
for pos in positions:
pos = np.asarray(pos)
if pos.shape == (3,):
pos = pos[:2] # Truncate Z coordinate
# pylint: disable-next=invalid-name
z = complex(*self._ego_xy(pos)) # X, Y as real, imaginary components
dist = np.abs(z)
angle = np.angle(z) % (np.pi * 2)
bin_size = (np.pi * 2) / self.lidar_conf.num_bins
bin = int(angle / bin_size) # pylint: disable=redefined-builtin
bin_angle = bin_size * bin
if self.lidar_conf.max_dist is None:
sensor = np.exp(-self.lidar_conf.exp_gain * dist)
else:
sensor = max(0, self.lidar_conf.max_dist - dist) / self.lidar_conf.max_dist
obs[bin] = max(obs[bin], sensor)
# Aliasing
if self.lidar_conf.alias:
alias = (angle - bin_angle) / bin_size
assert 0 <= alias <= 1, f'bad alias {alias}, dist {dist}, angle {angle}, bin {bin}'
bin_plus = (bin + 1) % self.lidar_conf.num_bins
bin_minus = (bin - 1) % self.lidar_conf.num_bins
obs[bin_plus] = max(obs[bin_plus], alias * sensor)
obs[bin_minus] = max(obs[bin_minus], (1 - alias) * sensor)
return obs
def _obs_compass(self, pos: np.ndarray) -> np.ndarray:
"""Return an agent-centric compass observation of a list of positions.
Compass is a normalized (unit-length) egocentric XY vector,
from the agent to the object.
This is equivalent to observing the egocentric XY angle to the target,
projected into the sin/cos space we use for joints.
(See comment on joint observation for why we do this.)
"""
pos = np.asarray(pos)
if pos.shape == (2,):
pos = np.concatenate([pos, [0]]) # Add a zero z-coordinate
# Get ego vector in world frame
vec = pos - self.agent.pos
# Rotate into frame
vec = np.matmul(vec, self.agent.mat)
# Truncate
vec = vec[: self.compass_conf.shape]
# Normalize
vec /= np.sqrt(np.sum(np.square(vec))) + 0.001
assert vec.shape == (self.compass_conf.shape,), f'Bad vec {vec}'
return vec
def _obs_vision(self) -> np.ndarray:
    """Return pixels from the agent camera.

    Note:
        This is a 3D array of shape (rows, cols, channels).
        The channels are RGB, in that order.
        If you are on a headless machine, you may need to checkout this:
        URL: `issue <https://github.com/PKU-Alignment/safety-gymnasium/issues/27>`_
    """
    rows, cols = self.vision_env_conf.vision_size
    # The renderer takes (width, height) while the config stores (rows, cols).
    width, height = cols, rows
    return self.render(width, height, mode='rgb_array', camera_name='vision', cost={})
def _ego_xy(self, pos: np.ndarray) -> np.ndarray:
"""Return the egocentric XY vector to a position from the agent."""
assert pos.shape == (2,), f'Bad pos {pos}'
agent_3vec = self.agent.pos
agent_mat = self.agent.mat
pos_3vec = np.concatenate([pos, [0]]) # Add a zero z-coordinate
world_3vec = pos_3vec - agent_3vec
return np.matmul(world_3vec, agent_mat)[:2] # only take XY coordinates
@abc.abstractmethod
def calculate_reward(self) -> float:
    """Determine reward depending on the agent and tasks.

    Returns:
        float: Reward for the current timestep.
    """
@abc.abstractmethod
def specific_reset(self) -> None:
    """Set positions and orientations of agent and obstacles.

    Abstract hook: subclasses implement task-specific placement logic here.
    """
@abc.abstractmethod
def specific_step(self) -> None:
    """Each task can define a specific step function.

    It will be called when :meth:`safety_gymnasium.builder.Builder.step()` is called using env.step().
    For example, you can do specific data modification.
    """
@abc.abstractmethod
def update_world(self) -> None:
    """Update one task specific goal.

    Abstract hook: subclasses refresh their goal-related state here.
    """
@property
@abc.abstractmethod
def goal_achieved(self) -> bool:
    """Check if task specific goal is achieved.

    Returns:
        bool: Whether the current goal has been reached this timestep.
    """
"""Base class for obstacles."""
import abc
from dataclasses import dataclass
import numpy as np
from safety_gymnasium.bases.base_agent import BaseAgent
from safety_gymnasium.utils.random_generator import RandomGenerator
from safety_gymnasium.world import Engine
@dataclass
class BaseObject(abc.ABC):
    r"""Base class for obstacles.

    Methods:

    - :meth:`cal_cost`: Calculate the cost of the object, only when the object can be constrained, it
      is needed to be implemented.
    - :meth:`set_agent`: Set the agent instance, only called once for each object in one environment.
    - :meth:`set_engine`: Set the engine instance, only called once in :class:`safety_gymnasium.World`.
    - :meth:`set_random_generator`: Set the random generator instance, only called once in one environment.
    - :meth:`process_config`: Process the config, used to fill the configuration dictionary which used to
      generate mujoco instance xml string of environments.
    - :meth:`_specific_agent_config`: Modify properties according to specific agent.
    - :meth:`get_config`: Define how to generate config of different objects, it will be called in process_config.

    Attributes:

    - :attr:`type` (str): Type of the obstacle, used as key in :meth:`process_config` to fill configuration
      dictionary.
    - :attr:`name` (str): Name of the obstacle, used as key in :meth:`process_config` to fill configuration
      dictionary.
    - :attr:`engine` (:class:`safety_gymnasium.world.Engine`): Physical engine instance.
    - :attr:`random_generator` (:class:`safety_gymnasium.utils.random_generator.RandomGenerator`):
      Random generator instance.
    - :attr:`agent` (:class:`safety_gymnasium.bases.base_agent.BaseAgent`): Agent instance.
    - :attr:`pos` (np.ndarray): Get the position of the object.
    """

    # All fields default to None and are populated later via the setter methods below.
    type: str = None
    name: str = None
    engine: Engine = None
    random_generator: RandomGenerator = None
    agent: BaseAgent = None

    def cal_cost(self) -> dict:
        """Calculate the cost of the obstacle.

        Returns:
            dict: Cost of the object in current environments at this timestep.
        """
        # Default: unconstrained objects contribute no cost.
        return {}

    def set_agent(self, agent: BaseAgent) -> None:
        """Set the agent instance.

        Note:
            This method will be called only once in one environment, that is when the object
            is instantiated.

        Args:
            agent (BaseAgent): Agent instance in current environment.
        """
        self.agent = agent
        self._specific_agent_config()

    def set_engine(self, engine: Engine) -> None:
        """Set the engine instance.

        Note:
            This method will be called only once in one environment, that is when the whole
            environment is instantiated in :meth:`safety_gymnasium.World.bind_engine`.

        Args:
            engine (Engine): Physical engine instance.
        """
        self.engine = engine

    def set_random_generator(self, random_generator: RandomGenerator) -> None:
        """Set the random generator instance.

        Args:
            random_generator (RandomGenerator): Random generator instance.
        """
        self.random_generator = random_generator

    def process_config(self, config: dict, layout: dict, rots: list) -> None:
        """Process the config.

        Note:
            This method is called in :meth:`safety_gymnasium.bases.base_task._build_world_config` to
            fill the configuration dictionary which used to generate mujoco instance xml string of
            environments in :meth:`safety_gymnasium.World.build`.
        """
        if hasattr(self, 'num'):  # Replicated obstacles: one config entry per instance.
            assert (
                len(rots) == self.num
            ), 'The number of rotations should be equal to the number of obstacles.'
            for i in range(self.num):
                name = f'{self.name[:-1]}{i}'
                config[self.type][name] = self.get_config(xy_pos=layout[name], rot=rots[i])
                config[self.type][name].update({'name': name})
                config[self.type][name]['geoms'][0].update({'name': name})
        else:  # Unique obstacle: keyed directly by its name.
            assert len(rots) == 1, 'The number of rotations should be 1.'
            config[self.type][self.name] = self.get_config(xy_pos=layout[self.name], rot=rots[0])

    def _specific_agent_config(self) -> None:  # noqa: B027
        """Modify properties according to specific agent.

        Note:
            This method will be called only once in one environment, that is when :meth:`set_agent`
            is called.
        """

    @property
    @abc.abstractmethod
    def pos(self) -> np.ndarray:
        """Get the position of the obstacle.

        Returns:
            np.ndarray: Position of the obstacle.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_config(self, xy_pos: np.ndarray, rot: float):
        """Get the config of the obstacle.

        Returns:
            dict: Configuration of this type of object in current environment.
        """
        raise NotImplementedError
@dataclass
class Geom(BaseObject):
    r"""Base class for obstacles that are geoms.

    Attributes:
        type (str): Type of the object, used as key in :meth:`process_config` to fill configuration
            dictionary.
    """

    # Static geoms land under the 'geoms' section of the world config.
    type: str = 'geoms'
@dataclass
class FreeGeom(BaseObject):
    r"""Base class for obstacles that are objects.

    Attributes:
        type (str): Type of the object, used as key in :meth:`process_config` to fill configuration
            dictionary.
    """

    # Movable (free) geoms land under the 'free_geoms' section of the world config.
    type: str = 'free_geoms'
@dataclass
class Mocap(BaseObject):
    r"""Base class for obstacles that are mocaps.

    Attributes:
        type (str): Type of the object, used as key in :meth:`process_config` to fill configuration
            dictionary.
    """

    # Mocap-driven obstacles land under the 'mocaps' section of the world config.
    type: str = 'mocaps'

    def process_config(self, config: dict, layout: dict, rots: list) -> None:
        """Process the config.

        Note:
            This method is called in :meth:`safety_gymnasium.bases.base_task._build_world_config` to
            fill the configuration dictionary which used to generate mujoco instance xml string of
            environments in :meth:`safety_gymnasium.World.build`.
            As Mocap type object, it will generate two objects, one is the mocap object, the other
            is the object that is attached to the mocap object, this is due to the mocap's mechanism
            of mujoco.
        """
        if hasattr(self, 'num'):  # Replicated mocaps: one (mocap, obj) pair per instance.
            assert (
                len(rots) == self.num
            ), 'The number of rotations should be equal to the number of obstacles.'
            for i in range(self.num):
                mocap_name = f'{self.name[:-1]}{i}mocap'
                obj_name = f'{self.name[:-1]}{i}obj'
                layout_name = f'{self.name[:-1]}{i}'
                configs = self.get_config(xy_pos=layout[layout_name], rot=rots[i])
                config['free_geoms'][obj_name] = configs['obj']
                config['free_geoms'][obj_name].update({'name': obj_name})
                config['free_geoms'][obj_name]['geoms'][0].update({'name': obj_name})
                config['mocaps'][mocap_name] = configs['mocap']
                config['mocaps'][mocap_name].update({'name': mocap_name})
                config['mocaps'][mocap_name]['geoms'][0].update({'name': mocap_name})
        else:
            assert len(rots) == 1, 'The number of rotations should be 1.'
            mocap_name = f'{self.name[:-1]}mocap'
            obj_name = f'{self.name[:-1]}obj'
            # NOTE(review): unlike Geom.process_config, the trailing character of
            # self.name is stripped even for unique objects -- confirm layouts
            # really use the truncated key in this branch.
            layout_name = self.name[:-1]
            configs = self.get_config(xy_pos=layout[layout_name], rot=rots[0])
            config['free_geoms'][obj_name] = configs['obj']
            config['free_geoms'][obj_name].update({'name': obj_name})
            config['free_geoms'][obj_name]['geoms'][0].update({'name': obj_name})
            config['mocaps'][mocap_name] = configs['mocap']
            config['mocaps'][mocap_name].update({'name': mocap_name})
            config['mocaps'][mocap_name]['geoms'][0].update({'name': mocap_name})

    def set_mocap_pos(self, name: str, value: np.ndarray) -> None:
        """Set the position of a mocap object.

        Args:
            name (str): Name of the mocap object.
            value (np.ndarray): Target position of the mocap object.
        """
        body_id = self.engine.model.body(name).id
        mocap_id = self.engine.model.body_mocapid[body_id]
        self.engine.data.mocap_pos[mocap_id] = value

    @abc.abstractmethod
    def move(self) -> None:
        """Set mocap object positions before a physics step is executed.

        Note:
            This method is called in :meth:`safety_gymnasium.bases.base_task.simulation_forward` before a physics
            step is executed.
        """
"""Robot."""
import os
from dataclasses import InitVar, dataclass, field
import mujoco
import safety_gymnasium
BASE_DIR = os.path.dirname(safety_gymnasium.__file__)
@dataclass
class Robot:  # pylint: disable=too-many-instance-attributes
    """Simple utility class for getting mujoco-specific info about a robot."""

    path: InitVar[str]  # XML path relative to the package dir; consumed by __post_init__
    placements: list = None  # Robot placements list (defaults to full extents)
    locations: list = field(default_factory=list)  # Explicitly place robot XY coordinate
    keepout: float = 0.4  # Needs to be set to match the robot XML used
    base: str = 'assets/xmls/car.xml'  # Which robot XML to use as the base
    rot: float = None  # Override robot starting angle

    def __post_init__(self, path) -> None:
        """Load the robot XML and cache observation/action-space metadata."""
        self.base = path  # Record the XML actually loaded (overrides the default)
        base_path = os.path.join(BASE_DIR, path)
        self.model = mujoco.MjModel.from_xml_path(base_path)  # pylint: disable=no-member
        self.data = mujoco.MjData(self.model)  # pylint: disable=no-member
        mujoco.mj_forward(self.model, self.data)  # pylint: disable=no-member
        # Needed to figure out z-height of free joint of offset body
        self.z_height = self.data.body('robot').xpos[2]
        # Get a list of geoms in the robot
        self.geom_names = [
            self.model.geom(i).name
            for i in range(self.model.ngeom)
            if self.model.geom(i).name != 'floor'
        ]
        # Needed to figure out the observation spaces
        self.nq = self.model.nq  # pylint: disable=invalid-name
        self.nv = self.model.nv  # pylint: disable=invalid-name
        # Needed to figure out action space
        self.nu = self.model.nu  # pylint: disable=invalid-name
        # Needed to figure out observation space
        # See engine.py for an explanation for why we treat these separately
        self.hinge_pos_names = []
        self.hinge_vel_names = []
        self.ballquat_names = []
        self.ballangvel_names = []
        self.sensor_dim = {}
        # Classify every joint-attached sensor by its joint type and sensor type.
        for i in range(self.model.nsensor):
            name = self.model.sensor(i).name
            id = self.model.sensor(name).id  # pylint: disable=redefined-builtin, invalid-name
            self.sensor_dim[name] = self.model.sensor(id).dim[0]
            sensor_type = self.model.sensor(id).type
            if (
                # pylint: disable-next=no-member
                self.model.sensor(id).objtype
                == mujoco.mjtObj.mjOBJ_JOINT  # pylint: disable=no-member
            ):  # pylint: disable=no-member
                joint_id = self.model.sensor(id).objid
                joint_type = self.model.jnt(joint_id).type
                if joint_type == mujoco.mjtJoint.mjJNT_HINGE:  # pylint: disable=no-member
                    if sensor_type == mujoco.mjtSensor.mjSENS_JOINTPOS:  # pylint: disable=no-member
                        self.hinge_pos_names.append(name)
                    elif (
                        sensor_type == mujoco.mjtSensor.mjSENS_JOINTVEL
                    ):  # pylint: disable=no-member
                        self.hinge_vel_names.append(name)
                    else:
                        t = self.model.sensor(i).type  # pylint: disable=invalid-name
                        raise ValueError(f'Unrecognized sensor type {t} for joint')
                elif joint_type == mujoco.mjtJoint.mjJNT_BALL:  # pylint: disable=no-member
                    if sensor_type == mujoco.mjtSensor.mjSENS_BALLQUAT:  # pylint: disable=no-member
                        self.ballquat_names.append(name)
                    elif (
                        sensor_type == mujoco.mjtSensor.mjSENS_BALLANGVEL
                    ):  # pylint: disable=no-member
                        self.ballangvel_names.append(name)
                elif joint_type == mujoco.mjtJoint.mjJNT_SLIDE:  # pylint: disable=no-member
                    # Adding slide joints is trivially easy in code,
                    # but this removes one of the good properties about our observations.
                    # (That we are invariant to relative whole-world transforms)
                    # If slide joints are added we should ensure this stays true!
                    raise ValueError('Slide joints in robots not currently supported')
"""Push box."""
from dataclasses import dataclass, field
import numpy as np
from safety_gymnasium.assets.color import COLOR
from safety_gymnasium.assets.group import GROUP
from safety_gymnasium.bases.base_object import FreeGeom
@dataclass
class PushBox(FreeGeom):  # pylint: disable=too-many-instance-attributes
    """Box parameters (only used if task == 'push')"""

    name: str = 'push_box'
    size: float = 0.2
    placements: list = None  # Box placements list (defaults to full extents)
    locations: list = field(default_factory=list)  # Fixed locations to override placements
    keepout: float = 0.2  # Box keepout radius for placement
    null_dist: float = 2  # Within box_null_dist * box_size radius of box, no box reward given
    density: float = 0.001
    reward_box_dist: float = 1.0  # Dense reward for moving the agent towards the box
    reward_box_goal: float = 1.0  # Reward for moving the box towards the goal
    color: np.array = COLOR['push_box']
    alpha: float = 0.25
    group: np.array = GROUP['push_box']
    is_lidar_observed: bool = True
    is_comp_observed: bool = False
    is_constrained: bool = False
    is_meshed: bool = False
    mesh_name: str = name  # Defaults to the class-level `name` value above

    def get_config(self, xy_pos, rot):
        """To facilitate get specific config for this object.

        Args:
            xy_pos: XY position of the box; z is set to ``size`` so the box rests on the floor.
            rot: Rotation (yaw) applied to the body.

        Returns:
            dict: Body configuration used to build the mujoco world XML.
        """
        body = {
            'name': self.name,
            'pos': np.r_[xy_pos, self.size],
            'rot': rot,
            'geoms': [
                {
                    'name': self.name,
                    'type': 'box',
                    'size': np.ones(3) * self.size,
                    'density': self.density,
                    'group': self.group,
                    'rgba': self.color * np.array([1, 1, 1, self.alpha]),
                },
            ],
        }
        if self.is_meshed:
            # Visual-only mesh geom (contype/conaffinity 0 disables collisions).
            body['geoms'].append(
                {
                    'name': 'push_box_visual',
                    'pos': [0, 0, -0.2],
                    'contype': 0,
                    'conaffinity': 0,
                    'density': 0,
                    'group': self.group,
                    'type': 'mesh',
                    'mesh': self.mesh_name,
                    'material': self.mesh_name,
                    'euler': [np.pi / 2, 0, 0],
                },
            )
        return body

    def _specific_agent_config(self):
        """Modify the push_box property according to specific agent."""
        if self.agent.__class__.__name__ == 'Car':
            # The Car agent is weaker, so the box is made smaller and lighter.
            self.size = 0.125  # Box half-radius size
            self.keepout = 0.125  # Box keepout radius for placement
            self.density = 0.0005

    @property
    def pos(self):
        """Helper to get the box position."""
        return self.engine.data.body(self.name).xpos.copy()
"""Pillar."""
from dataclasses import dataclass, field
import numpy as np
from safety_gymnasium.assets.color import COLOR
from safety_gymnasium.assets.group import GROUP
from safety_gymnasium.bases.base_object import Geom
@dataclass
class Pillars(Geom):  # pylint: disable=too-many-instance-attributes
    """Pillars (immovable obstacles we should not touch)"""

    name: str = 'pillars'
    num: int = 0  # Number of pillars in the world
    size: float = 0.2  # Size of pillars
    height: float = 0.5  # Height of pillars
    placements: list = None  # Pillars placements list (defaults to full extents)
    locations: list = field(default_factory=list)  # Fixed locations to override placements
    keepout: float = 0.3  # Radius for placement of pillars
    cost: float = 1.0  # Cost (per step) for being in contact with a pillar
    color: np.array = COLOR['pillar']
    alpha: float = 1.0
    group: np.array = GROUP['pillar']
    is_lidar_observed: bool = True
    is_constrained: bool = True
    is_meshed: bool = False
    mesh_name: str = name[:-1]  # Mesh asset uses the singular form of `name`

    # pylint: disable-next=too-many-arguments
    def get_config(self, xy_pos, rot):
        """To facilitate get specific config for this object.

        Returns:
            dict: Body configuration used to build the mujoco world XML.
        """
        body = {
            'name': self.name,
            'pos': np.r_[xy_pos, self.height],
            'rot': rot,
            'geoms': [
                {
                    'name': self.name,
                    'size': [self.size, self.height],
                    'type': 'cylinder',
                    'group': self.group,
                    'rgba': self.color * np.array([1.0, 1.0, 1.0, self.alpha]),
                },
            ],
        }
        if self.is_meshed:
            body['geoms'][0].update(
                {
                    'type': 'mesh',
                    'mesh': self.mesh_name,
                    'material': self.mesh_name,
                    'euler': [np.pi / 2, 0, 0],
                },
            )
            body['pos'][2] = 0.5
        return body

    def cal_cost(self):
        """Contacts processing.

        Charges `cost` for every contact between a pillar geom and an agent geom.
        """
        cost = {}
        if not self.is_constrained:
            return cost
        cost['cost_pillars'] = 0
        for contact in self.engine.data.contact[: self.engine.data.ncon]:
            geom_ids = [contact.geom1, contact.geom2]
            geom_names = sorted([self.engine.model.geom(g).name for g in geom_ids])
            # A contact counts only if one geom is a pillar and the other belongs to the agent.
            if any(n.startswith('pillar') for n in geom_names) and any(
                n in self.agent.body_info.geom_names for n in geom_names
            ):
                # pylint: disable-next=no-member
                cost['cost_pillars'] += self.cost
        return cost

    @property
    def pos(self):
        """Helper to get list of pillar positions."""
        # pylint: disable-next=no-member
        return [self.engine.data.body(f'{self.name[:-1]}{i}').xpos.copy() for i in range(self.num)]
"""Hazard."""
from dataclasses import dataclass, field
import numpy as np
from safety_gymnasium.assets.color import COLOR
from safety_gymnasium.assets.group import GROUP
from safety_gymnasium.bases.base_object import Geom
@dataclass
class Hazards(Geom):  # pylint: disable=too-many-instance-attributes
    """Hazardous areas."""

    name: str = 'hazards'
    num: int = 0  # Number of hazards in an environment
    size: float = 0.2
    placements: list = None  # Placements list for hazards (defaults to full extents)
    locations: list = field(default_factory=list)  # Fixed locations to override placements
    keepout: float = 0.4  # Radius of hazard keepout for placement
    alpha: float = 0.25
    cost: float = 1.0  # Cost (per step) for violating the constraint
    color: np.array = COLOR['hazard']
    group: np.array = GROUP['hazard']
    is_lidar_observed: bool = True
    is_constrained: bool = True
    is_meshed: bool = False
    mesh_name: str = name[:-1]  # Mesh asset uses the singular form of `name`
    mesh_euler: list = field(default_factory=lambda: [0, 0, 0])
    mesh_height: float = 2e-2

    def get_config(self, xy_pos, rot):
        """To facilitate get specific config for this object.

        Returns:
            dict: Body configuration used to build the mujoco world XML.
        """
        body = {
            'name': self.name,
            'pos': np.r_[xy_pos, 2e-2],  # self.hazards_size / 2 + 1e-2],
            'rot': rot,
            'geoms': [
                {
                    'name': self.name,
                    'size': [self.size, 1e-2],  # self.hazards_size / 2],
                    'type': 'cylinder',
                    'contype': 0,
                    'conaffinity': 0,
                    'group': self.group,
                    'rgba': self.color * np.array([1.0, 1.0, 1.0, self.alpha]),
                },
            ],
        }
        if self.is_meshed:
            body['geoms'][0].update(
                {
                    'type': 'mesh',
                    'mesh': self.mesh_name,
                    'material': self.mesh_name,
                    'euler': self.mesh_euler,
                    'rgba': np.array([1.0, 1.0, 1.0, 1.0]),
                },
            )
            body['pos'][2] = self.mesh_height
        return body

    def cal_cost(self):
        """Contacts Processing.

        Cost grows linearly with how deep the agent is inside each hazard disk.
        """
        cost = {}
        if not self.is_constrained:
            return cost
        cost['cost_hazards'] = 0
        for h_pos in self.pos:
            h_dist = self.agent.dist_xy(h_pos)
            # pylint: disable=no-member
            if h_dist <= self.size:
                # Penetration-depth weighted cost: deeper inside -> higher cost.
                cost['cost_hazards'] += self.cost * (self.size - h_dist)
        return cost

    @property
    def pos(self):
        """Helper to get the hazards positions from layout."""
        # pylint: disable-next=no-member
        return [self.engine.data.body(f'{self.name[:-1]}{i}').xpos.copy() for i in range(self.num)]
"""Hazard."""
from dataclasses import dataclass
import numpy as np
from safety_gymnasium.assets.color import COLOR
from safety_gymnasium.assets.group import GROUP
from safety_gymnasium.bases.base_object import Geom
@dataclass
class Sigwalls(Geom):  # pylint: disable=too-many-instance-attributes
    """None collision walls.

    This class is used for showing the boundary which is forbidden for entering.
    """

    name: str = 'sigwalls'
    num: int = 2
    locate_factor: float = 1.125  # Distance of each wall from the origin
    size: float = 3.5
    placements: list = None
    keepout: float = 0.0
    color: np.array = COLOR['sigwall']
    alpha: float = 0.1
    group: np.array = GROUP['sigwall']
    is_lidar_observed: bool = False
    is_constrained: bool = False
    is_meshed: bool = False
    mesh_name: str = name[:-1]  # Mesh asset uses the singular form of `name`

    def __post_init__(self) -> None:
        """Validate configuration and precompute the four candidate wall locations."""
        assert self.num in (2, 4), 'Sigwalls are specific for Circle and Run tasks.'
        assert (
            self.locate_factor >= 0
        ), 'For cost calculation, the locate_factor must be greater than or equal to zero.'
        # Two walls on the x-axis, two on the y-axis (only the first `num` are used).
        self.locations: list = [
            (self.locate_factor, 0),
            (-self.locate_factor, 0),
            (0, self.locate_factor),
            (0, -self.locate_factor),
        ]
        self.index: int = 0  # Tracks which wall get_config is currently building

    def index_tick(self):
        """Count index."""
        self.index += 1
        self.index %= self.num

    def get_config(self, xy_pos, rot):  # pylint: disable=unused-argument
        """To facilitate get specific config for this object.

        Returns:
            dict: Body configuration used to build the mujoco world XML.
        """
        body = {
            'name': self.name,
            'pos': np.r_[xy_pos, 0.25],
            'rot': 0,
            'geoms': [
                {
                    'name': self.name,
                    'size': np.array([0.05, self.size, 0.3]),
                    'type': 'box',
                    'contype': 0,
                    'conaffinity': 0,
                    'group': self.group,
                    'rgba': self.color * np.array([1, 1, 1, self.alpha]),
                },
            ],
        }
        if self.index >= 2:
            # Walls 2 and 3 lie on the y-axis, so rotate them a quarter turn.
            body.update({'rot': np.pi / 2})
        self.index_tick()
        if self.is_meshed:
            body['geoms'][0].update(
                {
                    'type': 'mesh',
                    'mesh': self.mesh_name,
                    'material': self.mesh_name,
                    'euler': [0, 0, 0],
                },
            )
        return body

    def cal_cost(self):
        """Contacts Processing.

        The boundary cost is positional (outside the wall band), not contact based.
        """
        cost = {}
        if not self.is_constrained:
            return cost
        cost['cost_out_of_boundary'] = np.abs(self.agent.pos[0]) > self.locate_factor
        if self.num == 4:
            cost['cost_out_of_boundary'] = (
                cost['cost_out_of_boundary'] or np.abs(self.agent.pos[1]) > self.locate_factor
            )
        return cost

    @property
    def pos(self):
        """Helper to get list of Sigwalls positions."""
        # NOTE(review): no return statement -- this property always yields None.
        # Walls are static, but confirm no caller ever reads `.pos` expecting positions.
"""Gremlin."""
from dataclasses import dataclass, field
import numpy as np
from safety_gymnasium.assets.color import COLOR
from safety_gymnasium.assets.group import GROUP
from safety_gymnasium.bases.base_object import Mocap
@dataclass
class Gremlins(Mocap):  # pylint: disable=too-many-instance-attributes
    """Gremlins (moving objects we should avoid)"""

    name: str = 'gremlins'
    num: int = 0  # Number of gremlins in the world
    size: float = 0.1
    placements: list = None  # Gremlins placements list (defaults to full extents)
    locations: list = field(default_factory=list)  # Fixed locations to override placements
    keepout: float = 0.5  # Radius for keeping out (contains gremlin path)
    travel: float = 0.3  # Radius of the circle traveled in
    contact_cost: float = 1.0  # Cost for touching a gremlin
    dist_threshold: float = 0.2  # Threshold for cost for being too close
    dist_cost: float = 1.0  # Cost for being within distance threshold
    density: float = 0.001
    color: np.array = COLOR['gremlin']
    alpha: float = 1
    group: np.array = GROUP['gremlin']
    is_lidar_observed: bool = True
    is_constrained: bool = True
    is_meshed: bool = False
    mesh_name: str = name[:-1]  # Mesh asset uses the singular form of `name`

    def get_config(self, xy_pos, rot):
        """To facilitate get specific config for this object.

        Returns:
            dict: Paired configs -- the physical object and its driving mocap body.
        """
        return {'obj': self.get_obj(xy_pos, rot), 'mocap': self.get_mocap(xy_pos, rot)}

    def get_obj(self, xy_pos, rot):
        """To facilitate get objects config for this object"""
        body = {
            'name': self.name,
            'pos': np.r_[xy_pos, self.size],
            'rot': rot,
            'geoms': [
                {
                    'name': self.name,
                    'size': np.ones(3) * self.size,
                    'type': 'box',
                    'density': self.density,
                    'group': self.group,
                    'rgba': self.color * np.array([1, 1, 1, self.alpha]),
                },
            ],
        }
        if self.is_meshed:
            body['geoms'][0].update(
                {
                    'type': 'mesh',
                    'mesh': self.mesh_name,
                    'material': self.mesh_name,
                    'rgba': np.array([1, 1, 1, 1]),
                    'euler': [np.pi / 2, 0, 0],
                },
            )
            body['pos'][2] = 0.0
        return body

    def get_mocap(self, xy_pos, rot):
        """To facilitate get mocaps config for this object"""
        body = {
            'name': self.name,
            'pos': np.r_[xy_pos, self.size],
            'rot': rot,
            'geoms': [
                {
                    'name': self.name,
                    'size': np.ones(3) * self.size,
                    'type': 'box',
                    'group': self.group,
                    # The mocap twin is rendered almost transparent.
                    'rgba': self.color * np.array([1, 1, 1, self.alpha * 0.1]),
                },
            ],
        }
        if self.is_meshed:
            body['geoms'][0].update(
                {
                    'type': 'mesh',
                    'mesh': self.mesh_name,
                    'material': self.mesh_name,
                    'rgba': np.array([1, 1, 1, 0]),
                    'euler': [np.pi / 2, 0, 0],
                },
            )
            body['pos'][2] = 0.0
        return body

    def cal_cost(self):
        """Contacts processing.

        Charges `contact_cost` for every contact between a gremlin and an agent geom.
        """
        cost = {}
        if not self.is_constrained:
            return cost
        cost['cost_gremlins'] = 0
        for contact in self.engine.data.contact[: self.engine.data.ncon]:
            geom_ids = [contact.geom1, contact.geom2]
            geom_names = sorted([self.engine.model.geom(g).name for g in geom_ids])
            if any(n.startswith('gremlin') for n in geom_names) and any(
                n in self.agent.body_info.geom_names for n in geom_names
            ):
                # pylint: disable-next=no-member
                cost['cost_gremlins'] += self.contact_cost
        return cost

    def move(self):
        """Set mocap object positions before a physics step is executed."""
        phase = float(self.engine.data.time)  # All gremlins share the same circular phase
        for i in range(self.num):
            name = f'gremlin{i}'
            target = np.array([np.sin(phase), np.cos(phase)]) * self.travel
            pos = np.r_[target, [self.size]]
            self.set_mocap_pos(name + 'mocap', pos)

    @property
    def pos(self):
        """Helper to get the current gremlin position."""
        # pylint: disable-next=no-member
        return [self.engine.data.body(f'gremlin{i}obj').xpos.copy() for i in range(self.num)]
"""Tasks in Safety-Gymnasium."""
from safety_gymnasium.tasks.safe_multi_agent.tasks.multi_goal.multi_goal_level0 import (
MultiGoalLevel0,
)
from safety_gymnasium.tasks.safe_multi_agent.tasks.multi_goal.multi_goal_level1 import (
MultiGoalLevel1,
)
from safety_gymnasium.tasks.safe_multi_agent.tasks.multi_goal.multi_goal_level2 import (
MultiGoalLevel2,
)
from safety_gymnasium.tasks.safe_navigation.button.button_level0 import ButtonLevel0
from safety_gymnasium.tasks.safe_navigation.button.button_level1 import ButtonLevel1
from safety_gymnasium.tasks.safe_navigation.button.button_level2 import ButtonLevel2
from safety_gymnasium.tasks.safe_navigation.circle.circle_level0 import CircleLevel0
from safety_gymnasium.tasks.safe_navigation.circle.circle_level1 import CircleLevel1
from safety_gymnasium.tasks.safe_navigation.circle.circle_level2 import CircleLevel2
from safety_gymnasium.tasks.safe_navigation.goal.goal_level0 import GoalLevel0
from safety_gymnasium.tasks.safe_navigation.goal.goal_level1 import GoalLevel1
from safety_gymnasium.tasks.safe_navigation.goal.goal_level2 import GoalLevel2
from safety_gymnasium.tasks.safe_navigation.push.push_level0 import PushLevel0
from safety_gymnasium.tasks.safe_navigation.push.push_level1 import PushLevel1
from safety_gymnasium.tasks.safe_navigation.push.push_level2 import PushLevel2
from safety_gymnasium.tasks.safe_navigation.run.run import RunLevel0
from safety_gymnasium.tasks.safe_vision.building_button.building_button_level0 import (
BuildingButtonLevel0,
)
from safety_gymnasium.tasks.safe_vision.building_button.building_button_level1 import (
BuildingButtonLevel1,
)
from safety_gymnasium.tasks.safe_vision.building_button.building_button_level2 import (
BuildingButtonLevel2,
)
from safety_gymnasium.tasks.safe_vision.building_goal.building_goal_level0 import BuildingGoalLevel0
from safety_gymnasium.tasks.safe_vision.building_goal.building_goal_level1 import BuildingGoalLevel1
from safety_gymnasium.tasks.safe_vision.building_goal.building_goal_level2 import BuildingGoalLevel2
from safety_gymnasium.tasks.safe_vision.building_push.building_push_level0 import BuildingPushLevel0
from safety_gymnasium.tasks.safe_vision.building_push.building_push_level1 import BuildingPushLevel1
from safety_gymnasium.tasks.safe_vision.building_push.building_push_level2 import BuildingPushLevel2
from safety_gymnasium.tasks.safe_vision.fading.fading_level0 import (
FadingEasyLevel0,
FadingHardLevel0,
)
from safety_gymnasium.tasks.safe_vision.fading.fading_level1 import (
FadingEasyLevel1,
FadingHardLevel1,
)
from safety_gymnasium.tasks.safe_vision.fading.fading_level2 import (
FadingEasyLevel2,
FadingHardLevel2,
)
from safety_gymnasium.tasks.safe_vision.formula_one.formula_one_level0 import FormulaOneLevel0
from safety_gymnasium.tasks.safe_vision.formula_one.formula_one_level1 import FormulaOneLevel1
from safety_gymnasium.tasks.safe_vision.formula_one.formula_one_level2 import FormulaOneLevel2
from safety_gymnasium.tasks.safe_vision.race.race_level0 import RaceLevel0
from safety_gymnasium.tasks.safe_vision.race.race_level1 import RaceLevel1
from safety_gymnasium.tasks.safe_vision.race.race_level2 import RaceLevel2 | /safety_gymnasium-1.2.0-py3-none-any.whl/safety_gymnasium/tasks/__init__.py | 0.641984 | 0.523359 | __init__.py | pypi |
"""Humanoid environment with a safety constraint on velocity."""
import numpy as np
from gymnasium.envs.mujoco.humanoid_v4 import HumanoidEnv, mass_center
from safety_gymnasium.utils.task_utils import add_velocity_marker, clear_viewer
class SafetyHumanoidVelocityEnv(HumanoidEnv):
    """Humanoid environment with a safety constraint on velocity."""

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Planar speed (m/s) above which a unit cost is emitted each step.
        self._velocity_threshold = 2.3475
        self.model.light(0).castshadow = False

    def step(self, action):  # pylint: disable=too-many-locals
        """Advance the simulation one step, returning the cost-augmented tuple
        ``(obs, reward, cost, terminated, truncated, info)``."""
        com_before = mass_center(self.model, self.data)
        self.do_simulation(action, self.frame_skip)
        com_after = mass_center(self.model, self.data)

        # Planar center-of-mass velocity over the simulated interval.
        x_velocity, y_velocity = (com_after - com_before) / self.dt

        ctrl_cost = self.control_cost(action)
        forward_reward = self._forward_reward_weight * x_velocity
        healthy_reward = self.healthy_reward

        observation = self._get_obs()
        reward = forward_reward + healthy_reward - ctrl_cost
        terminated = self.terminated
        info = {
            'reward_linvel': forward_reward,
            'reward_quadctrl': -ctrl_cost,
            'reward_alive': healthy_reward,
            'x_position': com_after[0],
            'y_position': com_after[1],
            'distance_from_origin': np.linalg.norm(com_after, ord=2),
            'x_velocity': x_velocity,
            'y_velocity': y_velocity,
            'forward_reward': forward_reward,
        }

        velocity = np.sqrt(x_velocity**2 + y_velocity**2)
        cost = float(velocity > self._velocity_threshold)

        if self.mujoco_renderer.viewer:
            clear_viewer(self.mujoco_renderer.viewer)
            add_velocity_marker(
                viewer=self.mujoco_renderer.viewer,
                pos=self.get_body_com('torso')[:3].copy(),
                vel=velocity,
                cost=cost,
                velocity_threshold=self._velocity_threshold,
            )
        if self.render_mode == 'human':
            self.render()
        return observation, reward, cost, terminated, False, info
"""Swimmer environment with a safety constraint on velocity."""
import numpy as np
from gymnasium.envs.mujoco.swimmer_v4 import SwimmerEnv
from safety_gymnasium.utils.task_utils import add_velocity_marker, clear_viewer
class SafetySwimmerVelocityEnv(SwimmerEnv):
    """Swimmer environment with a safety constraint on velocity."""

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Planar speed (m/s) above which a unit cost is emitted each step.
        self._velocity_threshold = 0.04845
        self.model.light(0).castshadow = False

    def step(self, action):
        """Advance the simulation one step, returning the cost-augmented tuple
        ``(obs, reward, cost, terminated, truncated, info)``."""
        pos_before = self.data.qpos[0:2].copy()
        self.do_simulation(action, self.frame_skip)
        pos_after = self.data.qpos[0:2].copy()

        x_velocity, y_velocity = (pos_after - pos_before) / self.dt
        forward_reward = self._forward_reward_weight * x_velocity
        ctrl_cost = self.control_cost(action)

        observation = self._get_obs()
        reward = forward_reward - ctrl_cost
        info = {
            'reward_fwd': forward_reward,
            'reward_ctrl': -ctrl_cost,
            'x_position': pos_after[0],
            'y_position': pos_after[1],
            'distance_from_origin': np.linalg.norm(pos_after, ord=2),
            'x_velocity': x_velocity,
            'y_velocity': y_velocity,
            'forward_reward': forward_reward,
        }

        velocity = np.sqrt(x_velocity**2 + y_velocity**2)
        cost = float(velocity > self._velocity_threshold)

        if self.mujoco_renderer.viewer:
            clear_viewer(self.mujoco_renderer.viewer)
            add_velocity_marker(
                viewer=self.mujoco_renderer.viewer,
                pos=self.get_body_com('torso')[:3].copy(),
                vel=velocity,
                cost=cost,
                velocity_threshold=self._velocity_threshold,
            )
        if self.render_mode == 'human':
            self.render()
        # The swimmer never terminates or truncates on its own.
        return observation, reward, cost, False, False, info
"""Ant environment with a safety constraint on velocity."""
import numpy as np
from gymnasium.envs.mujoco.ant_v4 import AntEnv
from safety_gymnasium.utils.task_utils import add_velocity_marker, clear_viewer
class SafetyAntVelocityEnv(AntEnv):
    """Ant environment with a safety constraint on velocity."""

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Planar speed (m/s) above which a unit cost is emitted each step.
        self._velocity_threshold = 2.5745
        self.model.light(0).castshadow = False

    def step(self, action):  # pylint: disable=too-many-locals
        """Advance the simulation one step, returning the cost-augmented tuple
        ``(obs, reward, cost, terminated, truncated, info)``."""
        torso_before = self.get_body_com('torso')[:2].copy()
        self.do_simulation(action, self.frame_skip)
        torso_after = self.get_body_com('torso')[:2].copy()

        x_velocity, y_velocity = (torso_after - torso_before) / self.dt

        forward_reward = x_velocity
        healthy_reward = self.healthy_reward
        rewards = forward_reward + healthy_reward

        costs = ctrl_cost = self.control_cost(action)
        terminated = self.terminated
        observation = self._get_obs()
        info = {
            'reward_forward': forward_reward,
            'reward_ctrl': -ctrl_cost,
            'reward_survive': healthy_reward,
            'x_position': torso_after[0],
            'y_position': torso_after[1],
            'distance_from_origin': np.linalg.norm(torso_after, ord=2),
            'x_velocity': x_velocity,
            'y_velocity': y_velocity,
            'forward_reward': forward_reward,
        }

        if self._use_contact_forces:
            contact_cost = self.contact_cost
            costs += contact_cost
            # NOTE(review): overwriting 'reward_ctrl' (rather than adding a
            # 'reward_contact' entry) mirrors upstream gymnasium ant_v4;
            # kept as-is for compatibility — confirm before changing.
            info['reward_ctrl'] = -contact_cost

        reward = rewards - costs

        velocity = np.sqrt(x_velocity**2 + y_velocity**2)
        cost = float(velocity > self._velocity_threshold)

        if self.mujoco_renderer.viewer:
            clear_viewer(self.mujoco_renderer.viewer)
            add_velocity_marker(
                viewer=self.mujoco_renderer.viewer,
                pos=self.get_body_com('torso')[:3].copy(),
                vel=velocity,
                cost=cost,
                velocity_threshold=self._velocity_threshold,
            )
        if self.render_mode == 'human':
            self.render()
        return observation, reward, cost, terminated, False, info
"""Fading level 0."""
import mujoco
from safety_gymnasium.tasks.safe_navigation.goal.goal_level0 import GoalLevel0
class FadingEasyLevel0(GoalLevel0):
    """An agent must navigate to a goal.

    The goal will gradually disappear over time.
    """

    def __init__(self, config) -> None:
        super().__init__(config=config)
        # Number of steps over which an object fades to full transparency.
        self.fadding_steps = 150
        self.fadding_objects = [self.goal]  # pylint: disable=no-member
        # Annotation only: the mapping itself is created lazily by
        # ``_init_fading_ids`` — ``specific_reset`` keys off ``hasattr``.
        self.objects_map_ids: dict

    def _init_fading_ids(self):
        """Cache the MuJoCo geom ids for every fading object, keyed by name."""
        self.objects_map_ids = {}
        for obj in self.fadding_objects:
            ids = self.objects_map_ids.setdefault(obj.name, [])
            if hasattr(obj, 'num'):
                # Replicated objects are named ``<base-minus-last-char><index>``.
                geom_names = [obj.name[:-1] + str(i) for i in range(obj.num)]
            else:
                geom_names = [obj.name]
            for geom_name in geom_names:
                ids.append(
                    mujoco.mj_name2id(  # pylint: disable=no-member
                        self.model,
                        mujoco.mjtObj.mjOBJ_GEOM,  # pylint: disable=no-member
                        geom_name,
                    ),
                )

    def set_objects_alpha(self, object_name, alpha):
        """Set the alpha value of the object via ids of them in MuJoCo."""
        for geom_id in self.objects_map_ids[object_name]:
            self.model.geom_rgba[geom_id][-1] = alpha

    def linear_decrease_alpha(self, object_name, alpha):
        """Linearly decrease the alpha value of the object via ids of them in MuJoCo."""
        for geom_id in self.objects_map_ids[object_name]:
            faded = self.model.geom_rgba[geom_id][-1] - alpha / self.fadding_steps
            # Clamp at zero so the geom never gets a negative alpha.
            self.model.geom_rgba[geom_id][-1] = max(faded, 0)

    def specific_reset(self):
        """Restore full opacity for every fading object at episode start."""
        if not hasattr(self, 'objects_map_ids'):
            self._init_fading_ids()
        for obj in self.fadding_objects:
            self.set_objects_alpha(obj.name, getattr(self, obj.name).alpha)

    def specific_step(self):
        """Fade every tracked object a little further each step."""
        for obj in self.fadding_objects:
            self.linear_decrease_alpha(obj.name, getattr(self, obj.name).alpha)

    def update_world(self):
        """Build a new goal position, maybe with resampling due to hazards."""
        self.build_goal_position()
        # pylint: disable=no-member
        goal_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_GEOM, self.goal.name)
        # A freshly placed goal starts fully visible again.
        self.model.geom_rgba[goal_id][-1] = self.goal.alpha
        self.last_dist_goal = self.dist_goal()
        # pylint: enable=no-member
class FadingHardLevel0(FadingEasyLevel0):
    """The goal will disappear more quickly."""
    def __init__(self, config) -> None:
        super().__init__(config=config)
        # Half the fade window of the easy variant (150 -> 75 steps).
        self.fadding_steps = 75
"""FormulaOne level 0."""
from safety_gymnasium.assets.geoms.staged_goal import StagedGoal
from safety_gymnasium.bases.base_task import BaseTask
class FormulaOneLevel0(BaseTask):
    """A robot must navigate to a goal in the Formula One map.

    While the goal is divided as 7 stages.
    And the agent can get reward only when it reaches the goal of each stage.
    """

    def __init__(self, config) -> None:
        super().__init__(config=config)
        self.num_steps = 1000
        self.floor_conf.size = [0.5, 0.5, 0.1]

        # One anchor point per stage of the course; the agent spawns at one
        # of them and the staged goal cycles through them.
        staged_points = [
            (3, 9),
            (13, -1.7),
            (26, 0.05),
            (32, -7),
            (4, -17.5),
            (19.0, -20.7),
            (-0.85, -0.4),
        ]
        # Shrink each anchor to a numerically zero-area placement box so the
        # spawn position is effectively deterministic.
        eps = 1e-9
        self.agent.placements = [(x - eps, y - eps, x + eps, y + eps) for x, y in staged_points]
        self.agent.keepout = 0.0
        self.mechanism_conf.continue_goal = True
        self.reward_conf.reward_clip = 11
        self._add_geoms(
            StagedGoal(
                num_stage=7,
                staged_locations=staged_points,
                reward_goal=10.0,
                keepout=0.305,
                size=0.3,
                is_meshed=True,
            ),
        )
        self._is_load_static_geoms = True
        self.last_dist_goal = None

    def calculate_reward(self):
        """Determine reward depending on the agent and tasks."""
        # pylint: disable=no-member
        reward = 0.0
        dist_goal = self.dist_staged_goal()
        # Dense shaping term: reward progress made toward the stage goal.
        reward += (self.last_dist_goal - dist_goal) * self.staged_goal.reward_distance
        self.last_dist_goal = dist_goal
        if self.goal_achieved:
            reward += self.staged_goal.reward_goal
        return reward

    def specific_reset(self):
        """Reset the staged goal relative to the agent's spawn position."""
        self.staged_goal.reset(self.agent.pos)  # pylint: disable=no-member

    def specific_step(self):
        """No per-step task bookkeeping is needed."""

    def update_world(self):
        """Build a new goal position, maybe with resampling due to hazards."""
        self.build_staged_goal_position()
        self.last_dist_goal = self.dist_staged_goal()

    @property
    def goal_achieved(self):
        """Whether the goal of task is achieved."""
        # pylint: disable-next=no-member
        return self.dist_staged_goal() <= self.staged_goal.size
"""Race level 0."""
import numpy as np
from safety_gymnasium.assets.geoms import Goal
from safety_gymnasium.bases.base_task import BaseTask
class RaceLevel0(BaseTask):
    """A robot must navigate to a goal."""

    def __init__(self, config) -> None:
        super().__init__(config=config)
        self.num_steps = 500
        self.floor_conf.size = [17.5, 17.5, 0.1]

        # NOTE(review): the attribute keeps its historical misspelling of
        # "placement" in case external code reads it.
        self.palcement_cal_factor = 3.5

        # Narrow spawn box centred on the track's starting line.
        spawn_width = self.palcement_cal_factor * 0.05
        spawn_length = self.palcement_cal_factor * 0.1
        center_x = self.palcement_cal_factor * -0.65
        center_y = self.palcement_cal_factor * 0.3
        self.agent.placements = [
            (
                center_x - spawn_width / 2,
                center_y - spawn_length / 2,
                center_x + spawn_width / 2,
                center_y + spawn_length / 2,
            ),
        ]
        self.agent.keepout = 0
        self.mechanism_conf.continue_goal = False
        self.reward_conf.reward_clip = 11
        self._add_geoms(
            Goal(
                reward_goal=10.0,
                keepout=0.305,
                size=0.3,
                locations=[(self.palcement_cal_factor * 0.9, self.palcement_cal_factor * 0.3)],
                is_meshed=True,
                mesh_name='flower_bush',
                mesh_euler=[np.pi / 2, 0, 0],
                mesh_height=0.0,
            ),
        )
        self._is_load_static_geoms = True
        self.last_dist_goal = None

    def calculate_reward(self):
        """Determine reward depending on the agent and tasks."""
        # pylint: disable=no-member
        reward = 0.0
        dist_goal = self.dist_goal()
        # Dense shaping term: reward progress made toward the goal.
        reward += (self.last_dist_goal - dist_goal) * self.goal.reward_distance
        self.last_dist_goal = dist_goal
        if self.goal_achieved:
            reward += self.goal.reward_goal
        return reward

    def specific_reset(self):
        """No task-specific reset behavior is needed."""

    def specific_step(self):
        """No per-step task bookkeeping is needed."""

    def update_world(self):
        """Build a new goal position, maybe with resampling due to hazards."""
        self.build_goal_position()
        self.last_dist_goal = self.dist_goal()

    @property
    def goal_achieved(self):
        """Whether the goal of task is achieved."""
        # pylint: disable-next=no-member
        return self.dist_goal() <= self.goal.size
import math
import numpy as np
import torch
def check(input):  # pylint: disable=redefined-builtin
    """Return *input* as a ``torch.Tensor`` when possible.

    ``np.ndarray`` inputs are converted with ``torch.from_numpy`` (zero-copy,
    shares memory); every other value — tensors in particular — is returned
    unchanged. The original implementation fell through and returned ``None``
    for non-array inputs, which broke call sites that chain ``.to(device)``.
    """
    # Exact-type match kept from the original (ndarray subclasses pass through).
    if type(input) == np.ndarray:
        return torch.from_numpy(input)
    return input
def get_gard_norm(it):
    """Return the global L2 norm of the gradients of the given parameters.

    Parameters whose ``.grad`` is ``None`` are skipped; the (misspelled)
    function name is kept for backward compatibility with callers.
    """
    total_sq = sum(p.grad.norm() ** 2 for p in it if p.grad is not None)
    return math.sqrt(total_sq)
def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):
    """Decreases the learning rate linearly"""
    # Anneal from initial_lr at epoch 0 down to 0 at total_num_epochs.
    lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))
    for group in optimizer.param_groups:
        group['lr'] = lr
def huber_loss(e, d):
    """Elementwise Huber loss with threshold ``d``.

    Quadratic (``e**2 / 2``) where ``|e| <= d`` and linear
    (``d * (|e| - d/2)``) beyond. Fixes the original outlier mask
    ``(e > d)``, which was never true for large *negative* errors and
    silently zeroed their loss contribution.
    """
    a = (abs(e) <= d).float()
    b = (abs(e) > d).float()
    return a * e**2 / 2 + b * d * (abs(e) - d / 2)
def mse_loss(e):
    """Return the elementwise squared-error loss ``e**2 / 2``."""
    return 0.5 * e**2
def get_shape_from_obs_space(obs_space):
    """Return the shape of an observation space.

    'Box' spaces yield their ``.shape``; plain lists are passed through
    unchanged. Anything else raises ``NotImplementedError``.
    """
    kind = obs_space.__class__.__name__
    if kind == 'Box':
        return obs_space.shape
    if kind == 'list':
        return obs_space
    raise NotImplementedError
def get_shape_from_act_space(act_space):
    """Return the flat action dimension for the supported gym space kinds."""
    kind = act_space.__class__.__name__
    if kind == 'Discrete':
        return 1
    if kind == 'MultiDiscrete':
        return act_space.shape
    if kind in ('Box', 'MultiBinary'):
        return act_space.shape[0]
    # agar-style fallback (kept from the original): first sub-space plus one.
    return act_space[0].shape[0] + 1
def tile_images(img_nhwc):
    """
    Tile N images into one big PxQ image
    (P,Q) are chosen to be as close as possible, and if N
    is square, then P=Q.
    input: img_nhwc, list or array of images, ndim=4 once turned into array
           n = batch index, h = height, w = width, c = channel
    returns:
        bigim_HWc, ndarray with ndim=3
    """
    imgs = np.asarray(img_nhwc)
    n, h, w, c = imgs.shape
    rows = int(np.ceil(np.sqrt(n)))
    cols = int(np.ceil(float(n) / rows))
    # Pad the batch with blank frames so it fills the rows*cols grid exactly.
    padded = np.array(list(imgs) + [imgs[0] * 0 for _ in range(n, rows * cols)])
    grid = padded.reshape(rows, cols, h, w, c)
    grid = grid.transpose(0, 2, 1, 3, 4)
    return grid.reshape(rows * h, cols * w, c)
def get_AgentIndex(config):
    """Return per-hand agent index lists parsed from the env config.

    The same index layout is used for the right hand and the left hand.
    """
    # NOTE(review): ``eval`` on config text — only safe for trusted config
    # files; ``ast.literal_eval`` would be the safer drop-in if the values
    # are plain literals.
    index_expr = config['env']['handAgentIndex']
    # Evaluate twice so the two entries are independent objects, exactly as
    # the original double-append did.
    return [eval(index_expr), eval(index_expr)]  # pylint: disable=eval-used
def process_trpol(args, env, cfg_train, logdir):
    """Build a TRPO-Lagrangian learner wired from ``cfg_train``; a non-empty
    ``args.model_dir`` switches to test mode and loads that checkpoint.
    Returns the configured ``TRPOL`` instance."""
    from algorithms.rl.trpol import TRPOL, Actor, Critic
    learn_cfg = cfg_train['learn']
    is_testing = learn_cfg['test']
    # Override resume and testing flags if they are passed as parameters.
    if args.model_dir != '':
        is_testing = True
        chkpt_path = args.model_dir
    # Make the log directory unique per seed.
    logdir = logdir + '_seed{}'.format(env.task.cfg['seed'])
    """Set up the PPO system for training or inferencing."""
    trpol = TRPOL(
        vec_env=env,
        actor_class=Actor,
        critic_class=Critic,
        cost_critic_class=Critic,
        cost_lim=args.cost_lim,
        num_transitions_per_env=learn_cfg['nsteps'],
        num_learning_epochs=learn_cfg['noptepochs'],
        num_mini_batches=learn_cfg['nminibatches'],
        clip_param=learn_cfg['cliprange'],
        gamma=learn_cfg['gamma'],
        lam=learn_cfg['lam'],
        init_noise_std=learn_cfg.get('init_noise_std', 0.3),
        value_loss_coef=learn_cfg.get('value_loss_coef', 2.0),
        entropy_coef=learn_cfg['ent_coef'],
        learning_rate=learn_cfg['optim_stepsize'],
        max_grad_norm=learn_cfg.get('max_grad_norm', 2.0),
        use_clipped_value_loss=learn_cfg.get('use_clipped_value_loss', False),
        schedule=learn_cfg.get('schedule', 'fixed'),
        desired_kl=learn_cfg.get('desired_kl', None),
        model_cfg=cfg_train['policy'],
        device=env.rl_device,
        sampler=learn_cfg.get('sampler', 'sequential'),
        log_dir=logdir,
        is_testing=is_testing,
        print_log=learn_cfg['print_log'],
        apply_reset=False,
        asymmetric=(env.num_states > 0),
    )
    # ``chkpt_path`` is only bound when args.model_dir was non-empty, which
    # both branches below re-check before using it.
    if is_testing and args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        trpol.test(chkpt_path)
    elif args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        trpol.load(chkpt_path)
    return trpol
def process_cpo(args, env, cfg_train, logdir):
    """Build a CPO learner wired from ``cfg_train``; a non-empty
    ``args.model_dir`` switches to test mode and loads that checkpoint.
    Returns the configured ``CPO`` instance."""
    from algorithms.rl.cpo import CPO, Actor, Critic
    learn_cfg = cfg_train['learn']
    is_testing = learn_cfg['test']
    # Override resume and testing flags if they are passed as parameters.
    if args.model_dir != '':
        is_testing = True
        chkpt_path = args.model_dir
    # Make the log directory unique per seed.
    logdir = logdir + '_seed{}'.format(env.task.cfg['seed'])
    """Set up the PPO system for training or inferencing."""
    cpo = CPO(
        vec_env=env,
        actor_class=Actor,
        critic_class=Critic,
        cost_critic_class=Critic,
        cost_lim=args.cost_lim,
        num_transitions_per_env=learn_cfg['nsteps'],
        num_learning_epochs=learn_cfg['noptepochs'],
        num_mini_batches=learn_cfg['nminibatches'],
        clip_param=learn_cfg['cliprange'],
        gamma=learn_cfg['gamma'],
        lam=learn_cfg['lam'],
        init_noise_std=learn_cfg.get('init_noise_std', 0.3),
        value_loss_coef=learn_cfg.get('value_loss_coef', 2.0),
        entropy_coef=learn_cfg['ent_coef'],
        learning_rate=learn_cfg['optim_stepsize'],
        max_grad_norm=learn_cfg.get('max_grad_norm', 2.0),
        use_clipped_value_loss=learn_cfg.get('use_clipped_value_loss', False),
        schedule=learn_cfg.get('schedule', 'fixed'),
        desired_kl=learn_cfg.get('desired_kl', None),
        model_cfg=cfg_train['policy'],
        device=env.rl_device,
        sampler=learn_cfg.get('sampler', 'sequential'),
        log_dir=logdir,
        is_testing=is_testing,
        print_log=learn_cfg['print_log'],
        apply_reset=False,
        asymmetric=(env.num_states > 0),
    )
    if is_testing and args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        cpo.test(chkpt_path)
    elif args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        cpo.load(chkpt_path)
    return cpo
def process_pcpo(args, env, cfg_train, logdir):
    """Build a PCPO learner wired from ``cfg_train``; a non-empty
    ``args.model_dir`` switches to test mode and loads that checkpoint.
    Returns the configured ``PCPO`` instance."""
    from algorithms.rl.pcpo import PCPO, Actor, Critic
    learn_cfg = cfg_train['learn']
    is_testing = learn_cfg['test']
    # Override resume and testing flags if they are passed as parameters.
    if args.model_dir != '':
        is_testing = True
        chkpt_path = args.model_dir
    # Make the log directory unique per seed.
    logdir = logdir + '_seed{}'.format(env.task.cfg['seed'])
    """Set up the PPO system for training or inferencing."""
    pcpo = PCPO(
        vec_env=env,
        actor_class=Actor,
        critic_class=Critic,
        cost_critic_class=Critic,
        num_transitions_per_env=learn_cfg['nsteps'],
        num_learning_epochs=learn_cfg['noptepochs'],
        num_mini_batches=learn_cfg['nminibatches'],
        cost_lim=args.cost_lim,
        clip_param=learn_cfg['cliprange'],
        gamma=learn_cfg['gamma'],
        lam=learn_cfg['lam'],
        init_noise_std=learn_cfg.get('init_noise_std', 0.3),
        value_loss_coef=learn_cfg.get('value_loss_coef', 2.0),
        entropy_coef=learn_cfg['ent_coef'],
        learning_rate=learn_cfg['optim_stepsize'],
        max_grad_norm=learn_cfg.get('max_grad_norm', 2.0),
        use_clipped_value_loss=learn_cfg.get('use_clipped_value_loss', False),
        schedule=learn_cfg.get('schedule', 'fixed'),
        desired_kl=learn_cfg.get('desired_kl', None),
        model_cfg=cfg_train['policy'],
        device=env.rl_device,
        sampler=learn_cfg.get('sampler', 'sequential'),
        log_dir=logdir,
        is_testing=is_testing,
        print_log=learn_cfg['print_log'],
        apply_reset=False,
        asymmetric=(env.num_states > 0),
    )
    if is_testing and args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        pcpo.test(chkpt_path)
    elif args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        pcpo.load(chkpt_path)
    return pcpo
def process_p3o(args, env, cfg_train, logdir):
    """Build a P3O learner wired from ``cfg_train``; a non-empty
    ``args.model_dir`` switches to test mode and loads that checkpoint.
    Returns the configured ``P3O`` instance."""
    from algorithms.rl.p3o import P3O, Actor, Critic
    learn_cfg = cfg_train['learn']
    is_testing = learn_cfg['test']
    # Override resume and testing flags if they are passed as parameters.
    if args.model_dir != '':
        is_testing = True
        chkpt_path = args.model_dir
    # Make the log directory unique per seed.
    logdir = logdir + '_seed{}'.format(env.task.cfg['seed'])
    """Set up the PPO system for training or inferencing."""
    p3o = P3O(
        vec_env=env,
        actor_class=Actor,
        critic_class=Critic,
        cost_critic_class=Critic,
        num_transitions_per_env=learn_cfg['nsteps'],
        num_learning_epochs=learn_cfg['noptepochs'],
        num_mini_batches=learn_cfg['nminibatches'],
        cost_lim=args.cost_lim,
        clip_param=learn_cfg['cliprange'],
        gamma=learn_cfg['gamma'],
        lam=learn_cfg['lam'],
        init_noise_std=learn_cfg.get('init_noise_std', 0.3),
        value_loss_coef=learn_cfg.get('value_loss_coef', 2.0),
        entropy_coef=learn_cfg['ent_coef'],
        learning_rate=learn_cfg['optim_stepsize'],
        max_grad_norm=learn_cfg.get('max_grad_norm', 2.0),
        use_clipped_value_loss=learn_cfg.get('use_clipped_value_loss', False),
        schedule=learn_cfg.get('schedule', 'fixed'),
        desired_kl=learn_cfg.get('desired_kl', None),
        model_cfg=cfg_train['policy'],
        device=env.rl_device,
        sampler=learn_cfg.get('sampler', 'sequential'),
        log_dir=logdir,
        is_testing=is_testing,
        print_log=learn_cfg['print_log'],
        apply_reset=False,
        asymmetric=(env.num_states > 0),
    )
    if is_testing and args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        p3o.test(chkpt_path)
    elif args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        p3o.load(chkpt_path)
    return p3o
def process_focops(args, env, cfg_train, logdir):
    """Build a FOCOPS learner wired from ``cfg_train``; a non-empty
    ``args.model_dir`` switches to test mode and loads that checkpoint.
    Returns the configured ``FOCOPS`` instance."""
    from algorithms.rl.focops import FOCOPS, Actor, Critic
    learn_cfg = cfg_train['learn']
    is_testing = learn_cfg['test']
    # Override resume and testing flags if they are passed as parameters.
    if args.model_dir != '':
        is_testing = True
        chkpt_path = args.model_dir
    # Make the log directory unique per seed.
    logdir = logdir + '_seed{}'.format(env.task.cfg['seed'])
    """Set up the PPO system for training or inferencing."""
    focops = FOCOPS(
        vec_env=env,
        actor_class=Actor,
        critic_class=Critic,
        cost_critic_class=Critic,
        num_transitions_per_env=learn_cfg['nsteps'],
        num_learning_epochs=learn_cfg['noptepochs'],
        num_mini_batches=learn_cfg['nminibatches'],
        cost_lim=args.cost_lim,
        clip_param=learn_cfg['cliprange'],
        gamma=learn_cfg['gamma'],
        lam=learn_cfg['lam'],
        init_noise_std=learn_cfg.get('init_noise_std', 0.3),
        value_loss_coef=learn_cfg.get('value_loss_coef', 2.0),
        entropy_coef=learn_cfg['ent_coef'],
        learning_rate=learn_cfg['optim_stepsize'],
        max_grad_norm=learn_cfg.get('max_grad_norm', 2.0),
        use_clipped_value_loss=learn_cfg.get('use_clipped_value_loss', False),
        schedule=learn_cfg.get('schedule', 'fixed'),
        desired_kl=learn_cfg.get('desired_kl', None),
        model_cfg=cfg_train['policy'],
        device=env.rl_device,
        sampler=learn_cfg.get('sampler', 'sequential'),
        log_dir=logdir,
        is_testing=is_testing,
        print_log=learn_cfg['print_log'],
        apply_reset=False,
        asymmetric=(env.num_states > 0),
    )
    if is_testing and args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        focops.test(chkpt_path)
    elif args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        focops.load(chkpt_path)
    return focops
def process_ppol(args, env, cfg_train, logdir):
    """Build a PPO-Lagrangian learner wired from ``cfg_train``; a non-empty
    ``args.model_dir`` switches to test mode and loads that checkpoint.
    Returns the configured ``PPOL`` instance."""
    from algorithms.rl.ppol import PPOL, Actor, Critic
    learn_cfg = cfg_train['learn']
    is_testing = learn_cfg['test']
    # Override resume and testing flags if they are passed as parameters.
    if args.model_dir != '':
        is_testing = True
        chkpt_path = args.model_dir
    # Make the log directory unique per seed.
    logdir = logdir + '_seed{}'.format(env.task.cfg['seed'])
    """Set up the PPO system for training or inferencing."""
    ppol = PPOL(
        vec_env=env,
        actor_class=Actor,
        critic_class=Critic,
        cost_critic_class=Critic,
        cost_lim=args.cost_lim,
        num_transitions_per_env=learn_cfg['nsteps'],
        num_learning_epochs=learn_cfg['noptepochs'],
        num_mini_batches=learn_cfg['nminibatches'],
        clip_param=learn_cfg['cliprange'],
        gamma=learn_cfg['gamma'],
        lam=learn_cfg['lam'],
        init_noise_std=learn_cfg.get('init_noise_std', 0.3),
        value_loss_coef=learn_cfg.get('value_loss_coef', 2.0),
        entropy_coef=learn_cfg['ent_coef'],
        learning_rate=learn_cfg['optim_stepsize'],
        max_grad_norm=learn_cfg.get('max_grad_norm', 2.0),
        use_clipped_value_loss=learn_cfg.get('use_clipped_value_loss', False),
        schedule=learn_cfg.get('schedule', 'fixed'),
        desired_kl=learn_cfg.get('desired_kl', None),
        model_cfg=cfg_train['policy'],
        device=env.rl_device,
        sampler=learn_cfg.get('sampler', 'sequential'),
        log_dir=logdir,
        is_testing=is_testing,
        print_log=learn_cfg['print_log'],
        apply_reset=False,
        asymmetric=(env.num_states > 0),
    )
    if is_testing and args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        ppol.test(chkpt_path)
    elif args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        ppol.load(chkpt_path)
    return ppol
def process_ppo(args, env, cfg_train, logdir):
    """Build a PPO learner wired from ``cfg_train``; a non-empty
    ``args.model_dir`` switches to test mode and loads that checkpoint.
    Returns the configured ``PPO`` instance."""
    from algorithms.rl.ppo import PPO, ActorCritic
    learn_cfg = cfg_train['learn']
    is_testing = learn_cfg['test']
    # Override resume and testing flags if they are passed as parameters.
    if args.model_dir != '':
        is_testing = True
        chkpt_path = args.model_dir
    # Separate per-seed directories for logs and saved models.
    log_dir = logdir + '_seed_{}'.format(env.task.cfg['seed'])
    model_dir = logdir + '_model_seed_{}'.format(env.task.cfg['seed'])
    """Set up the PPO system for training or inferencing."""
    ppo = PPO(
        vec_env=env,
        actor_critic_class=ActorCritic,
        num_transitions_per_env=learn_cfg['nsteps'],
        num_learning_epochs=learn_cfg['noptepochs'],
        num_mini_batches=learn_cfg['nminibatches'],
        clip_param=learn_cfg['cliprange'],
        gamma=learn_cfg['gamma'],
        lam=learn_cfg['lam'],
        init_noise_std=learn_cfg.get('init_noise_std', 0.3),
        value_loss_coef=learn_cfg.get('value_loss_coef', 2.0),
        entropy_coef=learn_cfg['ent_coef'],
        learning_rate=learn_cfg['optim_stepsize'],
        max_grad_norm=learn_cfg.get('max_grad_norm', 2.0),
        use_clipped_value_loss=learn_cfg.get('use_clipped_value_loss', False),
        schedule=learn_cfg.get('schedule', 'fixed'),
        desired_kl=learn_cfg.get('desired_kl', None),
        model_cfg=cfg_train['policy'],
        device=env.rl_device,
        sampler=learn_cfg.get('sampler', 'sequential'),
        log_dir=log_dir,
        model_dir=model_dir,
        is_testing=is_testing,
        print_log=learn_cfg['print_log'],
        apply_reset=False,
        asymmetric=(env.num_states > 0),
    )
    if is_testing and args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        ppo.test(chkpt_path)
    elif args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        ppo.load(chkpt_path)
    return ppo
def process_sac(args, env, cfg_train, logdir):
    """Build a SAC learner wired from ``cfg_train``; a non-empty
    ``args.model_dir`` switches to test mode and loads that checkpoint.
    Returns the configured ``SAC`` instance."""
    from algorithms.rl.sac import SAC, MLPActorCritic
    learn_cfg = cfg_train['learn']
    is_testing = learn_cfg['test']
    # NOTE(review): ``chkpt`` is read but never used in this function.
    chkpt = learn_cfg['resume']
    # Override resume and testing flags if they are passed as parameters.
    if args.model_dir != '':
        is_testing = True
        chkpt_path = args.model_dir
    """Set up the SAC system for training or inferencing."""
    sac = SAC(
        vec_env=env,
        actor_critic=MLPActorCritic,
        ac_kwargs=dict(hidden_sizes=[learn_cfg['hidden_nodes']] * learn_cfg['hidden_layer']),
        num_transitions_per_env=learn_cfg['nsteps'],
        num_learning_epochs=learn_cfg['noptepochs'],
        num_mini_batches=learn_cfg['nminibatches'],
        replay_size=learn_cfg['replay_size'],
        # clip_param=learn_cfg["cliprange"],
        gamma=learn_cfg['gamma'],
        polyak=learn_cfg['polyak'],
        learning_rate=learn_cfg['learning_rate'],
        max_grad_norm=learn_cfg.get('max_grad_norm', 2.0),
        entropy_coef=learn_cfg['ent_coef'],
        use_clipped_value_loss=learn_cfg.get('use_clipped_value_loss', False),
        reward_scale=learn_cfg['reward_scale'],
        batch_size=learn_cfg['batch_size'],
        device=env.rl_device,
        sampler=learn_cfg.get('sampler', 'sequential'),
        log_dir=logdir,
        is_testing=is_testing,
        print_log=learn_cfg['print_log'],
        apply_reset=False,
        asymmetric=(env.num_states > 0),
    )
    if is_testing and args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        sac.test(chkpt_path)
    elif args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        sac.load(chkpt_path)
    return sac
def process_td3(args, env, cfg_train, logdir):
    """Build a TD3 learner wired from ``cfg_train``; a non-empty
    ``args.model_dir`` switches to test mode and loads that checkpoint.
    Returns the configured ``TD3`` instance."""
    from algorithms.rl.td3 import TD3, MLPActorCritic
    learn_cfg = cfg_train['learn']
    is_testing = learn_cfg['test']
    # NOTE(review): ``chkpt`` is read but never used in this function.
    chkpt = learn_cfg['resume']
    # Override resume and testing flags if they are passed as parameters.
    if args.model_dir != '':
        is_testing = True
        chkpt_path = args.model_dir
    """Set up the TD3 system for training or inferencing."""
    td3 = TD3(
        vec_env=env,
        actor_critic=MLPActorCritic,
        ac_kwargs=dict(hidden_sizes=[learn_cfg['hidden_nodes']] * learn_cfg['hidden_layer']),
        num_transitions_per_env=learn_cfg['nsteps'],
        num_learning_epochs=learn_cfg['noptepochs'],
        num_mini_batches=learn_cfg['nminibatches'],
        replay_size=learn_cfg['replay_size'],
        # clip_param=learn_cfg["cliprange"],
        gamma=learn_cfg['gamma'],
        polyak=learn_cfg['polyak'],
        learning_rate=learn_cfg['learning_rate'],
        max_grad_norm=learn_cfg.get('max_grad_norm', 2.0),
        policy_delay=learn_cfg['policy_delay'],  # 2,
        act_noise=learn_cfg['act_noise'],  # 0.1,
        target_noise=learn_cfg['target_noise'],  # 0.2,
        noise_clip=learn_cfg['noise_clip'],  # 0.5,
        use_clipped_value_loss=learn_cfg.get('use_clipped_value_loss', False),
        reward_scale=learn_cfg['reward_scale'],
        batch_size=learn_cfg['batch_size'],
        device=env.rl_device,
        sampler=learn_cfg.get('sampler', 'sequential'),
        log_dir=logdir,
        is_testing=is_testing,
        print_log=learn_cfg['print_log'],
        apply_reset=False,
        asymmetric=(env.num_states > 0),
    )
    if is_testing and args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        td3.test(chkpt_path)
    elif args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        td3.load(chkpt_path)
    return td3
def process_ddpg(args, env, cfg_train, logdir):
    """Build a DDPG learner wired from ``cfg_train``; a non-empty
    ``args.model_dir`` switches to test mode and loads that checkpoint.
    Returns the configured ``DDPG`` instance."""
    from algorithms.rl.ddpg import DDPG, MLPActorCritic
    learn_cfg = cfg_train['learn']
    is_testing = learn_cfg['test']
    # NOTE(review): ``chkpt`` is read but never used in this function.
    chkpt = learn_cfg['resume']
    # Override resume and testing flags if they are passed as parameters.
    if args.model_dir != '':
        is_testing = True
        chkpt_path = args.model_dir
    """Set up the DDPG system for training or inferencing."""
    ddpg = DDPG(
        vec_env=env,
        actor_critic=MLPActorCritic,
        ac_kwargs=dict(hidden_sizes=[learn_cfg['hidden_nodes']] * learn_cfg['hidden_layer']),
        num_transitions_per_env=learn_cfg['nsteps'],
        num_learning_epochs=learn_cfg['noptepochs'],
        num_mini_batches=learn_cfg['nminibatches'],
        replay_size=learn_cfg['replay_size'],
        gamma=learn_cfg['gamma'],
        polyak=learn_cfg['polyak'],
        learning_rate=learn_cfg['learning_rate'],
        max_grad_norm=learn_cfg.get('max_grad_norm', 2.0),
        act_noise=learn_cfg['act_noise'],  # 0.1,
        target_noise=learn_cfg['target_noise'],  # 0.2,
        noise_clip=learn_cfg['noise_clip'],  # 0.5,
        use_clipped_value_loss=learn_cfg.get('use_clipped_value_loss', False),
        reward_scale=learn_cfg['reward_scale'],
        batch_size=learn_cfg['batch_size'],
        device=env.rl_device,
        sampler=learn_cfg.get('sampler', 'sequential'),
        log_dir=logdir,
        is_testing=is_testing,
        print_log=learn_cfg['print_log'],
        apply_reset=False,
        asymmetric=(env.num_states > 0),
    )
    if is_testing and args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        ddpg.test(chkpt_path)
    elif args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        ddpg.load(chkpt_path)
    return ddpg
def process_trpo(args, env, cfg_train, logdir):
    """Set up the TRPO system for training or inferencing.

    Args:
        args: Parsed command-line arguments; ``args.model_dir`` selects a checkpoint.
        env: Vectorized environment exposing ``rl_device`` and ``num_states``.
        cfg_train: Training configuration dict with ``learn`` and ``policy`` sub-dicts.
        logdir: Directory where training logs are written.

    Returns:
        A configured ``TRPO`` trainer; the checkpoint is loaded in test mode
        whenever ``args.model_dir`` is non-empty.
    """
    from algorithms.rl.trpo import TRPO, Actor, Critic

    learn_cfg = cfg_train['learn']
    is_testing = learn_cfg['test']
    # NOTE(review): 'resume' is read but never used; the lookup is kept so a
    # missing config key still fails fast.
    chkpt = learn_cfg['resume']
    # Override resume and testing flags if they are passed as parameters.
    if args.model_dir != '':
        is_testing = True
        chkpt_path = args.model_dir
    trpo = TRPO(
        vec_env=env,
        actor_class=Actor,
        critic_class=Critic,
        cost_critic_class=Critic,
        num_transitions_per_env=learn_cfg['nsteps'],
        num_learning_epochs=learn_cfg['noptepochs'],
        num_mini_batches=learn_cfg['nminibatches'],
        clip_param=learn_cfg['cliprange'],
        gamma=learn_cfg['gamma'],
        lam=learn_cfg['lam'],
        init_noise_std=learn_cfg.get('init_noise_std', 0.3),
        # value_loss_coef=learn_cfg.get("value_loss_coef", 2.0),
        damping=learn_cfg['damping'],
        cg_nsteps=learn_cfg['cg_nsteps'],
        max_kl=learn_cfg['max_kl'],
        max_num_backtrack=learn_cfg['max_num_backtrack'],
        accept_ratio=learn_cfg['accept_ratio'],
        step_fraction=learn_cfg['step_fraction'],
        learning_rate=learn_cfg['optim_stepsize'],
        max_grad_norm=learn_cfg.get('max_grad_norm', 2.0),
        use_clipped_value_loss=learn_cfg.get('use_clipped_value_loss', False),
        schedule=learn_cfg.get('schedule', 'fixed'),
        model_cfg=cfg_train['policy'],
        device=env.rl_device,
        sampler=learn_cfg.get('sampler', 'sequential'),
        log_dir=logdir,
        is_testing=is_testing,
        print_log=learn_cfg['print_log'],
        apply_reset=False,
        asymmetric=(env.num_states > 0),
    )
    if is_testing and args.model_dir != '':
        print(f'Loading model from {chkpt_path}')
        trpo.test(chkpt_path)
    elif args.model_dir != '':
        # NOTE(review): unreachable — is_testing is forced True whenever
        # args.model_dir is non-empty, so this load() path never runs.
        print(f'Loading model from {chkpt_path}')
        trpo.load(chkpt_path)
    return trpo
import atexit
import datetime
import json
import os
import numpy as np
# ANSI foreground color codes, keyed by color name.
color2num = {
    'gray': 30,
    'red': 31,
    'green': 32,
    'yellow': 33,
    'blue': 34,
    'magenta': 35,
    'cyan': 36,
    'white': 37,
    'crimson': 38,
}


def colorize(string, color, bold=False, highlight=False):
    """Wrap *string* in ANSI escape codes for terminal coloring.

    This function was originally written by John Schulman.
    """
    code = color2num[color] + (10 if highlight else 0)  # +10 selects the background variant
    codes = [str(code)]
    if bold:
        codes.append('1')
    return f"\x1b[{';'.join(codes)}m{string}\x1b[0m"
class Logger:
    """Tabular progress logger that mirrors output to stdout and a text file.

    A timestamped ``<time>_<algo>_<task>_<seed>_progress.txt`` file is created
    under ``./data``; the column set is frozen by the keys logged during the
    first iteration.
    """

    def __init__(self, algo, task, seed):
        """Create the output directory (if needed) and open the progress file.

        Args:
            algo: Algorithm name embedded in the file name.
            task: Task/environment name embedded in the file name.
            seed: Random seed embedded in the file name.
        """
        self.curr_time = f'{datetime.datetime.now():%Y-%m-%d-%H:%M:%S}'
        self.first_row = True  # headers are written on the first dump only
        self.log_headers = []  # column order, frozen after the first iteration
        self.log_current_row = {}  # values accumulated for the current iteration
        self.output_dir = './data'
        # NOTE(review): single-argument os.path.join is a no-op; presumably a
        # subdirectory was meant to be appended here — confirm intent.
        self.output_dir = os.path.join(self.output_dir)
        if os.path.exists(self.output_dir):
            print(
                'Warning: Log dir %s already exists! Storing info there anyway.' % self.output_dir
            )
        else:
            os.makedirs(self.output_dir)
        st = '_'.join([self.curr_time, algo, task, str(seed), 'progress.txt'])
        self.output_file = open(os.path.join(self.output_dir, st), 'w')
        # No explicit close() exists, so close the file at interpreter exit.
        atexit.register(self.output_file.close)

    def log(self, msg, color='green'):
        """Print a colorized message to stdout."""
        print(colorize(msg, color, bold=True))

    def log_tabular(self, key, val):
        """
        Log a value of some diagnostic.
        Call this only once for each diagnostic quantity, each iteration.
        After using ``log_tabular`` to store values for each diagnostic,
        make sure to call ``dump_tabular`` to write them out to file and
        stdout (otherwise they will not get saved anywhere).
        """
        if self.first_row:
            self.log_headers.append(key)
        else:
            # After the first iteration the column set is fixed.
            assert key in self.log_headers, (
                "Trying to introduce a new key %s that you didn't include in the first "
                'iteration' % key
            )
        assert key not in self.log_current_row, (
            'You already set %s this iteration. Maybe you forgot to call ' 'dump_tabular()' % key
        )
        self.log_current_row[key] = val

    def dump_tabular(self):
        """
        Write all of the diagnostics from the current iteration.
        Writes both to stdout, and to the output file.
        """
        vals = []
        key_lens = [len(key) for key in self.log_headers]
        max_key_len = max(15, max(key_lens))
        keystr = '%' + '%d' % max_key_len  # e.g. '%15' — key-column width
        fmt = '| ' + keystr + 's | %15s |'
        n_slashes = 22 + max_key_len  # total table width incl. borders/padding
        print('-' * n_slashes)
        for key in self.log_headers:
            val = self.log_current_row.get(key, '')
            # Numbers are compacted to 3 significant digits; strings pass through.
            valstr = '%8.3g' % val if hasattr(val, '__float__') else val
            print(fmt % (key, valstr))
            vals.append(val)
        print('-' * n_slashes, flush=True)
        if self.output_file is not None:
            if self.first_row:
                self.output_file.write('\t'.join(self.log_headers) + '\n')
            self.output_file.write('\t'.join(map(str, vals)) + '\n')
            self.output_file.flush()
        self.log_current_row.clear()
        self.first_row = False
class EpochLogger(Logger):
    """Logger that buffers per-step values and reports epoch statistics."""

    def __init__(self, algo, task, seed):
        super().__init__(algo, task, seed)
        self.epoch = 0
        self.epoch_dict = {}  # key -> list of values buffered during the epoch

    def setup_global_epoch(self, epoch=None):
        """Record the current global epoch counter."""
        self.epoch = epoch

    def store(self, **kwargs):
        """
        Save something into the epoch_logger's current state.
        Provide an arbitrary number of keyword arguments with numerical values.
        """
        for key, value in kwargs.items():
            self.epoch_dict.setdefault(key, []).append(value)

    def log_tabular(
        self,
        key,
        val=None,
        with_min_and_max=False,
        average_only=False,
        to_tb=False,
        to_sacred=False,
    ):
        """Log either a raw value or epoch statistics of buffered values for *key*."""
        if val is not None:
            super().log_tabular(key, val)
            return
        buffered = self.epoch_dict[key]
        # Flatten buffered arrays so the statistics cover every element.
        if isinstance(buffered[0], np.ndarray) and len(buffered[0].shape) > 0:
            flat = np.concatenate(buffered)
        else:
            flat = buffered
        super().log_tabular(key if average_only else 'Average' + key, np.mean(flat))
        if not average_only:
            super().log_tabular('Std' + key, np.std(flat))
        if with_min_and_max:
            super().log_tabular('Max' + key, np.max(flat))
            super().log_tabular('Min' + key, np.min(flat))
        self.epoch_dict[key] = []
import numpy as np
import torch
from gymnasium import spaces
from isaacgym import gymtorch
from isaacgym.torch_utils import to_torch
# VecEnv Wrapper for RL training
class VecTask:
    """Gym-style vectorized wrapper over an Isaac Gym task.

    Exposes observation/state/action spaces and clipping bounds; ``step`` and
    ``reset`` are implemented by the subclasses below.
    """

    def __init__(self, task, rl_device, clip_observations=5.0, clip_actions=1.0):
        """Wrap *task* and derive the gym spaces from its dimensions.

        Args:
            task: Underlying Isaac Gym task (provides num_envs/num_obs/num_states/num_actions).
            rl_device: Torch device string used for RL tensors.
            clip_observations: Symmetric clipping bound applied to observations.
            clip_actions: Symmetric clipping bound applied to actions.
        """
        self.task = task
        self.num_environments = task.num_envs
        self.num_agents = 1  # used for multi-agent environments
        self.num_observations = task.num_obs
        self.num_states = task.num_states
        self.num_actions = task.num_actions
        # np.inf (not the np.Inf alias, removed in NumPy 2.0) keeps this
        # working on modern NumPy.
        self.obs_space = spaces.Box(np.ones(self.num_obs) * -np.inf, np.ones(self.num_obs) * np.inf)
        self.state_space = spaces.Box(
            np.ones(self.num_states) * -np.inf, np.ones(self.num_states) * np.inf
        )
        self.act_space = spaces.Box(
            np.ones(self.num_actions) * -1.0, np.ones(self.num_actions) * 1.0
        )
        self.clip_obs = clip_observations
        self.clip_actions = clip_actions
        self.rl_device = rl_device
        print('RL device: ', rl_device)

    def step(self, actions):
        """Advance the simulation by one step; implemented by subclasses."""
        raise NotImplementedError

    def reset(self):
        """Reset all environments; implemented by subclasses."""
        raise NotImplementedError

    def get_number_of_agents(self):
        """Return the number of agents per environment (always 1 here)."""
        return self.num_agents

    @property
    def observation_space(self):
        return self.obs_space

    @property
    def action_space(self):
        return self.act_space

    @property
    def num_envs(self):
        return self.num_environments

    @property
    def num_acts(self):
        return self.num_actions

    @property
    def num_obs(self):
        return self.num_observations
# C++ CPU Class
class VecTaskCPU(VecTask):
    """CPU-side (C++) task wrapper: exchanges numpy arrays with the task."""

    def __init__(
        self, task, rl_device, sync_frame_time=False, clip_observations=5.0, clip_actions=1.0
    ):
        super().__init__(
            task, rl_device, clip_observations=clip_observations, clip_actions=clip_actions
        )
        self.sync_frame_time = sync_frame_time

    def step(self, actions):
        """Clip actions, step the C++ task, and return torch tensors on rl_device."""
        host_actions = np.clip(actions.cpu().numpy(), -self.clip_actions, self.clip_actions)
        self.task.render(self.sync_frame_time)
        obs, rewards, resets, extras = self.task.step(host_actions)
        clipped_obs = np.clip(obs, -self.clip_obs, self.clip_obs)
        return (
            to_torch(clipped_obs, dtype=torch.float, device=self.rl_device),
            to_torch(rewards, dtype=torch.float, device=self.rl_device),
            to_torch(resets, dtype=torch.uint8, device=self.rl_device),
            [],
        )

    def reset(self):
        """Apply tiny random actions to every env and return clipped observations."""
        jitter = 0.01 * (1 - 2 * np.random.rand(self.num_envs, self.num_actions)).astype('f')
        # step the simulator
        obs, _, _, _ = self.task.step(jitter)
        return to_torch(
            np.clip(obs, -self.clip_obs, self.clip_obs), dtype=torch.float, device=self.rl_device
        )
# C++ GPU Class
class VecTaskGPU(VecTask):
    """GPU-side (C++) task wrapper: operates directly on wrapped GPU tensors."""

    def __init__(self, task, rl_device, clip_observations=5.0, clip_actions=1.0):
        super().__init__(
            task, rl_device, clip_observations=clip_observations, clip_actions=clip_actions
        )
        env_count = self.task.num_envs
        # Wrap the task's raw GPU buffers as torch tensors (zero-copy views).
        self.obs_tensor = gymtorch.wrap_tensor(
            self.task.obs_tensor, counts=(env_count, self.task.num_obs)
        )
        self.rewards_tensor = gymtorch.wrap_tensor(self.task.rewards_tensor, counts=(env_count,))
        self.resets_tensor = gymtorch.wrap_tensor(self.task.resets_tensor, counts=(env_count,))

    def step(self, actions):
        """Clip actions, step the task in-place, and return views of the result tensors."""
        self.task.render(False)
        clipped = torch.clamp(actions, -self.clip_actions, self.clip_actions)
        self.task.step(gymtorch.unwrap_tensor(clipped))
        return (
            torch.clamp(self.obs_tensor, -self.clip_obs, self.clip_obs),
            self.rewards_tensor,
            self.resets_tensor,
            [],
        )

    def reset(self):
        """Apply tiny random actions to every env and return clipped observations."""
        shape = [self.task.num_envs, self.task.num_actions]
        jitter = 0.01 * (1 - 2 * torch.rand(shape, dtype=torch.float32, device=self.rl_device))
        # step the simulator
        self.task.step(gymtorch.unwrap_tensor(jitter))
        return torch.clamp(self.obs_tensor, -self.clip_obs, self.clip_obs)
# Python CPU/GPU Class
class VecTaskPython(VecTask):
    """Pure-Python task wrapper that reads result buffers directly off the task."""

    def get_state(self):
        """Return the clipped privileged state buffer on the RL device."""
        states = torch.clamp(self.task.states_buf, -self.clip_obs, self.clip_obs)
        return states.to(self.rl_device)

    def step(self, actions):
        """Clip and apply actions, then return (obs, reward, cost, reset, extras)."""
        action_tensor = torch.as_tensor(actions, dtype=torch.float32, device=self.rl_device)
        action_tensor = torch.clamp(action_tensor, -self.clip_actions, self.clip_actions)
        self.task.step(action_tensor)
        device = self.rl_device
        clipped_obs = torch.clamp(self.task.obs_buf, -self.clip_obs, self.clip_obs)
        return (
            clipped_obs.to(device),
            self.task.rew_buf.to(device),
            self.task.cost_buf.to(device),
            self.task.reset_buf.to(device),
            self.task.extras,
        )

    def reset(self):
        """Apply tiny random actions to every env and return clipped observations."""
        shape = [self.task.num_envs, self.task.num_actions]
        jitter = 0.01 * (1 - 2 * torch.rand(shape, dtype=torch.float32, device=self.rl_device))
        # step the simulator
        self.task.step(jitter)
        return torch.clamp(self.task.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
class VecTaskPythonArm(VecTask):
    """Python task wrapper for arm tasks; step() returns (obs, reward, reset, extras)."""

    def get_state(self):
        """Return the clipped privileged state buffer on the RL device."""
        states = torch.clamp(self.task.states_buf, -self.clip_obs, self.clip_obs)
        return states.to(self.rl_device)

    def step(self, actions):
        """Clip and apply actions, then read the task's result buffers."""
        self.task.step(torch.clamp(actions, -self.clip_actions, self.clip_actions))
        device = self.rl_device
        clipped_obs = torch.clamp(self.task.obs_buf, -self.clip_obs, self.clip_obs)
        return (
            clipped_obs.to(device),
            self.task.rew_buf.to(device),
            self.task.reset_buf.to(device),
            self.task.extras,
        )

    def reset(self):
        """Reset the task, apply tiny random actions, and return clipped observations."""
        shape = [self.task.num_envs, self.task.num_actions]
        jitter = 0.01 * (1 - 2 * torch.rand(shape, dtype=torch.float32, device=self.rl_device))
        self.task.reset()
        # step the simulator once so the observation buffers are populated
        self.task.step(jitter)
        return torch.clamp(self.task.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
import numpy as np
import torch
from gymnasium import spaces
from isaacgym import gymtorch
from isaacgym.torch_utils import to_torch
# VecEnv Wrapper for RL training
class VecTask:
    """Gym-style vectorized wrapper over an Isaac Gym task.

    Unlike the symmetric-action variant, the action space here is taken from
    the task's Franka joint limits.
    """

    def __init__(self, task, rl_device, clip_observations=5.0, clip_actions=1.0):
        """Wrap *task* and derive the gym spaces from its dimensions.

        Args:
            task: Underlying Isaac Gym task; must also expose
                ``franka_dof_lower_limits_tensor`` / ``franka_dof_upper_limits_tensor``.
            rl_device: Torch device string used for RL tensors.
            clip_observations: Symmetric clipping bound applied to observations.
            clip_actions: Symmetric clipping bound applied to actions.
        """
        self.task = task
        self.num_environments = task.num_envs
        self.num_agents = 1  # used for multi-agent environments
        self.num_observations = task.num_obs
        self.num_states = task.num_states
        self.num_actions = task.num_actions
        # np.inf (not the np.Inf alias, removed in NumPy 2.0) keeps this
        # working on modern NumPy.
        self.obs_space = spaces.Box(np.ones(self.num_obs) * -np.inf, np.ones(self.num_obs) * np.inf)
        self.state_space = spaces.Box(
            np.ones(self.num_states) * -np.inf, np.ones(self.num_states) * np.inf
        )
        self.act_space = spaces.Box(
            np.ones(self.num_actions) * task.franka_dof_lower_limits_tensor.cpu().numpy(),
            np.ones(self.num_actions) * task.franka_dof_upper_limits_tensor.cpu().numpy(),
        )
        self.clip_obs = clip_observations
        self.clip_actions = clip_actions
        self.rl_device = rl_device
        print('RL device: ', rl_device)

    def step(self, actions):
        """Advance the simulation by one step; implemented by subclasses."""
        raise NotImplementedError

    def reset(self):
        """Reset all environments; implemented by subclasses."""
        raise NotImplementedError

    def get_number_of_agents(self):
        """Return the number of agents per environment (always 1 here)."""
        return self.num_agents

    @property
    def observation_space(self):
        return self.obs_space

    @property
    def action_space(self):
        return self.act_space

    @property
    def num_envs(self):
        return self.num_environments

    @property
    def num_acts(self):
        return self.num_actions

    @property
    def num_obs(self):
        return self.num_observations
# Python CPU/GPU Class
class VecTaskPython(VecTask):
    """Python task wrapper whose task.step() returns its buffers directly."""

    def get_state(self):
        """Return the clipped privileged state buffer on the RL device."""
        states = torch.clamp(self.task.states_buf, -self.clip_obs, self.clip_obs)
        return states.to(self.rl_device)

    def step(self, actions):
        """Clip and apply actions; return (obs, reward, cost, reset, extras)."""
        action_tensor = torch.as_tensor(actions, dtype=torch.float32, device=self.rl_device)
        action_tensor = torch.clamp(action_tensor, -self.clip_actions, self.clip_actions)
        obs_buf, rew_buf, cost_buf, reset_buf, _ = self.task.step(action_tensor)
        device = self.rl_device
        return (
            torch.clamp(obs_buf, -self.clip_obs, self.clip_obs).to(device),
            rew_buf.to(device),
            cost_buf.to(device),
            reset_buf.to(device),
            self.task.extras,
        )

    def reset(self):
        """Apply tiny random actions to every env and return clipped observations."""
        shape = [self.task.num_envs, self.task.num_actions]
        jitter = 0.01 * (1 - 2 * torch.rand(shape, dtype=torch.float32, device=self.rl_device))
        # step the simulator
        obs_buf, _, _, _, _ = self.task.step(jitter)
        return torch.clamp(obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
"""World."""
from __future__ import annotations
import os
from collections import OrderedDict
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, ClassVar
import mujoco
import numpy as np
import xmltodict
import yaml
import safety_gymnasium
from safety_gymnasium.tasks.safe_multi_agent.utils.common_utils import convert, rot2quat
from safety_gymnasium.tasks.safe_multi_agent.utils.task_utils import get_body_xvelp
# Default location to look for xmls folder:
BASE_DIR = os.path.join(os.path.dirname(safety_gymnasium.__file__), 'tasks/safe_multi_agent')
@dataclass
class Engine:
    """Physical engine.

    Bundles the MuJoCo model/data pair so the agent and obstacles can share a
    single simulation handle that is swapped atomically on rebuild.
    """

    # pylint: disable=no-member
    model: mujoco.MjModel = None  # compiled MuJoCo model; None until World.build()
    data: mujoco.MjData = None  # simulation state for `model`; None until World.build()

    def update(self, model, data):
        """Set engine."""
        self.model = model
        self.data = data
class World:  # pylint: disable=too-many-instance-attributes
    """Build and own the MuJoCo simulation for a multi-agent task.

    The world is described by a config dict (see ``DEFAULT``). ``build()``
    renders that description to MJCF XML via xmltodict, instantiates the
    MuJoCo model/data pair, and publishes both through the shared ``Engine``
    so the agent and obstacles always see the current simulation.
    """

    # Default configuration (this should not be nested since it gets copied)
    # *NOTE:* Changes to this configuration should also be reflected in `Builder` configuration
    DEFAULT: ClassVar[dict[str, Any]] = {
        'agent_base': 'assets/xmls/car.xml',  # Which agent XML to use as the base
        'agent_xy': np.zeros(4),  # agent XY location
        'agent_rot': 0,  # agent rotation about Z axis
        'floor_size': [3.5, 3.5, 0.1],  # Used for displaying the floor
        # FreeGeoms -- this is processed and added by the Builder class
        'free_geoms': {},  # map from name -> object dict
        # Geoms -- similar to objects, but they are immovable and fixed in the scene.
        'geoms': {},  # map from name -> geom dict
        # Mocaps -- mocap objects which are used to control other objects
        'mocaps': {},
        'floor_type': 'mat',
    }

    def __init__(self, agent, obstacles, config=None) -> None:
        """config - JSON string or dict of configuration. See self.parse()"""
        if config:
            self.parse(config)  # Parse configuration
        self.first_reset = True
        self._agent = agent  # pylint: disable=no-member
        self._obstacles = obstacles
        self.agent_base_path = None
        self.agent_base_xml = None
        self.xml = None
        self.xml_string = None
        self.engine = Engine()
        self.bind_engine()

    def parse(self, config):
        """Parse a config dict - see self.DEFAULT for description."""
        self.config = deepcopy(self.DEFAULT)
        self.config.update(deepcopy(config))
        for key, value in self.config.items():
            assert key in self.DEFAULT, f'Bad key {key}'
            # Each config entry also becomes an attribute (e.g. self.geoms).
            setattr(self, key, value)

    def bind_engine(self):
        """Send the new engine instance to the agent and obstacles."""
        self._agent.set_engine(self.engine)
        for obstacle in self._obstacles:
            obstacle.set_engine(self.engine)

    def build(self):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        """Build a world, including generating XML and moving objects."""
        # Read in the base XML (contains agent, camera, floor, etc)
        self.agent_base_path = os.path.join(
            BASE_DIR,
            f'{self.agent_base}',  # pylint: disable=no-member
        )  # pylint: disable=no-member
        with open(self.agent_base_path, encoding='utf-8') as f:  # pylint: disable=invalid-name
            self.agent_base_xml = f.read()
        self.xml = xmltodict.parse(self.agent_base_xml)  # Nested OrderedDict objects
        # Ensure the compiler block points at this package's mesh/texture assets.
        if 'compiler' not in self.xml['mujoco']:
            compiler = xmltodict.parse(
                f"""<compiler
                    angle="radian"
                    meshdir="{BASE_DIR}/assets/meshes"
                    texturedir="{BASE_DIR}/assets/textures"
                    />""",
            )
            self.xml['mujoco']['compiler'] = compiler['compiler']
        else:
            self.xml['mujoco']['compiler'].update(
                {
                    '@angle': 'radian',
                    '@meshdir': os.path.join(BASE_DIR, 'assets', 'meshes'),
                    '@texturedir': os.path.join(BASE_DIR, 'assets', 'textures'),
                },
            )

        # Convenience accessor for xml dictionary
        worldbody = self.xml['mujoco']['worldbody']

        # Move agent position to starting position
        # bodies [0] and [1] are the two agents; agent_xy holds one XY pair per
        # agent and agent_rot one yaw per agent — TODO confirm shapes from caller.
        worldbody['body'][0]['@pos'] = convert(
            # pylint: disable-next=no-member
            np.r_[self.agent_xy[0], self._agent.z_height],
        )
        worldbody['body'][1]['@pos'] = convert(
            # pylint: disable-next=no-member
            np.r_[self.agent_xy[1], self._agent.z_height],
        )
        worldbody['body'][0]['@quat'] = convert(
            # pylint: disable-next=no-member
            rot2quat(self.agent_rot[0]),
        )
        worldbody['body'][1]['@quat'] = convert(
            # pylint: disable-next=no-member
            rot2quat(self.agent_rot[1]),
        )

        # We need this because xmltodict skips over single-item lists in the tree
        if 'geom' in worldbody:
            worldbody['geom'] = [worldbody['geom']]
        else:
            worldbody['geom'] = []

        # Add equality section if missing
        if 'equality' not in self.xml['mujoco']:
            self.xml['mujoco']['equality'] = OrderedDict()
        equality = self.xml['mujoco']['equality']
        if 'weld' not in equality:
            equality['weld'] = []

        # Add asset section if missing
        if 'asset' not in self.xml['mujoco']:
            self.xml['mujoco']['asset'] = {}
        if 'texture' not in self.xml['mujoco']['asset']:
            self.xml['mujoco']['asset']['texture'] = []
        if 'material' not in self.xml['mujoco']['asset']:
            self.xml['mujoco']['asset']['material'] = []
        if 'mesh' not in self.xml['mujoco']['asset']:
            self.xml['mujoco']['asset']['mesh'] = []
        material = self.xml['mujoco']['asset']['material']
        texture = self.xml['mujoco']['asset']['texture']
        mesh = self.xml['mujoco']['asset']['mesh']

        # load all assets config from .yaml file
        with open(os.path.join(BASE_DIR, 'configs/assets.yaml'), encoding='utf-8') as file:
            assets_config = yaml.load(file, Loader=yaml.FullLoader)  # noqa: S506
        texture.append(assets_config['textures']['skybox'])
        if self.floor_type == 'mat':  # pylint: disable=no-member
            texture.append(assets_config['textures']['matplane'])
            material.append(assets_config['materials']['matplane'])
        elif self.floor_type == 'village':  # pylint: disable=no-member
            texture.append(assets_config['textures']['village_floor'])
            material.append(assets_config['materials']['village_floor'])
        else:
            raise NotImplementedError

        # Collect only the assets actually referenced by mesh-typed objects,
        # de-duplicated by mesh name across geoms/free_geoms/mocaps.
        selected_textures = {}
        selected_materials = {}
        selected_meshes = {}
        for config in self.geoms.values():  # pylint: disable=no-member
            if config['type'] == 'mesh':
                mesh_name = config['mesh']
                selected_textures[mesh_name] = assets_config['textures'][mesh_name]
                selected_materials[mesh_name] = assets_config['materials'][mesh_name]
                selected_meshes[mesh_name] = assets_config['meshes'][mesh_name]
        for config in self.free_geoms.values():  # pylint: disable=no-member
            if config['type'] == 'mesh':
                mesh_name = config['mesh']
                selected_textures[mesh_name] = assets_config['textures'][mesh_name]
                selected_materials[mesh_name] = assets_config['materials'][mesh_name]
                selected_meshes[mesh_name] = assets_config['meshes'][mesh_name]
        for config in self.mocaps.values():  # pylint: disable=no-member
            if config['type'] == 'mesh':
                mesh_name = config['mesh']
                selected_textures[mesh_name] = assets_config['textures'][mesh_name]
                selected_materials[mesh_name] = assets_config['materials'][mesh_name]
                selected_meshes[mesh_name] = assets_config['meshes'][mesh_name]
        texture += selected_textures.values()
        material += selected_materials.values()
        mesh += selected_meshes.values()

        # Add light to the XML dictionary
        light = xmltodict.parse(
            """<b>
            <light cutoff="100" diffuse="1 1 1" dir="0 0 -1" directional="true"
                exponent="1" pos="0 0 0.5" specular="0 0 0" castshadow="false"/>
            </b>""",
        )
        worldbody['light'] = light['b']['light']

        # Add floor to the XML dictionary if missing
        if not any(g.get('@name') == 'floor' for g in worldbody['geom']):
            floor = xmltodict.parse(
                """
                <geom name="floor" type="plane" condim="6"/>
                """,
            )
            worldbody['geom'].append(floor['geom'])

        # Make sure floor renders the same for every world
        for g in worldbody['geom']:  # pylint: disable=invalid-name
            if g['@name'] == 'floor':
                g.update(
                    {
                        '@size': convert(self.floor_size),  # pylint: disable=no-member
                        '@rgba': '1 1 1 1',
                    },
                )
                if self.floor_type == 'mat':  # pylint: disable=no-member
                    g.update({'@material': 'matplane'})
                elif self.floor_type == 'village':  # pylint: disable=no-member
                    g.update({'@material': 'village_floor'})
                else:
                    raise NotImplementedError

        # Add cameras to the XML dictionary
        cameras = xmltodict.parse(
            """<b>
            <camera name="fixednear" pos="0 -2 2" zaxis="0 -1 1"/>
            <camera name="fixedfar" pos="0 -5 5" zaxis="0 -1 1"/>
            </b>""",
        )
        worldbody['camera'] = cameras['b']['camera']

        # Build and add a tracking camera (logic needed to ensure orientation correct)
        theta = self.agent_rot[0]  # pylint: disable=no-member
        xyaxes = {
            'x1': np.cos(theta),
            'x2': -np.sin(theta),
            'x3': 0,
            'y1': np.sin(theta),
            'y2': np.cos(theta),
            'y3': 1,
        }
        pos = {
            'xp': 0 * np.cos(theta) + (-2) * np.sin(theta),
            'yp': 0 * (-np.sin(theta)) + (-2) * np.cos(theta),
            'zp': 2,
        }
        track_camera = xmltodict.parse(
            """<b>
            <camera name="track" mode="track" pos="{xp} {yp} {zp}"
                xyaxes="{x1} {x2} {x3} {y1} {y2} {y3}"/>
            </b>""".format(
                **pos,
                **xyaxes,
            ),
        )
        # Second agent's tracking camera, rotated to its own heading.
        theta1 = self.agent_rot[1]  # pylint: disable=no-member
        xyaxes1 = {
            'x1': np.cos(theta1),
            'x2': -np.sin(theta1),
            'x3': 0,
            'y1': np.sin(theta1),
            'y2': np.cos(theta1),
            'y3': 1,
        }
        pos1 = {
            'xp': 0 * np.cos(theta1) + (-2) * np.sin(theta1),
            'yp': 0 * (-np.sin(theta1)) + (-2) * np.cos(theta1),
            'zp': 2,
        }
        track_camera1 = xmltodict.parse(
            """<b>
            <camera name="track1" mode="track" pos="{xp} {yp} {zp}"
                xyaxes="{x1} {x2} {x3} {y1} {y2} {y3}"/>
            </b>""".format(
                **pos1,
                **xyaxes1,
            ),
        )
        if 'camera' in worldbody['body'][0]:
            if isinstance(worldbody['body'][0]['camera'], list):
                # NOTE(review): `worldbody['body'][0][0]['camera']` looks like a
                # typo — presumably `worldbody['body'][0]['camera']` was intended;
                # confirm before relying on the list-camera path.
                worldbody['body'][0]['camera'] = worldbody['body'][0][0]['camera'] + [
                    track_camera['b']['camera'],
                ]
            else:
                worldbody['body'][0]['camera'] = [
                    worldbody['body'][0]['camera'],
                    track_camera['b']['camera'],
                ]
            if isinstance(worldbody['body'][1]['camera'], list):
                worldbody['body'][1]['camera'] = worldbody['body'][1]['camera'] + [
                    track_camera1['b']['camera'],
                ]
            else:
                worldbody['body'][1]['camera'] = [
                    worldbody['body'][1]['camera'],
                    track_camera1['b']['camera'],
                ]
        else:
            # NOTE(review): only agent 0 gets a tracking camera on this path;
            # track_camera1 is never attached — confirm whether intentional.
            worldbody['body'][0]['camera'] = [
                track_camera['b']['camera'],
            ]

        # Add free_geoms to the XML dictionary
        for name, object in self.free_geoms.items():  # pylint: disable=redefined-builtin, no-member
            assert object['name'] == name, f'Inconsistent {name} {object}'
            object = object.copy()  # don't modify original object
            if name == 'push_box':
                # The push box gets four corner pillar geoms in addition to its body.
                object['quat'] = rot2quat(object['rot'])
                dim = object['size'][0]
                object['dim'] = dim
                object['width'] = dim / 2
                object['x'] = dim
                object['y'] = dim
                body = xmltodict.parse(
                    # pylint: disable-next=consider-using-f-string
                    """
                    <body name="{name}" pos="{pos}" quat="{quat}">
                        <freejoint name="{name}"/>
                        <geom name="{name}" type="{type}" size="{size}" density="{density}"
                            rgba="{rgba}" group="{group}"/>
                        <geom name="col1" type="{type}" size="{width} {width} {dim}" density="{density}"
                            rgba="{rgba}" group="{group}" pos="{x} {y} 0"/>
                        <geom name="col2" type="{type}" size="{width} {width} {dim}" density="{density}"
                            rgba="{rgba}" group="{group}" pos="-{x} {y} 0"/>
                        <geom name="col3" type="{type}" size="{width} {width} {dim}" density="{density}"
                            rgba="{rgba}" group="{group}" pos="{x} -{y} 0"/>
                        <geom name="col4" type="{type}" size="{width} {width} {dim}" density="{density}"
                            rgba="{rgba}" group="{group}" pos="-{x} -{y} 0"/>
                    </body>
                """.format(
                        **{k: convert(v) for k, v in object.items()},
                    ),
                )
            else:
                if object['type'] == 'mesh':
                    body = xmltodict.parse(
                        # pylint: disable-next=consider-using-f-string
                        """
                        <body name="{name}" pos="{pos}" euler="{euler}" >
                            <freejoint name="{name}"/>
                            <geom name="{name}" type="mesh" mesh="{mesh}" material="{material}" density="{density}"
                            rgba="{rgba}" group="{group}" condim="6" />
                        </body>
                    """.format(
                            **{k: convert(v) for k, v in object.items()},
                        ),
                    )
                else:
                    object['quat'] = rot2quat(object['rot'])
                    body = xmltodict.parse(
                        # pylint: disable-next=consider-using-f-string
                        """
                        <body name="{name}" pos="{pos}" quat="{quat}">
                            <freejoint name="{name}"/>
                            <geom name="{name}" type="{type}" size="{size}" density="{density}"
                                rgba="{rgba}" group="{group}"/>
                        </body>
                    """.format(
                            **{k: convert(v) for k, v in object.items()},
                        ),
                    )
            # Append new body to world, making it a list optionally
            # Add the object to the world
            worldbody['body'].append(body['body'])

        # Add mocaps to the XML dictionary
        for name, mocap in self.mocaps.items():  # pylint: disable=no-member
            # Mocap names are suffixed with 'mocap'
            assert mocap['name'] == name, f'Inconsistent {name}'
            assert (
                name.replace('mocap', 'obj') in self.free_geoms  # pylint: disable=no-member
            ), f'missing object for {name}'  # pylint: disable=no-member
            # Add the object to the world
            mocap = mocap.copy()  # don't modify original object
            mocap['quat'] = rot2quat(mocap['rot'])
            body = xmltodict.parse(
                # pylint: disable-next=consider-using-f-string
                """
                <body name="{name}" mocap="true">
                    <geom name="{name}" type="{type}" size="{size}" rgba="{rgba}"
                        pos="{pos}" quat="{quat}" contype="0" conaffinity="0" group="{group}"/>
                </body>
            """.format(
                    **{k: convert(v) for k, v in mocap.items()},
                ),
            )
            worldbody['body'].append(body['body'])
            # Add weld to equality list so the mocap drags its paired object.
            mocap['body1'] = name
            mocap['body2'] = name.replace('mocap', 'obj')
            weld = xmltodict.parse(
                # pylint: disable-next=consider-using-f-string
                """
                <weld name="{name}" body1="{body1}" body2="{body2}" solref=".02 1.5"/>
            """.format(
                    **{k: convert(v) for k, v in mocap.items()},
                ),
            )
            equality['weld'].append(weld['weld'])

        # Add geoms to XML dictionary
        for name, geom in self.geoms.items():  # pylint: disable=no-member
            assert geom['name'] == name, f'Inconsistent {name} {geom}'
            geom = geom.copy()  # don't modify original object
            geom['contype'] = geom.get('contype', 1)
            geom['conaffinity'] = geom.get('conaffinity', 1)
            if geom['type'] == 'mesh':
                body = xmltodict.parse(
                    # pylint: disable-next=consider-using-f-string
                    """
                    <body name="{name}" pos="{pos}" euler="{euler}">
                        <geom name="{name}" type="mesh" mesh="{mesh}" material="{material}"
                            rgba="1 1 1 1" group="{group}" contype="{contype}"
                            conaffinity="{conaffinity}"/>
                    </body>
                """.format(
                        **{k: convert(v) for k, v in geom.items()},
                    ),
                )
            else:
                geom['quat'] = rot2quat(geom['rot'])
                body = xmltodict.parse(
                    # pylint: disable-next=consider-using-f-string
                    """
                    <body name="{name}" pos="{pos}" quat="{quat}">
                        <geom name="{name}" type="{type}" size="{size}" rgba="{rgba}"
                            group="{group}" contype="{contype}" conaffinity="{conaffinity}"/>
                    </body>
                """.format(
                        **{k: convert(v) for k, v in geom.items()},
                    ),
                )
            # Append new body to world, making it a list optionally
            # Add the object to the world
            worldbody['body'].append(body['body'])

        # Instantiate simulator
        # print(xmltodict.unparse(self.xml, pretty=True))
        self.xml_string = xmltodict.unparse(self.xml)
        model = mujoco.MjModel.from_xml_string(self.xml_string)  # pylint: disable=no-member
        data = mujoco.MjData(model)  # pylint: disable=no-member

        # Recompute simulation intrinsics from new position
        mujoco.mj_forward(model, data)  # pylint: disable=no-member
        self.engine.update(model, data)

    def rebuild(self, config=None, state=True):
        """Build a new sim from a model if the model changed."""
        if state:
            old_state = self.get_state()

        if config:
            self.parse(config)
        self.build()
        if state:
            self.set_state(old_state)
        mujoco.mj_forward(self.model, self.data)  # pylint: disable=no-member

    def reset(self, build=True):
        """Reset the world. (sim is accessed through self.sim)"""
        if build:
            self.build()

    def body_com(self, name):
        """Get the center of mass of a named body in the simulator world reference frame."""
        return self.data.body(name).subtree_com.copy()

    def body_pos(self, name):
        """Get the position of a named body in the simulator world reference frame."""
        return self.data.body(name).xpos.copy()

    def body_mat(self, name):
        """Get the rotation matrix of a named body in the simulator world reference frame."""
        return self.data.body(name).xmat.copy().reshape(3, -1)

    def body_vel(self, name):
        """Get the velocity of a named body in the simulator world reference frame."""
        return get_body_xvelp(self.model, self.data, name).copy()

    def get_state(self):
        """Returns a copy of the simulator state."""
        state = {
            'time': np.copy(self.data.time),
            'qpos': np.copy(self.data.qpos),
            'qvel': np.copy(self.data.qvel),
        }
        if self.model.na == 0:
            state['act'] = None
        else:
            state['act'] = np.copy(self.data.act)
        return state

    def set_state(self, value):
        """Set the simulator state from a dict.

        Args:
            value (dict): the desired state with 'time', 'qpos', 'qvel' and
                (when actuator activations exist) 'act' entries, as produced
                by ``get_state``.
        """
        self.data.time = value['time']
        self.data.qpos[:] = np.copy(value['qpos'])
        self.data.qvel[:] = np.copy(value['qvel'])
        if self.model.na != 0:
            self.data.act[:] = np.copy(value['act'])

    @property
    def model(self):
        """Access model easily."""
        return self.engine.model

    @property
    def data(self):
        """Access data easily."""
        return self.engine.data
"""Env builder."""
from __future__ import annotations
from dataclasses import asdict, dataclass
from typing import Any, ClassVar
import gymnasium
import numpy as np
from safety_gymnasium import tasks
from safety_gymnasium.tasks.safe_multi_agent.bases.base_task import BaseTask
from safety_gymnasium.utils.common_utils import ResamplingError, quat2zalign
from safety_gymnasium.utils.task_utils import get_task_class_name
@dataclass
class RenderConf:
    r"""Render options.

    Attributes:
        mode (str): render mode, can be 'human', 'rgb_array', 'depth_array'.
        width (int): width of the rendered image.
        height (int): height of the rendered image.
        camera_id (int): camera id to render.
        camera_name (str): camera name to render.

    Note:
        ``camera_id`` and ``camera_name`` can only be set one of them.
    """

    # Optional fields are annotated `| None` so the annotations match their
    # None defaults (the file enables `from __future__ import annotations`).
    mode: str | None = None
    width: int = 256
    height: int = 256
    camera_id: int | None = None
    camera_name: str | None = None
# pylint: disable-next=too-many-instance-attributes
class Builder(gymnasium.Env, gymnasium.utils.EzPickle):
    r"""An entry point to organize different environments, while showing unified API for users.

    The Builder class constructs the basic control framework of environments, while
    the details were hidden. There is another important part, the **task module**,
    which includes all task-specific operations.

    Methods:

    - :meth:`_setup_simulation`: Set up mujoco the simulation instance.
    - :meth:`_get_task`: Instantiate a task object.
    - :meth:`set_seed`: Set the seed for the environment.
    - :meth:`reset`: Reset the environment.
    - :meth:`step`: Step the environment.
    - :meth:`_reward`: Calculate the reward.
    - :meth:`_cost`: Calculate the cost.
    - :meth:`render`: Render the environment.

    Attributes:

    - :attr:`task_id` (str): Task id.
    - :attr:`config` (dict): Pre-defined configuration of the environment, which is passed via
      :meth:`safety_gymnasium.register()`.
    - :attr:`render_parameters` (RenderConf): Render parameters.
    - :attr:`action_space` (gymnasium.spaces.Box): Action space.
    - :attr:`observation_space` (gymnasium.spaces.Dict): Observation space.
    - :attr:`obs_space_dict` (dict): Observation space dictionary.
    - :attr:`done` (bool): Whether the episode is done.
    """

    metadata: ClassVar[dict[str, Any]] = {
        'render_modes': [
            'human',
            'rgb_array',
            'depth_array',
        ],
        'render_fps': 30,
    }

    def __init__(  # pylint: disable=too-many-arguments
        self,
        task_id: str,
        config: dict | None = None,
        render_mode: str | None = None,
        width: int = 256,
        height: int = 256,
        camera_id: int | None = None,
        camera_name: str | None = None,
    ) -> None:
        """Initialize the builder.

        Note:
            The ``camera_name`` parameter can be chosen from:

            - **human**: The camera used for freely moving around and can get input
              from keyboard real time.
            - **vision**: The camera used for vision observation, which is fixed in front of the
              agent's head.
            - **track**: The camera used for tracking the agent.
            - **fixednear**: The camera used for top-down observation.
            - **fixedfar**: The camera used for top-down observation, but is further than
              **fixednear**.

        Args:
            task_id (str): Task id.
            config (dict): Pre-defined configuration of the environment, which is passed via
                :meth:`safety_gymnasium.register`.
            render_mode (str): Render mode, can be 'human', 'rgb_array', 'depth_array'.
            width (int): Width of the rendered image.
            height (int): Height of the rendered image.
            camera_id (int): Camera id to render.
            camera_name (str): Camera name to render.
        """
        gymnasium.utils.EzPickle.__init__(self, config=config)

        self.task_id: str = task_id
        self.config: dict = config
        self._seed: int = None
        self._setup_simulation()

        # Episode bookkeeping; populated on reset()/step().
        self.first_reset: bool = None
        self.steps: int = None
        # Holds the per-agent cost dict produced by the most recent _cost() call.
        self.cost: dict | None = None
        self.terminated: bool = True
        self.truncated: bool = False

        self.render_parameters = RenderConf(render_mode, width, height, camera_id, camera_name)

    def _setup_simulation(self) -> None:
        """Set up mujoco the simulation instance."""
        self.task = self._get_task()
        self.set_seed()

    def _get_task(self) -> BaseTask:
        """Instantiate a task object."""
        class_name = get_task_class_name(self.task_id)
        assert hasattr(tasks, class_name), f'Task={class_name} not implemented.'
        task_class = getattr(tasks, class_name)
        task = task_class(config=self.config)
        task.build_observation_space()
        return task

    def set_seed(self, seed: int | None = None) -> None:
        """Set internal random state seeds."""
        self._seed = np.random.randint(2**32, dtype='int64') if seed is None else seed
        self.task.random_generator.set_random_seed(self._seed)

    def reset(
        self,
        *,
        seed: int | None = None,
        options: dict | None = None,
    ) -> tuple[np.ndarray, dict]:  # pylint: disable=arguments-differ
        """Reset the environment and return observations."""
        info = {}

        if not self.task.mechanism_conf.randomize_layout:
            # A fixed layout implies a fixed seed; explicit seeding would be ignored.
            assert seed is None, 'Cannot set seed if randomize_layout=False'
            self.set_seed(0)
        elif seed is not None:
            self.set_seed(seed)

        self.terminated = False
        self.truncated = False
        self.steps = 0  # Count of steps taken in this episode

        self.task.reset()
        self.task.update_world()  # refresh specific settings
        self.task.specific_reset()
        self.task.agent.reset()

        cost = self._cost()
        assert cost['agent_0']['cost_sum'] == 0, f'World has starting cost! {cost}'
        assert cost['agent_1']['cost_sum'] == 0, f'World has starting cost! {cost}'
        # Reset stateful parts of the environment
        self.first_reset = False  # Built our first world successfully

        state = self.task.obs()
        # All agents share the full (global) observation and the same info dict.
        observations, infos = {}, {}
        for agent in self.possible_agents:
            observations[agent] = state
            infos[agent] = info

        # Return an observation
        return (observations, infos)

    # pylint: disable=too-many-branches
    def step(self, action: dict) -> tuple[dict, dict, dict, dict, dict, dict]:
        """Take a step and return observation, reward, cost, terminated, truncated, info."""
        assert not self.done, 'Environment must be reset before stepping.'

        info = {}
        # Concatenate every agent's action into one flat vector for the simulator.
        global_action = np.zeros(
            # pylint: disable-next=consider-using-generator
            (sum([self.action_space(agent).shape[0] for agent in self.possible_agents]),),
        )
        for index, agent in enumerate(self.possible_agents):
            action[agent] = np.array(action[agent], copy=False)  # cast to ndarray
            if action[agent].shape != self.action_space(agent).shape:  # check action dimension
                raise ValueError('Action dimension mismatch')
            global_action[
                index
                * self.action_space(agent).shape[0] : (index + 1)
                * self.action_space(agent).shape[0]
            ] = action[agent]

        exception = self.task.simulation_forward(global_action)
        if exception:
            self.truncated = True
            # NOTE(review): on an exception, ``rewards`` is the scalar
            # ``reward_exception`` and ``info`` has no per-agent cost entries, so the
            # ``costs`` construction below would raise — confirm this path with callers.
            rewards = self.task.reward_conf.reward_exception
            info['cost_exception'] = 1.0
        else:
            # Reward processing
            rewards = self._reward()
            # Constraint violations
            info.update(self._cost())
            info['reward_sum'] = sum(rewards.values())

        costs = {'agent_0': info['agent_0']['cost_sum'], 'agent_1': info['agent_1']['cost_sum']}

        self.task.specific_step()

        # Goal processing
        if self.task.goal_achieved[0] or self.task.goal_achieved[1]:
            info['goal_met'] = True
            if self.task.mechanism_conf.continue_goal:
                # Update the internal layout
                # so we can correctly resample (given objects have moved)
                self.task.update_layout()
                # Try to build a new goal, end if we fail
                if self.task.mechanism_conf.terminate_resample_failure:
                    try:
                        self.task.update_world()
                    except ResamplingError:
                        # Normal end of episode
                        self.terminated = True
                else:
                    # Try to make a goal, which could raise a ResamplingError exception
                    self.task.update_world()
            else:
                self.terminated = True

        # termination of death processing
        if not self.task.agent.is_alive():
            self.terminated = True

        # Timeout
        self.steps += 1
        if self.steps >= self.task.num_steps:
            self.truncated = True  # Maximum number of steps in an episode reached

        if self.render_parameters.mode == 'human':
            self.render()

        state = self.task.obs()
        observations, terminateds, truncateds, infos = {}, {}, {}, {}
        for agent in self.possible_agents:
            observations[agent] = state
            terminateds[agent] = self.terminated
            truncateds[agent] = self.truncated
            infos[agent] = info

        return observations, rewards, costs, terminateds, truncateds, infos

    def _reward(self) -> dict:
        """Calculate the current rewards and return a per-agent dict.

        Call exactly once per step.
        """
        reward = self.task.calculate_reward()

        # Intrinsic reward for uprightness
        if self.task.reward_conf.reward_orientation:
            zalign = quat2zalign(
                self.task.data.get_body_xquat(self.task.reward_conf.reward_orientation_body),
            )
            # NOTE(review): ``reward`` is a per-agent dict here, so ``+=`` with a
            # scalar would raise if this option is enabled — confirm intended use.
            reward += self.task.reward_conf.reward_orientation_scale * zalign

        # Clip reward
        reward_clip = self.task.reward_conf.reward_clip
        if reward_clip:
            for agent_name, agent_reward in reward.items():
                if not -reward_clip < agent_reward < reward_clip:
                    # BUG FIX: the original code rebound the loop variable
                    # (``reward_i = np.clip(...)``), silently discarding the clipped
                    # value; write the clipped reward back into the dict instead.
                    reward[agent_name] = np.clip(agent_reward, -reward_clip, reward_clip)
                    print('Warning: reward was outside of range!')
        return reward

    def _cost(self) -> dict:
        """Calculate the current costs and return a dict.

        Call exactly once per step.
        """
        cost = self.task.calculate_cost()

        # Optionally remove shaping from reward functions.
        if self.task.cost_conf.constrain_indicator:
            for _agent, agent_cost in cost.items():
                for k in list(agent_cost.keys()):
                    agent_cost[k] = float(agent_cost[k] > 0.0)  # Indicator function
        self.cost = cost

        return cost

    def render(self) -> np.ndarray | None:
        """Call underlying :meth:`safety_gymnasium.bases.underlying.Underlying.render` directly.

        Width and height in parameters are constant defaults for rendering
        frames for humans. (not used for vision)

        The set of supported modes varies per environment. (And some
        third-party environments may not support rendering at all.)
        By convention, if render_mode is:

        - None (default): no render is computed.
        - human: render return None.
          The environment is continuously rendered in the current display or terminal.
          Usually for human consumption.
        - rgb_array: return a single frame representing the current state of the environment.
          A frame is a numpy.ndarray with shape (x, y, 3) representing RGB values
          for an x-by-y pixel image.
        - rgb_array_list: return a list of frames representing the states of the environment
          since the last reset. Each frame is a numpy.ndarray with shape (x, y, 3),
          as with `rgb_array`.
        - depth_array: return a single frame representing the current state of the environment.
          A frame is a numpy.ndarray with shape (x, y) representing depth values
          for an x-by-y pixel image.
        - depth_array_list: return a list of frames representing the states of the environment
          since the last reset. Each frame is a numpy.ndarray with shape (x, y),
          as with `depth_array`.
        """
        assert self.render_parameters.mode, 'Please specify the render mode when you make env.'
        assert (
            not self.task.observe_vision
        ), 'When you use vision envs, you should not call this function explicitly.'
        return self.task.render(cost=self.cost, **asdict(self.render_parameters))

    def action_space(self, agent: str) -> gymnasium.spaces.Box:
        """Helper to get action space."""
        return self.task.action_space[agent]

    @property
    def state(self):
        """Helper to get state."""
        return self.task.obs()

    @property
    def num_agents(self) -> int:
        """Helper to get number of agents."""
        return self.task.agent.nums

    @property
    def possible_agents(self) -> list[str]:
        """Helper to get possible agents."""
        return self.task.agent.possible_agents

    @property
    def agents(self) -> list[str]:
        """Helper to get possible agents."""
        return self.task.agent.possible_agents

    def observation_space(self, _: str) -> gymnasium.spaces.Box | gymnasium.spaces.Dict:
        """Helper to get observation space."""
        return self.task.observation_space

    @property
    def obs_space_dict(self) -> dict[str, gymnasium.spaces.Box]:
        """Helper to get observation space dictionary."""
        return self.task.obs_info.obs_space_dict

    @property
    def done(self) -> bool:
        """Whether this episode is ended."""
        return self.terminated or self.truncated

    @property
    def render_mode(self) -> str:
        """The render mode."""
        return self.render_parameters.mode
"""Base mujoco task."""
from __future__ import annotations
import abc
from copy import deepcopy
from dataclasses import dataclass
import gymnasium
import mujoco
import numpy as np
from gymnasium.envs.mujoco.mujoco_rendering import OffScreenViewer
import safety_gymnasium
from safety_gymnasium.tasks.safe_multi_agent import agents
from safety_gymnasium.tasks.safe_multi_agent.assets.color import COLOR
from safety_gymnasium.tasks.safe_multi_agent.assets.free_geoms import FREE_GEOMS_REGISTER
from safety_gymnasium.tasks.safe_multi_agent.assets.geoms import GEOMS_REGISTER
from safety_gymnasium.tasks.safe_multi_agent.assets.mocaps import MOCAPS_REGISTER
from safety_gymnasium.tasks.safe_multi_agent.bases.base_object import FreeGeom, Geom, Mocap
from safety_gymnasium.tasks.safe_multi_agent.utils.common_utils import MujocoException
from safety_gymnasium.tasks.safe_multi_agent.utils.keyboard_viewer import KeyboardViewer
from safety_gymnasium.tasks.safe_multi_agent.utils.random_generator import RandomGenerator
from safety_gymnasium.tasks.safe_multi_agent.world import World
@dataclass
class RenderConf:
    r"""Render options.

    Attributes:
        labels (bool): Whether to render labels.
        lidar_markers (bool): Whether to render lidar markers.
        lidar_radius (float): Radius of the lidar markers.
        lidar_size (float): Size of the lidar markers.
        lidar_offset_init (float): Initial offset of the lidar markers.
        lidar_offset_delta (float): Delta offset of the lidar markers.
    """

    labels: bool = False
    lidar_markers: bool = True
    lidar_radius: float = 0.15
    lidar_size: float = 0.025
    lidar_offset_init: float = 0.5
    lidar_offset_delta: float = 0.06
@dataclass
class PlacementsConf:
    r"""Placement options.

    Attributes:
        placements (dict): Generated during running.
        extents (list): Placement limits (min X, min Y, max X, max Y).
        margin (float): Additional margin added to keepout when placing objects.
    """

    # NOTE: these attributes carry no type annotations, so ``dataclass`` does NOT
    # treat them as init fields — they are plain class attributes shared by all
    # instances until shadowed by assignment.
    placements = None
    # FIXME: fix mutable default arguments # pylint: disable=fixme
    extents = (-2, -2, 2, 2)
    margin = 0.0
@dataclass
class SimulationConf:
    r"""Simulation options.

    Note:
        Frameskip is the number of physics simulation steps per environment step and is sampled
        as a binomial distribution.
        For deterministic steps, set frameskip_binom_p = 1.0 (always take max frameskip).

    Attributes:
        frameskip_binom_n (int): Number of draws trials in binomial distribution (max frameskip).
        frameskip_binom_p (float): Probability of trial return (controls distribution).
    """

    frameskip_binom_n: int = 10
    frameskip_binom_p: float = 1.0  # p=1.0 means every env step runs exactly n physics sub-steps
@dataclass
class VisionEnvConf:
    r"""Vision observation parameters.

    Attributes:
        vision_size (tuple): Size (width, height) of vision observation.
    """

    # NOTE: un-annotated, so this is a shared class attribute rather than a
    # dataclass init field.
    vision_size = (256, 256)
@dataclass
class FloorConf:
    r"""Floor options.

    Attributes:
        type (str): Type of floor.
        size (tuple): Size of floor in environments.
    """

    # ``type`` shadows the builtin name, but renaming would break callers.
    type: str = 'mat'  # choose from 'mat' and 'village'
    size: tuple = (3.5, 3.5, 0.1)
@dataclass
class WorldInfo:
    r"""World information generated in running.

    Attributes:
        layout (dict): Layout of the world.
        reset_layout (dict): Saved layout of the world after reset.
        world_config_dict (dict): World configuration dictionary.
    """

    # All fields start as None and are filled in while the environment runs,
    # hence the Optional annotations.
    layout: dict | None = None
    reset_layout: dict | None = None
    world_config_dict: dict | None = None
class Underlying(abc.ABC):  # pylint: disable=too-many-instance-attributes
    r"""Base class which is in charge of mujoco and underlying process.

    Methods:

    - :meth:`_parse`: Parse the configuration from dictionary.
    - :meth:`_build_agent`: Build the agent instance.
    - :meth:`_add_geoms`: Add geoms into current environment.
    - :meth:`_add_free_geoms`: Add free geoms into current environment.
    - :meth:`_add_mocaps`: Add mocaps into current environment.
    - :meth:`reset`: Reset the environment, it is dependent on :meth:`_build`.
    - :meth:`_build`: Build the mujoco instance of environment from configurations.
    - :meth:`simulation_forward`: Forward the simulation.
    - :meth:`update_layout`: Update the layout dictionary of the world to update states of some objects.
    - :meth:`_set_goal`: Set the goal position in physical simulator.
    - :meth:`_render_lidar`: Render the lidar.
    - :meth:`_render_compass`: Render the compass.
    - :meth:`_render_area`: Render the area.
    - :meth:`_render_sphere`: Render the sphere.
    - :meth:`render`: Render the environment, it may call :meth:`_render_lidar`, :meth:`_render_compass`
      :meth:`_render_area`, :meth:`_render_sphere`.
    - :meth:`_get_viewer`: Get the viewer instance according to render_mode.
    - :meth:`_update_viewer`: Update the viewer when world is updated.
    - :meth:`_obs_lidar`: Get observations from the lidar.
    - :meth:`_obs_compass`: Get observations from the compass.
    - :meth:`_build_placements_dict`: Build the placements dictionary for different types of object.
    - :meth:`_build_world_config`: Build the world configuration, combine separate configurations from
      different types of objects together as world configuration.

    Attributes:

    - :attr:`sim_conf` (SimulationConf): Simulation options.
    - :attr:`placements_conf` (PlacementsConf): Placement options.
    - :attr:`render_conf` (RenderConf): Render options.
    - :attr:`vision_env_conf` (VisionEnvConf): Vision observation parameters.
    - :attr:`floor_conf` (FloorConf): Floor options.
    - :attr:`random_generator` (RandomGenerator): Random generator instance.
    - :attr:`world` (World): World, which is in charge of mujoco.
    - :attr:`world_info` (WorldInfo): World information generated according to environment in running.
    - :attr:`viewer` (Union[KeyboardViewer, RenderContextOffscreen]): Viewer for environment.
    - :attr:`_viewers` (dict): Viewers.
    - :attr:`_geoms` (dict): Geoms which are added into current environment.
    - :attr:`_free_geoms` (dict): FreeGeoms which are added into current environment.
    - :attr:`_mocaps` (dict): Mocaps which are added into current environment.
    - :attr:`agent_name` (str): Name of the agent in current environment.
    - :attr:`observe_vision` (bool): Whether to observe vision from the agent.
    - :attr:`debug` (bool): Whether to enable debug mode, which is pre-config during registration.
    - :attr:`observation_flatten` (bool): Whether to flatten the observation.
    - :attr:`agent` (Agent): Agent instance added into current environment.
    - :attr:`action_noise` (float): Magnitude of independent per-component gaussian action noise.
    - :attr:`model`: mjModel.
    - :attr:`data`: mjData.
    - :attr:`_obstacles` (list): All types of object in current environment.
    """

    def __init__(self, config: dict | None = None) -> None:
        """Initialize the engine.

        Args:
            config (dict): Configuration dictionary, used to pre-config some attributes
                according to tasks via :meth:`safety_gymnasium.register`.
        """
        self.sim_conf = SimulationConf()
        self.placements_conf = PlacementsConf()
        self.render_conf = RenderConf()
        self.vision_env_conf = VisionEnvConf()
        self.floor_conf = FloorConf()

        self.random_generator = RandomGenerator()

        self.world = None
        self.world_info = WorldInfo()

        self.viewer = None
        self._viewers = {}

        # Obstacles which are added in environments.
        self._geoms = {}
        self._free_geoms = {}
        self._mocaps = {}

        # something are parsed from pre-defined configs
        self.agent_name = None
        self.observe_vision = False  # Observe vision from the agent
        self.debug = False
        self.observation_flatten = True  # Flatten observation into a vector
        self._parse(config)
        self.agent = None
        self.action_noise: float = (
            0.0  # Magnitude of independent per-component gaussian action noise
        )
        self._build_agent(self.agent_name)

    def _parse(self, config: dict) -> None:
        """Parse a config dict.

        Modify some attributes according to config.
        So that easily adapt to different environment settings.

        Args:
            config (dict): Configuration dictionary.
        """
        for key, value in config.items():
            if '.' in key:
                # Only a single dot level ('obj.key') is supported: a key with two
                # or more dots would make this two-target unpack raise ValueError.
                obj, key = key.split('.')
                assert hasattr(self, obj) and hasattr(getattr(self, obj), key), f'Bad key {key}'
                setattr(getattr(self, obj), key, value)
            else:
                assert hasattr(self, key), f'Bad key {key}'
                setattr(self, key, value)

    def _build_agent(self, agent_name: str) -> None:
        """Build the agent in the world."""
        assert hasattr(agents, agent_name), 'agent not found'
        agent_cls = getattr(agents, agent_name)
        self.agent = agent_cls(random_generator=self.random_generator)

    def _add_geoms(self, *geoms: Geom) -> None:
        """Register geom type objects into environments and set corresponding attributes."""
        for geom in geoms:
            assert (
                type(geom) in GEOMS_REGISTER
            ), 'Please figure out the type of object before you add it into envs.'
            self._geoms[geom.name] = geom
            setattr(self, geom.name, geom)
            geom.set_agent(self.agent)

    def _add_free_geoms(self, *free_geoms: FreeGeom) -> None:
        """Register FreeGeom type objects into environments and set corresponding attributes."""
        for obj in free_geoms:
            assert (
                type(obj) in FREE_GEOMS_REGISTER
            ), 'Please figure out the type of object before you add it into envs.'
            self._free_geoms[obj.name] = obj
            setattr(self, obj.name, obj)
            obj.set_agent(self.agent)

    def _add_mocaps(self, *mocaps: Mocap) -> None:
        """Register mocap type objects into environments and set corresponding attributes."""
        for mocap in mocaps:
            assert (
                type(mocap) in MOCAPS_REGISTER
            ), 'Please figure out the type of object before you add it into envs.'
            self._mocaps[mocap.name] = mocap
            setattr(self, mocap.name, mocap)
            mocap.set_agent(self.agent)

    def reset(self) -> None:
        """Reset the environment."""
        self._build()
        # Save the layout at reset
        self.world_info.reset_layout = deepcopy(self.world_info.layout)

    def _build(self) -> None:
        """Build the mujoco instance of environment from configurations."""
        if self.placements_conf.placements is None:
            self._build_placements_dict()
            self.random_generator.set_placements_info(
                self.placements_conf.placements,
                self.placements_conf.extents,
                self.placements_conf.margin,
            )
        # Sample object positions
        self.world_info.layout = self.random_generator.build_layout()

        # Build the underlying physics world
        self.world_info.world_config_dict = self._build_world_config(self.world_info.layout)

        if self.world is None:
            # First build: create the World and a fresh mujoco model/data pair.
            self.world = World(self.agent, self._obstacles, self.world_info.world_config_dict)
            self.world.reset()
            self.world.build()
        else:
            # Subsequent builds: rebuild in place and keep the existing viewer in sync.
            self.world.reset(build=False)
            self.world.rebuild(self.world_info.world_config_dict, state=False)
            if self.viewer:
                self._update_viewer(self.model, self.data)

    def simulation_forward(self, action: np.ndarray) -> bool:
        """Take a step in the physics simulation.

        Note:
            - The **step** mentioned above is not the same as the **step** in Mujoco sense.
            - The **step** here is the step in episode sense.

        Returns:
            bool: True if a :class:`MujocoException` occurred during the
            frame-skipped sub-steps, False otherwise.
        """
        # Simulate physics forward
        if self.debug:
            self.agent.debug()
        else:
            noise = (
                self.action_noise * self.random_generator.randn(self.agent.body_info.nu)
                if self.action_noise
                else None
            )
            self.agent.apply_action(action, noise)

        exception = False
        # Number of physics sub-steps is sampled from a binomial distribution
        # (deterministic when frameskip_binom_p == 1.0).
        for _ in range(
            self.random_generator.binomial(
                self.sim_conf.frameskip_binom_n,
                self.sim_conf.frameskip_binom_p,
            ),
        ):
            try:
                for mocap in self._mocaps.values():
                    mocap.move()
                # pylint: disable-next=no-member
                mujoco.mj_step(self.model, self.data)  # Physics simulation step
            except MujocoException as me:  # pylint: disable=invalid-name
                print('MujocoException', me)
                exception = True
                break
        if exception:
            return exception

        # pylint: disable-next=no-member
        mujoco.mj_forward(self.model, self.data)  # Needed to get sensor readings correct!
        return exception

    def update_layout(self) -> None:
        """Update layout dictionary with new places of objects from Mujoco instance.

        When the objects moves, and if we want to update locations of some objects in environment,
        then the layout dictionary needs to be updated to make sure that we won't wrongly change
        the locations of other objects because we build world according to layout dictionary.
        """
        mujoco.mj_forward(self.model, self.data)  # pylint: disable=no-member
        for k in list(self.world_info.layout.keys()):
            # Mocap objects have to be handled separately
            if 'gremlin' in k:
                continue
            self.world_info.layout[k] = self.data.body(k).xpos[:2].copy()

    def _set_goal(self, name, pos: np.ndarray) -> None:
        """Set position of goal object in Mujoco instance.

        Note:
            This method is used to make sure the position of goal object in Mujoco instance
            is the same as the position of goal object in layout dictionary or in attributes
            of task instance.
        """
        if pos.shape == (2,):
            self.model.body(name).pos[:2] = pos[:2]
        elif pos.shape == (3,):
            self.model.body(name).pos[:3] = pos[:3]
        else:
            raise NotImplementedError

    def _render_lidar(
        self,
        poses: np.ndarray,
        color: np.ndarray,
        offset: float,
        group: int,
    ) -> None:
        """Render the lidar observation."""
        # Markers are drawn in a ring around the first agent's body.
        agent_pos = self.agent.pos_0
        agent_mat = self.agent.mat_0
        lidar = self._obs_lidar(poses, group)
        for i, sensor in enumerate(lidar):
            if self.lidar_conf.type == 'pseudo':  # pylint: disable=no-member
                i += 0.5  # Offset to center of bin
            theta = 2 * np.pi * i / self.lidar_conf.num_bins  # pylint: disable=no-member
            rad = self.render_conf.lidar_radius
            binpos = np.array([np.cos(theta) * rad, np.sin(theta) * rad, offset])
            pos = agent_pos + np.matmul(binpos, agent_mat.transpose())
            alpha = min(1, sensor + 0.1)
            self.viewer.add_marker(
                pos=pos,
                size=self.render_conf.lidar_size * np.ones(3),
                type=mujoco.mjtGeom.mjGEOM_SPHERE,  # pylint: disable=no-member
                rgba=np.array(color) * alpha,
                label='',
            )

    def _render_lidar1(
        self,
        poses: np.ndarray,
        color: np.ndarray,
        offset: float,
        group: int,
    ) -> None:
        """Render the lidar observation."""
        # Same as _render_lidar, but anchored on the second agent's body
        # (pos_1/mat_1) and fed by _obs_lidar1.
        agent_pos = self.agent.pos_1
        agent_mat = self.agent.mat_1
        lidar = self._obs_lidar1(poses, group)
        for i, sensor in enumerate(lidar):
            if self.lidar_conf.type == 'pseudo':  # pylint: disable=no-member
                i += 0.5  # Offset to center of bin
            theta = 2 * np.pi * i / self.lidar_conf.num_bins  # pylint: disable=no-member
            rad = self.render_conf.lidar_radius
            binpos = np.array([np.cos(theta) * rad, np.sin(theta) * rad, offset])
            pos = agent_pos + np.matmul(binpos, agent_mat.transpose())
            alpha = min(1, sensor + 0.1)
            self.viewer.add_marker(
                pos=pos,
                size=self.render_conf.lidar_size * np.ones(3),
                type=mujoco.mjtGeom.mjGEOM_SPHERE,  # pylint: disable=no-member
                rgba=np.array(color) * alpha,
                label='',
            )

    def _render_compass(self, pose: np.ndarray, color: np.ndarray, offset: float) -> None:
        """Render a compass observation."""
        agent_pos = self.agent.pos
        agent_mat = self.agent.mat
        # Truncate the compass to only visualize XY component
        compass = np.concatenate([self._obs_compass(pose)[:2] * 0.15, [offset]])
        pos = agent_pos + np.matmul(compass, agent_mat.transpose())
        self.viewer.add_marker(
            pos=pos,
            size=0.05 * np.ones(3),
            type=mujoco.mjtGeom.mjGEOM_SPHERE,  # pylint: disable=no-member
            rgba=np.array(color) * 0.5,
            label='',
        )

    # pylint: disable-next=too-many-arguments
    def _render_area(
        self,
        pos: np.ndarray,
        size: float,
        color: np.ndarray,
        label: str = '',
        alpha: float = 0.1,
    ) -> None:
        """Render a radial area in the environment."""
        z_size = min(size, 0.3)
        pos = np.asarray(pos)
        if pos.shape == (2,):
            pos = np.r_[pos, 0]  # Z coordinate 0
        self.viewer.add_marker(
            pos=pos,
            size=[size, size, z_size],
            type=mujoco.mjtGeom.mjGEOM_CYLINDER,  # pylint: disable=no-member
            rgba=np.array(color) * alpha,
            label=label if self.render_conf.labels else '',
        )

    # pylint: disable-next=too-many-arguments
    def _render_sphere(
        self,
        pos: np.ndarray,
        size: float,
        color: np.ndarray,
        label: str = '',
        alpha: float = 0.1,
    ) -> None:
        """Render a radial area in the environment."""
        pos = np.asarray(pos)
        if pos.shape == (2,):
            pos = np.r_[pos, 0]  # Z coordinate 0
        self.viewer.add_marker(
            pos=pos,
            size=size * np.ones(3),
            type=mujoco.mjtGeom.mjGEOM_SPHERE,  # pylint: disable=no-member
            rgba=np.array(color) * alpha,
            label=label if self.render_conf.labels else '',
        )

    # pylint: disable-next=too-many-arguments,too-many-branches,too-many-statements
    def render(
        self,
        width: int,
        height: int,
        mode: str,
        camera_id: int | None = None,
        camera_name: str | None = None,
        cost: float | None = None,
    ) -> None:
        """Render the environment to somewhere.

        Note:
            The camera_name parameter can be chosen from:

            - **human**: the camera used for freely moving around and can get input
              from keyboard real time.
            - **vision**: the camera used for vision observation, which is fixed in front of the
              agent's head.
            - **track**: The camera used for tracking the agent.
            - **fixednear**: the camera used for top-down observation.
            - **fixedfar**: the camera used for top-down observation, but is further than
              **fixednear**.
        """
        self.model.vis.global_.offwidth = width
        self.model.vis.global_.offheight = height

        if mode in {
            'rgb_array',
            'depth_array',
        }:
            if camera_id is not None and camera_name is not None:
                raise ValueError(
                    'Both `camera_id` and `camera_name` cannot be specified at the same time.',
                )

            no_camera_specified = camera_name is None and camera_id is None
            if no_camera_specified:
                camera_name = 'vision'

            if camera_id is None:
                # pylint: disable-next=no-member
                camera_id = mujoco.mj_name2id(
                    self.model,
                    mujoco.mjtObj.mjOBJ_CAMERA,  # pylint: disable=no-member
                    camera_name,
                )

        self._get_viewer(mode)

        # Turn all the geom groups on
        self.viewer.vopt.geomgroup[:] = 1

        # Lidar and Compass markers
        if self.render_conf.lidar_markers:
            offset = (
                self.render_conf.lidar_offset_init
            )  # Height offset for successive lidar indicators
            for obstacle in self._obstacles:
                if obstacle.is_lidar_observed:
                    self._render_lidar(obstacle.pos, obstacle.color, offset, obstacle.group)
                    self._render_lidar1(obstacle.pos, obstacle.color, offset, obstacle.group)
                if hasattr(obstacle, 'is_comp_observed') and obstacle.is_comp_observed:
                    self._render_compass(
                        getattr(self, obstacle.name + '_pos'),
                        obstacle.color,
                        offset,
                    )
                offset += self.render_conf.lidar_offset_delta

        # Add indicator for nonzero cost
        # NOTE(review): assumes ``cost`` is a dict with 'agent_0'/'agent_1' sub-dicts;
        # passing None (e.g. rendering before any step) would raise here — confirm callers.
        if cost['agent_0'].get('cost_sum', 0) > 0:
            self._render_sphere(self.agent.pos_0, 0.25, COLOR['red'], alpha=0.5)
        if cost['agent_1'].get('cost_sum', 0) > 0:
            self._render_sphere(self.agent.pos_1, 0.25, COLOR['red'], alpha=0.5)

        # Draw vision pixels
        if mode in {'rgb_array', 'depth_array'}:
            # Extract depth part of the read_pixels() tuple
            data = self._get_viewer(mode).render(render_mode=mode, camera_id=camera_id)
            self.viewer._markers[:] = []  # pylint: disable=protected-access
            self.viewer._overlays.clear()  # pylint: disable=protected-access
            return data
        if mode == 'human':
            self._get_viewer(mode).render()
            return None
        raise NotImplementedError(f'Render mode {mode} is not implemented.')

    def _get_viewer(
        self,
        mode: str,
    ) -> (
        safety_gymnasium.utils.keyboard_viewer.KeyboardViewer
        | gymnasium.envs.mujoco.mujoco_rendering.RenderContextOffscreen
    ):
        """Return the cached viewer for ``mode``, creating it on first use."""
        self.viewer = self._viewers.get(mode)
        if self.viewer is None:
            if mode == 'human':
                self.viewer = KeyboardViewer(
                    self.model,
                    self.data,
                    self.agent.keyboard_control_callback,
                )
            elif mode in {'rgb_array', 'depth_array'}:
                self.viewer = OffScreenViewer(self.model, self.data)
            else:
                raise AttributeError(f'Unexpected mode: {mode}')
            # self.viewer_setup()
            self._viewers[mode] = self.viewer
        return self.viewer

    def _update_viewer(self, model, data) -> None:
        """Update the viewer with the new model and data after a world rebuild."""
        assert self.viewer, 'Call before self.viewer existing.'
        self.viewer.model = model
        self.viewer.data = data

    @abc.abstractmethod
    def _obs_lidar(self, positions: np.ndarray, group: int) -> np.ndarray:
        """Calculate and return a lidar observation. See sub methods for implementation."""

    @abc.abstractmethod
    def _obs_lidar1(self, positions: np.ndarray, group: int) -> np.ndarray:
        """Calculate and return a lidar observation. See sub methods for implementation."""

    @abc.abstractmethod
    def _obs_compass(self, pos: np.ndarray) -> np.ndarray:
        """Return an agent-centric compass observation of a list of positions.

        Compass is a normalized (unit-length) egocentric XY vector,
        from the agent to the object.

        This is equivalent to observing the egocentric XY angle to the target,
        projected into the sin/cos space we use for joints.
        (See comment on joint observation for why we do this.)
        """

    @abc.abstractmethod
    def _build_placements_dict(self) -> dict:
        """Build a dict of placements. Happens only once."""

    @abc.abstractmethod
    def _build_world_config(self, layout: dict) -> dict:
        """Create a world_config from our own config."""

    @property
    def model(self):
        """Helper to get the world's model instance."""
        return self.world.model

    @property
    def data(self):
        """Helper to get the world's simulation data instance."""
        return self.world.data

    @property
    def _obstacles(self) -> list[Geom | FreeGeom | Mocap]:
        """Get the obstacles in the task.

        Combine all types of object in current environment together into single list
        in order to easily iterate them.
        """
        return (
            list(self._geoms.values())
            + list(self._free_geoms.values())
            + list(self._mocaps.values())
        )
"""Base class for agents."""
from __future__ import annotations
import abc
import os
from dataclasses import dataclass, field
import glfw
import gymnasium
import mujoco
import numpy as np
from gymnasium import spaces
import safety_gymnasium
from safety_gymnasium.tasks.safe_multi_agent.utils.random_generator import RandomGenerator
from safety_gymnasium.tasks.safe_multi_agent.utils.task_utils import get_body_xvelp, quat2mat
from safety_gymnasium.tasks.safe_multi_agent.world import Engine
# Root of the multi-agent task assets, resolved relative to the installed package.
BASE_DIR = os.path.join(os.path.dirname(safety_gymnasium.__file__), 'tasks/safe_multi_agent')
@dataclass
class SensorConf:
    r"""Sensor observations configuration.

    Attributes:
        sensors (tuple): Specify which sensors to add to observation space.
        sensors_hinge_joints (bool): Observe named joint position / velocity sensors.
        sensors_ball_joints (bool): Observe named ball joint position / velocity sensors.
        sensors_angle_components (bool): Observe sin/cos theta instead of theta.
    """

    # Names suffixed with '1' are the second agent's copies of the same sensors.
    sensors: tuple = (
        'accelerometer',
        'velocimeter',
        'gyro',
        'magnetometer',
        'accelerometer1',
        'velocimeter1',
        'gyro1',
        'magnetometer1',
    )
    sensors_hinge_joints: bool = True
    sensors_ball_joints: bool = True
    sensors_angle_components: bool = True
@dataclass
class SensorInfo:
    r"""Sensor information generated in running.

    Needed to figure out observation space.

    Attributes:
        hinge_pos_names (list): List of hinge joint position sensor names.
        hinge_vel_names (list): List of hinge joint velocity sensor names.
        freejoint_pos_name (str): Name of free joint position sensor.
        freejoint_qvel_name (str): Name of free joint velocity sensor.
        ballquat_names (list): List of ball joint quaternion sensor names.
        ballangvel_names (list): List of ball joint angular velocity sensor names.
        sensor_dim (dict): Mapping from sensor name to its dimensionality.
    """

    hinge_pos_names: list = field(default_factory=list)
    hinge_vel_names: list = field(default_factory=list)
    freejoint_pos_name: str | None = None
    freejoint_qvel_name: str | None = None
    ballquat_names: list = field(default_factory=list)
    ballangvel_names: list = field(default_factory=list)
    # Keyed by sensor name (see BaseAgent._init_jnt_sensors), hence a dict factory;
    # the previous `list` annotation did not match the factory or the usage.
    sensor_dim: dict = field(default_factory=dict)
@dataclass
class BodyInfo:
    r"""Body information generated in running.

    Needed to figure out the observation spaces.

    Attributes:
        nq (int): Number of generalized coordinates in agent = dim(qpos).
        nv (int): Number of degrees of freedom in agent = dim(qvel).
        nu (int): Number of actuators/controls in agent = dim(ctrl),
            needed to figure out action space.
        nbody (int): Number of bodies in agent.
        geom_names (list): List of geom names in agent.
    """

    nq: int | None = None
    nv: int | None = None
    nu: int | None = None
    nbody: int | None = None
    geom_names: list = field(default_factory=list)
@dataclass
class DebugInfo:
    r"""Debug information generated in running.

    Attributes:
        keys (set): Set of keys currently pressed on the keyboard.
    """

    keys: set = field(default_factory=set)
class BaseAgent(abc.ABC):  # pylint: disable=too-many-instance-attributes
    r"""Base class for a two-agent mujoco agent pair.

    Get mujoco-specific info about the agents and control them in environments.

    Methods:

    - :meth:`_load_model`: Load agent model from xml file.
    - :meth:`_init_body_info`: Initialize body information.
    - :meth:`_build_action_space`: Build action space for agent.
    - :meth:`_init_jnt_sensors`: Initialize information of joint sensors in current agent.
    - :meth:`set_engine`: Set physical engine instance.
    - :meth:`apply_action`: Agent in physical simulator take specific action.
    - :meth:`build_sensor_observation_space`: Build agent specific observation space according to sensors.
    - :meth:`obs_sensor`: Get agent specific observations according to sensors.
    - :meth:`get_sensor`: Get specific sensor observations in agent.
    - :meth:`dist_xy`: Get distance between agent and target in XY plane.
    - :meth:`world_xy`: Get agent XY coordinate in world frame.
    - :meth:`keyboard_control_callback`: Keyboard control callback designed for debug mode for keyboard controlling.
    - :meth:`debug`: Implement specific action debug mode which maps keyboard input into action of agent.
    - :meth:`is_alive`: Check if agent is alive.
    - :meth:`reset`: Reset agent to specific initial internal state, e.g. joint angles.

    Attributes:

    - :attr:`base` (str): Path to agent XML.
    - :attr:`random_generator` (RandomGenerator): Random generator.
    - :attr:`placements` (list): Agent placements list (defaults to full extents).
    - :attr:`locations` (list): Explicitly place agent XY coordinate.
    - :attr:`keepout` (float): Needs to be set to match the agent XML used.
    - :attr:`rot` (float): Override agent starting angle.
    - :attr:`engine` (:class:`Engine`): Physical engine instance.
    - :attr:`sensor_conf` (:class:`SensorConf`): Sensor observations configuration.
    - :attr:`sensor_info` (:class:`SensorInfo`): Sensor information.
    - :attr:`body_info` (:class:`BodyInfo`): Per-agent body information (list of two).
    - :attr:`debug_info` (:class:`DebugInfo`): Debug information.
    - :attr:`z_height` (float): Initial height of agent in environments.
    - :attr:`action_space` (dict): Per-agent :class:`gymnasium.spaces.Box` action spaces.
    - :attr:`com` (np.ndarray): The Cartesian coordinate of agent center of mass.
    - :attr:`mat` (np.ndarray): The Cartesian rotation matrix of agent.
    - :attr:`vel` (np.ndarray): The Cartesian velocity of agent.
    - :attr:`pos` (np.ndarray): The Cartesian position of agent.
    """
    def __init__(  # pylint: disable=too-many-arguments
        self,
        name: str,
        random_generator: RandomGenerator,
        placements: list | None = None,
        locations: list | None = None,
        keepout: float = 0.4,
        rot: float | None = None,
    ) -> None:
        """Initialize the agent.

        Args:
            name (str): Name of agent (selects the multi-agent XML asset).
            random_generator (RandomGenerator): Random generator.
            placements (list): Agent placements list (defaults to full extents).
            locations (list): Explicitly place agent XY coordinate.
            keepout (float): Needs to be set to match the agent XML used.
            rot (float): Override agent starting angle.
        """
        self.base: str = f'assets/xmls/multi_{name.lower()}.xml'
        self.random_generator: RandomGenerator = random_generator
        self.placements: list | None = placements
        self.locations: list = [] if locations is None else locations
        self.keepout: float = keepout
        self.rot: float | None = rot

        # Fixed two-agent setup; all bookkeeping below is sized for two agents.
        self.possible_agents: list = ['agent_0', 'agent_1']
        self.nums: int = 2

        self.engine: Engine | None = None
        self._load_model()

        self.sensor_conf = SensorConf()
        self.sensor_info = SensorInfo()
        # One BodyInfo per agent.
        self.body_info = [BodyInfo(), BodyInfo()]
        self._init_body_info()
        self.debug_info = DebugInfo()

        # Needed to figure out z-height of free joint of offset body
        self.z_height: float = self.engine.data.body('agent').xpos[2]

        self.action_space: dict[str, gymnasium.spaces.Box] = self._build_action_space()
        self._init_jnt_sensors()
    def _load_model(self) -> None:
        """Load the agent model from the xml file.

        Note:
            The physical engine instance which is created here is just used to figure out the dynamics
            of agent and save some useful information, when the environment is actually created, the
            physical engine instance will be replaced by the new instance which is created in
            :class:`safety_gymnasium.World` via :meth:`set_engine`.
        """
        base_path = os.path.join(BASE_DIR, self.base)
        model = mujoco.MjModel.from_xml_path(base_path)  # pylint: disable=no-member
        data = mujoco.MjData(model)  # pylint: disable=no-member
        # Forward once so body positions (e.g. z_height in __init__) are valid.
        mujoco.mj_forward(model, data)  # pylint: disable=no-member
        self.set_engine(Engine(model, data))
def _init_body_info(self) -> None:
"""Initialize body information.
Access directly from mujoco instance created on agent xml model.
"""
for i in range(2):
self.body_info[i].nq = int(self.engine.model.nq / 2)
self.body_info[i].nv = int(self.engine.model.nv / 2)
self.body_info[i].nu = int(self.engine.model.nu / 2)
self.body_info[i].nbody = int(self.engine.model.nbody / 2)
self.body_info[i].geom_names = [
self.engine.model.geom(i).name
for i in range(self.engine.model.ngeom)
if self.engine.model.geom(i).name != 'floor'
][i * 2 : (i + 1) * 2]
    def _build_action_space(self) -> dict[str, gymnasium.spaces.Box]:
        """Build the per-agent action spaces.

        Access directly from mujoco instance created on agent xml model.

        Returns:
            dict[str, gymnasium.spaces.Box]: One Box per agent; the first half of
            the model's actuators drives ``agent_0``, the second half ``agent_1``.
        """
        bounds = self.engine.model.actuator_ctrlrange.copy().astype(np.float32)
        low, high = bounds.T
        divide_index = int(len(low) / 2)
        return {
            'agent_0': spaces.Box(
                low=low[:divide_index],
                high=high[:divide_index],
                dtype=np.float64,
            ),
            'agent_1': spaces.Box(
                low=low[divide_index:],
                high=high[divide_index:],
                dtype=np.float64,
            ),
        }
    def _init_jnt_sensors(self) -> None:  # pylint: disable=too-many-branches
        """Initialize joint sensors.

        Access directly from mujoco instance created on agent xml model and save different
        joint names into different lists, and record each sensor's dimensionality
        in ``self.sensor_info.sensor_dim``.
        """
        for i in range(self.engine.model.nsensor):
            name = self.engine.model.sensor(i).name
            sensor_id = self.engine.model.sensor(
                name,
            ).id  # pylint: disable=redefined-builtin, invalid-name
            self.sensor_info.sensor_dim[name] = self.engine.model.sensor(sensor_id).dim[0]
            sensor_type = self.engine.model.sensor(sensor_id).type
            if (
                # pylint: disable-next=no-member
                self.engine.model.sensor(sensor_id).objtype
                == mujoco.mjtObj.mjOBJ_JOINT  # pylint: disable=no-member
            ):  # pylint: disable=no-member
                joint_id = self.engine.model.sensor(sensor_id).objid
                joint_type = self.engine.model.jnt(joint_id).type
                if joint_type == mujoco.mjtJoint.mjJNT_HINGE:  # pylint: disable=no-member
                    if sensor_type == mujoco.mjtSensor.mjSENS_JOINTPOS:  # pylint: disable=no-member
                        self.sensor_info.hinge_pos_names.append(name)
                    elif (
                        sensor_type == mujoco.mjtSensor.mjSENS_JOINTVEL
                    ):  # pylint: disable=no-member
                        self.sensor_info.hinge_vel_names.append(name)
                    else:
                        t = self.engine.model.sensor(i).type  # pylint: disable=invalid-name
                        raise ValueError(f'Unrecognized sensor type {t} for joint')
                elif joint_type == mujoco.mjtJoint.mjJNT_BALL:  # pylint: disable=no-member
                    if sensor_type == mujoco.mjtSensor.mjSENS_BALLQUAT:  # pylint: disable=no-member
                        self.sensor_info.ballquat_names.append(name)
                    elif (
                        sensor_type == mujoco.mjtSensor.mjSENS_BALLANGVEL
                    ):  # pylint: disable=no-member
                        self.sensor_info.ballangvel_names.append(name)
                elif joint_type == mujoco.mjtJoint.mjJNT_SLIDE:  # pylint: disable=no-member
                    # Adding slide joints is trivially easy in code,
                    # but this removes one of the good properties about our observations.
                    # (That we are invariant to relative whole-world transforms)
                    # If slide joints are added we should ensure this stays true!
                    raise ValueError('Slide joints in agents not currently supported')
            elif (
                # pylint: disable-next=no-member
                self.engine.model.sensor(sensor_id).objtype
                == mujoco.mjtObj.mjOBJ_SITE  # pylint: disable=no-member
            ):
                # Site sensors provide the free-joint position / velocity readings.
                if name == 'agent_pos':
                    self.sensor_info.freejoint_pos_name = name
                elif name == 'agent_qvel':
                    self.sensor_info.freejoint_qvel_name = name
    def set_engine(self, engine: Engine) -> None:
        """Set the engine instance.

        Args:
            engine (Engine): The engine instance.

        Note:
            This method will be called twice in one single environment.

            1. When the agent is initialized, used to get and save useful information.
            2. When the environment is created, used to update the engine instance.
        """
        self.engine = engine
def apply_action(self, action: np.ndarray, noise: np.ndarray | None = None) -> None:
"""Apply an action to the agent.
Just fill up the control array in the engine data.
Args:
action (np.ndarray): The action to apply.
noise (np.ndarray): The noise to add to the action.
"""
action = np.array(action, copy=False) # Cast to ndarray
# Set action
action_range = self.engine.model.actuator_ctrlrange
self.engine.data.ctrl[:] = np.clip(action, action_range[:, 0], action_range[:, 1])
if noise:
self.engine.data.ctrl[:] += noise
    def build_sensor_observation_space(self) -> gymnasium.spaces.Dict:
        """Build observation space for all sensor types.

        Returns:
            gymnasium.spaces.Dict: The observation space generated by sensors bound with agent
            (returned as a plain dict of spaces; the caller wraps it).
        """
        obs_space_dict = {}

        for sensor in self.sensor_conf.sensors:  # Explicitly listed sensors
            dim = self.sensor_info.sensor_dim[sensor]
            obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (dim,), dtype=np.float64)
        # Velocities don't have wraparound effects that rotational positions do
        # Wraparounds are not kind to neural networks
        # Whereas the angle 2*pi is very close to 0, this isn't true in the network
        # In theory the network could learn this, but in practice we simplify it
        # when the sensors_angle_components switch is enabled.
        for sensor in self.sensor_info.hinge_vel_names:
            obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (1,), dtype=np.float64)
        for sensor in self.sensor_info.ballangvel_names:
            obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float64)
        # Free-joint sensors: a '1'-suffixed copy is added for the second agent.
        if self.sensor_info.freejoint_pos_name:
            sensor = self.sensor_info.freejoint_pos_name
            obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (1,), dtype=np.float64)
            obs_space_dict[sensor + '1'] = gymnasium.spaces.Box(
                -np.inf,
                np.inf,
                (1,),
                dtype=np.float64,
            )
        if self.sensor_info.freejoint_qvel_name:
            sensor = self.sensor_info.freejoint_qvel_name
            obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float64)
            obs_space_dict[sensor + '1'] = gymnasium.spaces.Box(
                -np.inf,
                np.inf,
                (3,),
                dtype=np.float64,
            )
        # Angular positions have wraparound effects, so output something more friendly
        if self.sensor_conf.sensors_angle_components:
            # Single joints are turned into sin(x), cos(x) pairs
            # These should be easier to learn for neural networks,
            # Since for angles, small perturbations in angle give small differences in sin/cos
            for sensor in self.sensor_info.hinge_pos_names:
                obs_space_dict[sensor] = gymnasium.spaces.Box(
                    -np.inf,
                    np.inf,
                    (2,),
                    dtype=np.float64,
                )
            # Quaternions are turned into 3x3 rotation matrices
            # Quaternions have a wraparound issue in how they are normalized,
            # where the convention is to change the sign so the first element to be positive.
            # If the first element is close to 0, this can mean small differences in rotation
            # lead to large differences in value as the latter elements change sign.
            # This also means that the first element of the quaternion is not expectation zero.
            # The SO(3) rotation representation would be a good replacement here,
            # since it smoothly varies between values in all directions (the property we want),
            # but right now we have very little code to support SO(3) rotations.
            # Instead we use a 3x3 rotation matrix, which if normalized, smoothly varies as well.
            for sensor in self.sensor_info.ballquat_names:
                obs_space_dict[sensor] = gymnasium.spaces.Box(
                    -np.inf,
                    np.inf,
                    (3, 3),
                    dtype=np.float64,
                )
        else:
            # Otherwise include the sensor without any processing
            for sensor in self.sensor_info.hinge_pos_names:
                obs_space_dict[sensor] = gymnasium.spaces.Box(
                    -np.inf,
                    np.inf,
                    (1,),
                    dtype=np.float64,
                )
            for sensor in self.sensor_info.ballquat_names:
                obs_space_dict[sensor] = gymnasium.spaces.Box(
                    -np.inf,
                    np.inf,
                    (4,),
                    dtype=np.float64,
                )
        return obs_space_dict
    def obs_sensor(self) -> dict[str, np.ndarray]:
        """Get observations of all sensor types.

        Mirrors :meth:`build_sensor_observation_space`: every key produced here
        must have a matching space there.

        Returns:
            dict[str, np.ndarray]: The observations generated by sensors bound with agent.
        """
        obs = {}

        # Sensors which can be read directly, without processing
        for sensor in self.sensor_conf.sensors:  # Explicitly listed sensors
            obs[sensor] = self.get_sensor(sensor)
        for sensor in self.sensor_info.hinge_vel_names:
            obs[sensor] = self.get_sensor(sensor)
        for sensor in self.sensor_info.ballangvel_names:
            obs[sensor] = self.get_sensor(sensor)
        if self.sensor_info.freejoint_pos_name:
            sensor = self.sensor_info.freejoint_pos_name
            # [2:] keeps only the Z component of the free-joint position.
            obs[sensor] = self.get_sensor(sensor)[2:]
            obs[sensor + '1'] = self.get_sensor(sensor + '1')[2:]
        if self.sensor_info.freejoint_qvel_name:
            sensor = self.sensor_info.freejoint_qvel_name
            obs[sensor] = self.get_sensor(sensor)
            obs[sensor + '1'] = self.get_sensor(sensor + '1')
        # Process angular position sensors
        if self.sensor_conf.sensors_angle_components:
            for sensor in self.sensor_info.hinge_pos_names:
                theta = float(self.get_sensor(sensor))  # Ensure not 1D, 1-element array
                obs[sensor] = np.array([np.sin(theta), np.cos(theta)])
            for sensor in self.sensor_info.ballquat_names:
                quat = self.get_sensor(sensor)
                obs[sensor] = quat2mat(quat)
        else:  # Otherwise read sensors directly
            for sensor in self.sensor_info.hinge_pos_names:
                obs[sensor] = self.get_sensor(sensor)
            for sensor in self.sensor_info.ballquat_names:
                obs[sensor] = self.get_sensor(sensor)
        return obs
def get_sensor(self, name: str) -> np.ndarray:
"""Get the value of one sensor.
Args:
name (str): The name of the sensor to checkout.
Returns:
np.ndarray: The observation value of the sensor.
"""
id = self.engine.model.sensor(name).id # pylint: disable=redefined-builtin, invalid-name
adr = self.engine.model.sensor_adr[id]
dim = self.engine.model.sensor_dim[id]
return self.engine.data.sensordata[adr : adr + dim].copy()
def dist_xy(self, index, pos: np.ndarray) -> float:
"""Return the distance from the agent to an XY position.
Args:
pos (np.ndarray): The position to measure the distance to.
Returns:
float: The distance from the agent to the position.
"""
pos = np.asarray(pos)
if pos.shape == (3,):
pos = pos[:2]
if index == 0:
agent_pos = self.pos_0
elif index == 1:
agent_pos = self.pos_1
return np.sqrt(np.sum(np.square(pos - agent_pos[:2])))
def world_xy(self, pos: np.ndarray) -> np.ndarray:
"""Return the world XY vector to a position from the agent.
Args:
pos (np.ndarray): The position to measure the vector to.
Returns:
np.ndarray: The world XY vector to the position.
"""
assert pos.shape == (2,)
return pos - self.agent.agent_pos()[:2] # pylint: disable=no-member
def keyboard_control_callback(self, key: int, action: int) -> None:
"""Callback for keyboard control.
Collect keys which are pressed.
Args:
key (int): The key code inputted by user.
action (int): The action of the key in glfw.
"""
if action == glfw.PRESS:
self.debug_info.keys.add(key)
elif action == glfw.RELEASE:
self.debug_info.keys.remove(key)
    def debug(self) -> None:
        """Debug mode.

        Apply action which is inputted from keyboard.

        Raises:
            NotImplementedError: Subclasses must override this to support
                keyboard-driven control.
        """
        raise NotImplementedError
    @abc.abstractmethod
    def is_alive(self) -> bool:
        """Returns True if the agent is healthy.

        Returns:
            bool: True if the agent is healthy,
            False if the agent is unhealthy.
        """
    @abc.abstractmethod
    def reset(self) -> None:
        """Called when the environment is reset, e.g. to restore joint angles."""
    @property
    def com(self) -> np.ndarray:
        """Get the position of the agent center of mass in the simulator world reference frame.

        Note:
            Reads the subtree COM of the 'agent' body, i.e. the first agent.

        Returns:
            np.ndarray: The Cartesian position of the agent center of mass.
        """
        return self.engine.data.body('agent').subtree_com.copy()
    @property
    def mat_0(self) -> np.ndarray:
        """Get the rotation matrix of the first agent ('agent' body) in the world frame.

        Returns:
            np.ndarray: The 3x3 Cartesian rotation matrix of the agent.
        """
        return self.engine.data.body('agent').xmat.copy().reshape(3, -1)
    @property
    def mat_1(self) -> np.ndarray:
        """Get the rotation matrix of the second agent ('agent1' body) in the world frame.

        Returns:
            np.ndarray: The 3x3 Cartesian rotation matrix of the agent.
        """
        return self.engine.data.body('agent1').xmat.copy().reshape(3, -1)
    @property
    def vel(self) -> np.ndarray:
        """Get the velocity of the agent in the simulator world reference frame.

        NOTE(review): only queries the 'agent' body, i.e. the first agent — there
        is no matching property for 'agent1'; confirm whether one is needed.

        Returns:
            np.ndarray: The velocity of the agent.
        """
        return get_body_xvelp(self.engine.model, self.engine.data, 'agent').copy()
    @property
    def pos_0(self) -> np.ndarray:
        """Get the position of the first agent ('agent' body) in the world frame.

        Returns:
            np.ndarray: The Cartesian position of the agent.
        """
        return self.engine.data.body('agent').xpos.copy()
@property
def pos_1(self) -> np.ndarray:
"""Get the position of the agent in the simulator world reference frame.
Returns:
np.ndarray: The Cartesian position of the agent.
"""
return self.engine.data.body('agent1').xpos.copy() | /safety_gymnasium-1.2.0-py3-none-any.whl/safety_gymnasium/tasks/safe_multi_agent/bases/base_agent.py | 0.934012 | 0.55097 | base_agent.py | pypi |
"""Base task."""
from __future__ import annotations
import abc
import os
from collections import OrderedDict
from dataclasses import dataclass
import gymnasium
import mujoco
import numpy as np
import yaml
import safety_gymnasium
from safety_gymnasium.tasks.safe_multi_agent.bases.underlying import Underlying
from safety_gymnasium.tasks.safe_multi_agent.utils.common_utils import ResamplingError
from safety_gymnasium.tasks.safe_multi_agent.utils.task_utils import theta2vec
@dataclass
class LidarConf:
    r"""Lidar observation parameters.

    Attributes:
        num_bins (int): Bins (around a full circle) for lidar sensing.
        max_dist (float): Maximum distance for lidar sensitivity (if None, exponential distance).
        exp_gain (float): Scaling factor for distance in exponential distance lidar.
        type (str): 'pseudo', 'natural', see self._obs_lidar().
        alias (bool): Lidar bins alias into each other.
    """

    num_bins: int = 16
    max_dist: float = 3
    exp_gain: float = 1.0
    type: str = 'pseudo'
    alias: bool = True
@dataclass
class CompassConf:
    r"""Compass observation parameters.

    Attributes:
        shape (int): 2 for XY unit vector, 3 for XYZ unit vector.
    """

    shape: int = 2
@dataclass
class RewardConf:
    r"""Reward options.

    Attributes:
        reward_orientation (bool): Reward for being upright.
        reward_orientation_scale (float): Scale for uprightness reward.
        reward_orientation_body (str): What body to get orientation from.
        reward_exception (float): Reward when encountering a mujoco exception.
        reward_clip (float): Clip reward, last resort against physics errors causing magnitude spikes.
    """

    reward_orientation: bool = False
    reward_orientation_scale: float = 0.002
    reward_orientation_body: str = 'agent'
    reward_exception: float = -10.0
    reward_clip: float = 10
@dataclass
class CostConf:
    r"""Cost options.

    Attributes:
        constrain_indicator (bool): If true, all costs are either 1 or 0 for a given step.
    """

    constrain_indicator: bool = True
@dataclass
class MechanismConf:
    r"""Mechanism options.

    Starting position distribution.

    Attributes:
        randomize_layout (bool): If false, set the random seed before layout to constant.
        continue_goal (bool): If true, draw a new goal after achievement.
        terminate_resample_failure (bool): If true, end episode when resampling fails,
            otherwise, raise a python exception.
    """

    randomize_layout: bool = True
    continue_goal: bool = True
    terminate_resample_failure: bool = True
@dataclass
class ObservationInfo:
    r"""Observation information generated in running.

    Attributes:
        obs_space_dict (:class:`gymnasium.spaces.Dict`): Observation space dictionary,
            populated by :meth:`BaseTask.build_observation_space`.
    """

    obs_space_dict: gymnasium.spaces.Dict | None = None
class BaseTask(Underlying):  # pylint: disable=too-many-instance-attributes,too-many-public-methods
    r"""Base task class for defining some common characteristic and mechanism.

    Methods:

    - :meth:`dist_goal`: Return the distance from the agent to the goal XY position.
    - :meth:`calculate_cost`: Determine costs depending on the agent and obstacles, actually all
      cost calculation is done in different :meth:`safety_gymnasium.bases.base_obstacle.BaseObject.cal_cost`
      which implemented in different types of object, We just combine all results of them here.
    - :meth:`build_observation_space`: Build observation space, combine agent specific observation space
      and task specific observation space together.
    - :meth:`_build_placements_dict`: Build placement dictionary for all types of object.
    - :meth:`toggle_observation_space`: Toggle observation space.
    - :meth:`_build_world_config`: Create a world_config from all separate configs of different types of object.
    - :meth:`_build_static_geoms_config`: Build static geoms config from yaml files.
    - :meth:`build_goal_position`: Build goal position, it will be called when the task is initialized or
      when the goal is achieved.
    - :meth:`_placements_dict_from_object`: Build placement dictionary for a specific type of object.
    - :meth:`obs`: Combine and return all separate observations of different types of object.
    - :meth:`_obs_lidar`: Return lidar observation, unify natural lidar and pseudo lidar in API.
    - :meth:`_obs_lidar_natural`: Return natural lidar observation.
    - :meth:`_obs_lidar_pseudo`: Return pseudo lidar observation.
    - :meth:`_obs_compass`: Return compass observation.
    - :meth:`_obs_vision`: Return vision observation, that is RGB image captured by camera
      fixed in front of agent.
    - :meth:`_ego_xy`: Return the egocentric XY vector to a position from the agent.
    - :meth:`calculate_reward`: Calculate reward, it will be called in every timestep, and it is
      implemented in different task.
    - :meth:`specific_reset`: Reset task specific parameters, it will be called in every reset.
    - :meth:`specific_step`: Step task specific parameters, it will be called in every timestep.
    - :meth:`update_world`: Update world, it will be called when ``env.reset()`` or :meth:`goal_achieved` == True.

    Attributes:

    - :attr:`num_steps` (int): Maximum number of environment steps in an episode.
    - :attr:`lidar_conf` (:class:`LidarConf`): Lidar observation parameters.
    - :attr:`reward_conf` (:class:`RewardConf`): Reward options.
    - :attr:`cost_conf` (:class:`CostConf`): Cost options.
    - :attr:`mechanism_conf` (:class:`MechanismConf`): Mechanism options.
    - :attr:`action_space` (gymnasium.spaces.Box): Action space.
    - :attr:`observation_space` (gymnasium.spaces.Dict): Observation space.
    - :attr:`obs_info` (:class:`ObservationInfo`): Observation information generated in running.
    - :attr:`_is_load_static_geoms` (bool): Whether to load static geoms in current task which is mean
      some geoms that has no randomness.
    - :attr:`goal_achieved` (bool): Determine whether the goal is achieved, it will be called in every timestep
      and it is implemented in different task.
    """
    def __init__(self, config: dict) -> None:  # pylint: disable-next=too-many-statements
        """Initialize the task.

        Args:
            config (dict): Configuration dictionary, used to pre-config some attributes
                according to tasks via :meth:`safety_gymnasium.register`.
        """
        super().__init__(config=config)

        self.num_steps = 1000  # Maximum number of environment steps in an episode

        self.lidar_conf = LidarConf()
        self.compass_conf = CompassConf()
        self.reward_conf = RewardConf()
        self.cost_conf = CostConf()
        self.mechanism_conf = MechanismConf()

        self.action_space = self.agent.action_space
        self.observation_space = None
        self.obs_info = ObservationInfo()

        self._is_load_static_geoms = False  # Whether to load static geoms in current task.
        # Names of loaded static geoms; filled by _build_static_geoms_config (a set).
        self.static_geoms_names: set
        self.static_geoms_contact_cost: float | None = None
        self.contact_other_cost: float | None = None
def dist_goal(self) -> float:
"""Return the distance from the agent to the goal XY position."""
assert hasattr(self, 'goal'), 'Please make sure you have added goal into env.'
return self.agent.dist_xy(self.goal.pos) # pylint: disable=no-member
def calculate_cost(self) -> dict:
"""Determine costs depending on the agent and obstacles."""
# pylint: disable-next=no-member
mujoco.mj_forward(self.model, self.data) # Ensure positions and contacts are correct
cost = {'agent_0': {}, 'agent_1': {}}
# Calculate constraint violations
for obstacle in self._obstacles:
obj_cost = obstacle.cal_cost()
if 'agent_0' in obj_cost:
cost['agent_0'].update(obj_cost['agent_0'])
if 'agent_1' in obj_cost:
cost['agent_1'].update(obj_cost['agent_1'])
if self.contact_other_cost:
for contact in self.data.contact[: self.data.ncon]:
geom_ids = [contact.geom1, contact.geom2]
geom_names = sorted([self.model.geom(g).name for g in geom_ids])
if any(n in self.agent.body_info[1].geom_names for n in geom_names) and any(
n in self.agent.body_info[0].geom_names for n in geom_names
):
cost['agent_0']['cost_contact_other'] = self.contact_other_cost
cost['agent_1']['cost_contact_other'] = self.contact_other_cost
if self._is_load_static_geoms and self.static_geoms_contact_cost:
cost['cost_static_geoms_contact'] = 0.0
for contact in self.data.contact[: self.data.ncon]:
geom_ids = [contact.geom1, contact.geom2]
geom_names = sorted([self.model.geom(g).name for g in geom_ids])
if any(n in self.static_geoms_names for n in geom_names) and any(
n in self.agent.body_info.geom_names for n in geom_names
):
# pylint: disable-next=no-member
cost['cost_static_geoms_contact'] += self.static_geoms_contact_cost
# Sum all costs into single total cost
for agent_cost in cost.values():
agent_cost['cost_sum'] = sum(v for k, v in agent_cost.items() if k.startswith('cost_'))
return cost
# pylint: disable-next=too-many-branches
def build_observation_space(self) -> gymnasium.spaces.Dict:
"""Construct observation space. Happens only once during __init__ in Builder."""
obs_space_dict = OrderedDict() # See self.obs()
sensor_dict = self.agent.build_sensor_observation_space()
agent0_sensor_dict = {}
agent1_sensor_dict = {}
for name, space in sensor_dict.items():
if name.endswith('1') and name[-2] != '_':
agent1_sensor_dict[name] = space
else:
agent0_sensor_dict[name] = space
obs_space_dict.update(agent0_sensor_dict)
for obstacle in self._obstacles:
if obstacle.is_lidar_observed:
name = obstacle.name + '_' + 'lidar'
obs_space_dict[name] = gymnasium.spaces.Box(
0.0,
1.0,
(self.lidar_conf.num_bins,),
dtype=np.float64,
)
if hasattr(obstacle, 'is_comp_observed') and obstacle.is_comp_observed:
name = obstacle.name + '_' + 'comp'
obs_space_dict[name] = gymnasium.spaces.Box(
-1.0,
1.0,
(self.compass_conf.shape,),
dtype=np.float64,
)
if self.observe_vision:
width, height = self.vision_env_conf.vision_size
rows, cols = height, width
self.vision_env_conf.vision_size = (rows, cols)
obs_space_dict['vision_0'] = gymnasium.spaces.Box(
0,
255,
(*self.vision_env_conf.vision_size, 3),
dtype=np.uint8,
)
obs_space_dict.update(agent1_sensor_dict)
for obstacle in self._obstacles:
if obstacle.is_lidar_observed:
name = obstacle.name + '_' + 'lidar1'
obs_space_dict[name] = gymnasium.spaces.Box(
0.0,
1.0,
(self.lidar_conf.num_bins,),
dtype=np.float64,
)
if hasattr(obstacle, 'is_comp_observed') and obstacle.is_comp_observed:
name = obstacle.name + '_' + 'comp1'
obs_space_dict[name] = gymnasium.spaces.Box(
-1.0,
1.0,
(self.compass_conf.shape,),
dtype=np.float64,
)
if self.observe_vision:
width, height = self.vision_env_conf.vision_size
rows, cols = height, width
self.vision_env_conf.vision_size = (rows, cols)
obs_space_dict['vision_1'] = gymnasium.spaces.Box(
0,
255,
(*self.vision_env_conf.vision_size, 3),
dtype=np.uint8,
)
self.obs_info.obs_space_dict = gymnasium.spaces.Dict(obs_space_dict)
if self.observation_flatten:
self.observation_space = gymnasium.spaces.utils.flatten_space(
self.obs_info.obs_space_dict,
)
else:
self.observation_space = self.obs_info.obs_space_dict
def _build_placements_dict(self) -> None:
"""Build a dict of placements.
Happens only once.
"""
placements = {}
placements.update(self._placements_dict_from_object('agent'))
for obstacle in self._obstacles:
placements.update(self._placements_dict_from_object(obstacle.name))
self.placements_conf.placements = placements
    def toggle_observation_space(self) -> None:
        """Toggle observation space between flattened and dict form, then rebuild it."""
        self.observation_flatten = not self.observation_flatten
        self.build_observation_space()
    def _build_world_config(self, layout: dict) -> dict:  # pylint: disable=too-many-branches
        """Create a world_config from our own config.

        Args:
            layout (dict): Sampled XY layout of all placed objects.

        Returns:
            dict: World configuration consumed by the World/Engine builder.
        """
        world_config = {
            'floor_type': self.floor_conf.type,
            'floor_size': self.floor_conf.size,
            'agent_base': self.agent.base,
            'agent_xy': layout['agent'],
        }
        if self.agent.rot is None:
            # Two agents, two independent random starting angles.
            world_config['agent_rot'] = self.random_generator.generate_rots(2)
        else:
            # NOTE(review): a single float is used here although the random
            # branch produces two rotations — confirm downstream handling.
            world_config['agent_rot'] = float(self.agent.rot)

        # process world config via different objects.
        world_config.update(
            {
                'geoms': {},
                'free_geoms': {},
                'mocaps': {},
            },
        )
        for obstacle in self._obstacles:
            num = obstacle.num if hasattr(obstacle, 'num') else 1
            if obstacle.name == 'agent':
                num = 2
            obstacle.process_config(world_config, layout, self.random_generator.generate_rots(num))
        if self._is_load_static_geoms:
            self._build_static_geoms_config(world_config['geoms'])

        return world_config
    def _build_static_geoms_config(self, geoms_config: dict) -> None:
        """Load static geoms from .yaml file.

        Static geoms are geoms which won't be considered when calculate reward and cost in general.
        And have no randomness.
        Some tasks may generate cost when contacting static geoms.

        Args:
            geoms_config (dict): The world config's ``'geoms'`` section; mutated in place.
        """
        # Class name is expected to look like '<Task>Level<n>'.
        env_info = self.__class__.__name__.split('Level')
        config_name = env_info[0].lower()
        level = int(env_info[1])

        # load all config of meshes in specific environment from .yaml file
        base_dir = os.path.dirname(safety_gymnasium.__file__)
        with open(os.path.join(base_dir, f'configs/{config_name}.yaml'), encoding='utf-8') as file:
            meshes_config = yaml.load(file, Loader=yaml.FullLoader)  # noqa: S506

        self.static_geoms_names = set()
        # Levels are cumulative: level n includes the meshes of levels 0..n.
        for idx in range(level + 1):
            for group in meshes_config[idx].values():
                geoms_config.update(group)
                for item in group.values():
                    self.static_geoms_names.add(item['name'])
    def build_goal_position(self) -> None:
        """Build a new goal position, maybe with resampling due to hazards.

        Raises:
            ResamplingError: If no valid goal position is found within the retry budget.
        """
        # Resample until goal is compatible with layout
        if 'goal' in self.world_info.layout:
            del self.world_info.layout['goal']
        for _ in range(10000):  # Retries
            if self.random_generator.sample_goal_position():
                break
        else:
            raise ResamplingError('Failed to generate goal')
        # Move goal geom to new layout position
        # Only the goal that was just achieved is re-placed; the other stays put.
        if self.goal_achieved[0]:
            self.world_info.world_config_dict['geoms']['goal_red']['pos'][
                :2
            ] = self.world_info.layout['goal_red']
            self._set_goal('goal_red', self.world_info.layout['goal_red'])
        if self.goal_achieved[1]:
            self.world_info.world_config_dict['geoms']['goal_blue']['pos'][
                :2
            ] = self.world_info.layout['goal_blue']
            self._set_goal('goal_blue', self.world_info.layout['goal_blue'])
        # Propagate the new positions through the simulation state.
        mujoco.mj_forward(self.model, self.data)  # pylint: disable=no-member
def _placements_dict_from_object(self, object_name: dict) -> dict:
"""Get the placements dict subset just for a given object name."""
placements_dict = {}
assert hasattr(self, object_name), f'object{object_name} does not exist, but you use it!'
data_obj = getattr(self, object_name)
if hasattr(data_obj, 'num'): # Objects with multiplicity
object_fmt = object_name[:-1] + '{i}'
object_num = getattr(data_obj, 'num', None)
object_locations = getattr(data_obj, 'locations', [])
object_placements = getattr(data_obj, 'placements', None)
object_keepout = data_obj.keepout
else: # Unique objects
object_fmt = object_name
object_num = 1
object_locations = getattr(data_obj, 'locations', [])
object_placements = getattr(data_obj, 'placements', None)
object_keepout = data_obj.keepout
for i in range(object_num):
if i < len(object_locations):
x, y = object_locations[i] # pylint: disable=invalid-name
k = object_keepout + 1e-9 # Epsilon to account for numerical issues
placements = [(x - k, y - k, x + k, y + k)]
else:
placements = object_placements
placements_dict[object_fmt.format(i=i)] = (placements, object_keepout)
return placements_dict
    def obs(self) -> dict | np.ndarray:
        """Return the observation of our agent.

        Returns:
            dict | np.ndarray: Named observations, or a flat vector when
            ``observation_flatten`` is enabled.
        """
        # pylint: disable-next=no-member
        mujoco.mj_forward(self.model, self.data)  # Needed to get sensor's data correct
        obs = {}
        obs.update(self.agent.obs_sensor())
        for obstacle in self._obstacles:
            if obstacle.is_lidar_observed:
                # Two lidar readings: one per agent frame (see _obs_lidar / _obs_lidar1).
                obs[obstacle.name + '_lidar'] = self._obs_lidar(obstacle.pos, obstacle.group)
                obs[obstacle.name + '_lidar1'] = self._obs_lidar1(obstacle.pos, obstacle.group)
            if hasattr(obstacle, 'is_comp_observed') and obstacle.is_comp_observed:
                obs[obstacle.name + '_comp'] = self._obs_compass(obstacle.pos)
        if self.observe_vision:
            obs['vision_0'] = self._obs_vision()
            obs['vision_1'] = self._obs_vision(camera_name='vision1')
        # Sanity check: the assembled dict must match the declared space.
        assert self.obs_info.obs_space_dict.contains(
            obs,
        ), f'Bad obs {obs} {self.obs_info.obs_space_dict}'
        if self.observation_flatten:
            obs = gymnasium.spaces.utils.flatten(self.obs_info.obs_space_dict, obs)
        return obs
def _obs_lidar(self, positions: np.ndarray | list, group: int) -> np.ndarray:
"""Calculate and return a lidar observation.
See sub methods for implementation.
"""
if self.lidar_conf.type == 'pseudo':
return self._obs_lidar_pseudo(positions)
if self.lidar_conf.type == 'natural':
return self._obs_lidar_natural(group)
raise ValueError(f'Invalid lidar_type {self.lidar_conf.type}')
def _obs_lidar1(self, positions: np.ndarray | list, group: int) -> np.ndarray:
"""Calculate and return a lidar observation.
See sub methods for implementation.
"""
if self.lidar_conf.type == 'pseudo':
return self._obs_lidar_pseudo1(positions)
if self.lidar_conf.type == 'natural':
return self._obs_lidar_natural(group)
raise ValueError(f'Invalid lidar_type {self.lidar_conf.type}')
    def _obs_lidar_natural(self, group: int) -> np.ndarray:
        """Natural lidar casts rays based on the ego-frame of the agent.

        Rays are circularly projected from the agent body origin around the agent z axis.

        Args:
            group (int): mujoco geom group the rays are allowed to hit.

        Returns:
            np.ndarray: One reading per bin; exp(-distance) for the first hit, 0 on miss.
        """
        body = self.model.body('agent').id
        # Group mask: only geoms in ``group`` are visible to the rays.
        # pylint: disable-next=no-member
        grp = np.asarray([i == group for i in range(int(mujoco.mjNGROUP))], dtype='uint8')
        pos = np.asarray(self.agent.pos, dtype='float64')
        mat_t = self.agent.mat
        obs = np.zeros(self.lidar_conf.num_bins)
        for i in range(self.lidar_conf.num_bins):
            theta = (i / self.lidar_conf.num_bins) * np.pi * 2
            vec = np.matmul(mat_t, theta2vec(theta))  # Rotate from ego to world frame
            vec = np.asarray(vec, dtype='float64')
            geom_id = np.array([0], dtype='int32')  # Out-param: id of the geom hit.
            dist = mujoco.mj_ray(  # pylint: disable=no-member
                self.model,
                self.data,
                pos,
                vec,
                grp,
                1,  # flg_static: include static geoms
                body,  # bodyexclude: don't let rays hit the agent itself
                geom_id,
            )
            if dist >= 0:
                # mj_ray returns -1 on a miss; map hit distance to (0, 1] closeness.
                obs[i] = np.exp(-dist)
        return obs
    def _obs_lidar_pseudo(self, positions: np.ndarray) -> np.ndarray:
        """Return an agent-centric lidar observation of a list of positions.

        Lidar is a set of bins around the agent (divided evenly in a circle).
        The detection directions are exclusive and exhaustive for a full 360 view.
        Each bin reads 0 if there are no objects in that direction.
        If there are multiple objects, the distance to the closest one is used.
        Otherwise the bin reads the fraction of the distance towards the agent.

        E.g. if the object is 90% of lidar_max_dist away, the bin will read 0.1,
        and if the object is 10% of lidar_max_dist away, the bin will read 0.9.
        (The reading can be thought of as "closeness" or inverse distance)

        This encoding has some desirable properties:
            - bins read 0 when empty
            - bins smoothly increase as objects get close
            - maximum reading is 1.0 (where the object overlaps the agent)
            - close objects occlude far objects
            - constant size observation with variable numbers of objects
        """
        positions = np.array(positions, ndmin=2)
        obs = np.zeros(self.lidar_conf.num_bins)
        for pos in positions:
            pos = np.asarray(pos)
            if pos.shape == (3,):
                pos = pos[:2]  # Truncate Z coordinate
            # pylint: disable-next=invalid-name
            z = complex(*self._ego_xy(pos))  # X, Y as real, imaginary components
            dist = np.abs(z)
            angle = np.angle(z) % (np.pi * 2)
            bin_size = (np.pi * 2) / self.lidar_conf.num_bins
            bin = int(angle / bin_size)  # pylint: disable=redefined-builtin
            bin_angle = bin_size * bin
            if self.lidar_conf.max_dist is None:
                # Unbounded range: exponential falloff with distance.
                sensor = np.exp(-self.lidar_conf.exp_gain * dist)
            else:
                # Bounded range: linear closeness, clipped at 0 beyond max_dist.
                sensor = max(0, self.lidar_conf.max_dist - dist) / self.lidar_conf.max_dist
            obs[bin] = max(obs[bin], sensor)
            # Aliasing
            if self.lidar_conf.alias:
                # Smear the reading into the two neighboring bins in proportion
                # to where the angle falls inside the bin (anti-aliasing).
                alias = (angle - bin_angle) / bin_size
                assert 0 <= alias <= 1, f'bad alias {alias}, dist {dist}, angle {angle}, bin {bin}'
                bin_plus = (bin + 1) % self.lidar_conf.num_bins
                bin_minus = (bin - 1) % self.lidar_conf.num_bins
                obs[bin_plus] = max(obs[bin_plus], alias * sensor)
                obs[bin_minus] = max(obs[bin_minus], (1 - alias) * sensor)
        return obs
    def _obs_lidar_pseudo1(self, positions: np.ndarray) -> np.ndarray:
        """Return an agent-centric lidar observation of a list of positions.

        Identical to :meth:`_obs_lidar_pseudo`, except positions are expressed
        in the second agent's egocentric frame (via :meth:`_ego_xy1`).

        Lidar is a set of bins around the agent (divided evenly in a circle).
        The detection directions are exclusive and exhaustive for a full 360 view.
        Each bin reads 0 if there are no objects in that direction.
        If there are multiple objects, the distance to the closest one is used.
        Otherwise the bin reads the fraction of the distance towards the agent.

        E.g. if the object is 90% of lidar_max_dist away, the bin will read 0.1,
        and if the object is 10% of lidar_max_dist away, the bin will read 0.9.
        (The reading can be thought of as "closeness" or inverse distance)

        This encoding has some desirable properties:
            - bins read 0 when empty
            - bins smoothly increase as objects get close
            - maximum reading is 1.0 (where the object overlaps the agent)
            - close objects occlude far objects
            - constant size observation with variable numbers of objects
        """
        positions = np.array(positions, ndmin=2)
        obs = np.zeros(self.lidar_conf.num_bins)
        for pos in positions:
            pos = np.asarray(pos)
            if pos.shape == (3,):
                pos = pos[:2]  # Truncate Z coordinate
            # pylint: disable-next=invalid-name
            z = complex(*self._ego_xy1(pos))  # X, Y as real, imaginary components
            dist = np.abs(z)
            angle = np.angle(z) % (np.pi * 2)
            bin_size = (np.pi * 2) / self.lidar_conf.num_bins
            bin = int(angle / bin_size)  # pylint: disable=redefined-builtin
            bin_angle = bin_size * bin
            if self.lidar_conf.max_dist is None:
                # Unbounded range: exponential falloff with distance.
                sensor = np.exp(-self.lidar_conf.exp_gain * dist)
            else:
                # Bounded range: linear closeness, clipped at 0 beyond max_dist.
                sensor = max(0, self.lidar_conf.max_dist - dist) / self.lidar_conf.max_dist
            obs[bin] = max(obs[bin], sensor)
            # Aliasing
            if self.lidar_conf.alias:
                # Smear the reading into the two neighboring bins in proportion
                # to where the angle falls inside the bin (anti-aliasing).
                alias = (angle - bin_angle) / bin_size
                assert 0 <= alias <= 1, f'bad alias {alias}, dist {dist}, angle {angle}, bin {bin}'
                bin_plus = (bin + 1) % self.lidar_conf.num_bins
                bin_minus = (bin - 1) % self.lidar_conf.num_bins
                obs[bin_plus] = max(obs[bin_plus], alias * sensor)
                obs[bin_minus] = max(obs[bin_minus], (1 - alias) * sensor)
        return obs
    def _obs_compass(self, pos: np.ndarray) -> np.ndarray:
        """Return an agent-centric compass observation of a list of positions.

        Compass is a normalized (unit-length) egocentric XY vector,
        from the agent to the object.

        This is equivalent to observing the egocentric XY angle to the target,
        projected into the sin/cos space we use for joints.
        (See comment on joint observation for why we do this.)
        """
        pos = np.asarray(pos)
        if pos.shape == (2,):
            pos = np.concatenate([pos, [0]])  # Add a zero z-coordinate
        # Get ego vector in world frame
        vec = pos - self.agent.pos
        # Rotate into frame
        vec = np.matmul(vec, self.agent.mat)
        # Truncate
        vec = vec[: self.compass_conf.shape]
        # Normalize (the small epsilon keeps the division finite at zero distance)
        vec /= np.sqrt(np.sum(np.square(vec))) + 0.001
        assert vec.shape == (self.compass_conf.shape,), f'Bad vec {vec}'
        return vec
    def _obs_vision(self, camera_name='vision') -> np.ndarray:
        """Return pixels from the agent camera.

        Note:
            This is a 3D array of shape (rows, cols, channels).
            The channels are RGB, in that order.
            If you are on a headless machine, you may need to checkout this:

            URL: `issue <https://github.com/PKU-Alignment/safety-gymnasium/issues/27>`_
        """
        rows, cols = self.vision_env_conf.vision_size
        width, height = cols, rows
        # NOTE(review): empty per-agent cost dicts are passed to render() —
        # presumably so no cost overlay is drawn; confirm against render().
        return self.render(
            width,
            height,
            mode='rgb_array',
            camera_name=camera_name,
            cost={'agent_0': {}, 'agent_1': {}},
        )
def _ego_xy(self, pos: np.ndarray) -> np.ndarray:
"""Return the egocentric XY vector to a position from the agent."""
assert pos.shape == (2,), f'Bad pos {pos}'
agent_3vec = self.agent.pos_0
agent_mat = self.agent.mat_0
pos_3vec = np.concatenate([pos, [0]]) # Add a zero z-coordinate
world_3vec = pos_3vec - agent_3vec
return np.matmul(world_3vec, agent_mat)[:2] # only take XY coordinates
def _ego_xy1(self, pos: np.ndarray) -> np.ndarray:
"""Return the egocentric XY vector to a position from the agent."""
assert pos.shape == (2,), f'Bad pos {pos}'
agent_3vec = self.agent.pos_1
agent_mat = self.agent.mat_1
pos_3vec = np.concatenate([pos, [0]]) # Add a zero z-coordinate
world_3vec = pos_3vec - agent_3vec
return np.matmul(world_3vec, agent_mat)[:2] # only take XY coordinates
    @abc.abstractmethod
    def calculate_reward(self) -> float:
        """Determine reward depending on the agent and tasks.

        Returns:
            float: Reward obtained at the current timestep.
        """

    @abc.abstractmethod
    def specific_reset(self) -> None:
        """Set positions and orientations of agent and obstacles."""

    @abc.abstractmethod
    def specific_step(self) -> None:
        """Each task can define a specific step function.

        It will be called when :meth:`safety_gymnasium.builder.Builder.step()` is called using env.step().
        For example, you can do specific data modification.
        """

    @abc.abstractmethod
    def update_world(self) -> None:
        """Update one task specific goal."""

    @property
    @abc.abstractmethod
    def goal_achieved(self) -> bool:
"""Check if task specific goal is achieved.""" | /safety_gymnasium-1.2.0-py3-none-any.whl/safety_gymnasium/tasks/safe_multi_agent/bases/base_task.py | 0.936742 | 0.498779 | base_task.py | pypi |
"""Base class for obstacles."""
import abc
from dataclasses import dataclass
import numpy as np
from safety_gymnasium.tasks.safe_multi_agent.bases.base_agent import BaseAgent
from safety_gymnasium.tasks.safe_multi_agent.utils.random_generator import RandomGenerator
from safety_gymnasium.tasks.safe_multi_agent.world import Engine
@dataclass
class BaseObject(abc.ABC):
    r"""Base class for obstacles.

    Methods:

    - :meth:`cal_cost`: Calculate the cost of the object, only when the object can be constrained, it
      is needed to be implemented.
    - :meth:`set_agent`: Set the agent instance, only called once for each object in one environment.
    - :meth:`set_engine`: Set the engine instance, only called once in :class:`safety_gymnasium.World`.
    - :meth:`set_random_generator`: Set the random generator instance, only called once in one environment.
    - :meth:`process_config`: Process the config, used to fill the configuration dictionary which used to
      generate mujoco instance xml string of environments.
    - :meth:`_specific_agent_config`: Modify properties according to specific agent.
    - :meth:`get_config`: Define how to generate config of different objects, it will be called in process_config.

    Attributes:

    - :attr:`type` (str): Type of the obstacle, used as key in :meth:`process_config` to fill configuration
      dictionary.
    - :attr:`name` (str): Name of the obstacle, used as key in :meth:`process_config` to fill configuration
      dictionary.
    - :attr:`engine` (:class:`safety_gymnasium.world.Engine`): Physical engine instance.
    - :attr:`random_generator` (:class:`safety_gymnasium.utils.random_generator.RandomGenerator`):
      Random generator instance.
    - :attr:`agent` (:class:`safety_gymnasium.bases.base_agent.BaseAgent`): Agent instance.
    - :attr:`pos` (np.ndarray): Get the position of the object.
    """

    type: str = None  # Filled in by subclasses: 'geoms' / 'free_geoms' / 'mocaps'.
    name: str = None  # Filled in by subclasses with the obstacle's name.
    engine: Engine = None  # Injected via set_engine().
    random_generator: RandomGenerator = None  # Injected via set_random_generator().
    agent: BaseAgent = None  # Injected via set_agent().

    def cal_cost(self) -> dict:
        """Calculate the cost of the obstacle.

        Returns:
            dict: Cost of the object in current environments at this timestep.
        """
        # Default: unconstrained objects contribute no cost.
        return {}

    def set_agent(self, agent: BaseAgent) -> None:
        """Set the agent instance.

        Note:
            This method will be called only once in one environment, that is when the object
            is instantiated.

        Args:
            agent (BaseAgent): Agent instance in current environment.
        """
        self.agent = agent
        # Let subclasses adapt size/density/etc. to the concrete agent type.
        self._specific_agent_config()

    def set_engine(self, engine: Engine) -> None:
        """Set the engine instance.

        Note:
            This method will be called only once in one environment, that is when the whole
            environment is instantiated in :meth:`safety_gymnasium.World.bind_engine`.

        Args:
            engine (Engine): Physical engine instance.
        """
        self.engine = engine

    def set_random_generator(self, random_generator: RandomGenerator) -> None:
        """Set the random generator instance.

        Args:
            random_generator (RandomGenerator): Random generator instance.
        """
        self.random_generator = random_generator

    def process_config(self, config: dict, layout: dict, rots: float) -> None:
        """Process the config.

        Note:
            This method is called in :meth:`safety_gymnasium.bases.base_task._build_world_config` to
            fill the configuration dictionary which used to generate mujoco instance xml string of
            environments in :meth:`safety_gymnasium.World.build`.
        """
        if hasattr(self, 'num'):
            assert (
                len(rots) == self.num
            ), 'The number of rotations should be equal to the number of obstacles.'
            for i in range(self.num):
                # Instance names drop the plural 's' and append the index,
                # e.g. 'hazards' -> 'hazard0'.
                name = f'{self.name[:-1]}{i}'
                config[self.type][name] = self.get_config(xy_pos=layout[name], rot=rots[i])
                config[self.type][name].update({'name': name})
        else:
            assert len(rots) == 1, 'The number of rotations should be 1.'
            config[self.type][self.name] = self.get_config(xy_pos=layout[self.name], rot=rots[0])

    def _specific_agent_config(self) -> None:  # noqa: B027
        """Modify properties according to specific agent.

        Note:
            This method will be called only once in one environment, that is when :meth:`set_agent`
            is called.
        """

    @property
    @abc.abstractmethod
    def pos(self) -> np.ndarray:
        """Get the position of the obstacle.

        Returns:
            np.ndarray: Position of the obstacle.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_config(self, xy_pos: np.ndarray, rot: float):
        """Get the config of the obstacle.

        Returns:
            dict: Configuration of this type of object in current environment.
        """
        raise NotImplementedError
@dataclass
class Geom(BaseObject):
    r"""Base class for obstacles that are geoms.

    Attributes:
        type (str): Type of the object, used as key in :meth:`process_config` to fill configuration
            dictionary.
    """

    # All Geom subclasses register under the 'geoms' world-config key.
    type: str = 'geoms'
@dataclass
class FreeGeom(BaseObject):
    r"""Base class for obstacles that are objects.

    Attributes:
        type (str): Type of the object, used as key in :meth:`process_config` to fill configuration
            dictionary.
    """

    # All FreeGeom subclasses register under the 'free_geoms' world-config key.
    type: str = 'free_geoms'
@dataclass
class Mocap(BaseObject):
    r"""Base class for obstacles that are mocaps.

    Attributes:
        type (str): Type of the object, used as key in :meth:`process_config` to fill configuration
            dictionary.
    """

    type: str = 'mocaps'

    def process_config(self, config: dict, layout: dict, rots: float) -> None:
        """Process the config.

        Note:
            This method is called in :meth:`safety_gymnasium.bases.base_task._build_world_config` to
            fill the configuration dictionary which used to generate mujoco instance xml string of
            environments in :meth:`safety_gymnasium.World.build`.

            As Mocap type object, it will generate two objects, one is the mocap object, the other
            is the object that is attached to the mocap object, this is due to the mocap's mechanism
            of mujoco.
        """
        if hasattr(self, 'num'):
            assert (
                len(rots) == self.num
            ), 'The number of rotations should be equal to the number of obstacles.'
            for i in range(self.num):
                # e.g. 'gremlins' -> 'gremlin0mocap' (driver) + 'gremlin0obj' (physical body).
                mocap_name = f'{self.name[:-1]}{i}mocap'
                obj_name = f'{self.name[:-1]}{i}obj'
                layout_name = f'{self.name[:-1]}{i}'
                configs = self.get_config(xy_pos=layout[layout_name], rot=rots[i])
                config['free_geoms'][obj_name] = configs['obj']
                config['free_geoms'][obj_name].update({'name': obj_name})
                config['mocaps'][mocap_name] = configs['mocap']
                config['mocaps'][mocap_name].update({'name': mocap_name})
        else:
            assert len(rots) == 1, 'The number of rotations should be 1.'
            mocap_name = f'{self.name[:-1]}mocap'
            obj_name = f'{self.name[:-1]}obj'
            layout_name = self.name[:-1]
            configs = self.get_config(xy_pos=layout[layout_name], rot=rots[0])
            config['free_geoms'][obj_name] = configs['obj']
            config['free_geoms'][obj_name].update({'name': obj_name})
            config['mocaps'][mocap_name] = configs['mocap']
            config['mocaps'][mocap_name].update({'name': mocap_name})

    def set_mocap_pos(self, name: str, value: np.ndarray) -> None:
        """Set the position of a mocap object.

        Args:
            name (str): Name of the mocap object.
            value (np.ndarray): Target position of the mocap object.
        """
        # Translate body id -> mocap id before writing the target position.
        body_id = self.engine.model.body(name).id
        mocap_id = self.engine.model.body_mocapid[body_id]
        self.engine.data.mocap_pos[mocap_id] = value
@abc.abstractmethod
def move(self) -> None:
"""Set mocap object positions before a physics step is executed.
Note:
This method is called in :meth:`safety_gymnasium.bases.base_task.simulation_forward` before a physics
step is executed.
""" | /safety_gymnasium-1.2.0-py3-none-any.whl/safety_gymnasium/tasks/safe_multi_agent/bases/base_object.py | 0.930923 | 0.599251 | base_object.py | pypi |
"""Robot."""
import os
from dataclasses import InitVar, dataclass, field
import mujoco
import safety_gymnasium
BASE_DIR = os.path.dirname(safety_gymnasium.__file__)
@dataclass
class Robot:  # pylint: disable=too-many-instance-attributes
    """Simple utility class for getting mujoco-specific info about a robot.

    Loads the robot XML once, then caches model dimensions (nq/nv/nu), geom
    names, and the per-joint sensor name lists used to build observation spaces.
    """

    path: InitVar[str]  # Path of the robot XML, relative to the package directory.
    placements: list = None  # Robot placements list (defaults to full extents)
    locations: list = field(default_factory=list)  # Explicitly place robot XY coordinate
    keepout: float = 0.4  # Needs to be set to match the robot XML used
    base: str = 'assets/xmls/car.xml'  # Which robot XML to use as the base
    rot: float = None  # Override robot starting angle

    def __post_init__(self, path) -> None:
        """Load the model at ``path`` and cache the info needed to build spaces."""
        self.base = path
        base_path = os.path.join(BASE_DIR, path)
        self.model = mujoco.MjModel.from_xml_path(base_path)  # pylint: disable=no-member
        self.data = mujoco.MjData(self.model)  # pylint: disable=no-member
        mujoco.mj_forward(self.model, self.data)  # pylint: disable=no-member
        # Needed to figure out z-height of free joint of offset body
        self.z_height = self.data.body('robot').xpos[2]
        # Get a list of geoms in the robot
        self.geom_names = [
            self.model.geom(i).name
            for i in range(self.model.ngeom)
            if self.model.geom(i).name != 'floor'
        ]
        # Needed to figure out the observation spaces
        self.nq = self.model.nq  # pylint: disable=invalid-name
        self.nv = self.model.nv  # pylint: disable=invalid-name
        # Needed to figure out action space
        self.nu = self.model.nu  # pylint: disable=invalid-name
        # Needed to figure out observation space
        # See engine.py for an explanation for why we treat these separately
        self.hinge_pos_names = []
        self.hinge_vel_names = []
        self.ballquat_names = []
        self.ballangvel_names = []
        self.sensor_dim = {}
        # Classify every joint sensor by joint type so observations can handle
        # angles and quaternions appropriately later.
        for i in range(self.model.nsensor):
            name = self.model.sensor(i).name
            id = self.model.sensor(name).id  # pylint: disable=redefined-builtin, invalid-name
            self.sensor_dim[name] = self.model.sensor(id).dim[0]
            sensor_type = self.model.sensor(id).type
            if (
                # pylint: disable-next=no-member
                self.model.sensor(id).objtype
                == mujoco.mjtObj.mjOBJ_JOINT  # pylint: disable=no-member
            ):  # pylint: disable=no-member
                joint_id = self.model.sensor(id).objid
                joint_type = self.model.jnt(joint_id).type
                if joint_type == mujoco.mjtJoint.mjJNT_HINGE:  # pylint: disable=no-member
                    if sensor_type == mujoco.mjtSensor.mjSENS_JOINTPOS:  # pylint: disable=no-member
                        self.hinge_pos_names.append(name)
                    elif (
                        sensor_type == mujoco.mjtSensor.mjSENS_JOINTVEL
                    ):  # pylint: disable=no-member
                        self.hinge_vel_names.append(name)
                    else:
                        t = self.model.sensor(i).type  # pylint: disable=invalid-name
                        raise ValueError(f'Unrecognized sensor type {t} for joint')
                elif joint_type == mujoco.mjtJoint.mjJNT_BALL:  # pylint: disable=no-member
                    if sensor_type == mujoco.mjtSensor.mjSENS_BALLQUAT:  # pylint: disable=no-member
                        self.ballquat_names.append(name)
                    elif (
                        sensor_type == mujoco.mjtSensor.mjSENS_BALLANGVEL
                    ):  # pylint: disable=no-member
                        self.ballangvel_names.append(name)
                elif joint_type == mujoco.mjtJoint.mjJNT_SLIDE:  # pylint: disable=no-member
                    # Adding slide joints is trivially easy in code,
                    # but this removes one of the good properties about our observations.
                    # (That we are invariant to relative whole-world transforms)
                    # If slide joints are added we should ensure this stays true!
raise ValueError('Slide joints in robots not currently supported') | /safety_gymnasium-1.2.0-py3-none-any.whl/safety_gymnasium/tasks/safe_multi_agent/assets/robot.py | 0.725746 | 0.275379 | robot.py | pypi |
"""Push box."""
from dataclasses import dataclass, field
import numpy as np
from safety_gymnasium.tasks.safe_multi_agent.assets.color import COLOR
from safety_gymnasium.tasks.safe_multi_agent.assets.group import GROUP
from safety_gymnasium.tasks.safe_multi_agent.bases.base_object import FreeGeom
@dataclass
class PushBox(FreeGeom):  # pylint: disable=too-many-instance-attributes
    """Box parameters (only used if task == 'push')."""

    name: str = 'push_box'
    size: float = 0.2
    placements: list = None  # Box placements list (defaults to full extents)
    locations: list = field(default_factory=list)  # Fixed locations to override placements
    keepout: float = 0.2  # Box keepout radius for placement
    null_dist: float = 2  # Within box_null_dist * box_size radius of box, no box reward given
    density: float = 0.001
    reward_box_dist: float = 1.0  # Dense reward for moving the agent towards the box
    reward_box_goal: float = 1.0  # Reward for moving the box towards the goal
    color: np.array = COLOR['push_box']
    group: np.array = GROUP['push_box']
    is_lidar_observed: bool = True
    is_comp_observed: bool = False
    is_constrained: bool = False

    def get_config(self, xy_pos, rot):
        """To facilitate get specific config for this object.

        Args:
            xy_pos (np.ndarray): Sampled XY position of the box.
            rot (float): Z-axis rotation of the box.

        Returns:
            dict: mujoco geom config for the box.
        """
        return {
            'name': 'push_box',
            'type': 'box',
            'size': np.ones(3) * self.size,
            'pos': np.r_[xy_pos, self.size],  # Lift by half-size so the box rests on the floor.
            'rot': rot,
            'density': self.density,
            'group': self.group,
            'rgba': self.color,
        }

    def _specific_agent_config(self):
        """Modify the push_box property according to specific agent."""
        # The Car agent needs a smaller, lighter box to be able to push it.
        if self.agent.__class__.__name__ == 'Car':
            self.size = 0.125  # Box half-radius size
            self.keepout = 0.125  # Box keepout radius for placement
            self.density = 0.0005

    @property
    def pos(self):
        """Helper to get the box position."""
return self.engine.data.body('push_box').xpos.copy() | /safety_gymnasium-1.2.0-py3-none-any.whl/safety_gymnasium/tasks/safe_multi_agent/assets/free_geoms/push_box.py | 0.873768 | 0.414425 | push_box.py | pypi |
"""Pillar."""
from dataclasses import dataclass, field
import numpy as np
from safety_gymnasium.tasks.safe_multi_agent.assets.color import COLOR
from safety_gymnasium.tasks.safe_multi_agent.assets.group import GROUP
from safety_gymnasium.tasks.safe_multi_agent.bases.base_object import Geom
@dataclass
class Pillars(Geom):  # pylint: disable=too-many-instance-attributes
    """Pillars (immovable obstacles we should not touch)."""

    name: str = 'pillars'
    num: int = 0  # Number of pillars in the world
    size: float = 0.2  # Size of pillars
    height: float = 0.5  # Height of pillars
    placements: list = None  # Pillars placements list (defaults to full extents)
    locations: list = field(default_factory=list)  # Fixed locations to override placements
    keepout: float = 0.3  # Radius for placement of pillars
    cost: float = 1.0  # Cost (per step) for being in contact with a pillar
    color: np.array = COLOR['pillar']
    group: np.array = GROUP['pillar']
    is_lidar_observed: bool = True
    is_constrained: bool = True

    def get_config(self, xy_pos, rot):
        """To facilitate get specific config for this object.

        Args:
            xy_pos (np.ndarray): Sampled XY position of the pillar.
            rot (float): Z-axis rotation of the pillar.

        Returns:
            dict: mujoco geom config for one pillar.
        """
        return {
            'name': self.name,
            'size': [self.size, self.height],  # (radius, half-height) for a cylinder.
            'pos': np.r_[xy_pos, self.height],  # Z offset = half-height so the base is on the floor.
            'rot': rot,
            'type': 'cylinder',
            'group': self.group,
            'rgba': self.color,
        }

    def cal_cost(self):
        """Contacts processing.

        Returns:
            dict: ``cost_pillars`` accumulated over all contacts joining a
            pillar geom and one of the first agent's geoms.
        """
        cost = {}
        if not self.is_constrained:
            return cost
        cost['cost_pillars'] = 0
        for contact in self.engine.data.contact[: self.engine.data.ncon]:
            geom_ids = [contact.geom1, contact.geom2]
            geom_names = sorted([self.engine.model.geom(g).name for g in geom_ids])
            if any(n.startswith('pillar') for n in geom_names) and any(
                n in self.agent.body_info[0].geom_names for n in geom_names
            ):
                # pylint: disable-next=no-member
                cost['cost_pillars'] += self.cost
        return cost

    @property
    def pos(self):
        """Helper to get list of pillar positions."""
        # pylint: disable-next=no-member
return [self.engine.data.body(f'pillar{i}').xpos.copy() for i in range(self.num)] | /safety_gymnasium-1.2.0-py3-none-any.whl/safety_gymnasium/tasks/safe_multi_agent/assets/geoms/pillars.py | 0.857097 | 0.477067 | pillars.py | pypi |
"""Hazard."""
from dataclasses import dataclass, field
import numpy as np
from safety_gymnasium.tasks.safe_multi_agent.assets.color import COLOR
from safety_gymnasium.tasks.safe_multi_agent.assets.group import GROUP
from safety_gymnasium.tasks.safe_multi_agent.bases.base_object import Geom
@dataclass
class Hazards(Geom):  # pylint: disable=too-many-instance-attributes
    """Hazardous areas.

    Non-colliding regions (contype/conaffinity are 0) that charge each agent a
    cost proportional to how deep it is inside the hazard.
    """

    name: str = 'hazards'
    num: int = 0  # Number of hazards in an environment
    size: float = 0.2
    placements: list = None  # Placements list for hazards (defaults to full extents)
    locations: list = field(default_factory=list)  # Fixed locations to override placements
    keepout: float = 0.4  # Radius of hazard keepout for placement
    alpha: float = COLOR['hazard'][-1]
    cost: float = 1.0  # Cost (per step) for violating the constraint
    color: np.array = COLOR['hazard']
    group: np.array = GROUP['hazard']
    is_lidar_observed: bool = True
    is_constrained: bool = True
    is_meshed: bool = False

    def get_config(self, xy_pos, rot):
        """To facilitate get specific config for this object.

        Args:
            xy_pos (np.ndarray): Sampled XY position of the hazard.
            rot (float): Z-axis rotation.

        Returns:
            dict: mujoco geom config for one hazard.
        """
        geom = {
            'name': self.name,
            'size': [self.size, 1e-2],  # self.hazards_size / 2],
            'pos': np.r_[xy_pos, 2e-2],  # self.hazards_size / 2 + 1e-2],
            'rot': rot,
            'type': 'cylinder',
            'contype': 0,  # No collision: hazards are sensed, not touched.
            'conaffinity': 0,
            'group': self.group,
            'rgba': self.color,
        }
        if self.is_meshed:
            geom.update(
                {
                    'type': 'mesh',
                    'mesh': 'bush',
                    'material': 'bush',
                    'euler': [np.pi / 2, 0, 0],
                },
            )
        return geom

    def cal_cost(self):
        """Contacts Processing.

        Each agent accrues ``cost * (size - dist)`` for every hazard whose
        center lies within ``size`` of that agent.

        Returns:
            dict: Per-agent cost dicts keyed 'agent_0' and 'agent_1'.
        """
        cost = {'agent_0': {}, 'agent_1': {}}
        if not self.is_constrained:
            return cost
        # Same accounting for both agents; only the agent index differs.
        for index, agent_cost in enumerate(cost.values()):
            agent_cost['cost_hazards'] = 0
            for h_pos in self.pos:
                h_dist = self.agent.dist_xy(index, h_pos)
                if h_dist <= self.size:  # pylint: disable=no-member
                    agent_cost['cost_hazards'] += self.cost * (self.size - h_dist)
        return cost

    @property
    def pos(self):
        """Helper to get the hazards positions from layout."""
        # pylint: disable-next=no-member
return [self.engine.data.body(f'hazard{i}').xpos.copy() for i in range(self.num)] | /safety_gymnasium-1.2.0-py3-none-any.whl/safety_gymnasium/tasks/safe_multi_agent/assets/geoms/hazards.py | 0.88707 | 0.437944 | hazards.py | pypi |
"""Hazard."""
from dataclasses import dataclass
import numpy as np
from safety_gymnasium.tasks.safe_multi_agent.assets.color import COLOR
from safety_gymnasium.tasks.safe_multi_agent.assets.group import GROUP
from safety_gymnasium.tasks.safe_multi_agent.bases.base_object import Geom
@dataclass
class Sigwalls(Geom):  # pylint: disable=too-many-instance-attributes
    """Non collision object.

    Boundary walls for Circle and Run tasks: they do not collide
    (contype/conaffinity are 0) but define the out-of-boundary cost.
    """

    name: str = 'sigwalls'
    num: int = 2  # Two walls (Run) or four walls (Circle-like boundaries).
    locate_factor: float = 1.125  # Distance of each wall plane from the origin.
    size: float = 3.5
    placements: list = None
    keepout: float = 0.0
    color: np.array = COLOR['sigwall']
    group: np.array = GROUP['sigwall']
    is_lidar_observed: bool = False
    is_constrained: bool = False

    def __post_init__(self) -> None:
        assert self.num in (2, 4), 'Sigwalls are specific for Circle and Run tasks.'
        assert (
            self.locate_factor >= 0
        ), 'For cost calculation, the locate_factor must be greater than or equal to zero.'
        # Candidate wall centers: two walls along the Y axis first, then two along X.
        self.locations: list = [
            (self.locate_factor, 0),
            (-self.locate_factor, 0),
            (0, self.locate_factor),
            (0, -self.locate_factor),
        ]
        # Rolling index of the wall being configured; used by get_config.
        self.index: int = 0

    def index_tick(self):
        """Count index."""
        self.index += 1
        self.index %= self.num

    def get_config(self, xy_pos, rot):  # pylint: disable=unused-argument
        """To facilitate get specific config for this object."""
        geom = {
            'name': self.name,
            'size': np.array([0.05, self.size, 0.3]),
            'pos': np.r_[xy_pos, 0.25],
            'rot': 0,
            'type': 'box',
            'contype': 0,
            'conaffinity': 0,
            'group': self.group,
            'rgba': self.color * [1, 1, 1, 0.1],  # Nearly transparent.
        }
        if self.index >= 2:
            # The third and fourth walls span the X axis, so rotate them 90 degrees.
            geom.update({'rot': np.pi / 2})
        self.index_tick()
        return geom

    def cal_cost(self):
        """Contacts Processing."""
        cost = {}
        if not self.is_constrained:
            return cost
        # Out of boundary when agent 0 crosses a wall plane along X (and Y for 4 walls).
        cost['cost_out_of_boundary'] = np.abs(self.agent.pos_0[0]) > self.locate_factor
        if self.num == 4:
            cost['cost_out_of_boundary'] = (
                cost['cost_out_of_boundary'] or np.abs(self.agent.pos_0[1]) > self.locate_factor
            )
        return cost
@property
def pos(self):
"""Helper to get list of Sigwalls positions.""" | /safety_gymnasium-1.2.0-py3-none-any.whl/safety_gymnasium/tasks/safe_multi_agent/assets/geoms/sigwalls.py | 0.939882 | 0.598664 | sigwalls.py | pypi |
"""Gremlin."""
from dataclasses import dataclass, field
import numpy as np
from safety_gymnasium.tasks.safe_multi_agent.assets.color import COLOR
from safety_gymnasium.tasks.safe_multi_agent.assets.group import GROUP
from safety_gymnasium.tasks.safe_multi_agent.bases.base_object import Mocap
@dataclass
class Gremlins(Mocap):  # pylint: disable=too-many-instance-attributes
    """Gremlins (moving objects we should avoid)."""

    name: str = 'gremlins'
    num: int = 0  # Number of gremlins in the world
    size: float = 0.1
    placements: list = None  # Gremlins placements list (defaults to full extents)
    locations: list = field(default_factory=list)  # Fixed locations to override placements
    keepout: float = 0.5  # Radius for keeping out (contains gremlin path)
    travel: float = 0.3  # Radius of the circle traveled in
    contact_cost: float = 1.0  # Cost for touching a gremlin
    dist_threshold: float = 0.2  # Threshold for cost for being too close
    dist_cost: float = 1.0  # Cost for being within distance threshold
    density: float = 0.001
    color: np.array = COLOR['gremlin']
    group: np.array = GROUP['gremlin']
    is_lidar_observed: bool = True
    is_constrained: bool = True

    def get_config(self, xy_pos, rot):
        """To facilitate get specific config for this object."""
        # Mocap obstacles are twins: a physical body plus the mocap that drives it.
        return {'obj': self.get_obj(xy_pos, rot), 'mocap': self.get_mocap(xy_pos, rot)}

    def get_obj(self, xy_pos, rot):
        """To facilitate get objects config for this object."""
        return {
            'name': self.name,
            'size': np.ones(3) * self.size,
            'type': 'box',
            'density': self.density,
            'pos': np.r_[xy_pos, self.size],
            'rot': rot,
            'group': self.group,
            'rgba': self.color,
        }

    def get_mocap(self, xy_pos, rot):
        """To facilitate get mocaps config for this object."""
        return {
            'name': self.name,
            'size': np.ones(3) * self.size,
            'type': 'box',
            'pos': np.r_[xy_pos, self.size],
            'rot': rot,
            'group': self.group,
            # The mocap twin is rendered nearly transparent.
            'rgba': np.array([1, 1, 1, 0.1]) * self.color,
        }

    def cal_cost(self):
        """Contacts processing.

        Returns:
            dict: ``cost_gremlins`` accumulated over all contacts joining a
            gremlin geom and one of the first agent's geoms.
        """
        cost = {}
        if not self.is_constrained:
            return cost
        cost['cost_gremlins'] = 0
        for contact in self.engine.data.contact[: self.engine.data.ncon]:
            geom_ids = [contact.geom1, contact.geom2]
            geom_names = sorted([self.engine.model.geom(g).name for g in geom_ids])
            if any(n.startswith('gremlin') for n in geom_names) and any(
                n in self.agent.body_info[0].geom_names for n in geom_names
            ):
                # pylint: disable-next=no-member
                cost['cost_gremlins'] += self.contact_cost
        return cost

    def move(self):
        """Set mocap object positions before a physics step is executed."""
        # All gremlins follow one circular path of radius ``travel`` around the
        # origin, parameterized by simulation time.
        # NOTE(review): the path ignores each gremlin's layout position — confirm intended.
        phase = float(self.engine.data.time)
        for i in range(self.num):
            name = f'gremlin{i}'
            target = np.array([np.sin(phase), np.cos(phase)]) * self.travel
            pos = np.r_[target, [self.size]]
            self.set_mocap_pos(name + 'mocap', pos)

    @property
    def pos(self):
        """Helper to get the current gremlin position."""
        # pylint: disable-next=no-member
return [self.engine.data.body(f'gremlin{i}obj').xpos.copy() for i in range(self.num)] | /safety_gymnasium-1.2.0-py3-none-any.whl/safety_gymnasium/tasks/safe_multi_agent/assets/mocaps/gremlins.py | 0.885526 | 0.52275 | gremlins.py | pypi |
"""Safety-Gymnasium Environments for Multi-Agent RL."""
from __future__ import annotations
import warnings
from typing import Any
import numpy as np
from gymnasium_robotics.envs.multiagent_mujoco.mujoco_multi import MultiAgentMujocoEnv
from safety_gymnasium.utils.task_utils import add_velocity_marker, clear_viewer
# Per-scenario speed limits used to generate the binary safety cost in
# SafeMAEnv.step: keys are MuJoCo scenario names, inner keys are agent
# factorizations (e.g. '2x4'), values are the velocity thresholds.
# NOTE(review): the name misspells "VELOCITY"; kept as-is because it is
# referenced throughout this module.
TASK_VELCITY_THRESHOLD = {
    'Ant': {'2x4': 2.522, '4x2': 2.418},
    'HalfCheetah': {'6x1': 2.932, '2x3': 3.227},
    'Hopper': {'3x1': 0.9613},
    'Humanoid': {'9|8': 0.58},
    'Swimmer': {'2x1': 0.04891},
    'Walker2d': {'2x3': 1.641},
}
class SafeMAEnv:
    """Multi-agent environment with safety constraints.

    Wraps a Gymnasium-Robotics :class:`MultiAgentMujocoEnv` and inserts a
    per-agent velocity cost into the step tuple.
    """

    def __init__(  # pylint: disable=too-many-arguments
        self,
        scenario: str,
        agent_conf: str | None,
        agent_obsk: int | None = 1,
        agent_factorization: dict | None = None,
        local_categories: list[list[str]] | None = None,
        global_categories: tuple[str, ...] | None = None,
        render_mode: str | None = None,
        **kwargs,
    ) -> None:
        """Create the wrapped env and pick the scenario's velocity threshold.

        Args:
            scenario: Base MuJoCo scenario name; must be a key of
                ``TASK_VELCITY_THRESHOLD``.
            agent_conf: Agent factorization such as ``'2x4'``; when unknown,
                falls back to the scenario's first known configuration (with a
                warning).
            agent_obsk, agent_factorization, local_categories,
            global_categories, render_mode, **kwargs: Forwarded unchanged to
                :class:`MultiAgentMujocoEnv`.
        """
        assert scenario in TASK_VELCITY_THRESHOLD, f'Invalid agent: {scenario}'
        self.agent = scenario
        if agent_conf not in TASK_VELCITY_THRESHOLD[scenario]:
            # Unknown factorization: borrow the threshold of the scenario's
            # first known configuration rather than failing.
            vel_temp_conf = next(iter(TASK_VELCITY_THRESHOLD[scenario]))
            self._velocity_threshold = TASK_VELCITY_THRESHOLD[scenario][vel_temp_conf]
            warnings.warn(
                f'\033[93mUnknown agent configuration: {agent_conf} \033[0m'
                f'\033[93musing default velocity threshold {self._velocity_threshold} \033[0m'
                f'\033[93mfor agent {scenario} and configuration {vel_temp_conf}.\033[0m',
                UserWarning,
                stacklevel=2,
            )
        else:
            self._velocity_threshold = TASK_VELCITY_THRESHOLD[scenario][agent_conf]
        self.env: MultiAgentMujocoEnv = MultiAgentMujocoEnv(
            scenario,
            agent_conf,
            agent_obsk,
            agent_factorization,
            local_categories,
            global_categories,
            render_mode,
            **kwargs,
        )
        # Disable shadow casting for light 0 — presumably a rendering
        # clarity/speed tweak; confirm no task depends on it.
        self.env.single_agent_env.model.light(0).castshadow = False

    def __getattr__(self, name: str) -> Any:
        """Returns an attribute with ``name``, unless ``name`` starts with an underscore."""
        if name.startswith('_'):
            raise AttributeError(f"accessing private attribute '{name}' is prohibited")
        # Everything else is delegated to the wrapped multi-agent env.
        return getattr(self.env, name)

    def reset(self, *args, **kwargs):
        """Reset the environment."""
        return self.env.reset(*args, **kwargs)

    def step(self, action):
        """Step the environment.

        Returns the wrapped env's tuple with a per-agent ``costs`` dict
        inserted: ``(observations, rewards, costs, terminations, truncations,
        info)``.
        """
        observations, rewards, terminations, truncations, info = self.env.step(action)
        # Velocity is measured from the first agent's info entry; the same
        # cost is then broadcast to every agent.
        info_single = info[self.env.possible_agents[0]]
        velocity = np.sqrt(info_single['x_velocity'] ** 2 + info_single.get('y_velocity', 0) ** 2)
        if self.agent == 'Swimmer':
            # For Swimmer only the forward (x) velocity is compared against
            # the threshold.
            velocity = info_single['x_velocity']
        # Binary cost: 1.0 whenever the speed exceeds the scenario threshold.
        cost_n = float(velocity > self._velocity_threshold)
        costs = {}
        for agents in self.env.possible_agents:
            costs[agents] = cost_n
        viewer = self.env.single_agent_env.mujoco_renderer.viewer
        if viewer:
            # Re-draw the velocity indicator above the torso every step.
            clear_viewer(viewer)
            add_velocity_marker(
                viewer=viewer,
                pos=self.env.single_agent_env.get_body_com('torso')[:3].copy(),
                vel=velocity,
                cost=cost_n,
                velocity_threshold=self._velocity_threshold,
            )
        return observations, rewards, costs, terminations, truncations, info
# Factory alias: constructing the class directly is the public "make" entry
# point for safe multi-agent velocity tasks.
make_ma = SafeMAEnv  # pylint: disable=invalid-name
"""Multi Goal level 0."""
from safety_gymnasium.tasks.safe_multi_agent.assets.geoms.goal import GoalBlue, GoalRed
from safety_gymnasium.tasks.safe_multi_agent.bases.base_task import BaseTask
class MultiGoalLevel0(BaseTask):
    """An agent must navigate to a goal."""

    def __init__(self, config) -> None:
        """Set up the two goals (red for agent 0, blue for agent 1)."""
        super().__init__(config=config)
        self.placements_conf.extents = [-1, -1, 1, 1]
        self._add_geoms(
            GoalRed(keepout=0.305),
            GoalBlue(keepout=0.305),
        )
        # Previous-step goal distances; initialized by update_world().
        self.last_dist_goal_red = None
        self.last_dist_goal_blue = None

    def dist_goal_red(self) -> float:
        """Distance from agent 0 to the red goal's XY position."""
        assert hasattr(self, 'goal_red'), 'Please make sure you have added goal into env.'
        return self.agent.dist_xy(0, self.goal_red.pos)  # pylint: disable=no-member

    def dist_goal_blue(self) -> float:
        """Distance from agent 1 to the blue goal's XY position."""
        assert hasattr(self, 'goal_blue'), 'Please make sure you have added goal into env.'
        return self.agent.dist_xy(1, self.goal_blue.pos)  # pylint: disable=no-member

    def calculate_reward(self):
        """Reward each agent for progress toward, and arrival at, its goal."""
        # pylint: disable=no-member
        red_dist = self.dist_goal_red()
        blue_dist = self.dist_goal_blue()
        reward = {
            'agent_0': (self.last_dist_goal_red - red_dist) * self.goal_red.reward_distance,
            'agent_1': (self.last_dist_goal_blue - blue_dist) * self.goal_blue.reward_distance,
        }
        self.last_dist_goal_red = red_dist
        self.last_dist_goal_blue = blue_dist
        red_done, blue_done = self.goal_achieved
        if red_done:
            reward['agent_0'] += self.goal_red.reward_goal
        if blue_done:
            reward['agent_1'] += self.goal_blue.reward_goal
        return reward

    def specific_reset(self):
        """No task-specific reset behavior."""

    def specific_step(self):
        """No task-specific per-step behavior."""

    def update_world(self):
        """Build a new goal position, maybe with resampling due to hazards."""
        self.build_goal_position()
        self.last_dist_goal_red = self.dist_goal_red()
        self.last_dist_goal_blue = self.dist_goal_blue()

    @property
    def goal_achieved(self):
        """Per-agent tuple of whether each goal has been reached."""
        # pylint: disable=no-member
        return (
            self.dist_goal_red() <= self.goal_red.size,
            self.dist_goal_blue() <= self.goal_blue.size,
        )
"""Register and make environments."""
from __future__ import annotations
import copy
from typing import Any
from gymnasium import Env, error, logger
from gymnasium.envs.registration import namespace # noqa: F401 # pylint: disable=unused-import
from gymnasium.envs.registration import spec # noqa: F401 # pylint: disable=unused-import
from gymnasium.envs.registration import EnvSpec, _check_metadata, _find_spec, load_env_creator
from gymnasium.envs.registration import register as gymnasium_register
from gymnasium.wrappers import HumanRendering, OrderEnforcing, RenderCollection
from gymnasium.wrappers.compatibility import EnvCompatibility
from safety_gymnasium.wrappers import SafeAutoResetWrapper, SafePassiveEnvChecker, SafeTimeLimit
# IDs of every environment registered through this module's `register`;
# `make` refuses string IDs that are not present here.
safe_registry = set()
def register(**kwargs):
    """Register an environment with Gymnasium and record its ID as safe.

    The ID is added to ``safe_registry`` so that this module's ``make`` can
    later verify the environment was registered through safety-gymnasium.
    """
    env_id = kwargs['id']
    safe_registry.add(env_id)
    gymnasium_register(**kwargs)
# pylint: disable-next=too-many-arguments,too-many-branches,too-many-statements,too-many-locals
def make(
    id: str | EnvSpec,  # pylint: disable=invalid-name,redefined-builtin
    max_episode_steps: int | None = None,
    autoreset: bool | None = None,
    apply_api_compatibility: bool | None = None,
    disable_env_checker: bool | None = None,
    **kwargs: Any,
) -> Env:
    """Creates an environment previously registered with :meth:`gymnasium.register` or a :class:`EnvSpec`.

    To find all available environments use ``gymnasium.envs.registry.keys()`` for all valid ids.

    Args:
        id: A string for the environment id or a :class:`EnvSpec`. Optionally if using a string,
            a module to import can be included, e.g. ``'module:Env-v0'``.
            This is equivalent to importing the module first to register the environment
            followed by making the environment.
        max_episode_steps: Maximum length of an episode, can override the registered
            :class:`EnvSpec` ``max_episode_steps``.
            The value is used by :class:`gymnasium.wrappers.TimeLimit`.
        autoreset: Whether to automatically reset the environment after each episode
            (:class:`gymnasium.wrappers.AutoResetWrapper`).
        apply_api_compatibility: Whether to wrap the environment with the
            :class:`gymnasium.wrappers.StepAPICompatibility` wrapper that
            converts the environment step from a done bool to return termination and truncation bools.
            By default, the argument is None in which the :class:`EnvSpec` ``apply_api_compatibility`` is used,
            otherwise this variable is used in favor.
        disable_env_checker: If to add :class:`gymnasium.wrappers.PassiveEnvChecker`, ``None`` will default to the
            :class:`EnvSpec` ``disable_env_checker`` value otherwise use this value will be used.
        kwargs: Additional arguments to pass to the environment constructor.

    Returns:
        An instance of the environment with wrappers applied.

    Raises:
        Error: If the ``id`` doesn't exist in the :attr:`registry`
    """
    if isinstance(id, EnvSpec):
        env_spec = id
        if not hasattr(env_spec, 'additional_wrappers'):
            # BUGFIX: the message previously was not an f-string (it printed the
            # literal text '{env_spec}') and was missing a space between parts.
            logger.warn(
                'The env spec passed to `make` does not have a `additional_wrappers`, '
                f'set it to an empty tuple. Env_spec={env_spec}',
            )
            env_spec.additional_wrappers = ()
    else:
        # For string id's, load the environment spec from the registry then make the environment spec
        assert isinstance(id, str)
        assert id in safe_registry, f'Environment {id} is not registered in safety-gymnasium.'

        # The environment name can include an unloaded module in "module:env_name" style
        env_spec = _find_spec(id)

    assert isinstance(env_spec, EnvSpec)

    # Update the env spec kwargs with the `make` kwargs
    env_spec_kwargs = copy.deepcopy(env_spec.kwargs)
    env_spec_kwargs.update(kwargs)

    # Load the environment creator
    if env_spec.entry_point is None:
        raise error.Error(f'{env_spec.id} registered but entry_point is not specified')
    if callable(env_spec.entry_point):
        env_creator = env_spec.entry_point
    else:
        # Assume it's a string
        env_creator = load_env_creator(env_spec.entry_point)

    # Determine if to use the rendering
    render_modes: list[str] | None = None
    if hasattr(env_creator, 'metadata'):
        _check_metadata(env_creator.metadata)
        render_modes = env_creator.metadata.get('render_modes')
    render_mode = env_spec_kwargs.get('render_mode')
    apply_human_rendering = False
    apply_render_collection = False

    # If mode is not valid, try applying HumanRendering/RenderCollection wrappers
    if render_mode is not None and render_modes is not None and render_mode not in render_modes:
        displayable_modes = {'rgb_array', 'rgb_array_list'}.intersection(render_modes)
        if render_mode == 'human' and len(displayable_modes) > 0:
            logger.warn(
                "You are trying to use 'human' rendering for an environment that doesn't natively support it. "
                'The HumanRendering wrapper is being applied to your environment.',
            )
            env_spec_kwargs['render_mode'] = displayable_modes.pop()
            apply_human_rendering = True
        elif render_mode.endswith('_list') and render_mode[: -len('_list')] in render_modes:
            env_spec_kwargs['render_mode'] = render_mode[: -len('_list')]
            apply_render_collection = True
        else:
            logger.warn(
                f'The environment is being initialised with render_mode={render_mode!r} '
                f'that is not in the possible render_modes ({render_modes}).',
            )

    if apply_api_compatibility or (
        apply_api_compatibility is None and env_spec.apply_api_compatibility
    ):
        # If we use the compatibility layer, we treat the render mode explicitly and don't pass it to the env creator
        render_mode = env_spec_kwargs.pop('render_mode', None)
    else:
        render_mode = None

    try:
        env = env_creator(**env_spec_kwargs)
    except TypeError as e:
        if (
            str(e).find("got an unexpected keyword argument 'render_mode'") >= 0
            and apply_human_rendering
        ):
            raise error.Error(
                f"You passed render_mode='human' although {env_spec.id} doesn't implement human-rendering natively. "
                'Gym tried to apply the HumanRendering wrapper but it looks like your environment is using the old '
                'rendering API, which is not supported by the HumanRendering wrapper.',
            ) from e
        # Bare raise preserves the original traceback (idiomatic re-raise).
        raise

    # Set the minimal env spec for the environment.
    env.unwrapped.spec = EnvSpec(
        id=env_spec.id,
        entry_point=env_spec.entry_point,
        reward_threshold=env_spec.reward_threshold,
        nondeterministic=env_spec.nondeterministic,
        max_episode_steps=None,
        order_enforce=False,
        autoreset=False,
        disable_env_checker=True,
        apply_api_compatibility=False,
        kwargs=env_spec_kwargs,
        additional_wrappers=(),
        vector_entry_point=env_spec.vector_entry_point,
    )

    # Check if pre-wrapped wrappers
    assert env.spec is not None
    num_prior_wrappers = len(env.spec.additional_wrappers)
    if env_spec.additional_wrappers[:num_prior_wrappers] != env.spec.additional_wrappers:
        for env_spec_wrapper_spec, recreated_wrapper_spec in zip(
            env_spec.additional_wrappers,
            env.spec.additional_wrappers,
        ):
            # BUGFIX: only raise for the pair that actually differs — the
            # original raised on the first zip pair unconditionally. Also
            # restored the missing space before 'the saved'.
            if env_spec_wrapper_spec != recreated_wrapper_spec:
                raise ValueError(
                    f"The environment's wrapper spec {recreated_wrapper_spec} is different from "
                    f'the saved `EnvSpec` additional wrapper {env_spec_wrapper_spec}',
                )

    # Add step API wrapper
    if apply_api_compatibility is True or (
        apply_api_compatibility is None and env_spec.apply_api_compatibility is True
    ):
        env = EnvCompatibility(env, render_mode)
    # Run the environment checker as the lowest level wrapper
    if disable_env_checker is False or (
        disable_env_checker is None and env_spec.disable_env_checker is False
    ):
        env = SafePassiveEnvChecker(env)
    # Add the order enforcing wrapper
    if env_spec.order_enforce:
        env = OrderEnforcing(env)
    # Add the time limit wrapper
    if max_episode_steps is not None:
        env = SafeTimeLimit(env, max_episode_steps)
    elif env_spec.max_episode_steps is not None:
        env = SafeTimeLimit(env, env_spec.max_episode_steps)
    # Add the auto-reset wrapper
    if autoreset is True or (autoreset is None and env_spec.autoreset is True):
        env = SafeAutoResetWrapper(env)

    for wrapper_spec in env_spec.additional_wrappers[num_prior_wrappers:]:
        if wrapper_spec.kwargs is None:
            raise ValueError(
                f'{wrapper_spec.name} wrapper does not inherit from'
                '`gymnasium.utils.RecordConstructorArgs`, therefore, the wrapper cannot be recreated.',
            )

        env = load_env_creator(wrapper_spec.entry_point)(env=env, **wrapper_spec.kwargs)

    # Add human rendering wrapper
    if apply_human_rendering:
        env = HumanRendering(env)
    elif apply_render_collection:
        env = RenderCollection(env)

    return env
"""A set of functions for passively checking environment implementations."""
import numpy as np
from gymnasium import error, logger
from gymnasium.utils.passive_env_checker import check_obs
def env_step_passive_checker(env, action):
    """A passive check for the environment step, investigating the returning data then returning the data unchanged.

    Accepts both the old-style 5-tuple ``(obs, reward, cost, done, info)``
    and the new-style 6-tuple ``(obs, reward, cost, terminated, truncated,
    info)`` produced by safety environments.
    """
    # We don't check the action as for some environments then out-of-bounds values can be given
    result = env.step(action)
    assert isinstance(
        result,
        tuple,
    ), f'Expects step result to be a tuple, actual type: {type(result)}'
    if len(result) == 5:
        logger.deprecation(
            (
                'Core environment is written in old step API which returns one bool instead of two.'
                ' It is recommended to rewrite the environment with new step API.'
            ),
        )
        obs, reward, cost, done, info = result

        if not isinstance(done, (bool, np.bool_)):
            logger.warn(f'Expects `done` signal to be a boolean, actual type: {type(done)}')
    elif len(result) == 6:
        obs, reward, cost, terminated, truncated, info = result

        # np.bool is actual python bool not np boolean type, therefore bool_ or bool8
        if not isinstance(terminated, (bool, np.bool_)):
            logger.warn(
                f'Expects `terminated` signal to be a boolean, actual type: {type(terminated)}',
            )

        if not isinstance(truncated, (bool, np.bool_)):
            logger.warn(
                f'Expects `truncated` signal to be a boolean, actual type: {type(truncated)}',
            )
    else:
        # BUGFIX: the message said 'four or five' but with the extra cost
        # element the accepted tuple lengths are five or six (checked above).
        raise error.Error(
            (
                'Expected `Env.step` to return a five or six element tuple, '
                f'actual number of elements returned: {len(result)}.'
            ),
        )

    check_obs(obs, env.observation_space, 'step')
    check_reward_cost(reward=reward, cost=cost)

    assert isinstance(
        info,
        dict,
    ), f'The `info` returned by `step()` must be a python dictionary, actual type: {type(info)}'

    return result
def check_reward_cost(reward, cost):
    """Check the type and the value of the reward and cost.

    Logs a warning (via gymnasium's logger) when either signal is not a real
    number, or is NaN/inf. The two signals previously had duplicated,
    branch-for-branch identical checks; they are now handled by one loop with
    byte-identical messages.
    """
    for name, value in (('reward', reward), ('cost', cost)):
        if not (np.issubdtype(type(value), np.integer) or np.issubdtype(type(value), np.floating)):
            logger.warn(
                (
                    f'The {name} returned by `step()` must be a float, int, np.integer or np.floating, '
                    f'actual type: {type(value)}'
                ),
            )
        else:
            if np.isnan(value):
                logger.warn(f'The {name} is a NaN value.')
            if np.isinf(value):
                logger.warn(f'The {name} is an inf value.')
"""Utils for task classes."""
import re
import mujoco
import numpy as np
def get_task_class_name(task_id):
    """Translate a task id such as ``'SafetyPointGoal1-v0'`` into a class name.

    The first two CamelCase words (library prefix and agent name) are
    stripped and the trailing difficulty digit becomes a ``Level`` suffix,
    e.g. ``'SafetyPointGoal1-v0'`` -> ``'GoalLevel1'``.
    """
    base = task_id.split('-')[0]
    task_words = re.findall('[A-Z][^A-Z]*', base)[2:]
    joined = ''.join(task_words)
    return f'{joined[:-1]}Level{joined[-1]}'
def quat2mat(quat):
    """Convert a quaternion to a 3x3 rotation matrix using mujoco."""
    # pylint: disable=invalid-name
    quat_arr = np.array(quat, dtype='float64')
    flat = np.zeros(9, dtype='float64')
    mujoco.mju_quat2Mat(flat, quat_arr)  # pylint: disable=no-member
    return flat.reshape((3, 3))
def theta2vec(theta):
    """Return the 3-D unit vector at angle ``theta`` (radians) around Z."""
    x_component = np.cos(theta)
    y_component = np.sin(theta)
    return np.array([x_component, y_component, 0.0])
def get_body_jacp(model, data, name, jacp=None):
    """Get a named body's positional Jacobian via mujoco.

    Args:
        model: The mujoco model.
        data: The mujoco data bound to ``model``.
        name: Name of the body whose Jacobian is requested.
        jacp: Optional preallocated (3, nv) output buffer; a fresh zero
            buffer is allocated when None.

    Returns:
        The (3, nv) positional Jacobian written by ``mj_jacBody``.
    """
    # Renamed from `id` to avoid shadowing the builtin; the redundant
    # `jacp_view` alias was also removed.
    body_id = model.body(name).id
    if jacp is None:
        jacp = np.zeros(3 * model.nv).reshape(3, model.nv)
    mujoco.mj_jacBody(model, data, jacp, None, body_id)  # pylint: disable=no-member
    return jacp
def get_body_xvelp(model, data, name):
    """Cartesian (linear) velocity of a named body: ``J(q) @ qvel``."""
    jacobian = get_body_jacp(model, data, name).reshape((3, model.nv))
    return jacobian @ data.qvel
def add_velocity_marker(viewer, pos, vel, cost, velocity_threshold):
    """Draw a sphere above the agent whose color encodes its current speed.

    The marker is dark red while the velocity cost is active, otherwise
    green fading toward black as ``vel`` approaches ``velocity_threshold``.
    """
    marker_pos = pos + np.array([0, 0, 0.6])
    if cost:
        rgba = np.array([0.5, 0, 0, 0.5])
    else:
        rgba = np.array([0.2, 0.8, 0.2, 0.5]) * (1 - vel / velocity_threshold)
    viewer.add_marker(
        pos=marker_pos,
        size=0.2 * np.ones(3),
        type=mujoco.mjtGeom.mjGEOM_SPHERE,  # pylint: disable=no-member
        rgba=rgba,
        label='',
    )
def clear_viewer(viewer):
    """Remove every marker and overlay currently attached to the viewer."""
    # pylint: disable=protected-access
    del viewer._markers[:]
    viewer._overlays.clear()
"""Keyboard viewer."""
import glfw
import imageio
import mujoco
import numpy as np
from gymnasium.envs.mujoco.mujoco_rendering import WindowViewer
class KeyboardViewer(WindowViewer):  # pylint: disable=too-many-instance-attributes
    """Keyboard viewer.

    Extends Gymnasium's :class:`WindowViewer` so the I/J/K/L keys are
    forwarded to a user-supplied callback (e.g. for manual agent control),
    while keeping the stock viewer shortcuts (camera, pause, speed, ...).
    """

    def __init__(self, model, data, custom_key_press_callback) -> None:
        """Store the extra key callback on top of the base viewer setup.

        Args:
            model: MuJoCo model to render.
            data: MuJoCo data bound to ``model``.
            custom_key_press_callback: Invoked as ``callback(key=..., action=...)``
                when one of the I/J/K/L keys is released.
        """
        super().__init__(model, data)
        self._custom_key_press_callback = custom_key_press_callback

    # NOTE(review): attributes such as _paused, _run_speed, _render_every_frame,
    # _image_path/_image_idx, _contacts, _hide_menu, _transparent come from
    # WindowViewer — confirm against the installed gymnasium version.
    # pylint: disable-next=too-many-arguments, too-many-branches
    def _key_callback(self, window, key, scancode, action, mods):
        """Callback for keyboard events."""
        # The elif-chain below only reacts on key release; other actions fall
        # through to the trailing plain `if` statements.
        if action != glfw.RELEASE:
            pass
        # Switch cameras
        elif key == glfw.KEY_TAB:
            self.cam.fixedcamid += 1
            self.cam.type = mujoco.mjtCamera.mjCAMERA_FIXED  # pylint: disable=no-member
            if self.cam.fixedcamid >= self.model.ncam:
                # Wrap around to the free camera after the last fixed camera.
                self.cam.fixedcamid = -1
                self.cam.type = mujoco.mjtCamera.mjCAMERA_FREE  # pylint: disable=no-member
        # Pause simulation
        elif key == glfw.KEY_SPACE and self._paused is not None:
            self._paused = not self._paused
        # Advances simulation by one step.
        elif key == glfw.KEY_RIGHT and self._paused is not None:
            self._advance_by_one_step = True
            self._paused = True
        # Slows down simulation
        elif key == glfw.KEY_S:
            self._run_speed /= 2.0
        # Speeds up simulation
        elif key == glfw.KEY_F:
            self._run_speed *= 2.0
        # Turn off / turn on rendering every frame.
        elif key == glfw.KEY_D:
            self._render_every_frame = not self._render_every_frame
        # Capture screenshot
        elif key == glfw.KEY_T:
            img = np.zeros(
                (
                    glfw.get_framebuffer_size(self.window)[1],
                    glfw.get_framebuffer_size(self.window)[0],
                    3,
                ),
                dtype=np.uint8,
            )
            mujoco.mjr_readPixels(img, None, self.viewport, self.con)  # pylint: disable=no-member
            # OpenGL's origin is bottom-left, hence the vertical flip.
            imageio.imwrite(self._image_path % self._image_idx, np.flipud(img))
            self._image_idx += 1
        # Display contact forces
        elif key == glfw.KEY_C:
            self._contacts = not self._contacts
            # pylint: disable=no-member
            self.vopt.flags[mujoco.mjtVisFlag.mjVIS_CONTACTPOINT] = self._contacts
            self.vopt.flags[mujoco.mjtVisFlag.mjVIS_CONTACTFORCE] = self._contacts
            # pylint: enable=no-member
        # Display coordinate frames
        elif key == glfw.KEY_E:
            self.vopt.frame = 1 - self.vopt.frame
        # Hide overlay menu
        elif key == glfw.KEY_H:
            self._hide_menu = not self._hide_menu
        # Make transparent
        elif key == glfw.KEY_R:
            self._transparent = not self._transparent
            if self._transparent:
                self.model.geom_rgba[:, 3] /= 5.0
            else:
                self.model.geom_rgba[:, 3] *= 5.0
        # Geom group visibility
        elif key in (glfw.KEY_0, glfw.KEY_1, glfw.KEY_2, glfw.KEY_3, glfw.KEY_4):
            self.vopt.geomgroup[key - glfw.KEY_0] ^= 1
        # NOTE(review): this is a separate `if`, so the custom callback fires
        # for every action (press/repeat/release), not just release — confirm
        # that is intended.
        if key in (glfw.KEY_I, glfw.KEY_J, glfw.KEY_K, glfw.KEY_L):
            self._custom_key_press_callback(key=key, action=action)
        # Quit
        if key == glfw.KEY_ESCAPE:
            print('Pressed ESC')
            print('Quitting.')
            glfw.destroy_window(self.window)
            glfw.terminate()
"""Random generator."""
from __future__ import annotations
import numpy as np
from safety_gymnasium.tasks.safe_multi_agent.utils.common_utils import ResamplingError
class RandomGenerator:
    r"""A random number generator that can be seeded and reset.

    Used to generate random numbers for placement of objects.
    And there is only one instance in a single environment which is in charge of all randomness.

    Methods:

    - :meth:`set_placements_info`: Set the placements information from task for each type of objects.
    - :meth:`set_random_seed`: Instantiate a :class:`np.random.RandomState` object using given seed.
    - :meth:`build_layout`: Try to sample within placement area of objects to find a layout.
    - :meth:`draw_placement`: Sample an (x,y) location, based on potential placement areas.
    - :meth:`sample_layout`: Sample a layout of all objects.
    - :meth:`sample_goal_position`: Sample a position for goal.
    - :meth:`constrain_placement`: Get constrained placement of objects considering keepout.
    - :meth:`generate_rots`: Generate rotations of objects.
    - :meth:`randn`: Sample a random number from a normal distribution.
    - :meth:`binomial`: Sample a random number from a binomial distribution.
    - :meth:`random_rot`: Sample a random rotation angle.
    - :meth:`choice`: Sample a random element from a list.
    - :meth:`uniform`: Sample a random number from a uniform distribution.

    Attributes:

    - :attr:`random_generator` (:class:`np.random.RandomState`): Random number generator.
    - :attr:`placements` (dict): Potential placement areas.
    - :attr:`placements_extents` (list): Extents of potential placement areas.
    - :attr:`placements_margin` (float): Margin of potential placement areas.
    - :attr:`layout` (Dict[str, dict]): Layout of objects which is generated by this class.

    Note:
        Information about placements is set by :meth:`set_placements_info` method in the instance of
        specific environment, and we just utilize these to generate randomness here.
    """

    def __init__(self) -> None:
        """Initialize the random number generator."""
        self.random_generator: np.random.RandomState = None  # pylint: disable=no-member
        self.placements: dict = None
        self.placements_extents: list = None
        self.placements_margin: float = None
        self.layout: dict[str, dict] = None

    def set_placements_info(
        self,
        placements: dict,
        placements_extents: list,
        placements_margin: float,
    ) -> None:
        """Set the placements information from task for each type of objects."""
        self.placements = placements
        self.placements_extents = placements_extents
        self.placements_margin = placements_margin

    def set_random_seed(self, seed: int) -> None:
        """Instantiate a :class:`np.random.RandomState` object using given seed."""
        self.random_generator = np.random.RandomState(seed)  # pylint: disable=no-member

    def build_layout(self) -> dict:
        """Try to Sample within placement area of objects to find a layout."""
        for _ in range(10000):
            if self.sample_layout():
                return self.layout
        raise ResamplingError('Failed to sample layout of objects')

    def draw_placement(self, placements: dict, keepout: float) -> np.ndarray:
        """Sample an (x,y) location, based on potential placement areas.

        Args:
            placements (dict): A list of (xmin, xmax, ymin, ymax) tuples that specify
                rectangles in the XY-plane where an object could be placed.
            keepout (float): Describes how much space an object is required to have
                around it, where that keepout space overlaps with the placement rectangle.

        Note:
            To sample an (x,y) pair, first randomly select which placement rectangle
            to sample from, where the probability of a rectangle is weighted by its
            area. If the rectangles are disjoint, there's an equal chance the (x,y)
            location will wind up anywhere in the placement space. If they overlap, then
            overlap areas are double-counted and will have higher density. This allows
            the user some flexibility in building placement distributions. Finally,
            randomly draw a uniform point within the selected rectangle.
        """
        if placements is None:
            choice = self.constrain_placement(self.placements_extents, keepout)
        else:
            # Draw from placements according to placeable area
            constrained = []
            for placement in placements:
                xmin, ymin, xmax, ymax = self.constrain_placement(placement, keepout)
                if xmin > xmax or ymin > ymax:
                    continue
                constrained.append((xmin, ymin, xmax, ymax))
            assert constrained, 'Failed to find any placements with satisfy keepout'
            if len(constrained) == 1:
                choice = constrained[0]
            else:
                # Weight each candidate rectangle by its (constrained) area.
                areas = [(x2 - x1) * (y2 - y1) for x1, y1, x2, y2 in constrained]
                probs = np.array(areas) / np.sum(areas)
                choice = constrained[self.random_generator.choice(len(constrained), p=probs)]
        xmin, ymin, xmax, ymax = choice
        return np.array(
            [self.random_generator.uniform(xmin, xmax), self.random_generator.uniform(ymin, ymax)],
        )

    def sample_layout(self) -> bool:
        """Sample once within placement area of objects to find a layout.

        returning ``True`` if successful, else ``False``.
        """

        # NOTE(review): `keepout` below is a free variable captured from the
        # enclosing loops; in the second (agent) loop it still holds the last
        # value from the first loop — confirm this is intended.
        def placement_is_valid(xy, layout):  # pylint: disable=invalid-name
            for other_name, other_xy in layout.items():
                other_keepout = self.placements[other_name][1]
                dist = np.sqrt(np.sum(np.square(xy - other_xy)))
                if dist < other_keepout + self.placements_margin + keepout:
                    return False
            return True

        layout = {}
        for name, (placements, keepout) in self.placements.items():
            conflicted = True
            for _ in range(100):
                # pylint: disable-next=invalid-name
                xy = self.draw_placement(placements, keepout)
                if placement_is_valid(xy, layout):
                    conflicted = False
                    break
            if conflicted:
                return False
            layout[name] = xy
        # Sample a second position for the agent (two-agent layout).
        # BUGFIX: reset `conflicted` before this loop; previously it carried
        # the False left over from the loop above, so 100 failed draws would
        # still be accepted as a valid position.
        conflicted = True
        for _ in range(100):
            # pylint: disable-next=invalid-name
            xy = self.draw_placement(self.placements['agent'][0], self.placements['agent'][1])
            if placement_is_valid(xy, layout):
                conflicted = False
                break
        if conflicted:
            return False
        layout['agent'] = [layout['agent'], xy]
        self.layout = layout
        return True

    def sample_goal_position(self) -> bool:
        """Sample a new goal position and return True, else False if sample rejected."""
        placements, keepout = self.placements['goal_red']
        goal_xy = self.draw_placement(placements, keepout)
        for other_name, other_xy in self.layout.items():
            other_keepout = self.placements[other_name][1]
            dist = np.sqrt(np.sum(np.square(goal_xy - other_xy)))
            if dist < other_keepout + self.placements_margin + keepout:
                return False
        self.layout['goal_red'] = goal_xy

        placements, keepout = self.placements['goal_blue']
        goal_xy = self.draw_placement(placements, keepout)
        for other_name, other_xy in self.layout.items():
            other_keepout = self.placements[other_name][1]
            dist = np.sqrt(np.sum(np.square(goal_xy - other_xy)))
            if dist < other_keepout + self.placements_margin + keepout:
                return False
        self.layout['goal_blue'] = goal_xy
        return True

    def constrain_placement(self, placement: dict, keepout: float) -> tuple[float]:
        """Helper function to constrain a single placement by the keepout radius."""
        xmin, ymin, xmax, ymax = placement
        return (xmin + keepout, ymin + keepout, xmax - keepout, ymax - keepout)

    def generate_rots(self, num: int = 1) -> list[float]:
        """Generate the rotations of the obstacle."""
        return [self.random_rot() for _ in range(num)]

    def randn(self, *args, **kwargs) -> np.ndarray:
        """Wrapper for :meth:`np.random.RandomState.randn`."""
        return self.random_generator.randn(*args, **kwargs)

    def binomial(self, *args, **kwargs) -> np.ndarray:
        """Wrapper for :meth:`np.random.RandomState.binomial`."""
        return self.random_generator.binomial(*args, **kwargs)

    def random_rot(self) -> float:
        """Use internal random state to get a random rotation in radians."""
        return self.random_generator.uniform(0, 2 * np.pi)

    def choice(self, *args, **kwargs) -> np.ndarray:
        """Wrapper for :meth:`np.random.RandomState.choice`."""
        return self.random_generator.choice(*args, **kwargs)

    def uniform(self, *args, **kwargs) -> np.ndarray:
        """Wrapper for :meth:`np.random.RandomState.uniform`."""
        return self.random_generator.uniform(*args, **kwargs)
"""Button task 0."""
import gymnasium
import mujoco
import numpy as np
from safety_gymnasium.assets.geoms import Buttons, Goal
from safety_gymnasium.bases.base_task import BaseTask
# pylint: disable-next=too-many-instance-attributes
class ButtonLevel0(BaseTask):
"""An agent must press a goal button."""
def __init__(self, config) -> None:
    """Initialize the task: four unconstrained buttons plus one goal marker.

    The goal geom is sized to twice the button size so the active button is
    visually wrapped by the translucent (alpha=0.1) goal sphere.
    """
    super().__init__(config=config)

    self.placements_conf.extents = [-1, -1, 1, 1]

    self._add_geoms(Buttons(num=4, is_constrained=False))
    self._add_geoms(Goal(size=self.buttons.size * 2, alpha=0.1))  # pylint: disable=no-member

    # Distance to the active goal button at the previous step; set by
    # update_world() before the first reward computation.
    self.last_dist_goal = None
def calculate_reward(self):
"""Determine reward depending on the agent and tasks."""
reward = 0.0
dist_goal = self.dist_goal()
# pylint: disable-next=no-member
reward += (self.last_dist_goal - dist_goal) * self.buttons.reward_distance
self.last_dist_goal = dist_goal
if self.goal_achieved:
reward += self.buttons.reward_goal # pylint: disable=no-member
return reward
def specific_reset(self):
"""Reset the buttons timer."""
self.buttons.timer = 0 # pylint: disable=no-member
def specific_step(self):
"""Clock the buttons timer."""
self.buttons.timer_tick() # pylint: disable=no-member
def update_world(self):
"""Build a new goal position, maybe with resampling due to hazards."""
# pylint: disable-next=no-member
assert self.buttons.num > 0, 'Must have at least one button.'
self.build_goal_button()
self.last_dist_goal = self.dist_goal()
self.buttons.reset_timer() # pylint: disable=no-member
def build_goal_button(self):
"""Pick a new goal button, maybe with resampling due to hazards."""
# pylint: disable-next=no-member
self.buttons.goal_button = self.random_generator.choice(self.buttons.num)
new_goal_pos = self.buttons.pos[self.buttons.goal_button] # pylint: disable=no-member
self.world_info.world_config_dict['geoms']['goal']['pos'][:2] = new_goal_pos[:2]
self._set_goal(new_goal_pos[:2])
mujoco.mj_forward(self.model, self.data) # pylint: disable=no-member
def obs(self):
"""Return the observation of our agent."""
# pylint: disable-next=no-member
mujoco.mj_forward(self.model, self.data) # Needed to get sensor's data correct
obs = {}
obs.update(self.agent.obs_sensor())
for obstacle in self._obstacles:
if obstacle.is_lidar_observed:
obs[obstacle.name + '_lidar'] = self._obs_lidar(obstacle.pos, obstacle.group)
if hasattr(obstacle, 'is_comp_observed') and obstacle.is_comp_observed:
obs[obstacle.name + '_comp'] = self._obs_compass(obstacle.pos)
if self.buttons.timer != 0: # pylint: disable=no-member
obs['buttons_lidar'] = np.zeros(self.lidar_conf.num_bins)
if self.observe_vision:
obs['vision'] = self._obs_vision()
assert self.obs_info.obs_space_dict.contains(
obs,
), f'Bad obs {obs} {self.obs_info.obs_space_dict}'
if self.observation_flatten:
obs = gymnasium.spaces.utils.flatten(self.obs_info.obs_space_dict, obs)
return obs
@property
def goal_achieved(self):
"""Whether the goal of task is achieved."""
for contact in self.data.contact[: self.data.ncon]:
geom_ids = [contact.geom1, contact.geom2]
geom_names = sorted([self.model.geom(g).name for g in geom_ids])
# pylint: disable-next=no-member
if any(n == f'button{self.buttons.goal_button}' for n in geom_names) and any(
n in self.agent.body_info.geom_names for n in geom_names
):
return True
        return False
"""Push level 0."""
import numpy as np
from safety_gymnasium.assets.free_geoms import PushBox
from safety_gymnasium.assets.geoms import Goal
from safety_gymnasium.bases.base_task import BaseTask
class PushLevel0(BaseTask):
    """An agent must push a box to a goal."""

    def __init__(self, config) -> None:
        """Initialize the task: a goal marker and a pushable box.

        Args:
            config: Task configuration forwarded to :class:`BaseTask`.
        """
        super().__init__(config=config)

        self.placements_conf.extents = [-1, -1, 1, 1]

        self._add_geoms(Goal())
        # null_dist=0 disables the "close enough to the box" reward gate below.
        self._add_free_geoms(PushBox(null_dist=0))

        # Distances cached from the previous step, used for reward shaping.
        self.last_dist_box = None
        self.last_box_goal = None
        self.last_dist_goal = None

    def calculate_reward(self):
        """Determine reward depending on the agent and tasks."""
        reward = 0.0
        # Distance from agent to box
        dist_box = self.dist_box()
        # Only reward approaching the box while farther than null_dist * size.
        # pylint: disable-next=no-member
        gate_dist_box_reward = self.last_dist_box > self.push_box.null_dist * self.push_box.size
        reward += (
            # pylint: disable-next=no-member
            (self.last_dist_box - dist_box)
            * self.push_box.reward_box_dist # pylint: disable=no-member
            * gate_dist_box_reward
        )
        self.last_dist_box = dist_box
        # Distance from box to goal
        dist_box_goal = self.dist_box_goal()
        # pylint: disable-next=no-member
        reward += (self.last_box_goal - dist_box_goal) * self.push_box.reward_box_goal
        self.last_box_goal = dist_box_goal
        if self.goal_achieved:
            reward += self.goal.reward_goal # pylint: disable=no-member
        return reward

    def specific_reset(self):
        """No task-specific reset behavior is needed for this level."""
        pass

    def specific_step(self):
        """No task-specific per-step behavior is needed for this level."""
        pass

    def update_world(self):
        """Build a new goal position, maybe with resampling due to hazards."""
        self.build_goal_position()
        # Re-cache distances so the next reward step measures fresh progress.
        self.last_dist_goal = self.dist_goal()
        self.last_dist_box = self.dist_box()
        self.last_box_goal = self.dist_box_goal()

    def dist_box(self):
        """Return the distance from the agent to the box (in XY plane only)."""
        # pylint: disable-next=no-member
        return np.sqrt(np.sum(np.square(self.push_box.pos - self.agent.pos)))

    def dist_box_goal(self):
        """Return the distance from the box to the goal XY position."""
        # pylint: disable-next=no-member
        return np.sqrt(np.sum(np.square(self.push_box.pos - self.goal.pos)))

    @property
    def goal_achieved(self):
        """Whether the goal of task is achieved."""
        # pylint: disable-next=no-member
        return self.dist_box_goal() <= self.goal.size
import gymnasium
import numpy as np
from gymnasium.wrappers.normalize import NormalizeObservation, NormalizeReward, RunningMeanStd
class SafeNormalizeObservation(NormalizeObservation):
    """Normalize observations while passing the safety cost signal through.

    Behaves like Gymnasium's ``NormalizeObservation`` wrapper, except that the
    wrapped environment's :meth:`step` returns a six-tuple including costs.
    """

    def step(self, action):
        """Step the environment and return the normalized observation."""
        obs, rews, costs, terminateds, truncateds, infos = self.env.step(action)
        if self.is_vector_env:
            obs = self.normalize(obs)
        else:
            obs = self.normalize(np.array([obs]))[0]
        if 'final_observation' in infos:
            # Also normalize the final observations stashed at episode ends.
            if self.is_vector_env:
                final_obs_slice = infos['_final_observation']
            else:
                final_obs_slice = slice(None)
            infos['original_final_observation'] = infos['final_observation']
            infos['final_observation'][final_obs_slice] = self.normalize(
                infos['final_observation'][final_obs_slice],
            )
        return obs, rews, costs, terminateds, truncateds, infos
class SafeNormalizeReward(NormalizeReward):
    """Normalize rewards while passing the safety cost signal through.

    Behaves like Gymnasium's ``NormalizeReward`` wrapper, except that the
    wrapped environment's :meth:`step` returns a six-tuple including costs.
    """

    def step(self, action):
        """Step the environment, normalizing the rewards returned."""
        obs, rews, costs, terminateds, truncateds, infos = self.env.step(action)
        batched = self.is_vector_env
        if not batched:
            rews = np.array([rews])
        # Update the discounted running return used by the variance estimator;
        # episode termination resets it.
        self.returns = self.returns * self.gamma * (1 - terminateds) + rews
        rews = self.normalize(rews)
        if not batched:
            rews = rews[0]
        return obs, rews, costs, terminateds, truncateds, infos
class SafeNormalizeCost(gymnasium.core.Wrapper, gymnasium.utils.RecordConstructorArgs):
    r"""This wrapper will normalize immediate costs s.t. their exponential moving average has a fixed variance.

    The exponential moving average will have variance :math:`(1 - \gamma)^2`.

    Note:
        The scaling depends on past trajectories and costs will not be scaled correctly if the
        wrapper was newly instantiated or the policy was changed recently.
    """

    def __init__(
        self,
        env: gymnasium.Env,
        gamma: float = 0.99,
        epsilon: float = 1e-8,
    ) -> None:
        """Initialize the cost-normalization wrapper.

        Args:
            env (env): The environment to apply the wrapper
            epsilon (float): A stability parameter
            gamma (float): The discount factor that is used in the exponential moving average.
        """
        gymnasium.utils.RecordConstructorArgs.__init__(self, gamma=gamma, epsilon=epsilon)
        gymnasium.Wrapper.__init__(self, env)
        self.num_envs = getattr(env, 'num_envs', 1)
        self.is_vector_env = getattr(env, 'is_vector_env', False)
        # Running statistics over the discounted cost returns.
        self.return_rms = RunningMeanStd(shape=())
        self.returns = np.zeros(self.num_envs)
        self.gamma = gamma
        self.epsilon = epsilon

    def step(self, action):
        """Steps through the environment, normalizing the costs returned."""
        obs, rews, costs, terminateds, truncateds, infos = self.env.step(action)
        if not self.is_vector_env:
            costs = np.array([costs])
        # Update the discounted cost return; episode termination resets it.
        self.returns = self.returns * self.gamma * (1 - terminateds) + costs
        costs = self.normalize(costs)
        if not self.is_vector_env:
            costs = costs[0]
        return obs, rews, costs, terminateds, truncateds, infos

    def normalize(self, costs):
        """Normalizes the costs with the running mean costs and their variance."""
        self.return_rms.update(self.returns)
        # Note: the original final line was corrupted with trailing metadata
        # residue; this restores the valid scaling expression.
        return costs / np.sqrt(self.return_rms.var + self.epsilon)
from gymnasium.wrappers.autoreset import AutoResetWrapper
class SafeAutoResetWrapper(AutoResetWrapper):
    """A class for providing an automatic reset functionality for gymnasium environments when calling :meth:`step`.

    On the step that ends an episode (``terminated`` or ``truncated``), the environment is reset
    immediately and :meth:`step` returns:

    - ``new_obs``: the first observation after calling ``self.env.reset()``.
    - the final reward, cost, terminated and truncated values of the episode that just ended.
    - ``info``: the info dict returned by ``reset()``, extended with ``"final_observation"`` and
      ``"final_info"`` holding the last observation and info of the finished episode.

    Warning: When using this wrapper to collect roll-outs, the observation returned alongside a
    terminal signal already belongs to the *next* episode. Retrieve the true final state via the
    "final_observation" key in the info dict. Make sure you know what you're doing if you use
    this wrapper!
    """  # pylint: disable=line-too-long

    def step(self, action):
        """Step the environment, automatically resetting it at episode end.

        Args:
            action: The action to apply to the wrapped environment.

        Returns:
            ``(obs, reward, cost, terminated, truncated, info)`` with the
            episode-boundary semantics described on the class.
        """
        obs, reward, cost, terminated, truncated, info = self.env.step(action)
        if terminated or truncated:
            new_obs, new_info = self.env.reset()
            # The reset info must not shadow the keys we are about to add.
            assert (
                'final_observation' not in new_info
            ), 'info dict cannot contain key "final_observation" '
            assert 'final_info' not in new_info, 'info dict cannot contain key "final_info" '
            new_info['final_observation'] = obs
            new_info['final_info'] = info
            obs = new_obs
            info = new_info
        return obs, reward, cost, terminated, truncated, info
from __future__ import annotations
import gymnasium
import numpy as np
class SafeUnsqueeze(gymnasium.Wrapper, gymnasium.utils.RecordConstructorArgs):
    """Unsqueeze the observation, reward, cost, terminated, truncated and info.

    Every array-like output gains a leading batch axis of size 1, so a single
    (non-vectorized) environment matches the batched interface agent-training
    code expects.

    Examples:
        >>> env = SafeUnsqueeze(env)
    """

    def __init__(self, env: gymnasium.Env) -> None:
        """Initialize an instance of :class:`SafeUnsqueeze`.

        Args:
            env: The (non-vectorized) environment to wrap.
        """
        gymnasium.utils.RecordConstructorArgs.__init__(self)
        gymnasium.Wrapper.__init__(self, env)
        self.is_vector_env = getattr(env, 'is_vector_env', False)
        assert not self.is_vector_env, 'UnsqueezeWrapper does not support vectorized environments'

    @staticmethod
    def _unsqueeze_info(info: dict) -> dict:
        """Expand every ndarray value of ``info`` in place along a new axis 0."""
        for key, value in info.items():
            if isinstance(value, np.ndarray):
                info[key] = np.expand_dims(value, axis=0)
        return info

    def step(self, action):
        """The vector information will be unsqueezed to (1, dim) for agent training.

        Args:
            action: The action to take.

        Returns:
            The unsqueezed environment :meth:`step`
        """
        obs, reward, cost, terminated, truncated, info = self.env.step(action)
        obs, reward, cost, terminated, truncated = (
            np.expand_dims(x, axis=0) for x in (obs, reward, cost, terminated, truncated)
        )
        return obs, reward, cost, terminated, truncated, self._unsqueeze_info(info)

    def reset(self, **kwargs):
        """Reset the environment and returns a new observation.

        .. note::
            The vector information will be unsqueezed to (1, dim) for agent training.

        Args:
            kwargs: Forwarded to the wrapped environment's ``reset`` (e.g. ``seed``).

        Returns:
            obs: The initial observation of the space with a leading batch axis.
            info: Some information logged by the environment, ndarrays unsqueezed.
        """
        obs, info = self.env.reset(**kwargs)
        return np.expand_dims(obs, axis=0), self._unsqueeze_info(info)
"""Wrapper for rescaling actions to within a max and min action."""
from __future__ import annotations
import gymnasium
import numpy as np
class SafeRescaleAction(gymnasium.ActionWrapper, gymnasium.utils.RecordConstructorArgs):
    """Affinely rescales the continuous action space of the environment to the range [min_action, max_action].

    The base environment :attr:`env` must have an action space of type :class:`spaces.Box`. If :attr:`min_action`
    or :attr:`max_action` are numpy arrays, the shape must match the shape of the environment's action space.

    Example:
        >>> import safety_gymnasium
        >>> from safety_gymnasium.wrappers import SafeRescaleAction
        >>> import numpy as np
        >>> env = safety_gymnasium.make("SafetyPointGoal1-v0")
        >>> env = SafeRescaleAction(env, min_action=-1, max_action=1)
    """

    def __init__(
        self,
        env: gymnasium.Env,
        min_action: float | np.ndarray,
        max_action: float | np.ndarray,
    ) -> None:
        """Initializes the :class:`SafeRescaleAction` wrapper.

        Args:
            env (Env): The environment to apply the wrapper
            min_action (float, int or np.ndarray): The min values for each action.
                This may be a numpy array or a scalar.
            max_action (float, int or np.ndarray): The max values for each action.
                This may be a numpy array or a scalar.
        """
        # The affine rescaling below is only meaningful for Box action spaces
        # (the class docstring already requires this).
        assert isinstance(
            env.action_space,
            gymnasium.spaces.Box,
        ), f'expected Box action space, got {type(env.action_space)}'
        gymnasium.utils.RecordConstructorArgs.__init__(
            self,
            min_action=min_action,
            max_action=max_action,
        )
        gymnasium.ActionWrapper.__init__(self, env)
        # Broadcast scalar bounds to the full action shape.
        self.min_action = (
            np.zeros(env.action_space.shape, dtype=env.action_space.dtype) + min_action
        )
        self.max_action = (
            np.zeros(env.action_space.shape, dtype=env.action_space.dtype) + max_action
        )
        # Guard against a zero-width range, which would divide by zero in action().
        assert np.all(
            self.min_action < self.max_action,
        ), f'min_action ({min_action}) must be strictly smaller than max_action ({max_action})'

    def action(self, action):
        """Rescales the action

        Rescales the action affinely from [:attr:`min_action`, :attr:`max_action`] to the action
        space of the base environment, :attr:`env`.

        Args:
            action: The action to rescale

        Returns:
            The rescaled action
        """
        low = self.env.action_space.low
        high = self.env.action_space.high
        return low + (high - low) * (
            (action - self.min_action) / (self.max_action - self.min_action)
        )
"""An async vector environment."""
from __future__ import annotations
import multiprocessing as mp
import sys
from copy import deepcopy
from multiprocessing import connection
from typing import Sequence
import gymnasium
import numpy as np
from gymnasium.error import NoAsyncCallError
from gymnasium.vector.async_vector_env import AsyncState, AsyncVectorEnv
from gymnasium.vector.utils import concatenate, write_to_shared_memory
from safety_gymnasium.vector.utils.tile_images import tile_images
__all__ = ['AsyncVectorEnv']
class SafetyAsyncVectorEnv(AsyncVectorEnv):
    """The async vectorized environment for Safety-Gymnasium."""

    # pylint: disable-next=too-many-arguments
    def __init__(
        self,
        env_fns: Sequence[callable],
        observation_space: gymnasium.Space | None = None,
        action_space: gymnasium.Space | None = None,
        shared_memory: bool = True,
        copy: bool = True,
        context: str | None = None,
        daemon: bool = True,
        worker: callable | None = None,
    ) -> None:
        """Initialize the async vector environment.

        Args:
            env_fns: A list of callable functions that create the environments.
            observation_space: The observation space of the environment.
            action_space: The action space of the environment.
            shared_memory: Whether to use shared memory for communication.
            copy: Whether to copy the observation.
            context: The context type of multiprocessing.
            daemon: Whether the workers are daemons.
            worker: The worker function.
        """
        # Pick the worker variant matching the shared-memory setting; an
        # explicitly supplied `worker` overrides the default choice.
        target = _worker_shared_memory if shared_memory else _worker
        target = worker or target
        super().__init__(
            env_fns,
            observation_space,
            action_space,
            shared_memory,
            copy,
            context,
            daemon,
            worker=target,
        )

    def get_images(self):
        """Get the images from the child environment."""
        self._assert_is_running()
        # Ask every worker to render, then collect the replies in pipe order.
        for pipe in self.parent_pipes:
            pipe.send(('render', None))
        return [pipe.recv() for pipe in self.parent_pipes]

    def render(self):
        """Render the environment."""
        # get the images.
        imgs = self.get_images()
        # tile the images.
        return tile_images(imgs)

    # pylint: disable-next=too-many-locals
    def step_wait(
        self,
        timeout: float | None = None,
    ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, list[dict]]:
        """Wait for the calls to :obj:`step` in each sub-environment to finish.

        Args:
            timeout: Number of seconds before the call to :meth:`step_wait`
                times out. If ``None``, the call to :meth:`step_wait` never times out.
        """
        # check if the environment is running.
        self._assert_is_running()
        # check if the state is waiting for step.
        if self._state != AsyncState.WAITING_STEP:
            raise NoAsyncCallError(
                'Calling `step_wait` without any prior call to `step_async`.',
                AsyncState.WAITING_STEP.value,
            )
        # wait for the results.
        if not self._poll(timeout):
            self._state = AsyncState.DEFAULT
            raise mp.TimeoutError(
                f'The call to `step_wait` has timed out after {timeout} second(s).',
            )
        # get the results.
        observations_list, rewards, costs, terminateds, truncateds, infos = [], [], [], [], [], {}
        successes = []
        for idx, pipe in enumerate(self.parent_pipes):
            result, success = pipe.recv()
            obs, rew, cost, terminated, truncated, info = result
            successes.append(success)
            observations_list.append(obs)
            rewards.append(rew)
            costs.append(cost)
            terminateds.append(terminated)
            truncateds.append(truncated)
            infos = self._add_info(infos, info, idx)
        # check if there are any errors.
        self._raise_if_errors(successes)
        self._state = AsyncState.DEFAULT
        # With shared memory enabled the workers already wrote observations
        # into place; otherwise assemble them from the piped results.
        if not self.shared_memory:
            self.observations = concatenate(
                self.single_observation_space,
                observations_list,
                self.observations,
            )
        return (
            deepcopy(self.observations) if self.copy else self.observations,
            np.array(rewards),
            np.array(costs),
            np.array(terminateds, dtype=np.bool_),
            np.array(truncateds, dtype=np.bool_),
            infos,
        )
# pylint: disable-next=too-many-arguments,too-many-locals,too-many-branches
def _worker(
index: int,
env_fn: callable,
pipe: connection.Connection,
parent_pipe: connection.Connection,
shared_memory: bool,
error_queue: mp.Queue,
) -> None:
"""The worker function for the async vector environment."""
assert shared_memory is None
env = env_fn()
parent_pipe.close()
try:
while True:
command, data = pipe.recv()
if command == 'reset':
observation, info = env.reset(**data)
pipe.send(((observation, info), True))
elif command == 'step':
(
observation,
reward,
cost,
terminated,
truncated,
info,
) = env.step(data)
if terminated or truncated:
old_observation, old_info = observation, info
observation, info = env.reset()
info['final_observation'] = old_observation
info['final_info'] = old_info
pipe.send(((observation, reward, cost, terminated, truncated, info), True))
elif command == 'seed':
env.seed(data)
pipe.send((None, True))
elif command == 'render':
pipe.send(env.render())
elif command == 'close':
pipe.send((None, True))
break
elif command == '_call':
name, args, kwargs = data
if name in ['reset', 'step', 'seed', 'close']:
raise ValueError(
(
f'Trying to call function `{name}` with `_call`. '
f'Use `{name}` directly instead.'
),
)
function = getattr(env, name)
if callable(function):
pipe.send((function(*args, **kwargs), True))
else:
pipe.send((function, True))
elif command == '_setattr':
name, value = data
setattr(env, name, value)
pipe.send((None, True))
elif command == '_check_spaces':
pipe.send(
(
(data[0] == env.observation_space, data[1] == env.action_space),
True,
),
)
else:
raise RuntimeError(
(
f'Received unknown command `{command}`. '
'Must be one of {`reset`, `step`, `seed`, `close`, `render`, `_call`, '
'`_setattr`, `_check_spaces`}.'
),
)
# pylint: disable-next=broad-except
except (KeyboardInterrupt, Exception):
error_queue.put((index,) + sys.exc_info()[:2])
pipe.send((None, False))
finally:
env.close()
# pylint: disable-next=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
def _worker_shared_memory(
index: int,
env_fn: callable,
pipe: connection.Connection,
parent_pipe: connection.Connection,
shared_memory: bool,
error_queue: mp.Queue,
) -> None:
"""The shared memory version of worker function for the async vector environment."""
assert shared_memory is not None
env = env_fn()
observation_space = env.observation_space
parent_pipe.close()
try:
while True:
command, data = pipe.recv()
if command == 'reset':
observation, info = env.reset(**data)
write_to_shared_memory(observation_space, index, observation, shared_memory)
pipe.send(((None, info), True))
elif command == 'step':
(
observation,
reward,
cost,
terminated,
truncated,
info,
) = env.step(data)
if terminated or truncated:
old_observation, old_info = observation, info
observation, info = env.reset()
info['final_observation'] = old_observation
info['final_info'] = old_info
write_to_shared_memory(observation_space, index, observation, shared_memory)
pipe.send(((None, reward, cost, terminated, truncated, info), True))
elif command == 'render':
pipe.send(env.render())
elif command == 'seed':
env.seed(data)
pipe.send((None, True))
elif command == 'close':
pipe.send((None, True))
break
elif command == '_call':
name, args, kwargs = data
if name in ['reset', 'step', 'seed', 'close']:
raise ValueError(
(
f'Trying to call function `{name}` with `_call`. '
f'Use `{name}` directly instead.'
),
)
function = getattr(env, name)
if callable(function):
pipe.send((function(*args, **kwargs), True))
else:
pipe.send((function, True))
elif command == '_setattr':
name, value = data
setattr(env, name, value)
pipe.send((None, True))
elif command == '_check_spaces':
pipe.send(((data[0] == observation_space, data[1] == env.action_space), True))
else:
raise RuntimeError(
(
f'Received unknown command `{command}`. '
'Must be one of {`reset`, `step`, `seed`, `close`, `render`, `_call`, '
'`_setattr`, `_check_spaces`}.'
),
)
# pylint: disable-next=broad-except
except (KeyboardInterrupt, Exception):
error_queue.put((index,) + sys.exc_info()[:2])
pipe.send((None, False))
finally:
        env.close()
"""The sync vectorized environment."""
from __future__ import annotations
from copy import deepcopy
from typing import Callable, Iterator
import numpy as np
from gymnasium import Env
from gymnasium.spaces import Space
from gymnasium.vector.sync_vector_env import SyncVectorEnv
from gymnasium.vector.utils import concatenate
from safety_gymnasium.vector.utils.tile_images import tile_images
__all__ = ['SafetySyncVectorEnv']
class SafetySyncVectorEnv(SyncVectorEnv):
    """Vectored safe environment that serially runs multiple safe environments."""

    def __init__(
        self,
        env_fns: Iterator[Callable[[], Env]],
        observation_space: Space | None = None,
        action_space: Space | None = None,
        copy: bool = True,
    ) -> None:
        """Initializes the vectorized safe environment."""
        super().__init__(env_fns, observation_space, action_space, copy)
        # Per-environment cost buffer, parallel to the base class's reward buffer.
        self._costs = np.zeros((self.num_envs,), dtype=np.float64)

    def render(self) -> np.ndarray:
        """Render the environment."""
        # get the images.
        imgs = self.get_images()
        # tile the images.
        return tile_images(imgs)

    def step_wait(
        self,
    ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, list[dict]]:
        """Steps through each of the environments returning the batched results."""
        observations, infos = [], {}
        for i, (env, action) in enumerate(zip(self.envs, self._actions)):
            (
                observation,
                self._rewards[i],
                self._costs[i],
                self._terminateds[i],
                self._truncateds[i],
                info,
            ) = env.step(action)
            # Auto-reset finished episodes; stash the final transition in info.
            if self._terminateds[i] or self._truncateds[i]:
                old_observation, old_info = observation, info
                observation, info = env.reset()
                info['final_observation'] = old_observation
                info['final_info'] = old_info
            observations.append(observation)
            infos = self._add_info(infos, info, i)
        self.observations = concatenate(
            self.single_observation_space,
            observations,
            self.observations,
        )
        return (
            deepcopy(self.observations) if self.copy else self.observations,
            np.copy(self._rewards),
            np.copy(self._costs),
            np.copy(self._terminateds),
            np.copy(self._truncateds),
            infos,
        )

    def get_images(self) -> list[np.ndarray]:
        """Get images from child environments."""
        # NOTE(review): passing 'rgb_array' positionally follows the legacy render
        # API; confirm the pinned gymnasium version still accepts a mode argument.
        return [env.render('rgb_array') for env in self.envs]
"""Register and make environments."""
from __future__ import annotations
import copy
from typing import Any
from gymnasium import Env, error, logger
from gymnasium.envs.registration import namespace # noqa: F401 # pylint: disable=unused-import
from gymnasium.envs.registration import spec # noqa: F401 # pylint: disable=unused-import
from gymnasium.envs.registration import EnvSpec, _check_metadata, _find_spec, load_env_creator
from gymnasium.envs.registration import register as gymnasium_register
from gymnasium.wrappers import HumanRendering, OrderEnforcing, RenderCollection
from gymnasium.wrappers.compatibility import EnvCompatibility
from safety_gymnasium.wrappers import SafeAutoResetWrapper, SafePassiveEnvChecker, SafeTimeLimit
safe_registry = set()
def register(**kwargs):
    """Record the environment id in the safe registry, then register it with Gymnasium."""
    env_id = kwargs['id']
    safe_registry.add(env_id)
    gymnasium_register(**kwargs)
# pylint: disable-next=too-many-arguments,too-many-branches,too-many-statements,too-many-locals
def make(
    id: str | EnvSpec, # pylint: disable=invalid-name,redefined-builtin
    max_episode_steps: int | None = None,
    autoreset: bool | None = None,
    apply_api_compatibility: bool | None = None,
    disable_env_checker: bool | None = None,
    **kwargs: Any,
) -> Env:
    """Creates an environment previously registered with :meth:`gymnasium.register` or a :class:`EnvSpec`.

    To find all available environments use ``gymnasium.envs.registry.keys()`` for all valid ids.

    Args:
        id: A string for the environment id or a :class:`EnvSpec`. Optionally if using a string,
            a module to import can be included, e.g. ``'module:Env-v0'``.
            This is equivalent to importing the module first to register the environment
            followed by making the environment.
        max_episode_steps: Maximum length of an episode, can override the registered
            :class:`EnvSpec` ``max_episode_steps``.
            The value is used by :class:`gymnasium.wrappers.TimeLimit`.
        autoreset: Whether to automatically reset the environment after each episode
            (:class:`gymnasium.wrappers.AutoResetWrapper`).
        apply_api_compatibility: Whether to wrap the environment with the
            :class:`gymnasium.wrappers.StepAPICompatibility` wrapper that
            converts the environment step from a done bool to return termination and truncation bools.
            By default, the argument is None in which the :class:`EnvSpec` ``apply_api_compatibility`` is used,
            otherwise this variable is used in favor.
        disable_env_checker: If to add :class:`gymnasium.wrappers.PassiveEnvChecker`, ``None`` will default to the
            :class:`EnvSpec` ``disable_env_checker`` value otherwise use this value will be used.
        kwargs: Additional arguments to pass to the environment constructor.

    Returns:
        An instance of the environment with wrappers applied.

    Raises:
        Error: If the ``id`` doesn't exist in the :attr:`registry`
    """
    if isinstance(id, EnvSpec):
        env_spec = id
        if not hasattr(env_spec, 'additional_wrappers'):
            # NOTE(review): the message below is a plain string, not an
            # f-string, so {env_spec} is never interpolated — confirm intent.
            logger.warn(
                'The env spec passed to `make` does not have a `additional_wrappers`,'
                'set it to an empty tuple. Env_spec={env_spec}',
            )
            env_spec.additional_wrappers = ()
    else:
        # For string id's, load the environment spec from the registry then make the environment spec
        assert isinstance(id, str)
        # Only ids registered through safety_gymnasium's own register() are allowed.
        assert id in safe_registry, f'Environment {id} is not registered in safety-gymnasium.'
        # The environment name can include an unloaded module in "module:env_name" style
        env_spec = _find_spec(id)
    assert isinstance(env_spec, EnvSpec)
    # Update the env spec kwargs with the `make` kwargs
    env_spec_kwargs = copy.deepcopy(env_spec.kwargs)
    env_spec_kwargs.update(kwargs)
    # Load the environment creator
    if env_spec.entry_point is None:
        raise error.Error(f'{env_spec.id} registered but entry_point is not specified')
    if callable(env_spec.entry_point):
        env_creator = env_spec.entry_point
    else:
        # Assume it's a string
        env_creator = load_env_creator(env_spec.entry_point)
    # Determine if to use the rendering
    render_modes: list[str] | None = None
    if hasattr(env_creator, 'metadata'):
        _check_metadata(env_creator.metadata)
        render_modes = env_creator.metadata.get('render_modes')
    render_mode = env_spec_kwargs.get('render_mode')
    apply_human_rendering = False
    apply_render_collection = False
    # If mode is not valid, try applying HumanRendering/RenderCollection wrappers
    if render_mode is not None and render_modes is not None and render_mode not in render_modes:
        displayable_modes = {'rgb_array', 'rgb_array_list'}.intersection(render_modes)
        if render_mode == 'human' and len(displayable_modes) > 0:
            logger.warn(
                "You are trying to use 'human' rendering for an environment that doesn't natively support it. "
                'The HumanRendering wrapper is being applied to your environment.',
            )
            env_spec_kwargs['render_mode'] = displayable_modes.pop()
            apply_human_rendering = True
        elif render_mode.endswith('_list') and render_mode[: -len('_list')] in render_modes:
            env_spec_kwargs['render_mode'] = render_mode[: -len('_list')]
            apply_render_collection = True
        else:
            logger.warn(
                f'The environment is being initialised with render_mode={render_mode!r} '
                f'that is not in the possible render_modes ({render_modes}).',
            )
    if apply_api_compatibility or (
        apply_api_compatibility is None and env_spec.apply_api_compatibility
    ):
        # If we use the compatibility layer, we treat the render mode explicitly and don't pass it to the env creator
        render_mode = env_spec_kwargs.pop('render_mode', None)
    else:
        render_mode = None
    try:
        env = env_creator(**env_spec_kwargs)
    except TypeError as e:
        if (
            str(e).find("got an unexpected keyword argument 'render_mode'") >= 0
            and apply_human_rendering
        ):
            raise error.Error(
                f"You passed render_mode='human' although {env_spec.id} doesn't implement human-rendering natively. "
                'Gym tried to apply the HumanRendering wrapper but it looks like your environment is using the old '
                'rendering API, which is not supported by the HumanRendering wrapper.',
            ) from e
        raise e
    # Set the minimal env spec for the environment.
    env.unwrapped.spec = EnvSpec(
        id=env_spec.id,
        entry_point=env_spec.entry_point,
        reward_threshold=env_spec.reward_threshold,
        nondeterministic=env_spec.nondeterministic,
        max_episode_steps=None,
        order_enforce=False,
        autoreset=False,
        disable_env_checker=True,
        apply_api_compatibility=False,
        kwargs=env_spec_kwargs,
        additional_wrappers=(),
        vector_entry_point=env_spec.vector_entry_point,
    )
    # Check if pre-wrapped wrappers
    assert env.spec is not None
    num_prior_wrappers = len(env.spec.additional_wrappers)
    if env_spec.additional_wrappers[:num_prior_wrappers] != env.spec.additional_wrappers:
        for env_spec_wrapper_spec, recreated_wrapper_spec in zip(
            env_spec.additional_wrappers,
            env.spec.additional_wrappers,
        ):
            raise ValueError(
                f"The environment's wrapper spec {recreated_wrapper_spec} is different from"
                f'the saved `EnvSpec` additional wrapper {env_spec_wrapper_spec}',
            )
    # Add step API wrapper
    if apply_api_compatibility is True or (
        apply_api_compatibility is None and env_spec.apply_api_compatibility is True
    ):
        env = EnvCompatibility(env, render_mode)
    # Run the environment checker as the lowest level wrapper
    if disable_env_checker is False or (
        disable_env_checker is None and env_spec.disable_env_checker is False
    ):
        env = SafePassiveEnvChecker(env)
    # Add the order enforcing wrapper
    if env_spec.order_enforce:
        env = OrderEnforcing(env)
    # Add the time limit wrapper
    if max_episode_steps is not None:
        env = SafeTimeLimit(env, max_episode_steps)
    elif env_spec.max_episode_steps is not None:
        env = SafeTimeLimit(env, env_spec.max_episode_steps)
    # Add the auto-reset wrapper
    if autoreset is True or (autoreset is None and env_spec.autoreset is True):
        env = SafeAutoResetWrapper(env)
    # Re-apply any additional wrappers recorded on the spec beyond those the
    # creator already applied.
    for wrapper_spec in env_spec.additional_wrappers[num_prior_wrappers:]:
        if wrapper_spec.kwargs is None:
            raise ValueError(
                f'{wrapper_spec.name} wrapper does not inherit from'
                '`gymnasium.utils.RecordConstructorArgs`, therefore, the wrapper cannot be recreated.',
            )
        env = load_env_creator(wrapper_spec.entry_point)(env=env, **wrapper_spec.kwargs)
    # Add human rendering wrapper
    if apply_human_rendering:
        env = HumanRendering(env)
    elif apply_render_collection:
        env = RenderCollection(env)
    return env
"""A set of functions for passively checking environment implementations."""
import numpy as np
from gymnasium import error, logger
from gymnasium.utils.passive_env_checker import check_obs
def env_step_passive_checker(env, action):
    """A passive check for the environment step, investigating the returning data then returning the data unchanged.

    Args:
        env: The environment whose ``step`` result is being validated.
        action: Action forwarded verbatim to ``env.step`` (deliberately unchecked).

    Returns:
        The unchanged result tuple from ``env.step``.
    """
    # We don't check the action as for some environments then out-of-bounds values can be given
    result = env.step(action)
    assert isinstance(
        result,
        tuple,
    ), f'Expects step result to be a tuple, actual type: {type(result)}'
    if len(result) == 5:
        logger.deprecation(
            (
                'Core environment is written in old step API which returns one bool instead of two.'
                ' It is recommended to rewrite the environment with new step API.'
            ),
        )
        obs, reward, cost, done, info = result
        if not isinstance(done, (bool, np.bool_)):
            logger.warn(f'Expects `done` signal to be a boolean, actual type: {type(done)}')
    elif len(result) == 6:
        obs, reward, cost, terminated, truncated, info = result
        # np.bool is actual python bool not np boolean type, therefore bool_ or bool8
        if not isinstance(terminated, (bool, np.bool_)):
            logger.warn(
                f'Expects `terminated` signal to be a boolean, actual type: {type(terminated)}',
            )
        if not isinstance(truncated, (bool, np.bool_)):
            logger.warn(
                f'Expects `truncated` signal to be a boolean, actual type: {type(truncated)}',
            )
    else:
        # The accepted tuple lengths are 5 (old API) and 6 (new API); the
        # message previously said "four or five", contradicting the checks above.
        raise error.Error(
            (
                'Expected `Env.step` to return a five or six element tuple, '
                f'actual number of elements returned: {len(result)}.'
            ),
        )
    check_obs(obs, env.observation_space, 'step')
    check_reward_cost(reward=reward, cost=cost)
    assert isinstance(
        info,
        dict,
    ), f'The `info` returned by `step()` must be a python dictionary, actual type: {type(info)}'
    return result
def check_reward_cost(reward, cost):
    """Check out the type and the value of the reward and cost.

    Warns when either value is not a numeric scalar, or is NaN/inf.
    """
    # Reward is validated first, then cost — same messages as before.
    for name, value in (('reward', reward), ('cost', cost)):
        is_numeric = np.issubdtype(type(value), np.integer) or np.issubdtype(
            type(value), np.floating
        )
        if not is_numeric:
            logger.warn(
                (
                    f'The {name} returned by `step()` must be a float, int, np.integer or np.floating, '
                    f'actual type: {type(value)}'
                ),
            )
            continue
        if np.isnan(value):
            logger.warn(f'The {name} is a NaN value.')
        if np.isinf(value):
            logger.warn(f'The {name} is an inf value.')
"""Utils for task classes."""
import re
import mujoco
import numpy as np
def get_task_class_name(task_id):
    """Help to translate task_id into task_class_name.

    Splits the id's camel-case stem, drops the first two words (e.g. the
    'Safety<Agent>' prefix) and rewrites the trailing digit as 'Level<digit>'.
    """
    stem = task_id.split('-')[0]
    words = re.findall('[A-Z][^A-Z]*', stem)
    task_with_level = ''.join(words[2:])
    # The last character of the remaining name is the difficulty level digit.
    return f'{task_with_level[:-1]}Level{task_with_level[-1]}'
def quat2mat(quat):
    """Convert Quaternion to a 3x3 Rotation Matrix using mujoco."""
    # pylint: disable=invalid-name
    quaternion = np.array(quat, dtype='float64')
    flat_matrix = np.zeros(9, dtype='float64')
    # mujoco writes the row-major 3x3 rotation into the 9-element buffer.
    mujoco.mju_quat2Mat(flat_matrix, quaternion)  # pylint: disable=no-member
    return flat_matrix.reshape((3, 3))
def theta2vec(theta):
    """Convert an angle (in radians) to a unit vector in that angle around Z"""
    cos_component = np.cos(theta)
    sin_component = np.sin(theta)
    # Planar unit vector: zero Z component.
    return np.array([cos_component, sin_component, 0.0])
def get_body_jacp(model, data, name, jacp=None):
    """Get specific body's position Jacobian via mujoco.

    Args:
        model: The mujoco model object.
        data: The mujoco data object.
        name: Name of the body whose Jacobian is requested.
        jacp: Optional pre-allocated (3, nv) output buffer; allocated when None.

    Returns:
        The (3, nv) position Jacobian written by ``mj_jacBody``.
    """
    # Renamed from `id` to avoid shadowing the builtin; the redundant
    # `jacp_view` alias was dropped — mj_jacBody writes into `jacp` directly.
    body_id = model.body(name).id
    if jacp is None:
        jacp = np.zeros(3 * model.nv).reshape(3, model.nv)
    mujoco.mj_jacBody(model, data, jacp, None, body_id)  # pylint: disable=no-member
    return jacp
def get_body_xvelp(model, data, name):
    """Get specific body's Cartesian velocity."""
    # Velocity = position Jacobian @ generalized velocities.
    jacobian = get_body_jacp(model, data, name).reshape((3, model.nv))
    return np.dot(jacobian, data.qvel)
def add_velocity_marker(viewer, pos, vel, cost, velocity_threshold):
    """Add a marker to the viewer to indicate the velocity of the agent."""
    marker_pos = pos + np.array([0, 0, 0.6])
    if cost:
        # Any incurred cost shows the dark-red "unsafe" color.
        color = np.array([0.5, 0, 0, 0.5])
    else:
        # Fade the green "safe" color (alpha included) as velocity
        # approaches the threshold.
        vel_ratio = vel / velocity_threshold
        color = np.array([0.2, 0.8, 0.2, 0.5]) * (1 - vel_ratio)
    viewer.add_marker(
        pos=marker_pos,
        size=0.2 * np.ones(3),
        type=mujoco.mjtGeom.mjGEOM_SPHERE,  # pylint: disable=no-member
        rgba=color,
        label='',
    )
def clear_viewer(viewer):
    """Clear the viewer's all markers and overlays."""
    # pylint: disable=protected-access
    # In-place clear so any external references to these containers stay valid.
    del viewer._markers[:]
    viewer._overlays.clear()
"""Utils for common usage."""
from __future__ import annotations
import re
import numpy as np
import xmltodict
def quat2zalign(quat):
    """From quaternion, extract z_{ground} dot z_{body}"""
    # z_{body} expressed in the ground frame from quaternion [a, b, c, d] is
    # [2bd + 2ac, 2cd - 2ab, a**2 - b**2 - c**2 + d**2]; dotting it with
    # z_{ground} = [0, 0, 1] leaves only the last component.
    w, x, y, z = quat  # pylint: disable=invalid-name
    return w**2 - x**2 - y**2 + z**2
def convert(value):
    """Convert a value into a string for mujoco XML."""
    if not isinstance(value, (int, float, str)):
        # Numpy arrays and lists become space-separated scalar strings.
        return ' '.join(str(item) for item in np.asarray(value))
    return str(value)
def rot2quat(theta):
    """Get a quaternion rotated only about the Z axis."""
    half_angle = theta / 2
    # Axis-angle form with axis (0, 0, 1): [cos(t/2), 0, 0, sin(t/2)].
    return np.array([np.cos(half_angle), 0, 0, np.sin(half_angle)], dtype='float64')
def camel_to_snake(name):
    """Convert a camel case name to snake case."""
    # First pass separates an uppercase word followed by a capitalized word;
    # second pass separates a lowercase/digit from a following capital.
    partially_split = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    fully_split = re.sub('([a-z0-9])([A-Z])', r'\1_\2', partially_split)
    return fully_split.lower()
def build_xml_from_dict(data):
    """Build an XML string from a dictionary then parse it with xmltodict."""
    # Keys handled specially below are excluded from the <body> attributes.
    special_keys = ('geom', 'geoms', 'freejoint')
    body_attrs = ' '.join(
        f'{key}="{convert(val)}"' for key, val in data.items() if key not in special_keys
    )

    if data.get('geom') or data.get('geoms'):
        geom_dicts = data.get('geoms', [data.get('geom')])
    else:
        geom_dicts = []
    geoms_xml = ''
    for geom in geom_dicts:
        attrs = ' '.join(f'{key}="{convert(val)}"' for key, val in geom.items())
        geoms_xml += f'<geom {attrs}/>'

    freejoint = data.get('freejoint')
    freejoint_xml = f'<freejoint name="{freejoint}"/>' if freejoint else ''

    return xmltodict.parse(f'<body {body_attrs}>{freejoint_xml}{geoms_xml}</body>')
class ResamplingError(AssertionError):
    """Raised when we fail to sample a valid distribution of objects or goals.

    Inherits from :class:`AssertionError`.
    """
class MujocoException(Exception):
    """Raised when mujoco raises an exception during simulation."""
"""Keyboard viewer."""
import glfw
import imageio
import mujoco
import numpy as np
from gymnasium.envs.mujoco.mujoco_rendering import WindowViewer
class KeyboardViewer(WindowViewer):  # pylint: disable=too-many-instance-attributes
    """Window viewer with extended keyboard handling.

    In addition to the standard viewer shortcuts, the I/J/K/L keys are
    forwarded to a user-supplied callback.
    """

    def __init__(self, model, data, custom_key_press_callback) -> None:
        """Initialize the viewer.

        Args:
            model: The mujoco model to render.
            data: The mujoco data instance backing the simulation.
            custom_key_press_callback: Callable invoked with ``key`` and
                ``action`` keyword arguments when I, J, K or L is pressed.
        """
        super().__init__(model, data)
        self._custom_key_press_callback = custom_key_press_callback

    # pylint: disable-next=too-many-arguments, too-many-branches
    def _key_callback(self, window, key, scancode, action, mods):
        """Callback for keyboard events."""
        # The elif chain below only runs once the key is RELEASED;
        # presses and repeats fall through to the first branch and do nothing.
        if action != glfw.RELEASE:
            pass
        # Switch cameras
        elif key == glfw.KEY_TAB:
            self.cam.fixedcamid += 1
            self.cam.type = mujoco.mjtCamera.mjCAMERA_FIXED  # pylint: disable=no-member
            # Wrap around to the free camera after the last fixed camera.
            if self.cam.fixedcamid >= self.model.ncam:
                self.cam.fixedcamid = -1
                self.cam.type = mujoco.mjtCamera.mjCAMERA_FREE  # pylint: disable=no-member
        # Pause simulation
        elif key == glfw.KEY_SPACE and self._paused is not None:
            self._paused = not self._paused
        # Advances simulation by one step.
        elif key == glfw.KEY_RIGHT and self._paused is not None:
            self._advance_by_one_step = True
            self._paused = True
        # Slows down simulation
        elif key == glfw.KEY_S:
            self._run_speed /= 2.0
        # Speeds up simulation
        elif key == glfw.KEY_F:
            self._run_speed *= 2.0
        # Turn off / turn on rendering every frame.
        elif key == glfw.KEY_D:
            self._render_every_frame = not self._render_every_frame
        # Capture screenshot
        elif key == glfw.KEY_T:
            # Framebuffer size is (width, height); the image buffer is (height, width, 3).
            img = np.zeros(
                (
                    glfw.get_framebuffer_size(self.window)[1],
                    glfw.get_framebuffer_size(self.window)[0],
                    3,
                ),
                dtype=np.uint8,
            )
            mujoco.mjr_readPixels(img, None, self.viewport, self.con)  # pylint: disable=no-member
            # OpenGL rows come bottom-up, so flip vertically before saving.
            imageio.imwrite(self._image_path % self._image_idx, np.flipud(img))
            self._image_idx += 1
        # Display contact forces
        elif key == glfw.KEY_C:
            self._contacts = not self._contacts
            # pylint: disable=no-member
            self.vopt.flags[mujoco.mjtVisFlag.mjVIS_CONTACTPOINT] = self._contacts
            self.vopt.flags[mujoco.mjtVisFlag.mjVIS_CONTACTFORCE] = self._contacts
            # pylint: enable=no-member
        # Display coordinate frames
        elif key == glfw.KEY_E:
            self.vopt.frame = 1 - self.vopt.frame
        # Hide overlay menu
        elif key == glfw.KEY_H:
            self._hide_menu = not self._hide_menu
        # Make transparent
        elif key == glfw.KEY_R:
            self._transparent = not self._transparent
            if self._transparent:
                self.model.geom_rgba[:, 3] /= 5.0
            else:
                self.model.geom_rgba[:, 3] *= 5.0
        # Geom group visibility
        elif key in (glfw.KEY_0, glfw.KEY_1, glfw.KEY_2, glfw.KEY_3, glfw.KEY_4):
            self.vopt.geomgroup[key - glfw.KEY_0] ^= 1
        # NOTE(review): unlike the elif chain above, this check is NOT gated on
        # glfw.RELEASE, so the callback fires on press/repeat too — confirm intended.
        if key in (glfw.KEY_I, glfw.KEY_J, glfw.KEY_K, glfw.KEY_L):
            self._custom_key_press_callback(key=key, action=action)
        # Quit (also fires regardless of press/release — see note above).
        if key == glfw.KEY_ESCAPE:
            print('Pressed ESC')
            print('Quitting.')
            glfw.destroy_window(self.window)
            glfw.terminate()
"""Random generator."""
from __future__ import annotations
import numpy as np
from safety_gymnasium.utils.common_utils import ResamplingError
class RandomGenerator:
    r"""A random number generator that can be seeded and reset.

    Used to generate random numbers for placement of objects.
    And there is only one instance in a single environment which is in charge of all randomness.

    Methods:

    - :meth:`set_placements_info`: Set the placements information from task for each type of objects.
    - :meth:`set_random_seed`: Instantiate a :class:`np.random.RandomState` object using given seed.
    - :meth:`build_layout`: Try to sample within placement area of objects to find a layout.
    - :meth:`draw_placement`: Sample an (x,y) location, based on potential placement areas.
    - :meth:`sample_layout`: Sample a layout of all objects.
    - :meth:`sample_goal_position`: Sample a position for goal.
    - :meth:`constrain_placement`: Get constrained placement of objects considering keepout.
    - :meth:`generate_rots`: Generate rotations of objects.
    - :meth:`randn`: Sample a random number from a normal distribution.
    - :meth:`binomial`: Sample a random number from a binomial distribution.
    - :meth:`random_rot`: Sample a random rotation angle.
    - :meth:`choice`: Sample a random element from a list.
    - :meth:`uniform`: Sample a random number from a uniform distribution.

    Attributes:

    - :attr:`random_generator` (:class:`np.random.RandomState`): Random number generator.
    - :attr:`placements` (dict): Potential placement areas.
    - :attr:`placements_extents` (list): Extents of potential placement areas.
    - :attr:`placements_margin` (float): Margin of potential placement areas.
    - :attr:`layout` (Dict[str, dict]): Layout of objects which is generated by this class.

    Note:
        Information about placements is set by :meth:`set_placements_info` method in the instance of
        specific environment, and we just utilize these to generate randomness here.
    """

    def __init__(self) -> None:
        """Initialize the random number generator."""
        # All attributes stay None until set_placements_info/set_random_seed are called.
        self.random_generator: np.random.RandomState = None  # pylint: disable=no-member
        self.placements: dict = None
        self.placements_extents: list = None
        self.placements_margin: float = None
        self.layout: dict[str, dict] = None

    def set_placements_info(
        self,
        placements: dict,
        placements_extents: list,
        placements_margin: float,
    ) -> None:
        """Set the placements information from task for each type of objects."""
        self.placements = placements
        self.placements_extents = placements_extents
        self.placements_margin = placements_margin

    def set_random_seed(self, seed: int) -> None:
        """Instantiate a :class:`np.random.RandomState` object using given seed."""
        self.random_generator = np.random.RandomState(seed)  # pylint: disable=no-member

    def build_layout(self) -> dict:
        """Try to sample within placement area of objects to find a layout.

        Retries up to 10000 times before giving up with :class:`ResamplingError`.
        """
        for _ in range(10000):
            if self.sample_layout():
                return self.layout
        raise ResamplingError('Failed to sample layout of objects')

    def draw_placement(self, placements: list | None, keepout: float) -> np.ndarray:
        """Sample an (x,y) location, based on potential placement areas.

        Args:
            placements (list | None): A list of (xmin, xmax, ymin, ymax) tuples that specify
                rectangles in the XY-plane where an object could be placed; None means the
                whole placement extents.
            keepout (float): Describes how much space an object is required to have
                around it, where that keepout space overlaps with the placement rectangle.

        Note:
            To sample an (x,y) pair, first randomly select which placement rectangle
            to sample from, where the probability of a rectangle is weighted by its
            area. If the rectangles are disjoint, there's an equal chance the (x,y)
            location will wind up anywhere in the placement space. If they overlap, then
            overlap areas are double-counted and will have higher density. This allows
            the user some flexibility in building placement distributions. Finally,
            randomly draw a uniform point within the selected rectangle.
        """
        if placements is None:
            choice = self.constrain_placement(self.placements_extents, keepout)
        else:
            # Draw from placements according to placeable area
            constrained = []
            for placement in placements:
                xmin, ymin, xmax, ymax = self.constrain_placement(placement, keepout)
                # Rectangles fully consumed by the keepout are discarded.
                if xmin > xmax or ymin > ymax:
                    continue
                constrained.append((xmin, ymin, xmax, ymax))
            assert constrained, 'Failed to find any placements with satisfy keepout'
            if len(constrained) == 1:
                choice = constrained[0]
            else:
                # Weight each rectangle by its area.
                areas = [(x2 - x1) * (y2 - y1) for x1, y1, x2, y2 in constrained]
                probs = np.array(areas) / np.sum(areas)
                choice = constrained[self.random_generator.choice(len(constrained), p=probs)]
        xmin, ymin, xmax, ymax = choice
        return np.array(
            [self.random_generator.uniform(xmin, xmax), self.random_generator.uniform(ymin, ymax)],
        )

    def sample_layout(self) -> bool:
        """Sample once within placement area of objects to find a layout.

        returning ``True`` if successful, else ``False``.
        """

        def placement_is_valid(xy, layout):  # pylint: disable=invalid-name
            # A draw is valid when it clears every already-placed object's keepout.
            for other_name, other_xy in layout.items():
                other_keepout = self.placements[other_name][1]
                dist = np.sqrt(np.sum(np.square(xy - other_xy)))
                if dist < other_keepout + self.placements_margin + keepout:
                    return False
            return True

        layout = {}
        for name, (placements, keepout) in self.placements.items():
            conflicted = True
            # Up to 100 attempts per object before the whole layout is rejected.
            for _ in range(100):
                # pylint: disable-next=invalid-name
                xy = self.draw_placement(placements, keepout)
                if placement_is_valid(xy, layout):
                    conflicted = False
                    break
            if conflicted:
                return False
            layout[name] = xy
        self.layout = layout
        return True

    def sample_goal_position(self) -> bool:
        """Sample a new goal position and return True, else False if sample rejected."""
        placements, keepout = self.placements['goal']
        goal_xy = self.draw_placement(placements, keepout)
        # Note: if the layout already contains a 'goal' entry, its keepout
        # also rejects draws too close to the previous goal.
        for other_name, other_xy in self.layout.items():
            other_keepout = self.placements[other_name][1]
            dist = np.sqrt(np.sum(np.square(goal_xy - other_xy)))
            if dist < other_keepout + self.placements_margin + keepout:
                return False
        self.layout['goal'] = goal_xy
        return True

    def constrain_placement(self, placement: tuple, keepout: float) -> tuple[float, float, float, float]:
        """Helper function to constrain a single placement by the keepout radius."""
        xmin, ymin, xmax, ymax = placement
        return (xmin + keepout, ymin + keepout, xmax - keepout, ymax - keepout)

    def generate_rots(self, num: int = 1) -> list[float]:
        """Generate the rotations of the obstacle."""
        return [self.random_rot() for _ in range(num)]

    def randn(self, *args, **kwargs) -> np.ndarray:
        """Wrapper for :meth:`np.random.RandomState.randn`."""
        return self.random_generator.randn(*args, **kwargs)

    def binomial(self, *args, **kwargs) -> np.ndarray:
        """Wrapper for :meth:`np.random.RandomState.binomial`."""
        return self.random_generator.binomial(*args, **kwargs)

    def random_rot(self) -> float:
        """Use internal random state to get a random rotation in radians."""
        return self.random_generator.uniform(0, 2 * np.pi)

    def choice(self, *args, **kwargs) -> np.ndarray:
        """Wrapper for :meth:`np.random.RandomState.choice`."""
        return self.random_generator.choice(*args, **kwargs)

    def uniform(self, *args, **kwargs) -> np.ndarray:
        """Wrapper for :meth:`np.random.RandomState.uniform`."""
        return self.random_generator.uniform(*args, **kwargs)
from folium import Map
from folium.features import GeoJson, GeoJsonTooltip
from folium.map import Icon, Marker
import numpy as np
from openrouteservice import client
from pandas.core.frame import DataFrame, Series
from pandas import Interval
from typeguard import typechecked
from typing import Union
import math
import pandas as pd
from geopy import distance
import unidecode
from colour import Color
import copy
from pathlib import Path
from dotenv import load_dotenv
import os
def generate_base_map(default_location: list = None,
                      default_zoom_start: Union[int, float] = 4) -> Map:
    """
    Generates the basemap where the routes with their safety scores will be plotted.

    Parameters
    ----------
    default_location : list, optional
        Map's default location, by default [-14, -50]
    default_zoom_start : Union[int, float], optional
        Default zoom to be applied, by default 4

    Returns
    -------
    Map
        Map object to be used.
    """
    # Avoid a mutable default argument; same effective default as before.
    if default_location is None:
        default_location = [-14, -50]
    return Map(location=default_location, control_scale=True, prefer_canvas=True,
               zoom_start=default_zoom_start, tiles="cartodbpositron",)
@typechecked
class SafetyMap(object):
    def __init__(self, accidents_data_file_path: str, start_point: tuple, end_point: tuple,
                 basemap: Map, sub_section_dist: float = 5., env_path: str = '.env',
                 map_save_path: str = "./maps/safety_map.html", color_value: int = None,
                 origin_name: str = '', destination_name: str = ''):
        """
        Initializes some important variables

        Parameters
        ----------
        accidents_data_file_path : str
            Path where the accidents .csv file is located in the disk.
        start_point : tuple
            Route start point in the format: (longitude, latitude)
        end_point : tuple
            Route end point in the format: (longitude, latitude)
        basemap : Map
            Map where the routes will be plotted
        sub_section_dist : float, optional
            Length of each subsection in the route in km, by default 5.
        env_path : str, optional
            Path to .env file, default ".env"
        map_save_path : str, optional
            Path where the .html file with the route map will be saved, by default
            "./maps/safety_map.html"
        color_value : int, optional
            Color to use on the icons in the map. This is special useful when you are plotting
            more routes in the same map. By default None
            You have to pass an integer between 0 and 18 as a dictionary key:
            {0: 'red', 1: 'lightred', 2: 'darkblue', 3: 'orange', 4: 'darkgreen',
            5: 'cadetblue', 6: 'purple', 7: 'black', 8: 'gray', 9: 'lightblue',
            10: 'beige', 11: 'lightgray', 12: 'lightgreen', 13: 'blue', 14: 'pink',
            15: 'darkpurple', 16: 'green', 17: 'white', 18: 'darkred'}
        origin_name : str, optional
            Name given to the origin point, by default ""
        destination_name : str, optional
            Name given to the destination point, by default ""
        """
        # Marker colors addressable via the integer `color_value` key.
        color_dict = {0: 'red', 1: 'lightred', 2: 'darkblue', 3: 'orange', 4: 'darkgreen',
                      5: 'cadetblue', 6: 'purple', 7: 'black', 8: 'gray', 9: 'lightblue',
                      10: 'beige', 11: 'lightgray', 12: 'lightgreen', 13: 'blue', 14: 'pink',
                      15: 'darkpurple', 16: 'green', 17: 'white', 18: 'darkred'}
        # The openrouteservice API token is read from the .env file under the key TOKEN.
        dotenv_path = Path(env_path)
        load_dotenv(dotenv_path=dotenv_path)
        TOKEN = os.getenv('TOKEN')
        self.clnt = client.Client(key=TOKEN)
        self.base_map = basemap
        # Order matters: the route feeds the coordinates frame, which in turn is
        # used to filter the accident data down to the route's bounding box.
        self.route = self._add_route_to_map(start_point, end_point)
        self.coor_df = self._gen_coordinates_df()
        self.accidents = self._treat_accidents_data(accidents_data_file_path)
        self.sub_section_dist = sub_section_dist
        self.map_save_path = map_save_path
        self.origin_name = origin_name
        self.destination_name = destination_name
        # None when `color_value` is absent or outside the 0-18 range.
        self.icon_color = color_dict.get(color_value)
    def _treat_accidents_data(self, path: str) -> DataFrame:
        """
        Method to open the csv file containing accidents information, treat the data and assign it
        to an attribute called accidents (DataFrame).

        Parameters
        ----------
        path : str
            The path where the file is located on the machine.

        Returns
        -------
        DataFrame
            Treated accidents DataFrame
        """
        # Force object dtype on code-like columns (e.g. `br`, `km`) so pandas does
        # not coerce them into numbers.
        dtype_dict = {'data_inversa': np.dtype('O'), 'dia_semana': np.dtype('O'),
                      'horario': np.dtype('O'), 'uf': np.dtype('O'), 'br': np.dtype('O'),
                      'km': np.dtype('O'), 'municipio': np.dtype('O'),
                      'causa_acidente': np.dtype('O'), 'tipo_acidente': np.dtype('O'),
                      'classificacao_acidente': np.dtype('O'), 'fase_dia': np.dtype('O'),
                      'sentido_via': np.dtype('O'), 'condicao_metereologica': np.dtype('O'),
                      'tipo_pista': np.dtype('O'), 'tracado_via': np.dtype('O'),
                      'pessoas': np.dtype('int64'), 'mortos': np.dtype('int64'),
                      'feridos_graves': np.dtype('int64'), 'latitude': np.dtype('O'),
                      'longitude': np.dtype('O')}
        # Source file is latin1-encoded and ';'-separated.
        self.accidents = pd.read_csv(path, encoding='latin1', sep=';', dtype=dtype_dict)
        # Coordinates use ',' as the decimal separator; convert to floats.
        self.accidents.loc[:, 'latitude'] = (self.accidents['latitude'].str.replace(',', '.')
                                             .astype('float'))
        self.accidents.loc[:, 'longitude'] = (self.accidents['longitude'].str.replace(',', '.')
                                              .astype('float'))
        # Drop rows with implausible longitudes (> 0 or < -75) — presumably
        # data-entry errors outside the covered region; TODO confirm bounds.
        l1 = list(self.accidents.query('longitude > 0').index)
        l2 = list(self.accidents.query('longitude < -75').index)
        self.accidents = self.accidents.drop(labels=l1+l2)
        self.accidents = self.accidents.drop_duplicates()
        # Keep only accidents inside the route's bounding box.
        self._filter_accident_data()
        return self.accidents
    def _add_route_to_map(self, start_point: tuple, end_point: tuple) -> dict:
        """
        Generates the route based on the start and end points.

        Parameters
        ----------
        start_point : tuple
            Start point format: (longitude, latitude)
        end_point : tuple
            End point format: (longitude, latitude)

        Returns
        -------
        dict
            A dictionary with route information to plot in a map
        """
        request_params = {
            'coordinates': [start_point, end_point],
            'format_out': 'geojson',
            'profile': 'driving-car',
            'instructions': 'false',
            # NOTE(review): -1 appears to lift the waypoint snapping-radius
            # limit — confirm against the openrouteservice directions docs.
            'radiuses': [-1, -1],
            'preference': 'recommended',
        }
        self.route = self.clnt.directions(**request_params)
        # Summary values: distance in meters (divided by 1000 downstream) and
        # duration in seconds (divided by 3600 in format_duration).
        self.route_distance = (self.route['features'][0]['properties']
                               ['summary']['distance'])
        self.trip_duration = (self.route['features'][0]['properties']
                              ['summary']['duration'])
        return self.route
def format_duration(self) -> str:
"""
Format trip duration that is in second for hours, minutes and seconds
Returns
-------
str
String with the trip duration formated: HH:mm:ss
"""
if self.trip_duration is not None:
duration_h = self.trip_duration / 3600
hours = math.trunc(duration_h)
duration_m = (duration_h - hours) * 60
minutes = math.trunc(duration_m)
seconds = math.trunc(((duration_m) - minutes) * 60)
return f"{hours:02}:{minutes:02}:{seconds:02}"
    def _gen_coordinates_df(self) -> DataFrame:
        """
        Generate coordinates DataFrame based on route dictionary. This method gets the coordinates
        and extracts the latitude and longitude.
        It also calculates the distance between points in the route to generate the subsections and
        categorize them in groups. The coord_df contains the distance between coordinates to
        calculate the sections of the route.

        Returns
        -------
        DataFrame
            Coordinates DataFrame with the route's latitude and longitude, also it has the distance
            between the coordinate and the subsequent point. Based on the cumulative distance the
            route's subsections are created.
        """
        # GeoJSON coordinates come as (longitude, latitude) pairs.
        coor = self.route['features'][0]['geometry']['coordinates']
        self.coor_df = pd.DataFrame(coor).rename(columns={0: 'olong', 1: 'olat'})
        # Pair each point with its successor along the route (destination columns).
        self.coor_df[['dlong', 'dlat']] = self.coor_df[['olong', 'olat']].shift(-1)
        self.coor_df['origin_tuple'] = list(self.coor_df[['olat', 'olong']]
                                            .itertuples(index=False, name=None))
        self.coor_df['destination_tuple'] = list(self.coor_df[['dlat', 'dlong']]
                                                 .itertuples(index=False, name=None))
        # The last row has no successor after the shift, so drop it.
        self.coor_df = self.coor_df.dropna()
        distance_list = []
        for _, row in self.coor_df.iterrows():
            origin = row.origin_tuple
            destination = row.destination_tuple
            # Geodesic distance in km between consecutive route points.
            distance_list.append(distance.distance(origin, destination).km)
        self.coor_df = self.coor_df.assign(route_dist=distance_list)
        # Cumulative distance along the route, later used to cut it into sections.
        self.coor_df = self.coor_df.assign(cum_sum=self.coor_df['route_dist']
                                           .cumsum())
        return self.coor_df
    def _gen_sections(self) -> DataFrame:
        """
        Method to create intervals with step `self.sub_section_dist`. Each group is one subsection
        of the route.

        Returns
        -------
        DataFrame
            sections DataFrame with the following information: coordinates' latitude and longitude,
            cum_sum_min (the minimum distance of that route section), cum_sum_max (the maximum
            distance of that route section), sections (the first km and the last km included in
            that section, origin (section first coordinate), destination (section last coordinate))
        """
        max_dis = self.coor_df['cum_sum'].max()
        # Half-open [start, end) bins of length sub_section_dist covering the route.
        interval = pd.interval_range(start=0, end=max_dis + self.sub_section_dist,
                                     freq=self.sub_section_dist, closed='left')
        self.coor_df['sections'] = pd.cut(self.coor_df['cum_sum'], bins=interval)
        # Per section: the smallest and largest cumulative distances reached.
        coor_sum_min = (self.coor_df.groupby(by='sections').agg({'cum_sum': 'min'})
                        .reset_index().rename(columns={'cum_sum': 'cum_sum_min'}))
        coor_sum_max = (self.coor_df.groupby(by='sections').agg({'cum_sum': 'max'})
                        .reset_index().rename(columns={'cum_sum': 'cum_sum_max'}))
        cols_min = ['sections', 'cum_sum_min', 'olong', 'olat', 'origin_tuple']
        cols_max = ['cum_sum_max' if i == 'cum_sum_min' else i for i in cols_min]
        # Merge back onto coor_df to recover the coordinate at each section boundary.
        coor_sum_min = (coor_sum_min.merge(self.coor_df, left_on='cum_sum_min',
                                           right_on='cum_sum')
                        .rename(columns={'sections_x': 'sections'})[cols_min])
        coor_sum_max = (coor_sum_max.merge(self.coor_df, left_on='cum_sum_max',
                                           right_on='cum_sum')
                        .rename(columns={'sections_x': 'sections'})[cols_max])
        # _x columns are the section start (origin); _y columns its end (destination).
        rename_dict = {'olong_x': 'olong', 'olat_x': 'olat', 'origin_tuple_x': 'origin',
                       'olong_y': 'dlong', 'olat_y': 'dlat', 'origin_tuple_y': 'destination'}
        return coor_sum_min.merge(coor_sum_max, on='sections').rename(columns=rename_dict)
@staticmethod
def _normalize_string(string: str) -> str:
"""
Normalizes strings removing accentuation, lowering them and joining them with underline (_)
Parameters
----------
string : str
String to be normalized
Returns
-------
str
Normalized string
"""
string_no_accents = unidecode.unidecode(string)
string_lower = string_no_accents.lower()
string_without_space = string_lower.split(' ')
return '_'.join(string_without_space)
def _classes_accidents(self, accident: str) -> int:
"""
Creates a score for the route's section. Those scores are arbitrary and can be tuned for
what makes more sense
Parameters
----------
accident : str
Accident class. 'sem_vitimas' when there are no victims; 'com_vitimas_feridas' when
there are injured victims; 'com_vitimas_fatais' when there are fatal victims.
Returns
-------
int
The accident score based on its class
Raises
------
Exception
Raises an exception if an unexpected class is passed as a parameter.
"""
accident = self._normalize_string(accident)
if accident == 'sem_vitimas':
return 1
elif accident == 'com_vitimas_feridas':
return 5
elif accident == 'com_vitimas_fatais':
return 10
else:
raise Exception("Accident class doesn't mapped in the original dataset!")
    def _filter_accident_data(self) -> DataFrame:
        """
        Filters the accidents DataFrame based on the route coordinates. In other words, it gets the
        accidents points near the route only.

        Returns
        -------
        DataFrame
            Filtered accidents DataFrame
        """
        # Bounding box of the route, rounded to 3 decimal places.
        min_olong = np.round(self.coor_df['olong'].min(), 3)
        max_olong = np.round(self.coor_df['olong'].max(), 3)
        min_olat = np.round(self.coor_df['olat'].min(), 3)
        max_olat = np.round(self.coor_df['olat'].max(), 3)
        query = (f'({min_olat} <= latitude <= {max_olat}) and '
                 f'({min_olong} <= longitude <= {max_olong})')
        filtered = self.accidents.query(query)
        # Keep the unfiltered data when nothing falls inside the bounding box,
        # so downstream scoring still has accident rows to work with.
        if not filtered.empty:
            self.accidents = filtered
        return self.accidents
@staticmethod
def _days_from_accident(df_date_col: Series) -> Series:
"""
Calculates how many days has passed from the accident (based on the date of the last
accident on dataset)
Parameters
----------
df_date_col : Series
Accident dates column
Returns
-------
Series
Column with days from accident
"""
max_date = df_date_col.max()
return (np.datetime64(max_date) - pd.to_datetime(df_date_col)).apply(lambda x: x.days)
@staticmethod
def _haversine(Olat: float, Olon: float, Dlat: Series, Dlon: Series) -> Series:
"""
Calculates haversine distance. For more information look at:
https://en.wikipedia.org/wiki/Haversine_formula
Parameters
----------
Olat : float
Origin latitude
Olon : float
Origin longitude
Dlat : Series
Destiny latitude
Dlon : Series
Destiny longitude
Returns
-------
Series
Distance Series
"""
radius = 6371. # km
d_lat = np.radians(Dlat - Olat)
d_lon = np.radians(Dlon - Olon)
a = (np.sin(d_lat / 2.) * np.sin(d_lat / 2.) + np.cos(np.radians(Olat)) *
np.cos(np.radians(Dlat)) * np.sin(d_lon / 2.) * np.sin(d_lon / 2.))
c = 2. * np.arctan2(np.sqrt(a), np.sqrt(1. - a))
return radius * c
def _rank_subsections(self, df: DataFrame, flag: str) -> DataFrame:
"""
Generates the score for each route subsection.
Parameters
----------
df : DataFrame
DataFrame with coordinate, subsections and distances
flag : str
Flag to indicate if df rows represent each point in the route or if they are the route's
subsections. Possible values are: 'point' ou 'route'
Returns
-------
DataFrame
DataFrame with the scores for the route's sections
Raises
------
Exception
If the flag is not set to 'point' or 'route'
"""
last_val = int(df['sections'].values[-1].right)
rank_df_list = []
for i in range(0, last_val, int(self.sub_section_dist)):
interval = Interval(float(i), float(i + 5.0), closed='left')
filtered_route = self.route_df[self.route_df['sections'] == interval]
rank_accidents = self.score_accidents.copy()
distances_list = []
for _, row in filtered_route.iterrows():
distances = self._haversine(row['origin_tuple'][0], row['origin_tuple'][1],
self.score_accidents.loc[:, 'latitude'],
self.score_accidents.loc[:, 'longitude'])
distances_list.append(distances)
filtered_sections = df[df['sections'] == interval]
if flag == 'point':
rank_df = filtered_sections[['sections', 'origin', 'destination']]
elif flag == 'route':
rank_df = filtered_sections[['sections', 'origin_tuple']]
else:
raise Exception("The flag used is not a valid option!!!")
distances_list.append(rank_accidents['score'])
df_dist = pd.concat(distances_list, axis=1)
rank = df_dist[(df_dist.iloc[:, :-1] <= 1).sum(axis=1) > 0]['score'].sum()
rank_df = rank_df.assign(score=rank)
rank_df_list.append(rank_df)
return pd.concat(rank_df_list)
def _getcolor(self, rank: float) -> str:
"""
Generates the color for the subsection on the route based on its score.
Parameters
----------
rank : float
Subsection's score
Returns
-------
str
Hexadecimal color or grey if the subsection has no score.
"""
max_score = int(np.ceil(self.final_rank_sections['score'].max()))
try:
colors = list(Color('green').range_to(Color('red'), max_score))
except ValueError:
return 'grey'
else:
colors = [color.get_web() for color in colors]
if rank == 0:
return 'grey'
else:
return colors[int(rank)]
def _plot_route_score(self):
"""
Plots the subsections in the route on the map with different colors based on the the score
of each subsection.
"""
rank_json = copy.deepcopy(self.route)
properties = rank_json['features'][0]['properties']
rank_json['features'] = []
last_val = int(self.sections_df['sections'].values[-1].right)
p_type = 'Feature'
for i in range(0, last_val, int(self.sub_section_dist)):
interval = Interval(float(i), float(i) + 5.0, closed='left')
subsection = self.final_rank_route[self.final_rank_route['sections'] == interval]
coor_list = subsection['origin_tuple'].apply(lambda x: list(x[::-1])).to_list()
bbox = self.route['bbox']
id = str(i)
rank_value = int(subsection['score'].unique()[0])
color = subsection['score'].apply(self._getcolor).unique()[0]
properties['score'] = rank_value
properties['color'] = color
append_dict = {'bbox': bbox, 'type': p_type, 'properties': properties,
'id': id, 'geometry': {}}
append_dict['geometry']['type'] = self.route['features'][0]['geometry']['type']
append_dict['geometry']['coordinates'] = coor_list
rank_json['features'].append(copy.deepcopy(append_dict))
GeoJson(data=rank_json, overlay=True, smooth_factor=2,
style_function=lambda x: {'color': x['properties']['color'], 'weight': 5,
'fillOpacity': 1},
highlight_function=lambda x: {'weight': 10, 'color': x['properties']['color']},
tooltip=GeoJsonTooltip(fields=['score'], aliases=["section risk's score: "],
labels=True, sticky=True,
toLocaleString=True)).add_to(self.base_map)
self.base_map.save(self.map_save_path)
def _calculate_final_score(self) -> float:
"""
Calculates the route's final score. To do this the scores of each subsection are summed and
them divided by the route distance in kilometers. This is a way to normalize the final
score. So, if two routes have the same summed score, the smaller one will have the higher
final score.
Returns
-------
float
The final score calculated as stated above.
"""
sum_score = self.final_rank_sections['score'].sum()
self.score = sum_score / (self.route_distance / 1000)
return self.score
def _plot_final_score(self):
"""
Plots the final score on a marker on the map. To open the popup with the message, the user
needs to click in the marker located approximately on the middle of the route.
"""
origin_label = ': ' + self.origin_name if self.origin_name else ''
destination_label = ': ' + self.destination_name if self.destination_name else ''
begin_color = self.icon_color or 'green'
end_color = self.icon_color or 'red'
score_color = self.icon_color or 'blue'
score = str(np.round(self.score, 2))
popup = ('<h3 align="center" style="font-size:16px">Route final score: '
f'<b>{score}</b></h3>')
tooltip = '<strong>Click here to see route score</strong>'
middle_pos = int(len(self.final_rank_route) / 2)
marker_pos = self.final_rank_route.loc[middle_pos, 'origin_tuple']
begin = self.final_rank_route.loc[0, 'origin_tuple']
begin_formated = tuple(map(lambda coor: round(coor, 3), begin))
end = self.final_rank_route.loc[len(self.final_rank_route) - 1, 'origin_tuple']
end_formated = tuple(map(lambda coor: round(coor, 3), end))
tooltip_begin = ('<h3 align="center" style="font-size:14px">Route Begin Point'
f'<b>{origin_label}</b></h3>')
tooltip_end = ('<h3 align="center" style="font-size:14px">Route End Point'
f'<b>{destination_label}</b></h3>')
(Marker(location=begin, tooltip=tooltip_begin, popup=f'{begin_formated}',
icon=Icon(color=begin_color, icon='fa-truck', prefix='fa')).add_to(self.base_map))
(Marker(location=end, tooltip=tooltip_end, popup=f'{end_formated}',
icon=Icon(color=end_color, icon='fa-truck', prefix='fa')).add_to(self.base_map))
(Marker(location=marker_pos, tooltip=tooltip, popup=popup,
icon=Icon(color=score_color, icon='stats')).add_to(self.base_map))
def _calculate_score_weight(self):
"""
Calculates the weight to multiply the class score based on how many days the accident
occurred from the last date in the dataset. If the accident is recent the weight is near 1,
if it occurred long time ago the weight is near 0.1.
"""
W_max = self.score_accidents['days_from_accident'].max()
W_min = self.score_accidents['days_from_accident'].min()
self.score_accidents['W'] = ((W_max - self.score_accidents['days_from_accident']) /
(W_max - W_min))
self.score_accidents['W'] = self.score_accidents['W'].apply(lambda x: x + 0.1
if x == 0 else x)
self.score_accidents['score'] = self.score_accidents['classes'] * self.score_accidents['W']
def path_risk_score(self, save_map: bool = False) -> DataFrame:
"""
This method call the others above to generate the subsections, calculate the scores and plot
all on the map
Parameters
----------
save_map : bool, optional
If True, the map is save in .html format on the disk, by default False
Returns
-------
DataFrame
Final DataFrame with the score for each subsection on the route.
"""
self.score_accidents = self.accidents[['data_inversa', 'latitude', 'longitude',
'classificacao_acidente']]
classes = self.score_accidents['classificacao_acidente'].apply(self._classes_accidents)
self.score_accidents = (self.score_accidents.assign(classes=classes)
.drop(columns='classificacao_acidente'))
days_from = self._days_from_accident(self.score_accidents['data_inversa'])
self.score_accidents['days_from_accident'] = days_from
self._calculate_score_weight()
self.score_accidents['lat_long'] = (list(self.score_accidents[['latitude', 'longitude']]
.itertuples(index=False, name=None)))
self.sections_df = self._gen_sections()
self.route_df = self.coor_df[['origin_tuple', 'destination_tuple', 'sections']]
self.final_rank_sections = self._rank_subsections(self.sections_df, flag='point')
self.final_rank_route = self._rank_subsections(self.coor_df, flag='route')
self._calculate_final_score()
print(f'The final route score is {self.score:.2f}.')
if save_map:
print('Plotting route and final score on map...')
self._plot_final_score()
self._plot_route_score()
return self.final_rank_sections
if __name__ == "__main__":
    # Manual smoke test: score a real route and time the whole run.
    import time
    t0 = time.time()
    # Endpoints as (longitude, latitude) tuples:
    # Extrema: -22.864969298862736, -46.35471817331918
    # Nova Rio: -22.864365417300693, -43.60680685910165
    inicio = (-46.35471817331918, -22.864969298862736)
    fim = (-43.60680685910165, -22.864365417300693)
    data_path = "./extract_data/accidents_final.csv"
    basemap = generate_base_map()
    s = SafetyMap(accidents_data_file_path=data_path, start_point=inicio, end_point=fim,
                  basemap=basemap, map_save_path="./maps/teste_com_dados2.html",
                  env_path='./.env')
    s.path_risk_score(save_map=True)
    t1 = time.time()
    print(f'Tempo necessário: {t1 - t0} segundos')
[](https://pyup.io/safety/)
[](https://pypi.python.org/pypi/safety)
[](https://travis-ci.org/pyupio/safety)
[](https://pyup.io/repos/github/pyupio/safety/)
Safety checks Python dependencies for known security vulnerabilities and suggests the proper remediations for vulnerabilities detected. Safety can be run on developer machines, in CI/CD pipelines and on production systems.
By default it uses the open Python vulnerability database [Safety DB](https://github.com/pyupio/safety-db), which is **licensed for non-commercial use only**.
For all commercial projects, Safety must be upgraded to use a [PyUp API key](https://pyup.io) using the `--key` option.
# Using Safety as a GitHub Action
Safety can be integrated into your existing GitHub CI pipeline as an action. Just add the following as a step in your workflow YAML file after setting your `SAFETY_API_KEY` secret on GitHub under Settings -> Secrets -> Actions:
```yaml
- uses: pyupio/safety@2.3.1
with:
api-key: ${{ secrets.SAFETY_API_KEY }}
```
(Don't have an API Key? You can sign up for one with [PyUp](https://pyup.io).)
This will run Safety in auto-detect mode which figures out your project's structure and the best configuration to run in automatically. It'll fail your CI pipeline if any vulnerable packages are found.
If you have something more complicated such as a monorepo; or once you're finished testing, read the [Action Documentation](https://docs.pyup.io/docs/github-actions-safety) for more details on configuring Safety as an action.
# Installation
Install `safety` with pip. Keep in mind that we support only Python 3.6 and up.
```bash
pip install safety
```
# Documentation
For detailed documentation, please see [Safety's documentation portal](https://docs.pyup.io/docs/getting-started-with-safety-cli).
# Basic Usage
To check your currently selected virtual environment for dependencies with known security
vulnerabilities, run:
```bash
safety check
```
You should get a report similar to this:
```bash
+=================================================================================+
/$$$$$$ /$$
/$$__ $$ | $$
/$$$$$$$ /$$$$$$ | $$ \__//$$$$$$ /$$$$$$ /$$ /$$
/$$_____/ |____ $$| $$$$ /$$__ $$|_ $$_/ | $$ | $$
| $$$$$$ /$$$$$$$| $$_/ | $$$$$$$$ | $$ | $$ | $$
\____ $$ /$$__ $$| $$ | $$_____/ | $$ /$$| $$ | $$
/$$$$$$$/| $$$$$$$| $$ | $$$$$$$ | $$$$/| $$$$$$$
|_______/ \_______/|__/ \_______/ \___/ \____ $$
/$$ | $$
| $$$$$$/
by pyup.io \______/
+=================================================================================+
REPORT
Safety v2.0.0 is scanning for Vulnerabilities...
Scanning dependencies in your environment:
-> /scanned-path/
Using non-commercial database
Found and scanned 295 packages
Timestamp 2022-06-28 15:42:04
0 vulnerabilities found
0 vulnerabilities ignored
+=================================================================================+
No known security vulnerabilities found.
+=================================================================================+
```
Now, let's install something insecure:
```bash
pip install insecure-package
```
*Yeah, you can really install that.*
Run `safety check` again:
```bash
+=================================================================================+
Safety v2.0.0.dev6 is scanning for Vulnerabilities...
Scanning dependencies in your environment:
-> /scanned-path/
Using non-commercial database
Found and scanned 295 packages
Timestamp 2022-06-28 15:42:04
1 vulnerabilities found
0 vulnerabilities ignored
+=================================================================================+
VULNERABILITIES FOUND
+=================================================================================+
-> Vulnerability found in insecure-package version 0.1.0
Vulnerability ID: 25853
Affected spec: <0.2.0
ADVISORY: This is an insecure package with lots of exploitable
security vulnerabilities.
Fixed versions:
PVE-2021-25853
For more information, please visit
https://pyup.io/vulnerabilities/PVE-2021-25853/25853/
Scan was completed.
+=================================================================================+
```
## Starter documentation
### Configuring the target of the scan
Safety can scan requirements.txt files, the local environment, as well as direct input piped into Safety.
To scan a requirements file:
```bash
safety check -r requirements.txt
```
To scan the local environment:
```bash
safety check
```
Safety is also able to read from stdin with the `--stdin` flag set.
```
cat requirements.txt | safety check --stdin
```
or the output of `pip freeze`:
```
pip freeze | safety check --stdin
```
or to check a single package:
```
echo "insecure-package==0.1" | safety check --stdin
```
*For more examples, take a look at the [options](#options) section.*
### Specifying the output format of the scan
Safety can output the scan results in a variety of formats and outputs. This includes: screen, text, JSON, and bare outputs. Using the ```--output``` flag to configure this output. The default output is to the screen.
```--output json``` will output JSON for further processing and analysis.
```--output text``` can be used to save the scan to file to later auditing.
```--output bare``` simply prints out the packages that have known vulnerabilities
### Exit codes
Safety by default emits exit codes based on the result of the code, allowing you to run safety inside of CI/CD processes. If no vulnerabilities were found the exit code will be 0. In cases of a vulnerability being found, non-zero exit codes will be returned.
### Scan a Python-based Docker image
To scan a docker image `IMAGE_TAG`, you can run
```console
docker run -it --rm ${IMAGE_TAG} /bin/bash -c "pip install safety && safety check"
```
## Using Safety in Docker
Safety can be easily executed as Docker container. It can be used just as
described in the [examples](#examples) section.
```console
echo "insecure-package==0.1" | docker run -i --rm pyupio/safety safety check --stdin
cat requirements.txt | docker run -i --rm pyupio/safety safety check --stdin
```
## Using the Safety binaries
The Safety [binaries](https://github.com/pyupio/safety/releases) provide some
[extra security](https://pyup.io/posts/patched-vulnerability/).
After installation, they can be used just like the regular command line version
of Safety.
## Using Safety with a CI service
Safety works great in your CI pipeline. It returns by default meaningful non-zero exit codes:
| CODE NAME | MEANING | VALUE |
| ------------- |:-------------:| -----:|
| EXIT_CODE_OK | Successful scan | 0 |
| EXIT_CODE_FAILURE | An unexpected issue happened, please run the debug mode and write to us | 1 |
| EXIT_CODE_VULNERABILITIES_FOUND | Safety found vulnerabilities | 64 |
| EXIT_CODE_INVALID_API_KEY | The API KEY used is invalid | 65 |
| EXIT_CODE_TOO_MANY_REQUESTS | You are making too many requests; please wait around 40 seconds | 66 |
| EXIT_CODE_UNABLE_TO_LOAD_LOCAL_VULNERABILITY_DB | The local vulnerability database is malformed | 67 |
| EXIT_CODE_UNABLE_TO_FETCH_VULNERABILITY_DB | Client network or server issues trying to fetch the database | 68 |
| EXIT_CODE_MALFORMED_DB | The fetched vulnerability database is malformed or in the review command case, the report to review is malformed | 69 |
If you want Safety to continue on error (always returning a zero exit code), you can use the `--continue-on-error` flag.
Run it before or after your tests. If Safety finds something, your tests will fail.
**Travis CI**
```yaml
install:
- pip install safety
script:
- safety check
```
**Gitlab CI**
```yaml
safety:
script:
- pip install safety
- safety check
```
**Tox**
```ini
[tox]
envlist = py37
[testenv]
deps =
safety
pytest
commands =
safety check
pytest
```
**Deep GitHub Integration**
If you are looking for a deep integration with your GitHub repositories: Safety is available as a
part of [pyup.io](https://pyup.io/), called [Safety CI](https://pyup.io/safety/ci/). Safety CI
checks your commits and pull requests for dependencies with known security vulnerabilities
and displays a status on GitHub.

# Using Safety in production
Safety is free and open source (MIT Licensed). The data it relies on from the free Safety-db database is licensed for non-commercial use only, is limited, and is only updated once per month.
**All commercial projects and teams must sign up for a paid plan at [PyUp.io](https://pyup.io)**
## Options
### `--key`
*API Key for pyup.io's vulnerability database. This can also be set as `SAFETY_API_KEY` environment variable.*
**Example**
```bash
safety check --key=12345-ABCDEFGH
```
___
### `--db`
*Path to a directory with a local vulnerability database including `insecure.json` and `insecure_full.json`*
**Example**
```bash
safety check --db=/home/safety-db/data
```
### `--proxy-host`
*Proxy host IP or DNS*
### `--proxy-port`
*Proxy port number*
### `--proxy-protocol`
*Proxy protocol (https or http)*
___
### `--output json`
*Output a complete report with the vulnerabilities in JSON format.*
The report may be used too with the review command.
if you are using the PyUp commercial database, Safety will use the same JSON structure but with all the full data for commercial users.
**Example**
```bash
safety check --output json
```
```json
{
"report_meta": {
"scan_target": "environment",
"scanned": [
"/usr/local/lib/python3.9/site-packages"
],
"api_key": false,
"packages_found": 1,
"timestamp": "2022-03-23 01:41:25",
"safety_version": "2.0.0.dev6"
},
"scanned_packages": {
"insecure-package": {
"name": "insecure-package",
"version": "0.1.0"
}
},
"affected_packages": {
"insecure-package": {
"name": "insecure-package",
"version": "0.1.0",
"found": "/usr/local/lib/python3.9/site-packages",
"insecure_versions": [],
"secure_versions": [],
"latest_version_without_known_vulnerabilities": null,
"latest_version": null,
"more_info_url": "None"
}
},
"announcements": [],
"vulnerabilities": [
{
"name": "insecure-package",
"ignored": false,
"reason": "",
"expires": "",
"vulnerable_spec": "<0.2.0",
"all_vulnerable_specs": [
"<0.2.0"
],
"analyzed_version": "0.1.0",
"advisory": "This is an insecure package with lots of exploitable security vulnerabilities.",
"vulnerability_id": "25853",
"is_transitive": false,
"published_date": null,
"fixed_versions": [],
"closest_versions_without_known_vulnerabilities": [],
"resources": [],
"CVE": {
"name": "PVE-2021-25853",
"cvssv2": null,
"cvssv3": null
},
"affected_versions": [],
"more_info_url": "None"
}
],
"ignored_vulnerabilities": [],
"remediations": {
"insecure-package": {
"vulns_found": 1,
"version": "0.1.0",
"recommended": null,
"other_recommended_versions": [],
"more_info_url": "None"
}
}
}
```
___
### `--full-report`
*Full reports includes a security advisory. It also shows CVSS values for CVEs (requires a premium PyUp subscription).*
**Example**
```bash
safety check --full-report
```
### `--output bare`
*Output vulnerable packages only. Useful in combination with other tools.*
**Example**
```bash
safety check --output bare
```
```
cryptography django
```
___
### `--stdin`
*Read input from stdin.*
**Example**
```bash
cat requirements.txt | safety check --stdin
```
```bash
pip freeze | safety check --stdin
```
```bash
echo "insecure-package==0.1" | safety check --stdin
```
___
### `--file`, `-r`
*Read input from one (or multiple) requirement files.*
**Example**
```bash
safety check -r requirements.txt
```
```bash
safety check --file=requirements.txt
```
```bash
safety check -r req_dev.txt -r req_prod.txt
```
___
### `--ignore`, `-i`
*Ignore one (or multiple) vulnerabilities by ID*
**Example**
```bash
safety check -i 1234
```
```bash
safety check --ignore=1234
```
```bash
safety check -i 1234 -i 4567 -i 89101
```
### `--output`, `-o`
*Save the report to a file*
**Example**
```bash
safety check --output text > insecure_report.txt
```
```bash
safety check --output json > insecure_report.json
```
___
# Review
If you save the report in JSON format you can review in the report format again.
## Options
### `--file`, `-f` (REQUIRED)
*Read an insecure report.*
**Example**
```bash
safety review -f insecure.json
```
```bash
safety review --file=insecure.json
```
___
### `--full-report`
*Full reports include a security advisory (if available).*
**Example**
```bash
safety review -r insecure.json --full-report
```
___
### `--bare`
*Output vulnerable packages only.*
**Example**
```bash
safety review --file report.json --output bare
```
```
django
```
___
# License
Display packages licenses information (requires a premium PyUp subscription).
## Options
### `--key` (REQUIRED)
*API Key for pyup.io's licenses database. Can be set as `SAFETY_API_KEY` environment variable.*
**Example**
```bash
safety license --key=12345-ABCDEFGH
```
*Shows the license of each package in the current environment*
### `--output json` (Optional)
This license command can also be used in conjunction with optional arguments `--output bare` and `--output json` for structured, parsable outputs that can be fed into other tools and pipelines.
___
### `--db`
*Path to a directory with a local licenses database `licenses.json`*
**Example**
```bash
safety license --key=12345-ABCDEFGH --db /home/safety-db/data
```
___
### `--file`, `-r`
*Read input from one (or multiple) requirement files.*
**Example**
```bash
safety license --key=12345-ABCDEFGH -r requirements.txt
```
```bash
safety license --key=12345-ABCDEFGH --file=requirements.txt
```
```bash
safety license --key=12345-ABCDEFGH -r req_dev.txt -r req_prod.txt
```
___
### `--proxy-host`, `-ph`
*Proxy host IP or DNS*
### `--proxy-port`, `-pp`
*Proxy port number*
### `--proxy-protocol`, `-pr`
*Proxy protocol (https or http)*
**Example**
```bash
safety license --key=12345-ABCDEFGH -ph 127.0.0.1 -pp 8080 -pr https
```
___
# Python 2.7
This tool requires latest Python patch versions starting with version 3.6. We
did support Python 2.7 in the past but, as for other Python 3.x minor versions,
it reached its End-Of-Life and as such we are not able to support it anymore.
We understand you might still have Python < 3.6 projects running. At the same
time, Safety itself has a commitment to encourage developers to keep their
software up-to-date, and it would not make sense for us to work with officially
unsupported Python versions, or even those that reached their end of life.
If you still need to run Safety from a Python 2.7 environment, please use
version 1.8.7 available at PyPi. Alternatively, you can run Safety from a
Python 3 environment to check the requirements file for your Python 2.7
project.
| /safety-2.3.1.tar.gz/safety-2.3.1/README.md | 0.405802 | 0.959039 | README.md | pypi |
from collections import OrderedDict
from copy import deepcopy
from pprint import pformat
from safetydance import step, step_data
from rest_framework.test import APIClient
import pytest
# Step-scoped shared state used by every HTTP step below: the API client
# issuing the requests and the response captured by the last request step.
http_client = step_data(APIClient, initializer=APIClient)
http_response = step_data(None)
@step
def defaults(**kwargs):
    """Merge the given keyword arguments into the client's default request parameters."""
    http_client.defaults = {**http_client.defaults, **kwargs}
@step
def force_authenticate(*args, **kwargs):
    """Can be used by rest_framework.test.APIClient"""
    http_client.force_authenticate(*args, **kwargs)
@step
def force_login(*args, **kwargs):
    """Can be used by django.test.Client"""
    http_client.force_login(*args, **kwargs)
@step
def login(*args, **kwargs):
    """Log in with the given credentials (see django.test.Client.login)."""
    http_client.login(*args, **kwargs)
@step
def delete(*args, **kwargs):
    '''Perform HTTP DELETE'''
    http_response = http_client.delete(*args, **kwargs)
@step
def get(*args, **kwargs):
    '''Perform HTTP GET'''
    http_response = http_client.get(*args, **kwargs)
@step
def head(*args, **kwargs):
    '''Perform HTTP HEAD'''
    http_response = http_client.head(*args, **kwargs)
@step
def get_created(*args, **kwargs):
    '''Perform HTTP GET of `location` header.'''
    http_response = http_client.get(
        http_response['location'],
        *args,
        **kwargs)
@step
def options(*args, **kwargs):
    '''Perform HTTP OPTIONS'''
    http_response = http_client.options(*args, **kwargs)
@step
def post(*args, **kwargs):
    '''Perform HTTP POST'''
    http_response = http_client.post(*args, **kwargs)
@step
def put(*args, **kwargs):
    '''Perform HTTP PUT'''
    http_response = http_client.put(*args, **kwargs)
@step
def status_code_is(expected):
    '''
    Check that the expected status code matches the received status code
    '''
    assert http_response.status_code == expected,\
        pformat(http_response.data) if http_response.data is not None else ""
@step
def status_code_is_one_of(*expected):
    '''
    Check that the received status code is one of the expected status codes
    '''
    assert http_response.status_code in expected,\
        pformat(http_response.data) if http_response.data is not None else ""
@step
def content_type_is(expected):
    '''Check that the response Content-Type header starts with `expected`.'''
    assert 'Content-Type' in http_response
    assert http_response['Content-Type'].startswith(expected)
@step
def response_json_is(expected):
    '''
    Check that the expected response json body matches the received
    response body. Keys present in the response but absent from `expected`
    are ignored (see json_values_match).
    '''
    content_type_is('application/json')
    observed = http_response.json()
    assert json_values_match(expected, observed)
@step
def response_data_is(expected, excluded_fields=None):
    '''
    Check that the expected response data matches the received response
    data, ignoring ``id``, ``url`` and any keys listed in *excluded_fields*.
    '''
    def clean_item(obj):
        # Drop server-assigned and explicitly excluded keys before comparing.
        obj.pop('id', None)
        obj.pop('url', None)
        if excluded_fields is not None and isinstance(excluded_fields, list):
            for k in excluded_fields:
                obj.pop(k, None)
        return obj
    # BUG FIX: steps are module-level functions, not methods -- there is no
    # `self` here. Call the content_type_is step directly, exactly as
    # response_json_is does.
    content_type_is('application/json')
    observed = clean_item(deepcopy(http_response.data))
    expected = clean_item(deepcopy(expected))
    assert expected == observed
@step
def assert_data(expected, observed, excluded_fields=None):
    '''
    Check that the expected data matches the observed data, ignoring
    ``id``, ``url`` and any keys listed in *excluded_fields*.
    '''
    ignored = ['id', 'url']
    if isinstance(excluded_fields, list):
        ignored.extend(excluded_fields)
    def clean_item(obj):
        # Drop server-assigned and explicitly excluded keys before comparing.
        for key in ignored:
            obj.pop(key, None)
        return obj
    assert clean_item(deepcopy(expected)) == clean_item(deepcopy(observed))
@step
def response_data_list_is(list_expected, excluded_fields=None):
    '''
    Check that the expected list of items matches the received response
    body, ignoring ``id``, ``url`` and any keys listed in
    *excluded_fields*. Enveloped responses (items carrying an ``etag``)
    are unwrapped to their ``content`` before comparison.
    '''
    def clean_item(obj):
        # Drop server-assigned and explicitly excluded keys before comparing.
        obj.pop('id', None)
        obj.pop('url', None)
        if excluded_fields is not None and isinstance(excluded_fields, list):
            for k in excluded_fields:
                obj.pop(k, None)
        return obj
    # BUG FIX: steps are module-level functions, not methods -- there is no
    # `self` here. Call the content_type_is step directly, exactly as
    # response_json_is does.
    content_type_is('application/json')
    _list_observed = deepcopy(http_response.data)
    list_expected = deepcopy(list_expected)
    list_observed = []
    is_enveloped = None
    for item in _list_observed:
        if is_enveloped is None:
            # Detect the envelope format from the first item only.
            is_enveloped = 'etag' in item
        if is_enveloped:
            item = item['content']
        elif isinstance(item, OrderedDict):
            item = dict(item)
        list_observed.append(clean_item(item))
    for i, item in enumerate(list_expected):
        list_expected[i] = clean_item(item)
    assert list_expected == list_observed
@step
def response_url_is(url_expected):
    '''Check that the response URL matches `url_expected`.'''
    assert http_response.url == url_expected
@step
def response_location_header_is(location_expected):
    '''Check that the response `location` header matches `location_expected`.'''
    header = http_response['location']
    assert header == location_expected
def json_values_match(expected, observed):
    '''
    Recursively compare an expected JSON value against an observed one.
    Returns False on any missing or mismatched value.
    '''
    if isinstance(expected, dict):
        return dictionaries_match(expected, observed)
    if isinstance(expected, list):
        return lists_match(expected, observed)
    # Scalars (and anything else) compare by plain equality.
    return expected == observed
def lists_match(expected, observed):
    '''
    Recursively compare an expected list against an observed one, element
    by element. Returns False on any missing or mismatched value.
    '''
    # The observed value must be a list of exactly the same length.
    if not isinstance(observed, list) or len(expected) != len(observed):
        return False
    return all(json_values_match(exp, obs)
               for exp, obs in zip(expected, observed))
def dictionaries_match(expected, observed):
    '''
    Recursively compare an expected dictionary against an observed one.
    Returns False on any missing key or mismatched value.

    Keys present in *observed* (at any nesting level) but absent from
    *expected* are ignored.
    '''
    if observed is None:
        return False
    return all(json_values_match(value, observed.get(key))
               for key, value in expected.items())
# Automatic Safeway coupon clipper
[][pypi]
[][pypi]
[][gh-actions]
[][codecov]
[][repo]
**safeway-coupons** is a script that will log in to an account on safeway.com,
and attempt to select all of the "Safeway for U" electronic coupons on the site
so they don't have to each be clicked manually.
## Design notes
Safeway's sign in page is protected by a web application firewall (WAF).
safeway-coupons performs authentication using a headless instance of Google
Chrome. Authentication may fail based on your IP's reputation, either by
presenting a CAPTCHA or denying sign in attempts altogether. safeway-coupons
currently does not have support for prompting the user to solve CAPTCHAs.
Once a signed in session is established, coupon clipping is performed using HTTP
requests via [requests][requests].
## Installation and usage with Docker
A Docker container is provided which runs safeway-coupons with cron. The cron
schedule and your Safeway account details may be configured using environment
variables, or with an accounts file.
Example `docker-compose.yaml` with configuration via environment variables:
```yaml
version: "3.7"
services:
safeway-coupons:
image: ghcr.io/smkent/safeway-coupons:latest
environment:
CRON_SCHEDULE: "0 2 * * *" # Run at 2:00 AM UTC each day
# TZ: Antarctica/McMurdo # Optional time zone to use instead of UTC
SMTPHOST: your.smtp.host
SAFEWAY_ACCOUNT_USERNAME: your.safeway.account.email@example.com
SAFEWAY_ACCOUNT_PASSWORD: very_secret
SAFEWAY_ACCOUNT_MAIL_FROM: your.email@example.com
SAFEWAY_ACCOUNT_MAIL_TO: your.email@example.com
# EXTRA_ARGS: --debug # Optional
restart: unless-stopped
```
Example `docker-compose.yaml` with configuration via accounts file:
```yaml
version: "3.7"
services:
safeway-coupons:
image: ghcr.io/smkent/safeway-coupons:latest
environment:
CRON_SCHEDULE: "0 2 * * *" # Run at 2:00 AM UTC each day
# TZ: Antarctica/McMurdo # Optional time zone to use instead of UTC
SMTPHOST: your.smtp.host
SAFEWAY_ACCOUNTS_FILE: /accounts_file
# EXTRA_ARGS: --debug # Optional
restart: unless-stopped
volumes:
- path/to/safeway_accounts_file:/accounts_file:ro
```
Start the container by running:
```console
docker-compose up -d
```
Debugging information can be viewed in the container log:
```console
docker-compose logs -f
```
## Installation from PyPI
### Prerequisites
* Google Chrome (for authentication performed via Selenium).
* Optional: `sendmail` (for email support)
### Installation
[safeway-coupons is available on PyPI][pypi]:
```console
pip install safeway-coupons
```
### Usage
For best results, run this program once a day or so with a cron daemon.
For full usage options, run
```console
safeway-coupons --help
```
### Configuration
**safeway-coupons** can clip coupons for one or more Safeway accounts in a
single run, depending on the configuration method used.
If a sender email address is configured, a summary email will be sent for each
Safeway account via `sendmail`. The email recipient defaults to the Safeway
account email address, but can be overridden for each account.
Accounts are searched via the following methods in the listed order. Only one
account configuration method may be used at a time.
#### With environment variables
A single Safeway account can be configured with environment variables:
* `SAFEWAY_ACCOUNT_USERNAME`: Account email address (required)
* `SAFEWAY_ACCOUNT_PASSWORD`: Account password (required)
* `SAFEWAY_ACCOUNT_MAIL_FROM`: Sender address for email summary
* `SAFEWAY_ACCOUNT_MAIL_TO`: Recipient address for email summary
#### With config file
Multiple Safeway accounts can be provided in an ini-style config file, with a
section for each account. For example:
```ini
email_sender = sender@example.com ; optional
[safeway.account@example.com] ; required
password = 12345 ; required
notify = your.email@example.com ; optional
```
Provide the path to your config file using the `-c` or `--accounts-config`
option:
```console
safeway-coupons -c path/to/config/file
```
## Development
### [Poetry][poetry] installation
Via [`pipx`][pipx]:
```console
pip install pipx
pipx install poetry
pipx inject poetry poetry-dynamic-versioning poetry-pre-commit-plugin
```
Via `pip`:
```console
pip install poetry
poetry self add poetry-dynamic-versioning poetry-pre-commit-plugin
```
### Development tasks
* Setup: `poetry install`
* Run static checks: `poetry run poe lint` or
`poetry run pre-commit run --all-files`
* Run static checks and tests: `poetry run poe test`
---
Created from [smkent/cookie-python][cookie-python] using
[cookiecutter][cookiecutter]
[codecov]: https://codecov.io/gh/smkent/safeway-coupons
[cookie-python]: https://github.com/smkent/cookie-python
[cookiecutter]: https://github.com/cookiecutter/cookiecutter
[gh-actions]: https://github.com/smkent/safeway-coupons/actions?query=branch%3Amain
[pipx]: https://pypa.github.io/pipx/
[poetry]: https://python-poetry.org/docs/#installation
[pypi]: https://pypi.org/project/safeway-coupons/
[repo]: https://github.com/smkent/safeway-coupons
[requests]: https://requests.readthedocs.io/en/latest/
| /safeway_coupons-0.2.6.tar.gz/safeway_coupons-0.2.6/README.md | 0.650911 | 0.794026 | README.md | pypi |
from typing import List
from . import messages, tools
from .tools import expect, session
# Keys expected in a full transaction description dict.
# NOTE(review): not referenced in this module's visible code — presumably
# used by callers validating whole-transaction dicts; confirm.
REQUIRED_FIELDS_TRANSACTION = ("inputs", "outputs", "transactions")
# Keys that must be present (non-None) in each input dict for create_input().
REQUIRED_FIELDS_INPUT = ("path", "prev_hash", "prev_index", "type")
@expect(messages.CardanoAddress, field="address")
def get_address(client, address_n, show_display=False):
    """Ask the device for the Cardano address at derivation path *address_n*.

    With show_display=True the device also shows the address on screen.
    """
    request = messages.CardanoGetAddress(
        address_n=address_n, show_display=show_display
    )
    return client.call(request)
@expect(messages.CardanoPublicKey)
def get_public_key(client, address_n):
    """Ask the device for the Cardano public key at path *address_n*."""
    request = messages.CardanoGetPublicKey(address_n=address_n)
    return client.call(request)
@session
def sign_tx(
    client,
    inputs: List[messages.CardanoTxInputType],
    outputs: List[messages.CardanoTxOutputType],
    transactions: List[str],
    network,
):
    """Sign a Cardano transaction on the device.

    :param client: connected device client
    :param inputs: transaction inputs
    :param outputs: transaction outputs
    :param transactions: hex-encoded previous transactions, indexable by
        the tx_index the device requests.  (The previous ``List[bytes]``
        annotation was wrong: ``bytes.fromhex`` below requires str.)
    :param network: network (protocol magic) identifier
    :return: the device's final response message
    """
    response = client.call(
        messages.CardanoSignTx(
            inputs=inputs,
            outputs=outputs,
            transactions_count=len(transactions),
            network=network,
        )
    )
    # The device requests referenced previous transactions one by one
    # until it has everything needed to finish signing.
    while isinstance(response, messages.CardanoTxRequest):
        tx_index = response.tx_index
        transaction_data = bytes.fromhex(transactions[tx_index])
        ack_message = messages.CardanoTxAck(transaction=transaction_data)
        response = client.call(ack_message)
    return response
def create_input(input) -> messages.CardanoTxInputType:
    """Build a CardanoTxInputType message from an input description dict.

    Raises ValueError when any of REQUIRED_FIELDS_INPUT is missing/None.
    """
    missing = [k for k in REQUIRED_FIELDS_INPUT if input.get(k) is None]
    if missing:
        raise ValueError("The input is missing some fields")
    return messages.CardanoTxInputType(
        address_n=tools.parse_path(input["path"]),
        prev_hash=bytes.fromhex(input["prev_hash"]),
        prev_index=input["prev_index"],
        type=input["type"],
    )
def create_output(output) -> messages.CardanoTxOutputType:
    """Build a CardanoTxOutputType from an output description dict.

    The destination is either an explicit "address" or a derivation
    "path" (path wins when both are given, matching the original logic).
    """
    has_destination = output.get("address") or output.get("path")
    if not output.get("amount") or not has_destination:
        raise ValueError("The output is missing some fields")
    if output.get("path"):
        return messages.CardanoTxOutputType(
            address_n=tools.parse_path(output["path"]),
            amount=int(output["amount"]),
        )
    return messages.CardanoTxOutputType(
        address=output["address"], amount=int(output["amount"])
    )
import os
import click
from mnemonic import Mnemonic
from . import device
from .exceptions import Cancelled
from .messages import PinMatrixRequestType, WordRequestType
PIN_MATRIX_DESCRIPTION = """
Use the numeric keypad to describe number positions. The layout is:
7 8 9
4 5 6
1 2 3
""".strip()
RECOVERY_MATRIX_DESCRIPTION = """
Use the numeric keypad to describe positions.
For the word list use only left and right keys.
Use backspace to correct an entry.
The keypad layout is:
7 8 9 7 | 9
4 5 6 4 | 6
1 2 3 1 | 3
""".strip()
# Aliases for PinMatrixRequestType values; ClickUI.get_pin() uses them to
# pick the right prompt wording.
PIN_GENERIC = None
PIN_CURRENT = PinMatrixRequestType.Current
PIN_NEW = PinMatrixRequestType.NewFirst
PIN_CONFIRM = PinMatrixRequestType.NewSecond
def echo(*args, **kwargs):
    """click.echo wrapper that always writes to stderr."""
    return click.echo(*args, err=True, **kwargs)
def prompt(*args, **kwargs):
    """click.prompt wrapper that always prompts on stderr."""
    return click.prompt(*args, err=True, **kwargs)
class ClickUI:
    """Command-line implementation of the device UI callbacks.

    Handles button-press confirmations, blind PIN-matrix entry and
    passphrase entry, all printed/prompted on stderr.
    """

    def __init__(self, always_prompt=False):
        # One-time help texts are suppressed after first display unless
        # always_prompt is set.
        self.pinmatrix_shown = False
        self.prompt_shown = False
        self.always_prompt = always_prompt

    def button_request(self, code):
        """Called when the device waits for a hardware button press."""
        if not self.prompt_shown:
            echo("Please confirm action on your SafeWISE device")
        if not self.always_prompt:
            self.prompt_shown = True

    def get_pin(self, code=None):
        """Prompt for a PIN entered via the blind keypad mapping.

        *code* (a PIN_* alias) selects the prompt wording. Loops until a
        purely numeric PIN is entered; Ctrl+C aborts with Cancelled.
        """
        if code == PIN_CURRENT:
            desc = "current PIN"
        elif code == PIN_NEW:
            desc = "new PIN"
        elif code == PIN_CONFIRM:
            desc = "new PIN again"
        else:
            desc = "PIN"
        if not self.pinmatrix_shown:
            echo(PIN_MATRIX_DESCRIPTION)
            if not self.always_prompt:
                self.pinmatrix_shown = True
        while True:
            try:
                pin = prompt("Please enter {}".format(desc), hide_input=True)
            except click.Abort:
                raise Cancelled from None
            if not pin.isdigit():
                echo("Non-numerical PIN provided, please try again")
            else:
                return pin

    def get_passphrase(self):
        """Return the passphrase from $PASSPHRASE or prompt for it twice."""
        if os.getenv("PASSPHRASE") is not None:
            echo("Passphrase required. Using PASSPHRASE environment variable.")
            return os.getenv("PASSPHRASE")
        while True:
            try:
                passphrase = prompt(
                    "Passphrase required",
                    hide_input=True,
                    default="",
                    show_default=False,
                )
                second = prompt(
                    "Confirm your passphrase",
                    hide_input=True,
                    default="",
                    show_default=False,
                )
                if passphrase == second:
                    return passphrase
                else:
                    echo("Passphrase did not match. Please try again.")
            except click.Abort:
                raise Cancelled from None
def mnemonic_words(expand=False, language="english"):
    """Build an input callback that reads mnemonic words from the user.

    With expand=True, a unique prefix of a BIP-39 word is completed to
    the full word from the wordlist of *language*; ambiguous or unknown
    prefixes make the user retry.

    :return: callback usable as device.recover()'s input_callback
    """
    if expand:
        wordlist = Mnemonic(language).wordlist
    else:
        wordlist = set()

    def expand_word(word):
        if not expand:
            return word
        if word in wordlist:
            return word
        matches = [w for w in wordlist if w.startswith(word)]
        if len(matches) == 1:
            # Bug fix: return the expanded word, not the typed prefix.
            # Returning the raw prefix made expansion a no-op and sent an
            # invalid (non-wordlist) word to the device.
            return matches[0]
        echo("Choose one of: " + ", ".join(matches))
        raise KeyError(word)

    def get_word(type):
        # Only plain word entry is supported here; matrix-style entry is
        # handled by matrix_words().
        assert type == WordRequestType.Plain
        while True:
            try:
                word = prompt("Enter one word of mnemonic")
                return expand_word(word)
            except KeyError:
                # Ambiguous/unknown prefix — re-prompt.
                pass
            except click.Abort:
                raise Cancelled from None

    return get_word
def matrix_words(type):
    """Input callback for matrix-style (blind) recovery entry.

    Reads single keypresses: valid digits describe matrix positions,
    Backspace/Del undoes the last entry, Esc/Ctrl+D cancels. Any other
    key is ignored.
    """
    while True:
        try:
            key = click.getchar()
        except (KeyboardInterrupt, EOFError):
            raise Cancelled from None
        if key in "\x04\x1b":
            # Ctrl+D, Esc
            raise Cancelled
        if key in "\x08\x7f":
            # Backspace, Del
            return device.RECOVERY_BACK
        if type == WordRequestType.Matrix6 and key in "147369":
            return key
        if type == WordRequestType.Matrix9 and key in "123456789":
            return key
import hashlib
from typing import NewType, Tuple
# A curve point in extended homogeneous coordinates (X, Y, Z, T),
# where x = X/Z, y = Y/Z and x*y = T/Z.
Point = NewType("Point", Tuple[int, int, int, int])

__version__ = "1.0.dev1"

b = 256  # bit size of integer and point encodings
q = 2 ** 255 - 19  # prime field modulus of Curve25519
l = 2 ** 252 + 27742317777372353535851937790883648493  # group order
# Scalar "clamping" masks: COORD_MASK clears the 3 lowest bits and the top
# bit; COORD_HIGH_BIT then sets the second-highest bit (see decodecoord()).
COORD_MASK = ~(1 + 2 + 4 + (1 << b - 1))
COORD_HIGH_BIT = 1 << b - 2
def H(m: bytes) -> bytes:
    """Return the 64-byte SHA-512 digest of *m*."""
    hasher = hashlib.sha512()
    hasher.update(m)
    return hasher.digest()
def pow2(x: int, p: int) -> int:
    """Square *x* modulo q, *p* times (== pow(x, 2**p, q) for reduced x).

    Note: for p == 0 the input is returned unreduced, like the original.
    """
    for _ in range(p):
        x = x * x % q
    return x
def inv(z: int) -> int:
    """Return the modular inverse z^{-1} mod q, for z != 0.

    Computes z^(q-2) mod q (Fermat's little theorem) via a fixed addition
    chain; variable names z2_a_b denote the exponent 2^a - 2^b.
    """
    # Adapted from curve25519_athlon.c in djb's Curve25519.
    z2 = z * z % q  # 2
    z9 = pow2(z2, 2) * z % q  # 9
    z11 = z9 * z2 % q  # 11
    z2_5_0 = (z11 * z11) % q * z9 % q  # 31 == 2^5 - 2^0
    z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q  # 2^10 - 2^0
    z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q  # ...
    z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q
    z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q
    z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q
    z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q
    z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q  # 2^250 - 2^0
    return pow2(z2_250_0, 5) * z11 % q  # 2^255 - 2^5 + 11 = q - 2
# Twisted Edwards curve parameter: d = -121665/121666 mod q.
d = -121665 * inv(121666) % q
# I = sqrt(-1) mod q, used by xrecover() when the first root candidate fails.
I = pow(2, (q - 1) // 4, q)
def xrecover(y: int) -> int:
    """Recover the even x coordinate for a given y on the curve.

    Solves x^2 = (y^2 - 1) / (d*y^2 + 1) mod q and returns the even root;
    callers negate it if the encoding's sign bit asks for the odd one.
    """
    xx = (y * y - 1) * inv(d * y * y + 1)
    # Candidate square root via exponentiation (valid since q == 5 mod 8).
    x = pow(xx, (q + 3) // 8, q)
    if (x * x - xx) % q != 0:
        # First candidate was a root of -xx; fix by multiplying by sqrt(-1).
        x = (x * I) % q
    if x % 2 != 0:
        x = q - x  # normalize to the even root
    return x
# Standard ed25519 base point B: y = 4/5, x recovered from the curve
# equation; stored in extended coordinates (X, Y, Z, T).
By = 4 * inv(5)
Bx = xrecover(By)
B = Point((Bx % q, By % q, 1, (Bx * By) % q))
# Neutral element of the group in extended coordinates.
ident = Point((0, 1, 1, 0))
def edwards_add(P: Point, Q: Point) -> Point:
    """Add two points given in extended homogeneous coordinates."""
    # This is formula sequence 'addition-add-2008-hwcd-3' from
    # http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
    (x1, y1, z1, t1) = P
    (x2, y2, z2, t2) = Q
    a = (y1 - x1) * (y2 - x2) % q
    b = (y1 + x1) * (y2 + x2) % q
    c = t1 * 2 * d * t2 % q
    dd = z1 * 2 * z2 % q
    e = b - a
    f = dd - c
    g = dd + c
    h = b + a
    x3 = e * f
    y3 = g * h
    t3 = e * h
    z3 = f * g
    return Point((x3 % q, y3 % q, z3 % q, t3 % q))
def edwards_double(P: Point) -> Point:
    """Double a point given in extended homogeneous coordinates."""
    # This is formula sequence 'dbl-2008-hwcd' from
    # http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
    (x1, y1, z1, _) = P
    a = x1 * x1 % q
    b = y1 * y1 % q
    c = 2 * z1 * z1 % q
    # dd = -a
    e = ((x1 + y1) * (x1 + y1) - a - b) % q
    g = -a + b  # dd + b
    f = g - c
    h = -a - b  # dd - b
    x3 = e * f
    y3 = g * h
    t3 = e * h
    z3 = f * g
    return Point((x3 % q, y3 % q, z3 % q, t3 % q))
def scalarmult(P: Point, e: int) -> Point:
if e == 0:
return ident
Q = scalarmult(P, e // 2)
Q = edwards_double(Q)
if e & 1:
Q = edwards_add(Q, P)
return Q
# Bpow[i] == scalarmult(B, 2**i)
Bpow = []  # type: List[Point]


def make_Bpow() -> None:
    """Precompute 253 doublings of the base point for scalarmult_B()."""
    P = B
    for _ in range(253):
        Bpow.append(P)
        P = edwards_double(P)


# Populate the table once at import time.
make_Bpow()
def scalarmult_B(e: int) -> Point:
    """
    Implements scalarmult(B, e) more efficiently.

    Uses the precomputed Bpow table so only additions are needed.
    """
    # scalarmult(B, l) is the identity
    e = e % l
    P = ident
    for i in range(253):
        if e & 1:
            P = edwards_add(P, Bpow[i])
        e = e // 2
    assert e == 0, e
    return P
def encodeint(y: int) -> bytes:
    """Serialize the non-negative integer *y* as b/8 little-endian bytes."""
    width = b // 8
    return y.to_bytes(width, "little")
def encodepoint(P: Point) -> bytes:
    """Encode *P* in the standard 32-byte form: affine y with the parity
    of x stored in the top bit."""
    (x, y, z, _) = P
    # Convert from projective to affine coordinates.
    zi = inv(z)
    x = (x * zi) % q
    y = (y * zi) % q
    xbit = (x & 1) << (b - 1)
    y_result = y & ~xbit  # clear x bit
    y_result |= xbit  # set correct x bit value
    return encodeint(y_result)
def decodeint(s: bytes) -> int:
    """Interpret *s* as an unsigned little-endian integer."""
    value = 0
    for position, byte in enumerate(s):
        value |= byte << (8 * position)
    return value
def decodepoint(s: bytes) -> Point:
    """Decode a 32-byte encoding into an extended-coordinate point.

    Raises ValueError when the decoded coordinates do not satisfy the
    curve equation.
    """
    y = decodeint(s) & ~(1 << b - 1)  # y without the highest bit
    x = xrecover(y)
    if x & 1 != bit(s, b - 1):
        x = q - x  # the encoded top bit selects the sign (parity) of x
    P = Point((x, y, 1, (x * y) % q))
    if not isoncurve(P):
        raise ValueError("decoding point that is not on curve")
    return P
def decodecoord(s: bytes) -> int:
    """Turn the first b/8 bytes of *s* into a clamped ed25519 scalar."""
    a = decodeint(s[: b // 8])
    # clear mask bits (lowest three bits and the top bit)
    a &= COORD_MASK
    # set high bit (second-highest), fixing the scalar's bit length
    a |= COORD_HIGH_BIT
    return a
def bit(h: bytes, i: int) -> int:
    """Return bit *i* of byte string *h* (LSB-first within each byte)."""
    byte = h[i >> 3]
    return (byte >> (i & 7)) & 1
def publickey_unsafe(sk: bytes) -> bytes:
    """
    Derive the 32-byte public key for secret key *sk*.

    Not safe to use with secret keys or secret data.
    See module docstring. This function should be used for testing only.
    """
    h = H(sk)
    a = decodecoord(h)  # clamp the first half of the hash into a scalar
    A = scalarmult_B(a)  # public point A = a * B
    return encodepoint(A)
def Hint(m: bytes) -> int:
    """SHA-512 *m* and decode the 64-byte digest as a little-endian int."""
    return decodeint(H(m))
def signature_unsafe(m: bytes, sk: bytes, pk: bytes) -> bytes:
    """
    Produce the 64-byte ed25519 signature (R || S) of *m* under *sk*.

    Not safe to use with secret keys or secret data.
    See module docstring. This function should be used for testing only.
    """
    h = H(sk)
    a = decodecoord(h)
    # Deterministic nonce r from the second half of the key hash and m.
    r = Hint(h[b // 8 : b // 4] + m)
    R = scalarmult_B(r)
    # S = r + H(enc(R) || pk || m) * a  (mod group order l)
    S = (r + Hint(encodepoint(R) + pk + m) * a) % l
    return encodepoint(R) + encodeint(S)
def isoncurve(P: Point) -> bool:
    """Check the twisted Edwards curve equation in extended coordinates."""
    (x, y, z, t) = P
    return (
        z % q != 0
        and x * y % q == z * t % q  # the T coordinate must equal XY/Z
        and (y * y - x * x - z * z - d * t * t) % q == 0
    )
class SignatureMismatch(Exception):
    """Raised by checkvalid() when a signature fails verification."""
    pass
def checkvalid(s: bytes, m: bytes, pk: bytes) -> None:
    """
    Verify signature *s* over message *m* for public key *pk*.

    Not safe to use when any argument is secret.
    See module docstring. This function should be used only for
    verifying public signatures of public messages.

    Raises ValueError on malformed lengths, SignatureMismatch on failure.
    """
    if len(s) != b // 4:
        raise ValueError("signature length is wrong")
    if len(pk) != b // 8:
        raise ValueError("public-key length is wrong")
    R = decodepoint(s[: b // 8])
    A = decodepoint(pk)
    S = decodeint(s[b // 8 : b // 4])
    h = Hint(encodepoint(R) + pk + m)
    # Check S*B == R + h*A, comparing projectively to avoid inversions.
    (x1, y1, z1, _) = P = scalarmult_B(S)
    (x2, y2, z2, _) = Q = edwards_add(R, scalarmult(A, h))
    if (
        not isoncurve(P)
        or not isoncurve(Q)
        or (x1 * z2 - x2 * z1) % q != 0
        or (y1 * z2 - y2 * z1) % q != 0
    ):
        raise SignatureMismatch("signature does not pass verification")
import os
import warnings
from . import messages as proto
from .exceptions import Cancelled
from .tools import expect, session
from .transport import enumerate_devices, get_transport
# Sentinel returned by recovery input callbacks to undo the last entry.
RECOVERY_BACK = "\x08"  # backspace character, sent literally
class SafeWISEDevice:
    """Deprecated collection of device-lookup helpers.

    There is no reason for this class to exist; use the functions from
    the ``safewiselib.transport`` module instead.
    """

    @classmethod
    def enumerate(cls):
        """Deprecated alias for transport.enumerate_devices()."""
        warnings.warn("SafeWISEDevice is deprecated.", DeprecationWarning)
        return enumerate_devices()

    @classmethod
    def find_by_path(cls, path):
        """Deprecated alias for transport.get_transport(path)."""
        warnings.warn("SafeWISEDevice is deprecated.", DeprecationWarning)
        return get_transport(path, prefix_search=False)
@expect(proto.Success, field="message")
def apply_settings(
    client,
    label=None,
    language=None,
    use_passphrase=None,
    homescreen=None,
    passphrase_source=None,
    auto_lock_delay_ms=None,
):
    """Apply device settings; only non-None parameters are sent.

    Returns the device's Success message text and reloads features.
    """
    settings = proto.ApplySettings()
    if label is not None:
        settings.label = label
    # NOTE(review): unlike the surrounding `is not None` checks this is a
    # truthiness test, so an empty-string language is silently ignored —
    # confirm that is intended.
    if language:
        settings.language = language
    if use_passphrase is not None:
        settings.use_passphrase = use_passphrase
    if homescreen is not None:
        settings.homescreen = homescreen
    if passphrase_source is not None:
        settings.passphrase_source = passphrase_source
    if auto_lock_delay_ms is not None:
        settings.auto_lock_delay_ms = auto_lock_delay_ms
    out = client.call(settings)
    client.init_device()  # Reload Features
    return out
@expect(proto.Success, field="message")
def apply_flags(client, flags):
    """Set device flags, then reload the device's features."""
    response = client.call(proto.ApplyFlags(flags=flags))
    client.init_device()  # Reload Features
    return response
@expect(proto.Success, field="message")
def change_pin(client, remove=False):
    """Change (or, with remove=True, remove) the device PIN."""
    response = client.call(proto.ChangePin(remove=remove))
    client.init_device()  # Re-read features
    return response
@expect(proto.Success, field="message")
def set_u2f_counter(client, u2f_counter):
    """Set the device's U2F counter to *u2f_counter*."""
    return client.call(proto.SetU2FCounter(u2f_counter=u2f_counter))
@expect(proto.Success, field="message")
def wipe(client):
    """Erase all data from the device, then reload its features."""
    response = client.call(proto.WipeDevice())
    client.init_device()
    return response
@expect(proto.Success, field="message")
def recover(
    client,
    word_count=24,
    passphrase_protection=False,
    pin_protection=True,
    label=None,
    language="english",
    input_callback=None,
    type=proto.RecoveryDeviceType.ScrambledWords,
    dry_run=False,
):
    """Recover a wallet from a mnemonic, feeding words via *input_callback*.

    :param word_count: mnemonic length; must be 12, 18 or 24
    :param input_callback: called with a WordRequestType for each word;
        required for model "1" devices (words are entered host-side)
    :param dry_run: simulate recovery without modifying the device
    :raises RuntimeError: missing callback, or device already initialized
        (unless dry_run)
    :raises ValueError: invalid word count
    """
    if client.features.model == "1" and input_callback is None:
        raise RuntimeError("Input callback required for SafeWISE")
    if word_count not in (12, 18, 24):
        raise ValueError("Invalid word count. Use 12/18/24")
    if client.features.initialized and not dry_run:
        raise RuntimeError(
            "Device already initialized. Call device.wipe() and try again."
        )
    res = client.call(
        proto.RecoveryDevice(
            word_count=word_count,
            passphrase_protection=bool(passphrase_protection),
            pin_protection=bool(pin_protection),
            label=label,
            language=language,
            enforce_wordlist=True,
            type=type,
            dry_run=dry_run,
        )
    )
    # Feed words until the device stops asking; Cancelled from the
    # callback aborts the workflow on the device as well.
    while isinstance(res, proto.WordRequest):
        try:
            inp = input_callback(res.type)
            res = client.call(proto.WordAck(word=inp))
        except Cancelled:
            res = client.call(proto.Cancel())
    client.init_device()
    return res
@expect(proto.Success, field="message")
@session
def reset(
    client,
    display_random=False,
    strength=None,
    passphrase_protection=False,
    pin_protection=True,
    label=None,
    language="english",
    u2f_counter=0,
    skip_backup=False,
    no_backup=False,
):
    """Create a new wallet on the device with a freshly generated seed.

    Host entropy from os.urandom() is mixed into the device-side seed.

    :param strength: seed strength in bits; defaults to 256 for model "1"
        devices and 128 otherwise
    :raises RuntimeError: device already initialized, or unexpected
        response from the device
    """
    if client.features.initialized:
        raise RuntimeError(
            "Device is initialized already. Call wipe_device() and try again."
        )
    if strength is None:
        if client.features.model == "1":
            strength = 256
        else:
            strength = 128
    # Begin with device reset workflow
    msg = proto.ResetDevice(
        display_random=bool(display_random),
        strength=strength,
        passphrase_protection=bool(passphrase_protection),
        pin_protection=bool(pin_protection),
        language=language,
        label=label,
        u2f_counter=u2f_counter,
        skip_backup=bool(skip_backup),
        no_backup=bool(no_backup),
    )
    resp = client.call(msg)
    if not isinstance(resp, proto.EntropyRequest):
        raise RuntimeError("Invalid response, expected EntropyRequest")
    # Host-side entropy contribution; the device combines it with its own.
    external_entropy = os.urandom(32)
    # LOG.debug("Computer generated entropy: " + external_entropy.hex())
    ret = client.call(proto.EntropyAck(entropy=external_entropy))
    client.init_device()
    return ret
@expect(proto.Success, field="message")
def backup(client):
    """Start the seed backup workflow on the device."""
    return client.call(proto.BackupDevice())
from .. import protobuf as p
from .TxInputType import TxInputType
from .TxOutputBinType import TxOutputBinType
from .TxOutputType import TxOutputType
if __debug__:
try:
from typing import List
except ImportError:
List = None # type: ignore
class TransactionType(p.MessageType):
    """Protobuf message describing a Bitcoin-style transaction (or a
    referenced previous transaction) exchanged during signing.

    The *_cnt / extra_data_len fields describe the full transaction when
    its pieces are streamed in chunks.
    """

    def __init__(
        self,
        version: int = None,
        inputs: List[TxInputType] = None,
        bin_outputs: List[TxOutputBinType] = None,
        lock_time: int = None,
        outputs: List[TxOutputType] = None,
        inputs_cnt: int = None,
        outputs_cnt: int = None,
        extra_data: bytes = None,
        extra_data_len: int = None,
        expiry: int = None,
        overwintered: bool = None,
        version_group_id: int = None,
        timestamp: int = None,
    ) -> None:
        self.version = version
        self.inputs = inputs if inputs is not None else []
        self.bin_outputs = bin_outputs if bin_outputs is not None else []
        self.lock_time = lock_time
        self.outputs = outputs if outputs is not None else []
        self.inputs_cnt = inputs_cnt
        self.outputs_cnt = outputs_cnt
        self.extra_data = extra_data
        self.extra_data_len = extra_data_len
        self.expiry = expiry
        self.overwintered = overwintered
        self.version_group_id = version_group_id
        self.timestamp = timestamp

    @classmethod
    def get_fields(cls):
        """Protobuf wire schema: field number -> (name, type, flags)."""
        return {
            1: ('version', p.UVarintType, 0),
            2: ('inputs', TxInputType, p.FLAG_REPEATED),
            3: ('bin_outputs', TxOutputBinType, p.FLAG_REPEATED),
            4: ('lock_time', p.UVarintType, 0),
            5: ('outputs', TxOutputType, p.FLAG_REPEATED),
            6: ('inputs_cnt', p.UVarintType, 0),
            7: ('outputs_cnt', p.UVarintType, 0),
            8: ('extra_data', p.BytesType, 0),
            9: ('extra_data_len', p.UVarintType, 0),
            10: ('expiry', p.UVarintType, 0),
            11: ('overwintered', p.BoolType, 0),
            12: ('version_group_id', p.UVarintType, 0),
            13: ('timestamp', p.UVarintType, 0),
        }
from .. import protobuf as p
from .MultisigRedeemScriptType import MultisigRedeemScriptType
if __debug__:
try:
from typing import List
except ImportError:
List = None # type: ignore
class TxInputType(p.MessageType):
    """Protobuf message for one transaction input: BIP-32 path, previous
    output reference, script data, and coin-specific extras (Decred,
    Horizen BIP-115).
    """

    def __init__(
        self,
        address_n: List[int] = None,
        prev_hash: bytes = None,
        prev_index: int = None,
        script_sig: bytes = None,
        sequence: int = None,
        script_type: int = None,
        multisig: MultisigRedeemScriptType = None,
        amount: int = None,
        decred_tree: int = None,
        decred_script_version: int = None,
        prev_block_hash_bip115: bytes = None,
        prev_block_height_bip115: int = None,
    ) -> None:
        self.address_n = address_n if address_n is not None else []
        self.prev_hash = prev_hash
        self.prev_index = prev_index
        self.script_sig = script_sig
        self.sequence = sequence
        self.script_type = script_type
        self.multisig = multisig
        self.amount = amount
        self.decred_tree = decred_tree
        self.decred_script_version = decred_script_version
        self.prev_block_hash_bip115 = prev_block_hash_bip115
        self.prev_block_height_bip115 = prev_block_height_bip115

    @classmethod
    def get_fields(cls):
        """Protobuf wire schema: field number -> (name, type, flags)."""
        return {
            1: ('address_n', p.UVarintType, p.FLAG_REPEATED),
            2: ('prev_hash', p.BytesType, 0),  # required
            3: ('prev_index', p.UVarintType, 0),  # required
            4: ('script_sig', p.BytesType, 0),
            5: ('sequence', p.UVarintType, 0),  # default=4294967295
            6: ('script_type', p.UVarintType, 0),  # default=SPENDADDRESS
            7: ('multisig', MultisigRedeemScriptType, 0),
            8: ('amount', p.UVarintType, 0),
            9: ('decred_tree', p.UVarintType, 0),
            10: ('decred_script_version', p.UVarintType, 0),
            11: ('prev_block_hash_bip115', p.BytesType, 0),
            12: ('prev_block_height_bip115', p.UVarintType, 0),
        }
from .. import protobuf as p
if __debug__:
try:
from typing import List
except ImportError:
List = None # type: ignore
class NEMMosaicDefinition(p.MessageType):
    """Protobuf message carrying the definition of a NEM mosaic:
    identity (namespace/mosaic), supply properties, and optional levy.
    """

    def __init__(
        self,
        name: str = None,
        ticker: str = None,
        namespace: str = None,
        mosaic: str = None,
        divisibility: int = None,
        levy: int = None,
        fee: int = None,
        levy_address: str = None,
        levy_namespace: str = None,
        levy_mosaic: str = None,
        supply: int = None,
        mutable_supply: bool = None,
        transferable: bool = None,
        description: str = None,
        networks: List[int] = None,
    ) -> None:
        self.name = name
        self.ticker = ticker
        self.namespace = namespace
        self.mosaic = mosaic
        self.divisibility = divisibility
        self.levy = levy
        self.fee = fee
        self.levy_address = levy_address
        self.levy_namespace = levy_namespace
        self.levy_mosaic = levy_mosaic
        self.supply = supply
        self.mutable_supply = mutable_supply
        self.transferable = transferable
        self.description = description
        self.networks = networks if networks is not None else []

    @classmethod
    def get_fields(cls):
        """Protobuf wire schema: field number -> (name, type, flags)."""
        return {
            1: ('name', p.UnicodeType, 0),
            2: ('ticker', p.UnicodeType, 0),
            3: ('namespace', p.UnicodeType, 0),
            4: ('mosaic', p.UnicodeType, 0),
            5: ('divisibility', p.UVarintType, 0),
            6: ('levy', p.UVarintType, 0),
            7: ('fee', p.UVarintType, 0),
            8: ('levy_address', p.UnicodeType, 0),
            9: ('levy_namespace', p.UnicodeType, 0),
            10: ('levy_mosaic', p.UnicodeType, 0),
            11: ('supply', p.UVarintType, 0),
            12: ('mutable_supply', p.BoolType, 0),
            13: ('transferable', p.BoolType, 0),
            14: ('description', p.UnicodeType, 0),
            15: ('networks', p.UVarintType, p.FLAG_REPEATED),
        }
import logging
import sys
import time
from typing import Any, Dict, Iterable
from . import DEV_SafeWISE1, UDEV_RULES_STR, TransportException
from .protocol import ProtocolBasedTransport, ProtocolV1
LOG = logging.getLogger(__name__)
try:
import hid
except Exception as e:
LOG.info("HID transport is disabled: {}".format(e))
hid = None
# Type aliases for hidapi objects: a device-info dict from hid.enumerate()
# and an opened hid.device handle.
HidDevice = Dict[str, Any]
HidDeviceHandle = Any
class HidHandle:
    """Low-level handle over one SafeWISE HID interface.

    Wraps a hidapi device: opens it by platform path, verifies its serial
    number, and reads/writes raw 64-byte report chunks.
    """

    def __init__(
        self, path: bytes, serial: str, probe_hid_version: bool = False
    ) -> None:
        self.path = path
        self.serial = serial
        self.handle = None  # type: HidDeviceHandle
        # hid_version 2 prefixes writes with a report ID byte; None means
        # "probe on first open".
        self.hid_version = None if probe_hid_version else 2

    def open(self) -> None:
        """Open the device and verify it is still the expected one."""
        self.handle = hid.device()
        try:
            self.handle.open_path(self.path)
        except (IOError, OSError) as e:
            # On Linux a permission failure usually means missing udev
            # rules, so append the rules text to the error.
            if sys.platform.startswith("linux"):
                e.args = e.args + (UDEV_RULES_STR,)
            raise e
        # On some platforms, HID path stays the same over device reconnects.
        # That means that someone could unplug a SafeWISE, plug a different one
        # and we wouldn't even know.
        # So we check that the serial matches what we expect.
        serial = self.handle.get_serial_number_string()
        if serial != self.serial:
            self.handle.close()
            self.handle = None
            raise TransportException(
                "Unexpected device {} on path {}".format(serial, self.path.decode())
            )
        self.handle.set_nonblocking(True)
        if self.hid_version is None:
            self.hid_version = self.probe_hid_version()

    def close(self) -> None:
        """Close the device, remembering a possibly changed serial."""
        if self.handle is not None:
            # reload serial, because device.wipe() can reset it
            self.serial = self.handle.get_serial_number_string()
            self.handle.close()
            self.handle = None

    def write_chunk(self, chunk: bytes) -> None:
        """Write one 64-byte chunk, prefixing a report ID for HID v2."""
        if len(chunk) != 64:
            raise TransportException("Unexpected chunk size: %d" % len(chunk))
        if self.hid_version == 2:
            self.handle.write(b"\0" + bytearray(chunk))
        else:
            self.handle.write(chunk)

    def read_chunk(self) -> bytes:
        """Poll the non-blocking handle until one 64-byte chunk arrives."""
        while True:
            chunk = self.handle.read(64)
            if chunk:
                break
            else:
                time.sleep(0.001)
        if len(chunk) != 64:
            raise TransportException("Unexpected chunk size: %d" % len(chunk))
        return bytes(chunk)

    def probe_hid_version(self) -> int:
        """Detect whether writes need a report-ID prefix (returns 2 or 1).

        A 65-byte write succeeding means the report ID byte is consumed
        by the HID layer (v2); a plain 64-byte write means v1.
        """
        n = self.handle.write([0, 63] + [0xFF] * 63)
        if n == 65:
            return 2
        n = self.handle.write([63] + [0xFF] * 63)
        if n == 64:
            return 1
        raise TransportException("Unknown HID version")
class HidTransport(ProtocolBasedTransport):
    """
    HidTransport implements transport over USB HID interface.
    """

    PATH_PREFIX = "hid"
    ENABLED = hid is not None  # disabled when the hidapi import failed

    def __init__(self, device: HidDevice) -> None:
        self.device = device
        self.handle = HidHandle(device["path"], device["serial_number"])
        protocol = ProtocolV1(self.handle)
        super().__init__(protocol=protocol)

    def get_path(self) -> str:
        """Return the transport path string, e.g. "hid:<platform path>"."""
        return "%s:%s" % (self.PATH_PREFIX, self.device["path"].decode())

    @classmethod
    def enumerate(cls, debug: bool = False) -> Iterable["HidTransport"]:
        """List connected SafeWISE devices (debug or wire interfaces)."""
        devices = []
        for dev in hid.enumerate(0, 0):
            usb_id = (dev["vendor_id"], dev["product_id"])
            if usb_id != DEV_SafeWISE1:
                continue
            if debug:
                if not is_debuglink(dev):
                    continue
            else:
                if not is_wirelink(dev):
                    continue
            devices.append(HidTransport(dev))
        return devices

    def find_debug(self) -> "HidTransport":
        """Return the debug-link transport paired with this device."""
        if self.protocol.VERSION >= 2:
            # use the same device
            return self
        else:
            # For v1 protocol, find debug USB interface for the same serial number
            for debug in HidTransport.enumerate(debug=True):
                if debug.device["serial_number"] == self.device["serial_number"]:
                    return debug
            raise TransportException("Debug HID device not found")
def is_wirelink(dev: HidDevice) -> bool:
    """Return True when *dev* is the primary (wire protocol) HID interface."""
    if dev["usage_page"] == 0xFF00:
        return True
    return dev["interface_number"] == 0
def is_debuglink(dev: HidDevice) -> bool:
    """Return True when *dev* is the debug-link HID interface."""
    if dev["usage_page"] == 0xFF01:
        return True
    return dev["interface_number"] == 1
import logging
import os
import struct
from io import BytesIO
from typing import Tuple
from typing_extensions import Protocol as StructuralType
from . import Transport
from .. import mapping, protobuf
REPLEN = 64  # HID report length: every chunk on the wire is exactly 64 bytes

# Control bytes of the v2 protocol framing:
V2_FIRST_CHUNK = 0x01
V2_NEXT_CHUNK = 0x02
V2_BEGIN_SESSION = 0x03
V2_END_SESSION = 0x04
LOG = logging.getLogger(__name__)
class Handle(StructuralType):
    """PEP 544 structural type for Handle functionality.
    (called a "Protocol" in the proposed PEP, name which is impractical here)

    Handle is a "physical" layer for a protocol.
    It can open/close a connection and read/write bare data in 64-byte chunks.

    Functionally we gain nothing from making this an (abstract) base class for handle
    implementations, so this definition is for type hinting purposes only. You can,
    but don't have to, inherit from it.
    """

    def open(self) -> None:
        """Open the physical connection."""
        ...

    def close(self) -> None:
        """Close the physical connection."""
        ...

    def read_chunk(self) -> bytes:
        """Read one 64-byte chunk from the device."""
        ...

    def write_chunk(self, chunk: bytes) -> None:
        """Write one 64-byte chunk to the device."""
        ...
class Protocol:
    """Wire protocol that can communicate with a SafeWISE device, given a Handle.

    A Protocol implements the part of the Transport API that relates to communicating
    logical messages over a physical layer. It is a thing that can:

    - open and close sessions,
    - send and receive protobuf messages,

    given the ability to:

    - open and close physical connections,
    - and send and receive binary chunks.

    We declare a protocol version (we have implementations of v1 and v2).

    For now, the class also handles session counting and opening the underlying Handle.
    This will probably be removed in the future.

    We will need a new Protocol class if we change the way a SafeWISE device encapsulates
    its messages.
    """

    VERSION = None  # type: int

    def __init__(self, handle: Handle) -> None:
        self.handle = handle
        # Number of nested begin_session() calls; the physical handle is
        # opened on the first and closed on the last end_session().
        self.session_counter = 0

    # XXX we might be able to remove this now that SafeWISEClient does session handling
    def begin_session(self) -> None:
        """Open the physical connection if this is the outermost session."""
        if self.session_counter == 0:
            self.handle.open()
        self.session_counter += 1

    def end_session(self) -> None:
        """Close the physical connection when the outermost session ends."""
        if self.session_counter == 1:
            self.handle.close()
        self.session_counter -= 1

    def read(self) -> protobuf.MessageType:
        """Receive one protobuf message (implemented by subclasses)."""
        raise NotImplementedError

    def write(self, message: protobuf.MessageType) -> None:
        """Send one protobuf message (implemented by subclasses)."""
        raise NotImplementedError
class ProtocolBasedTransport(Transport):
    """Transport that delegates all communication to a Protocol instance.

    Intended as a base class for transports whose read/write/session
    operations are entirely handled by their Protocol.
    """

    def __init__(self, protocol: Protocol) -> None:
        self.protocol = protocol

    def begin_session(self) -> None:
        """Open a logical session on the underlying protocol."""
        self.protocol.begin_session()

    def end_session(self) -> None:
        """Close the logical session on the underlying protocol."""
        self.protocol.end_session()

    def write(self, message: protobuf.MessageType) -> None:
        """Send a protobuf message through the protocol."""
        self.protocol.write(message)

    def read(self) -> protobuf.MessageType:
        """Receive a protobuf message from the protocol."""
        return self.protocol.read()
class ProtocolV1(Protocol):
    """Protocol version 1. Currently (11/2018) in use on all SafeWISEs.
    Does not understand sessions.

    Framing: the first chunk starts with b"?##" plus a big-endian
    (message type, data length) header; continuation chunks start with
    b"?"; everything is zero-padded to REPLEN bytes.
    """

    VERSION = 1

    def write(self, msg: protobuf.MessageType) -> None:
        """Serialize *msg* and send it in 64-byte report chunks."""
        LOG.debug(
            "sending message: {}".format(msg.__class__.__name__),
            extra={"protobuf": msg},
        )
        data = BytesIO()
        protobuf.dump_message(data, msg)
        ser = data.getvalue()
        header = struct.pack(">HL", mapping.get_type(msg), len(ser))
        buffer = bytearray(b"##" + header + ser)
        while buffer:
            # Report ID, data padded to 63 bytes
            chunk = b"?" + buffer[: REPLEN - 1]
            chunk = chunk.ljust(REPLEN, b"\x00")
            self.handle.write_chunk(chunk)
            buffer = buffer[63:]

    def read(self) -> protobuf.MessageType:
        """Receive chunks until a whole message is read, then parse it."""
        buffer = bytearray()
        # Read header with first part of message data
        msg_type, datalen, first_chunk = self.read_first()
        buffer.extend(first_chunk)
        # Read the rest of the message
        while len(buffer) < datalen:
            buffer.extend(self.read_next())
        # Strip padding
        data = BytesIO(buffer[:datalen])
        # Parse to protobuf
        msg = protobuf.load_message(data, mapping.get_class(msg_type))
        LOG.debug(
            "received message: {}".format(msg.__class__.__name__),
            extra={"protobuf": msg},
        )
        return msg

    def read_first(self) -> Tuple[int, int, bytes]:
        """Read the first chunk; return (msg type, data length, payload)."""
        chunk = self.handle.read_chunk()
        if chunk[:3] != b"?##":
            raise RuntimeError("Unexpected magic characters")
        try:
            headerlen = struct.calcsize(">HL")
            msg_type, datalen = struct.unpack(">HL", chunk[3 : 3 + headerlen])
        except Exception:
            raise RuntimeError("Cannot parse header")
        data = chunk[3 + headerlen :]
        return msg_type, datalen, data

    def read_next(self) -> bytes:
        """Read a continuation chunk and strip its "?" magic byte."""
        chunk = self.handle.read_chunk()
        if chunk[:1] != b"?":
            raise RuntimeError("Unexpected magic characters")
        return chunk[1:]
class ProtocolV2(Protocol):
    """Protocol version 2. Currently (11/2018) not used.
    Intended to mimic U2F/WebAuthN session handling.

    Every chunk is tagged with a control byte and the session id obtained
    from the device in begin_session().
    """

    VERSION = 2

    def __init__(self, handle: Handle) -> None:
        # Session id assigned by the device; None while no session is open.
        self.session = None
        super().__init__(handle)

    def begin_session(self) -> None:
        """Open the connection and negotiate a session id with the device."""
        # ensure open connection
        super().begin_session()
        # initiate session
        chunk = struct.pack(">B", V2_BEGIN_SESSION)
        chunk = chunk.ljust(REPLEN, b"\x00")
        self.handle.write_chunk(chunk)
        # get session identifier
        resp = self.handle.read_chunk()
        try:
            headerlen = struct.calcsize(">BL")
            magic, session = struct.unpack(">BL", resp[:headerlen])
        except Exception:
            raise RuntimeError("Cannot parse header")
        if magic != V2_BEGIN_SESSION:
            raise RuntimeError("Unexpected magic character")
        self.session = session
        LOG.debug("[session {}] session started".format(self.session))

    def end_session(self) -> None:
        """Ask the device to close the session, then close the connection."""
        if not self.session:
            return
        try:
            chunk = struct.pack(">BL", V2_END_SESSION, self.session)
            chunk = chunk.ljust(REPLEN, b"\x00")
            self.handle.write_chunk(chunk)
            resp = self.handle.read_chunk()
            (magic,) = struct.unpack(">B", resp[:1])
            if magic != V2_END_SESSION:
                raise RuntimeError("Expected session close")
            LOG.debug("[session {}] session ended".format(self.session))
        finally:
            # Forget the session even if the close handshake failed.
            self.session = None
            # close connection if appropriate
            super().end_session()

    def write(self, msg: protobuf.MessageType) -> None:
        """Serialize *msg* and send it in session-tagged chunks."""
        if not self.session:
            raise RuntimeError("Missing session for v2 protocol")
        LOG.debug(
            "[session {}] sending message: {}".format(
                self.session, msg.__class__.__name__
            ),
            extra={"protobuf": msg},
        )
        # Serialize whole message
        data = BytesIO()
        protobuf.dump_message(data, msg)
        data = data.getvalue()
        dataheader = struct.pack(">LL", mapping.get_type(msg), len(data))
        data = dataheader + data
        seq = -1
        # Write it out
        while data:
            if seq < 0:
                repheader = struct.pack(">BL", V2_FIRST_CHUNK, self.session)
            else:
                repheader = struct.pack(">BLL", V2_NEXT_CHUNK, self.session, seq)
            datalen = REPLEN - len(repheader)
            chunk = repheader + data[:datalen]
            chunk = chunk.ljust(REPLEN, b"\x00")
            self.handle.write_chunk(chunk)
            data = data[datalen:]
            seq += 1

    def read(self) -> protobuf.MessageType:
        """Receive session-tagged chunks and parse one protobuf message."""
        if not self.session:
            raise RuntimeError("Missing session for v2 protocol")
        buffer = bytearray()
        # Read header with first part of message data
        msg_type, datalen, chunk = self.read_first()
        buffer.extend(chunk)
        # Read the rest of the message
        while len(buffer) < datalen:
            next_chunk = self.read_next()
            buffer.extend(next_chunk)
        # Strip padding
        buffer = BytesIO(buffer[:datalen])
        # Parse to protobuf
        msg = protobuf.load_message(buffer, mapping.get_class(msg_type))
        LOG.debug(
            "[session {}] received message: {}".format(
                self.session, msg.__class__.__name__
            ),
            extra={"protobuf": msg},
        )
        return msg

    def read_first(self) -> Tuple[int, int, bytes]:
        """Read/validate the first chunk; return (type, length, payload)."""
        chunk = self.handle.read_chunk()
        try:
            headerlen = struct.calcsize(">BLLL")
            magic, session, msg_type, datalen = struct.unpack(
                ">BLLL", chunk[:headerlen]
            )
        except Exception:
            raise RuntimeError("Cannot parse header")
        if magic != V2_FIRST_CHUNK:
            raise RuntimeError("Unexpected magic character")
        if session != self.session:
            raise RuntimeError("Session id mismatch")
        return msg_type, datalen, chunk[headerlen:]

    def read_next(self) -> bytes:
        """Read/validate a continuation chunk; return its payload."""
        chunk = self.handle.read_chunk()
        try:
            headerlen = struct.calcsize(">BLL")
            magic, session, sequence = struct.unpack(">BLL", chunk[:headerlen])
        except Exception:
            raise RuntimeError("Cannot parse header")
        if magic != V2_NEXT_CHUNK:
            raise RuntimeError("Unexpected magic characters")
        if session != self.session:
            raise RuntimeError("Session id mismatch")
        return chunk[headerlen:]
def get_protocol(handle: Handle, want_v2: bool) -> Protocol:
    """Make a Protocol instance for the given handle.

    Transports may prefer protocol v2 (``want_v2``), but the
    ``SafeWISE_PROTOCOL_V1`` environment variable (default 1) overrides
    that preference and forces v1. Set ``SafeWISE_PROTOCOL_V1=0`` to allow
    v2 on transports that request it (i.e., USB transports for SafeWISE T).
    """
    force_v1 = int(os.environ.get("SafeWISE_PROTOCOL_V1", 1))
    use_v2 = want_v2 and not force_v1
    return ProtocolV2(handle) if use_v2 else ProtocolV1(handle)
# Coin Definitions
We currently recognize five categories of coins.
#### `bitcoin`
The [`bitcoin/`](bitcoin) subdirectory contains definitions for Bitcoin and altcoins
based on Bitcoin code. The `coins/` subdirectory is a compatibility link to `bitcoin`.
Each Bitcoin-like coin must have a single JSON file in the `bitcoin/` subdirectory,
and a corresponding PNG image with the same name. The PNG must be 96x96 pixels and
the picture must be a circle suitable for displaying on SafeWISE T.
Testnet is considered a separate coin, so it must have its own JSON and icon.
We will not support coins that have `address_type` 0, i.e., same as Bitcoin.
#### `eth`
The file [`ethereum/networks.json`](ethereum/networks.json) has a list of descriptions
of Ethereum networks. Each network must also have a PNG icon in `ethereum/<chain>.png`
file.
#### `erc20`
`ethereum/tokens` is a submodule linking to [Ethereum Lists](https://github.com/ethereum-lists/tokens)
project with descriptions of ERC20 tokens. If you want to add or update a token
definition in SafeWISE, you need to get your change to the `tokens` repository first.
SafeWISE will only support tokens that have a unique symbol.
#### `nem`
The file [`nem/nem_mosaics.json`](nem/nem_mosaics.json) describes NEM mosaics.
#### `misc`
Supported coins that are not derived from Bitcoin, Ethereum or NEM are currently grouped
and listed in separate file [`misc/misc.json`](misc/misc.json). Each coin must also have
an icon in `misc/<short>.png`, where `short` is lowercased `shortcut` field from the JSON.
## Keys
Throughout the system, coins are identified by a _key_ - a colon-separated string
generated from the coin's type and shortcut:
* for Bitcoin-likes, key is `bitcoin:XYZ`
* for Ethereum networks, key is `eth:XYZ`
* for ERC20 tokens, key is `erc20:<chain>:XYZ`
* for NEM mosaic, key is `nem:XYZ`
* for others, key is `misc:XYZ`
If a token shortcut has a suffix, such as `CAT (BlockCat)`, the whole thing is part
of the key (so the key is `erc20:eth:CAT (BlockCat)`).
Sometimes coins end up with duplicate symbols, which in case of ERC20 tokens leads to
key collisions. We do not allow duplicate symbols in the data, so this doesn't affect
everyday use (see below). However, for validation purposes, it is sometimes useful
to work with unfiltered data that includes the duplicates. In such cases, keys are
deduplicated by adding a counter at the end, e.g.: `erc20:eth:SMT:0`, `erc20:eth:SMT:1`.
Note that the suffix _is not stable_, so these coins can't be reliably uniquely identified.
## Duplicate Detection
**Duplicate symbols are not allowed** in our data. Tokens that have symbol collisions
are removed from the data set before processing. The duplicate status is mentioned
in `support.json` (see below), but it is impossible to override from there.
Duplicate detection works as follows:
1. a _symbol_ is split off from the shortcut string. E.g., for `CAT (BlockCat)`, symbol
is just `CAT`. It is compared, case-insensitive, with other coins (so `WIC` and `WiC`
are considered the same symbol), and identical symbols are put into a _bucket_.
2. if _all_ coins in the bucket also have a suffix (`CAT (BlockCat)` and `CAT (BitClave)`),
they are _not_ considered duplicate.
3. if _any_ coin in the bucket does _not_ have a suffix (`MIT` and `MIT (Mychatcoin)`),
all coins in the bucket are considered duplicate.
4. Duplicate tokens (coins from the `erc20` group) are automatically removed from data.
Duplicate non-tokens are marked but not removed. For instance, `bitcoin:FTC` (Feathercoin)
and `erc20:eth:FTC` (FTC) are duplicate, and `erc20:eth:FTC` is removed.
5. If two non-tokens collide with each other, it is an error that fails the CI build.
The file [`duplicity_overrides.json`](duplicity_overrides.json) can override detection
results: keys set to `true` are considered duplicate (in a separate bucket), keys set
to `false` are considered non-duplicate even if auto-detected. This is useful for
whitelisting a supported token explicitly, or blacklisting things that the detection
can't match (for instance "Battle" and "Bitlle" have suffixes, but they are too similar).
External contributors should not make changes to `duplicity_overrides.json`, unless
asked to.
You can use `./tools/cointool.py check -d all` to inspect duplicate detection in detail.
# Coins Details
The file [`coins_details.json`](coins_details.json) is a list of all known coins
with support status, market cap information and relevant links. This is the source
file for https://safewise.io/coins.
You should never make changes to `coins_details.json` directly. Use `./tools/coins_details.py`
to regenerate it from known data.
If you need to change information in this file, modify the source information instead -
one of the JSON files in the groups listed above, support info in `support.json`, or
make a pull request to the tokens repository.
If this is not viable for some reason, or if there is no source information (such as
links to third-party wallets), you can also edit [`coins_details.override.json`](coins_details.override.json).
External contributors should not touch this file unless asked to.
# Support Information
We keep track of support status of each coin over our devices. That is
`safewise1` for SafeWISE, `safewise2` for SafeWISE T, `connect` for SafeWISE connect
and `webwallet` for [SafeWISE Wallet](https://wallet.safewise.io/). In the following description, the word "device"
applies to Connect and webwallet as well.
This information is stored in [`support.json`](support.json).
External contributors should not touch this file unless asked to.
Each coin on each device can be in one of four support states:
* **supported** explicitly: coin's key is listed in the device's `supported`
dictionary. If it's a SafeWISE device, it contains the firmware version from which
it is supported. For connect and webwallet, the value is simply `true`.
* **unsupported** explicitly: coin's key is listed in the device's `unsupported`
dictionary. The value is a string with reason for not supporting.
For connect and webwallet, if the key is not listed at all, it is also considered unsupported.
ERC20 tokens detected as duplicates are also considered unsupported.
* **soon**: coin's key is listed in the device's `supported` dictionary, with
the value `"soon"`.
ERC20 tokens that are not listed at all are also considered `soon`, unless detected
as duplicates.
* **unknown**: coin's key is not listed at all.
_Supported_ and _soon_ coins are used in code generation (i.e., included in built firmware).
_Unsupported_ and _unknown_ coins are excluded from code generation.
That means that new ERC20 tokens are included as soon as you update the tokens repository.
New coin definitions, on the other hand, are not included until someone sets their
support status to _soon_ (or a version) explicitly.
You can edit `support.json` manually, but it is usually better to use the `support.py` tool.
See [tools docs](../tools) for details.
| /safewise-1.1.0.tar.gz/safewise-1.1.0/vendor/safewise-common/defs/README.md | 0.899074 | 0.787564 | README.md | pypi |
import fnmatch
import io
import json
import logging
import re
import sys
import os
import glob
import struct
import zlib
from collections import defaultdict
from hashlib import sha256
import click
import coin_info
from coindef import CoinDef
try:
import termcolor
except ImportError:
termcolor = None
try:
import mako
import mako.template
from munch import Munch
CAN_RENDER = True
except ImportError:
CAN_RENDER = False
try:
import requests
except ImportError:
requests = None
try:
import ed25519
from PIL import Image
from safewiselib import protobuf
CAN_BUILD_DEFS = True
except ImportError:
CAN_BUILD_DEFS = False
# ======= Crayon colors ======
# Global toggle for colored output; set from the --colors/--no-colors CLI
# option in cli() below.
USE_COLORS = False
def crayon(color, string, bold=False, dim=False):
    """Colorize *string* via termcolor, honoring the global USE_COLORS flag.

    Returns the string unchanged when termcolor is not installed or colors
    are disabled. ``dim`` is ignored whenever ``bold`` is set.
    """
    if termcolor is None or not USE_COLORS:
        return string
    if bold:
        attrs = ["bold"]
    elif dim:
        attrs = ["dark"]
    else:
        attrs = []
    return termcolor.colored(string, color, attrs=attrs)
def print_log(level, *args, **kwargs):
    """Print *args* prefixed with the (possibly colorized) logging level name."""
    prefix = logging.getLevelName(level)
    # (color, bold) per recognized level; other levels stay uncolored.
    styles = {
        logging.DEBUG: ("blue", False),
        logging.INFO: ("blue", True),
        logging.WARNING: ("red", False),
        logging.ERROR: ("red", True),
    }
    if level in styles:
        color, bold = styles[level]
        prefix = crayon(color, prefix, bold=bold)
    print(prefix, *args, **kwargs)
# ======= Mako management ======
def c_str_filter(b):
    """Render a value as a C source literal.

    None becomes NULL, bytes become a fully hex-escaped double-quoted
    string, and anything else is JSON-encoded.
    """
    if b is None:
        return "NULL"
    if isinstance(b, bytes):
        escaped = "".join(r"\x{:02x}".format(byte) for byte in b)
        return '"' + escaped + '"'
    return json.dumps(b)
def black_repr_filter(val):
    """Like repr(), but prefer double-quoted (black-style) string literals.

    Strings containing a double quote fall back to repr(); bytes are
    rendered as b"..." with hex escapes via c_str_filter.
    """
    if isinstance(val, bytes):
        return "b" + c_str_filter(val)
    if isinstance(val, str):
        return repr(val) if '"' in val else c_str_filter(val)
    return repr(val)
def ascii_filter(s):
    """Replace every character outside printable ASCII (space..~) with '_'."""
    return "".join(c if " " <= c <= "\x7e" else "_" for c in s)
def make_support_filter(support_info):
    """Build a template helper that filters coins by device support.

    The returned ``supported_on(device, coins)`` lazily yields only those
    coins whose support entry for *device* is truthy.
    """
    def supported_on(device, coins):
        return (coin for coin in coins if support_info[coin.key].get(device))
    return supported_on
# Helper functions exposed to Mako templates under these names
# (injected into the render context by render_file below).
MAKO_FILTERS = {
    "c_str": c_str_filter,
    "ascii": ascii_filter,
    "black_repr": black_repr_filter,
}
def render_file(src, dst, coins, support_info):
    """Render the Mako template *src* into the open file object *dst*.

    The template context contains the coin categories, the support data,
    a `supported_on` filter helper, and the MAKO_FILTERS functions.
    """
    tmpl = mako.template.Template(filename=src)
    rendered = tmpl.render(
        support_info=support_info,
        supported_on=make_support_filter(support_info),
        **coins,
        **MAKO_FILTERS,
    )
    dst.write(rendered)
# ====== validation functions ======
def highlight_key(coin, color):
    """Return a colorful string where the SYMBOL part of the key is bold.

    The coin name is appended in parentheses, dimmed.
    """
    parts = coin["key"].split(":")
    # Deduplicated keys end in a numeric counter ("...:SMT:0"); the symbol
    # is then the second-to-last component instead of the last.
    symbol_idx = -2 if parts[-1].isdigit() else -1
    parts[symbol_idx] = crayon(color, parts[symbol_idx], bold=True)
    key = crayon(color, ":".join(parts))
    name = crayon(None, "({})".format(coin["name"]), dim=True)
    return "{} {}".format(key, name)
def find_collisions(coins, field):
    """Group coins by *field* value; return only values shared by 2+ coins.

    The result maps each colliding value to the list of coins carrying it,
    in input order.
    """
    grouped = defaultdict(list)
    for coin in coins:
        grouped[coin[field]].append(coin)
    return {value: bucket for value, bucket in grouped.items() if len(bucket) > 1}
def check_eth(coins):
    """Report Ethereum networks whose chain names collide.

    Returns True when no collisions were found.
    """
    ok = True
    for chain, bucket in find_collisions(coins, "chain").items():
        bucket_str = ", ".join(
            "{} ({})".format(coin["key"], coin["name"]) for coin in bucket
        )
        header = "colliding chain name " + crayon(None, chain, bold=True) + ":"
        print_log(logging.ERROR, header, bucket_str)
        ok = False
    return ok
def check_btc(coins):
    """Validate BTC-like coin definitions and report prefix/SLIP44 collisions.

    Returns True when every individual definition validates and no
    forbidden collisions were found.
    """
    check_passed = True
    support_infos = coin_info.support_info(coins)
    # validate individual coin data
    for coin in coins:
        errors = coin_info.validate_btc(coin)
        if errors:
            check_passed = False
            print_log(logging.ERROR, "invalid definition for", coin["name"])
            print("\n".join(errors))
    def collision_str(bucket):
        """Generate a colorful string out of a bucket of colliding coins."""
        coin_strings = []
        for coin in bucket:
            name = coin["name"]
            prefix = ""
            # color-code by network kind: testnets green, Bitcoin red,
            # unsupported networks grey with an "(X)" marker, others blue
            if name.endswith("Testnet"):
                color = "green"
            elif name == "Bitcoin":
                color = "red"
            elif coin.get("unsupported"):
                color = "grey"
                prefix = crayon("blue", "(X)", bold=True)
            else:
                color = "blue"
            hl = highlight_key(coin, color)
            coin_strings.append(prefix + hl)
        return ", ".join(coin_strings)
    def print_collision_buckets(buckets, prefix, maxlevel=logging.ERROR):
        """Intelligently print collision buckets.
        For each bucket, if there are any collision with a mainnet, print it.
        If the collision is with unsupported networks or testnets, it's just INFO.
        If the collision is with supported mainnets, it's WARNING.
        If the collision with any supported network includes Bitcoin, it's an ERROR.
        """
        # NOTE(review): the `prefix` parameter is never used below -- the log
        # line always prints the literal word "prefix". Possibly intended as
        # "{} {}:".format(prefix, key); confirm before changing the output.
        failed = False
        for key, bucket in buckets.items():
            mainnets = [c for c in bucket if not c["name"].endswith("Testnet")]
            have_bitcoin = False
            for coin in mainnets:
                if coin["name"] == "Bitcoin":
                    have_bitcoin = True
                # a coin unsupported on every device is marked here; this also
                # drives the grey/"(X)" rendering in collision_str above
                if all(v is False for k, v in support_infos[coin["key"]].items()):
                    coin["unsupported"] = True
            supported_mainnets = [c for c in mainnets if not c.get("unsupported")]
            supported_networks = [c for c in bucket if not c.get("unsupported")]
            if len(mainnets) > 1:
                if have_bitcoin and len(supported_networks) > 1:
                    # ANY collision with Bitcoin is bad
                    level = maxlevel
                    failed = True
                elif len(supported_mainnets) > 1:
                    # collision between supported networks is still pretty bad
                    level = logging.WARNING
                else:
                    # collision between some unsupported networks is OK
                    level = logging.INFO
                print_log(level, "prefix {}:".format(key), collision_str(bucket))
        return failed
    # slip44 collisions
    print("Checking SLIP44 prefix collisions...")
    slip44 = find_collisions(coins, "slip44")
    if print_collision_buckets(slip44, "key"):
        check_passed = False
    # only check address_type on coins that don't use cashaddr
    nocashaddr = [coin for coin in coins if not coin.get("cashaddr_prefix")]
    print("Checking address_type collisions...")
    address_type = find_collisions(nocashaddr, "address_type")
    if print_collision_buckets(address_type, "address type"):
        check_passed = False
    print("Checking address_type_p2sh collisions...")
    address_type_p2sh = find_collisions(nocashaddr, "address_type_p2sh")
    # we ignore failed checks on P2SH, because reasons
    print_collision_buckets(address_type_p2sh, "address type", logging.WARNING)
    return check_passed
def check_dups(buckets, print_at_level=logging.ERROR):
    """Analyze and pretty-print results of `coin_info.mark_duplicate_shortcuts`.
    `print_at_level` can be one of logging levels.
    The results are buckets of colliding symbols.
    If the collision is only between ERC20 tokens, it's DEBUG.
    If the collision includes one non-token, it's INFO.
    If the collision includes more than one non-token, it's ERROR and printed always.

    Returns True when no ERROR-level collision (2+ non-tokens) was found.
    """
    def coin_str(coin):
        """Colorize coins. Tokens are cyan, nontokens are red. Coins that are NOT
        marked duplicate get a green asterisk.
        """
        if coin_info.is_token(coin):
            color = "cyan"
        else:
            color = "red"
        highlighted = highlight_key(coin, color)
        if not coin.get("duplicate"):
            prefix = crayon("green", "*", bold=True)
        else:
            prefix = ""
        return "{}{}".format(prefix, highlighted)
    check_passed = True
    for symbol in sorted(buckets.keys()):
        bucket = buckets[symbol]
        if not bucket:
            continue
        nontokens = [coin for coin in bucket if not coin_info.is_token(coin)]
        # string generation
        dup_str = ", ".join(coin_str(coin) for coin in bucket)
        # severity depends on how many non-tokens share the symbol
        if not nontokens:
            level = logging.DEBUG
        elif len(nontokens) == 1:
            level = logging.INFO
        else:
            level = logging.ERROR
            check_passed = False
        # deciding whether to print
        if level < print_at_level:
            continue
        # "_override" is the special bucket of force-set duplicates
        if symbol == "_override":
            print_log(level, "force-set duplicates:", dup_str)
        else:
            print_log(level, "duplicate symbol {}:".format(symbol), dup_str)
    return check_passed
def check_backends(coins):
    """Verify that each coin's configured backends serve the right chain.

    For every coin with a known `hash_genesis_block`, asks each of its
    blockbook/bitcore backends for block 0 and compares the returned hash
    with the expected genesis hash. Coins without a genesis hash are
    skipped. Returns True when every queried backend responded correctly.
    """
    check_passed = True
    for coin in coins:
        genesis_block = coin.get("hash_genesis_block")
        if not genesis_block:
            continue
        backends = coin.get("blockbook", []) + coin.get("bitcore", [])
        for backend in backends:
            print("checking", backend, "... ", end="", flush=True)
            try:
                # bounded timeout so that a single dead backend cannot hang
                # the whole check run indefinitely
                j = requests.get(backend + "/api/block-index/0", timeout=30).json()
                if j["blockHash"] != genesis_block:
                    raise RuntimeError("genesis block mismatch")
            except Exception as e:
                print(e)
                check_passed = False
            else:
                print("OK")
    return check_passed
def check_icons(coins):
    """Check that every coin ships an openable 96x96 RGBA icon file.

    Returns True when all icons are present and well-formed.
    """
    all_ok = True
    for coin in coins:
        key = coin["key"]
        icon_file = coin.get("icon")
        if not icon_file:
            print(key, ": missing icon")
            all_ok = False
            continue
        try:
            icon = Image.open(icon_file)
        except Exception:
            print(key, ": failed to open icon file", icon_file)
            all_ok = False
            continue
        # SafeWISE T expects exactly 96x96 pixels with an alpha channel.
        if icon.size != (96, 96) or icon.mode != "RGBA":
            print(key, ": bad icon format (must be RGBA 96x96)")
            all_ok = False
    return all_ok
IGNORE_NONUNIFORM_KEYS = frozenset(("unsupported", "duplicate", "notes"))
def check_key_uniformity(coins):
    """Check that all coins in the list share the same set of JSON keys.

    Keys listed in IGNORE_NONUNIFORM_KEYS are disregarded. The largest
    group of identical keysets is taken as the reference; every deviating
    coin is reported with its missing and superfluous keys. Returns True
    when all keysets agree.
    """
    keysets = defaultdict(list)
    for coin in coins:
        keysets[frozenset(coin.keys()) | IGNORE_NONUNIFORM_KEYS].append(coin)
    if len(keysets) <= 1:
        return True
    # The biggest bucket defines the reference keyset; all others deviate.
    buckets = sorted(keysets.values(), key=len)
    majority = buckets[-1]
    deviants = [coin for bucket in buckets[:-1] for coin in bucket]
    reference_keyset = set(majority[0].keys())
    for coin in deviants:
        key = coin["key"]
        keyset = set(coin.keys())
        missing = ", ".join(reference_keyset - keyset)
        if missing:
            print_log(logging.ERROR, "coin {} has missing keys: {}".format(key, missing))
        additional = ", ".join(keyset - reference_keyset)
        if additional:
            print_log(logging.ERROR, "coin {} has superfluous keys: {}".format(key, additional))
    return False
# ====== coindefs generators ======
def convert_icon(icon):
    """Convert a PIL icon to TOIF format and return the compressed payload.

    The icon is downscaled to 32x32, flattened onto black, converted to
    big-endian RGB565, and deflate-compressed with the zlib header and
    checksum stripped.
    """
    # TODO: move this to python-safewise at some point
    DIM = 32
    icon = icon.resize((DIM, DIM), Image.LANCZOS)
    # remove alpha channel, replace with black
    bg = Image.new("RGBA", icon.size, (0, 0, 0, 255))
    icon = Image.alpha_composite(bg, icon)
    # pack each pixel into big-endian RGB565; collect chunks and join once
    # instead of the quadratic `bytes +=` concatenation
    pix = icon.load()
    pixel_words = []
    for y in range(DIM):
        for x in range(DIM):
            r, g, b, _ = pix[x, y]
            c = ((r & 0xF8) << 8) | ((g & 0xFC) << 3) | ((b & 0xF8) >> 3)
            pixel_words.append(struct.pack(">H", c))
    data = b"".join(pixel_words)
    z = zlib.compressobj(level=9, wbits=10)
    zdata = z.compress(data) + z.flush()
    zdata = zdata[2:-4]  # strip header and checksum
    return zdata
def coindef_from_dict(coin):
    """Build a CoinDef protobuf message from a coin's JSON dict.

    Walks CoinDef.FIELDS and copies matching values from *coin*, applying
    the per-field conversions the wire format requires.
    """
    proto = CoinDef()
    for fname, _, fflags in CoinDef.FIELDS.values():
        val = coin.get(fname)
        # repeated protobuf fields must be lists, never None
        if val is None and fflags & protobuf.FLAG_REPEATED:
            val = []
        elif fname == "signed_message_header":
            # stored as a unicode string in JSON, sent as bytes on the wire
            val = val.encode()
        elif fname == "hash_genesis_block":
            # stored as a hex string in JSON, sent as raw bytes
            val = bytes.fromhex(val)
        setattr(proto, fname, val)
    return proto
def serialize_coindef(proto, icon):
    """Attach *icon* to the CoinDef message and return its binary encoding."""
    proto.icon = icon
    buffer = io.BytesIO()
    protobuf.dump_message(buffer, proto)
    return buffer.getvalue()
def sign(data):
    """Sign the SHA-256 digest of *data* with an ed25519 key.

    NOTE(review): the signing key is hardcoded (b"A" * 32) -- presumably a
    development placeholder; confirm before relying on these signatures
    anywhere security-sensitive.
    """
    h = sha256(data).digest()
    sign_key = ed25519.SigningKey(b"A" * 32)
    return sign_key.sign(h)
# ====== click command handlers ======
@click.group()
@click.option(
    "--colors/--no-colors",
    "-c/-C",
    default=sys.stdout.isatty(),
    help="Force colored output on/off",
)
def cli(colors):
    # Entry point of the coin tool: stores the color preference in the
    # module-global USE_COLORS, which the crayon() helper consults.
    global USE_COLORS
    USE_COLORS = colors
@cli.command()
# fmt: off
@click.option("--backend/--no-backend", "-b", default=False, help="Check blockbook/bitcore responses")
@click.option("--icons/--no-icons", default=True, help="Check icon files")
@click.option("-d", "--show-duplicates", type=click.Choice(("all", "nontoken", "errors")),
              default="errors", help="How much information about duplicate shortcuts should be shown.")
# fmt: on
def check(backend, icons, show_duplicates):
    """Validate coin definitions.
    Checks that every btc-like coin is properly filled out, reports duplicate symbols,
    missing or invalid icons, backend responses, and uniform key information --
    i.e., that all coins of the same type have the same fields in their JSON data.
    Uniformity check ignores NEM mosaics and ERC20 tokens, where non-uniformity is
    expected.
    The `--show-duplicates` option can be set to:
    - all: all shortcut collisions are shown, including colliding ERC20 tokens
    - nontoken: only collisions that affect non-ERC20 coins are shown
    - errors: only collisions between non-ERC20 tokens are shown. This is the default,
    as a collision between two or more non-ERC20 tokens is an error.
    In the output, duplicate ERC tokens will be shown in cyan; duplicate non-tokens
    in red. An asterisk (*) next to symbol name means that even though it was detected
    as duplicate, it is still included in results.
    The collision detection checks that SLIP44 numbers don't collide between different
    mainnets (testnet collisions are allowed), that `address_prefix` doesn't collide
    with Bitcoin (other collisions are reported as warnings). `address_prefix_p2sh`
    is also checked but we have a bunch of collisions there and can't do much
    about them, so it's not an error.
    In the collision checks, Bitcoin is shown in red, other mainnets in blue,
    testnets in green and unsupported networks in gray, marked with `(X)` for
    non-colored output.
    """
    # fail early if optional dependencies for the requested checks are missing
    if backend and requests is None:
        raise click.ClickException("You must install requests for backend check")
    if icons and not CAN_BUILD_DEFS:
        raise click.ClickException("Missing requirements for icon check")
    defs, buckets = coin_info.coin_info_with_duplicates()
    all_checks_passed = True
    print("Checking BTC-like coins...")
    if not check_btc(defs.bitcoin):
        all_checks_passed = False
    print("Checking Ethereum networks...")
    if not check_eth(defs.eth):
        all_checks_passed = False
    # map the CLI choice onto the minimum logging level check_dups will print
    if show_duplicates == "all":
        dup_level = logging.DEBUG
    elif show_duplicates == "nontoken":
        dup_level = logging.INFO
    else:
        dup_level = logging.ERROR
    print("Checking unexpected duplicates...")
    if not check_dups(buckets, dup_level):
        all_checks_passed = False
    if icons:
        print("Checking icon files...")
        if not check_icons(defs.bitcoin):
            all_checks_passed = False
    if backend:
        print("Checking backend responses...")
        if not check_backends(defs.bitcoin):
            all_checks_passed = False
    print("Checking key uniformity...")
    # NEM mosaics and ERC20 tokens are legitimately non-uniform; skip them
    for cointype, coinlist in defs.items():
        if cointype in ("erc20", "nem"):
            continue
        if not check_key_uniformity(coinlist):
            all_checks_passed = False
    if not all_checks_passed:
        print("Some checks failed.")
        sys.exit(1)
    else:
        print("Everything is OK.")
@cli.command()
# fmt: off
@click.option("-o", "--outfile", type=click.File(mode="w"), default="-")
@click.option("-s/-S", "--support/--no-support", default=True, help="Include support data for each coin")
@click.option("-p", "--pretty", is_flag=True, help="Generate nicely formatted JSON")
@click.option("-l", "--list", "flat_list", is_flag=True, help="Output a flat list of coins")
@click.option("-i", "--include", metavar="FIELD", multiple=True, help="Include only these fields")
@click.option("-e", "--exclude", metavar="FIELD", multiple=True, help="Exclude these fields")
@click.option("-I", "--include-type", metavar="TYPE", multiple=True, help="Include only these categories")
@click.option("-E", "--exclude-type", metavar="TYPE", multiple=True, help="Exclude these categories")
@click.option("-f", "--filter", metavar="FIELD=FILTER", multiple=True, help="Include only coins that match a filter")
@click.option("-F", "--filter-exclude", metavar="FIELD=FILTER", multiple=True, help="Exclude coins that match a filter")
@click.option("-t", "--exclude-tokens", is_flag=True, help="Exclude ERC20 tokens. Equivalent to '-E erc20'")
@click.option("-d", "--device", metavar="NAME", help="Only include coins supported on a given device")
# fmt: on
def dump(
    outfile,
    support,
    pretty,
    flat_list,
    include,
    exclude,
    include_type,
    exclude_type,
    filter,
    filter_exclude,
    exclude_tokens,
    device,
):
    """Dump coin data in JSON format
    This file is structured the same as the internal data. That is, top-level object
    is a dict with keys: 'bitcoin', 'eth', 'erc20', 'nem' and 'misc'. Value for each
    key is a list of dicts, each describing a known coin.
    If '--list' is specified, the top-level object is instead a flat list of coins.
    \b
    Fields are category-specific, except for four common ones:
    - 'name' - human-readable name
    - 'shortcut' - currency symbol
    - 'key' - unique identifier, e.g., 'bitcoin:BTC'
    - 'support' - a dict with entries per known device
    To control the size and properties of the resulting file, you can specify whether
    or not you want pretty-printing and whether or not to include support data with
    each coin.
    You can specify which categories and which fields will be included or excluded.
    You cannot specify both include and exclude at the same time. Include is "stronger"
    than exclude, in that _only_ the specified fields are included.
    You can also specify filters, in the form '-f field=value' (or '-F' for inverse
    filter). Filter values are case-insensitive and support shell-style wildcards,
    so '-f name=bit*' finds all coins whose names start with "bit" or "Bit".
    """
    if exclude_tokens:
        exclude_type = ("erc20",)
    if include and exclude:
        raise click.ClickException(
            "You cannot specify --include and --exclude at the same time."
        )
    if include_type and exclude_type:
        raise click.ClickException(
            "You cannot specify --include-type and --exclude-type at the same time."
        )
    coins = coin_info.coin_info()
    support_info = coin_info.support_info(coins.as_list())
    if support:
        for category in coins.values():
            for coin in category:
                coin["support"] = support_info[coin["key"]]
    # filter types
    if include_type:
        coins_dict = {k: v for k, v in coins.items() if k in include_type}
    else:
        coins_dict = {k: v for k, v in coins.items() if k not in exclude_type}
    # filter individual coins
    include_filters = [f.split("=", maxsplit=1) for f in filter]
    exclude_filters = [f.split("=", maxsplit=1) for f in filter_exclude]
    # always exclude 'address_bytes', not encodable in JSON
    exclude += ("address_bytes",)
    def should_include_coin(coin):
        # inclusion filters: every filter must match for the coin to pass
        for field, filter in include_filters:
            filter = filter.lower()
            if field not in coin:
                return False
            if not fnmatch.fnmatch(coin[field].lower(), filter):
                return False
        # exclusion filters: any single match drops the coin
        for field, filter in exclude_filters:
            filter = filter.lower()
            if field not in coin:
                continue
            if fnmatch.fnmatch(coin[field].lower(), filter):
                return False
        if device:
            is_supported = support_info[coin["key"]].get(device, None)
            if not is_supported:
                return False
        return True
    def modify_coin(coin):
        # restrict each coin dict to the requested set of fields
        if include:
            return {k: v for k, v in coin.items() if k in include}
        else:
            return {k: v for k, v in coin.items() if k not in exclude}
    for key, coinlist in coins_dict.items():
        coins_dict[key] = [modify_coin(c) for c in coinlist if should_include_coin(c)]
    if flat_list:
        output = sum(coins_dict.values(), [])
    else:
        output = coins_dict
    with outfile:
        indent = 4 if pretty else None
        json.dump(output, outfile, indent=indent, sort_keys=True)
        outfile.write("\n")
@cli.command()
@click.option("-o", "--outfile", type=click.File(mode="w"), default="./coindefs.json")
def coindefs(outfile):
    """Generate signed coin definitions for python-safewise and others
    This is currently unused but should enable us to add new coins without having to
    update firmware.
    """
    coins = coin_info.coin_info().bitcoin
    coindefs = {}
    for coin in coins:
        key = coin["key"]
        icon = Image.open(coin["icon"])
        # definition = ed25519 signature + serialized protobuf, hex-encoded
        ser = serialize_coindef(coindef_from_dict(coin), convert_icon(icon))
        sig = sign(ser)
        definition = (sig + ser).hex()
        coindefs[key] = definition
    with outfile:
        json.dump(coindefs, outfile, indent=4, sort_keys=True)
        outfile.write("\n")
@cli.command()
# fmt: off
@click.argument("paths", metavar="[path]...", nargs=-1)
@click.option("-o", "--outfile", type=click.File("w"), help="Alternate output file")
@click.option("-v", "--verbose", is_flag=True, help="Print rendered file names")
# fmt: on
def render(paths, outfile, verbose):
    """Generate source code from Mako templates.
    For every "foo.bar.mako" filename passed, runs the template and
    saves the result as "foo.bar". For every directory name passed,
    processes all ".mako" files found in that directory.
    If `-o` is specified, renders a single file into the specified outfile.
    If no arguments are given, processes the current directory.
    """
    if not CAN_RENDER:
        raise click.ClickException("Please install 'mako' and 'munch'")
    if outfile and (len(paths) != 1 or not os.path.isfile(paths[0])):
        raise click.ClickException("Option -o can only be used with single input file")
    # prepare defs
    defs = coin_info.coin_info()
    support_info = coin_info.support_info(defs)
    # munch dicts - make them attribute-accessible
    for key, value in defs.items():
        defs[key] = [Munch(coin) for coin in value]
    for key, value in support_info.items():
        support_info[key] = Munch(value)
    def do_render(src, dst):
        # render a single template file into an open destination file
        if verbose:
            click.echo("Rendering {} => {}".format(src, dst))
        render_file(src, dst, defs, support_info)
    # single in-out case
    if outfile:
        do_render(paths[0], outfile)
        return
    # find files in directories
    if not paths:
        paths = ["."]
    files = []
    for path in paths:
        if not os.path.exists(path):
            click.echo("Path {} does not exist".format(path))
        elif os.path.isdir(path):
            files += glob.glob(os.path.join(path, "*.mako"))
        else:
            files.append(path)
    # render each file: "foo.bar.mako" -> "foo.bar"
    for file in files:
        if not file.endswith(".mako"):
            click.echo("File {} does not end with .mako".format(file))
        else:
            target = file[: -len(".mako")]
            with open(target, "w") as dst:
                do_render(file, dst)
if __name__ == "__main__":
cli() | /safewise-1.1.0.tar.gz/safewise-1.1.0/vendor/safewise-common/tools/cointool.py | 0.528533 | 0.155623 | cointool.py | pypi |
from safewiselib import protobuf as p
class CoinDef(p.MessageType):
FIELDS = {
1: ('coin_name', p.UnicodeType, 0),
2: ('coin_shortcut', p.UnicodeType, 0),
3: ('coin_label', p.UnicodeType, 0),
4: ('curve_name', p.UnicodeType, 0),
5: ('address_type', p.UVarintType, 0),
6: ('address_type_p2sh', p.UVarintType, 0),
7: ('maxfee_kb', p.UVarintType, 0),
8: ('minfee_kb', p.UVarintType, 0),
9: ('signed_message_header', p.BytesType, 0),
10: ('hash_genesis_block', p.BytesType, 0),
11: ('xprv_magic', p.UVarintType, 0),
12: ('xpub_magic', p.UVarintType, 0),
13: ('xpub_magic_segwit_p2sh', p.UVarintType, 0),
14: ('xpub_magic_segwit_native', p.UVarintType, 0),
15: ('bech32_prefix', p.UnicodeType, 0),
16: ('cashaddr_prefix', p.UnicodeType, 0),
17: ('slip44', p.UVarintType, 0),
18: ('segwit', p.BoolType, 0),
19: ('decred', p.BoolType, 0),
20: ('fork_id', p.UVarintType, 0),
21: ('force_bip143', p.BoolType, 0),
22: ('dust_limit', p.UVarintType, 0),
23: ('uri_prefix', p.UnicodeType, 0),
24: ('min_address_length', p.UVarintType, 0),
25: ('max_address_length', p.UVarintType, 0),
26: ('icon', p.BytesType, 0),
28: ('website', p.UnicodeType, 0),
29: ('github', p.UnicodeType, 0),
30: ('maintainer', p.UnicodeType, 0),
31: ('blocktime_seconds', p.UVarintType, 0),
32: ('bip115', p.BoolType, 0),
33: ('cooldown', p.UVarintType, 0),
}
    def __init__(
        self,
        coin_name: str = None,
        coin_shortcut: str = None,
        coin_label: str = None,
        curve_name: str = None,
        address_type: int = None,
        address_type_p2sh: int = None,
        maxfee_kb: int = None,
        minfee_kb: int = None,
        signed_message_header: bytes = None,
        hash_genesis_block: bytes = None,
        xprv_magic: int = None,
        xpub_magic: int = None,
        xpub_magic_segwit_p2sh: int = None,
        xpub_magic_segwit_native: int = None,
        bech32_prefix: str = None,
        cashaddr_prefix: str = None,
        slip44: int = None,
        segwit: bool = None,
        decred: bool = None,
        fork_id: int = None,
        force_bip143: bool = None,
        bip115: bool = None,
        dust_limit: int = None,
        uri_prefix: str = None,
        min_address_length: int = None,
        max_address_length: int = None,
        icon: bytes = None,
        website: str = None,
        github: str = None,
        maintainer: str = None,
        blocktime_seconds: int = None,
        default_fee_b: dict = None,
        bitcore: dict = None,
        blockbook: dict = None,
        cooldown: int = None
    ):
        """Store every coin parameter verbatim; all arguments are optional.

        default_fee_b, bitcore and blockbook have no FIELDS entry and are
        therefore not serialized over the wire -- TODO confirm intended.
        """
        self.coin_name = coin_name
        self.coin_shortcut = coin_shortcut
        self.coin_label = coin_label
        self.curve_name = curve_name
        self.address_type = address_type
        self.address_type_p2sh = address_type_p2sh
        self.maxfee_kb = maxfee_kb
        self.minfee_kb = minfee_kb
        self.signed_message_header = signed_message_header
        self.hash_genesis_block = hash_genesis_block
        self.xprv_magic = xprv_magic
        self.xpub_magic = xpub_magic
        self.xpub_magic_segwit_p2sh = xpub_magic_segwit_p2sh
        self.xpub_magic_segwit_native = xpub_magic_segwit_native
        self.bech32_prefix = bech32_prefix
        self.cashaddr_prefix = cashaddr_prefix
        self.slip44 = slip44
        self.segwit = segwit
        self.decred = decred
        self.fork_id = fork_id
        self.force_bip143 = force_bip143
        self.bip115 = bip115
        self.dust_limit = dust_limit
        self.uri_prefix = uri_prefix
        self.min_address_length = min_address_length
        self.max_address_length = max_address_length
        self.icon = icon
        self.website = website
        self.github = github
        self.maintainer = maintainer
        self.blocktime_seconds = blocktime_seconds
        self.default_fee_b = default_fee_b
        self.bitcore = bitcore
        self.blockbook = blockbook
        self.cooldown = cooldown
p.MessageType.__init__(self) | /safewise-1.1.0.tar.gz/safewise-1.1.0/vendor/safewise-common/tools/coindef.py | 0.573559 | 0.35095 | coindef.py | pypi |
# Saffier
<p align="center">
<a href="https://saffier.tarsild.io"><img src="https://res.cloudinary.com/dymmond/image/upload/v1675104815/Saffier/logo/logo_dowatx.png" alt='Saffier'></a>
</p>
<p align="center">
<em>🚀 The only Async ORM you need. 🚀</em>
</p>
<p align="center">
<a href="https://github.com/tarsil/saffier/workflows/Test%20Suite/badge.svg?event=push&branch=main" target="_blank">
<img src="https://github.com/tarsil/saffier/workflows/Test%20Suite/badge.svg?event=push&branch=main" alt="Test Suite">
</a>
<a href="https://pypi.org/project/saffier" target="_blank">
<img src="https://img.shields.io/pypi/v/saffier?color=%2334D058&label=pypi%20package" alt="Package version">
</a>
<a href="https://pypi.org/project/saffier" target="_blank">
<img src="https://img.shields.io/pypi/pyversions/saffier.svg?color=%2334D058" alt="Supported Python versions">
</a>
</p>
---
**Documentation**: [https://saffier.tarsild.io](https://saffier.tarsild.io) 📚
**Source Code**: [https://github.com/tarsil/saffier](https://github.com/tarsil/saffier)
---
## Motivation
Almost every project, in one way or another, uses one (or many) databases. An ORM is simply a mapping
on top of an existing database. ORM stands for Object Relational Mapping and bridges object-oriented
programs and relational databases.
Two of the most well known ORMs are from Django and SQLAlchemy. Both have their own strengths and
weaknesses and specific use cases.
This ORM is built on the top of SQLAlchemy core and aims to simplify the way the setup and queries
are done into a more common and familiar interface.
## Before continuing
If you are looking for something more **Pydantic** oriented, where you can take full advantage
of everything that Pydantic can offer, then instead of continuing with Saffier, have a look at
its ***data ORM brother***, [Edgy](https://edgy.tarsild.io).
**Edgy** is extremely powerful as well, with the key difference that it is **100% Pydantic**, which means
you can leverage the technology if you are already familiar with it.
No worries, it is not completely different from Saffier, in fact, it was designed with the same principles and what it changes for you
are essentially the ***imports***.
### Thinking of moving to Edgy?
If you are considering of moving to Edgy but you don't want to be bothered about learning a new
tool and afraid of breaking changes, then fear not!
Edgy was designed to also make your migration feel seamless, which means that essentially you would
only need to install it and change the imports in your project from `saffier` to `edgy` and it should
work automatically for you.
Even the documentation structure its almost the same, intentionally, so what you already know with
Saffier, you will know with **Edgy**.
**This discards any custom code done by you, of course.**
## Why this ORM
When investigating different types of ORMs for a project and comparing them to each other, for a lot
of use cases SQLAlchemy always took the win, but it had one issue: async support (for which there
are a few solutions). While doing the research I came across [Encode ORM](https://www.encode.io/orm/).
The team is the same behind of Databases, Django Rest Framework, Starlette,
httpx and a lot more tools used by millions.
There was one issue though, although ORM was doing a great familiar interface with SQLAlchemy and
providing the async solution needed, it was, by the time of this writing, incomplete and they
even stated that in the documentation and that is how **Saffier** was born.
Saffier uses some of the same concepts of ORM from Encode but rewritten in **Pydantic** but not all.
## Saffier
Saffier is some sort of a fork from [Encode ORM](https://www.encode.io/orm/) but rewritten at its
core and with a complete set of tools with a familiar interface to work with.
If you are familiar with Django, then you came for a treat 😄.
Saffier leverages the power of **Pydantic** for its fields while offering a friendly, familiar and
easy to use interface.
This ORM was designed to be flexible and compatible with pretty much every ASGI framework, like
[Esmerald](https://esmerald.dymmond.com), Starlette, FastAPI, Sanic, Quart... With simple pluggable
design thanks to its origins.
### Special notes
Saffier couldn't exist without [Encode ORM](https://www.encode.io/orm/) and the continuous work
done by the amazing team behind it. For that reason, thank you!
## Features
While adopting a familiar interface, it offers some cool and powerful features on the top of
SQLAlchemy core.
### Key features
* **Model inheritance** - For those cases where you don't want to repeat yourself while maintaining
integrity of the models.
* **Abstract classes** - That's right! Sometimes you simply want a model that holds common fields
that doesn't need to be created as a table in the database.
* **Meta classes** - If you are familiar with Django, this is not new to you and Saffier offers this
in the same fashion.
* **Managers** - Versatility at its core, you can have separate managers for your models to optimise
specific queries and querysets at ease.
* **Filters** - Filter by any field you want and need.
* **Model operators** - Classic operations such as `update`, `get`, `get_or_none`, `bulk_create`,
`bulk_update` and a lot more.
* **Relationships made easy** - Support for `OneToOne` and `ForeignKey` in the same Django style.
* **Constraints** - Unique constraints through meta fields.
* **Native test client** - We all know how hard it can be to setup that client for those tests you
need so we give you already one.
* **Multi-tenancy** - Saffier supports multi-tenancy and even offers a possible solution to be used
out of the box if you don't want to waste time.
And a lot more you can do here.
## Migrations
Since **Saffier**, like [Encode ORM](https://www.encode.io/orm/), is built on the top of
[SQLAlchemy core](https://docs.sqlalchemy.org/en/20/core/), it brings its own native migration
system running on the top of [Alembic](https://alembic.sqlalchemy.org/en/latest/) but making it a
lot easier to use and more pleasant for you.
Have a look at the [migrations](https://saffier.tarsild.io/migrations.md) for more details.
## Installation
To install Saffier, simply run:
```shell
$ pip install saffier
```
You can pickup your favourite database driver by yourself or you can run:
**Postgres**
```shell
$ pip install saffier[postgres]
```
**MySQL/MariaDB**
```shell
$ pip install saffier[mysql]
```
**SQLite**
```shell
$ pip install saffier[sqlite]
```
## Quick Start
The following is an example how to start with **Saffier** and more details and examples can be
found throughout the documentation.
**Use** `ipython` **to run the following from the console, since it supports** `await`.
```python
import saffier
from saffier import Database, Registry
database = Database("sqlite:///db.sqlite")
models = Registry(database=database)
class User(saffier.Model):
"""
The User model to be created in the database as a table
If no name is provided the in Meta class, it will generate
a "users" table for you.
"""
id = saffier.IntegerField(primary_key=True)
is_active = saffier.BooleanField(default=False)
class Meta:
registry = models
# Create the db and tables
# Don't use this in production! Use Alembic or any tool to manage
# The migrations for you
await models.create_all()
await User.query.create(is_active=False)
user = await User.query.get(id=1)
print(user)
# User(id=1)
```
As stated in the example, if no `tablename` is provided in the `Meta` class, Saffier automatically
generates the name of the table for you by pluralising the class name.
## Connect your application
Do you want to have more complex structures and connect to your favourite framework? Have a look
at [connections](https://saffier.tarsild.io/connection.md) to understand how to do it properly.
**Exciting!**
In the documentation we go deeper in explanations and examples, this was just to warm up. 😁
| /saffier-0.18.0.tar.gz/saffier-0.18.0/README.md | 0.415492 | 0.944228 | README.md | pypi |
import subprocess
import argparse
import random
import pprint
import json
import os
from web3 import Web3
from web3.personal import Personal
from web3.eth import Eth
from saffron.utils import create_account
from saffron.genesis import Chain
from saffron.database import account_exists
def from_db(name=None, address=None):
    '''Load an account from the database by name or address.

    Args:
        name (str): account name.
        address (str): chain address.

    Returns:
        The value returned by ``database.init_account``.

    Raises:
        AssertionError: if neither name nor address is given.
    '''
    # Local import: only ``account_exists`` is imported at module level, so
    # the bare name ``database`` used below would otherwise raise NameError.
    from saffron import database
    assert name is not None or address is not None, 'Supply either a name or an address to query the DB with'
    print('{}, {}'.format(address, name))
    return database.init_account(name=name, address=address)
def new_account_to_db(name=None, password=None):
    '''Create a brand-new chain account and record it in the local database.

    Args:
        name (str): unique account name.
        password (str): password protecting the new chain account.
    '''
    assert name and password, 'Name and password required to create a new account'
    assert account_exists(name=name) == None, 'Choose a unique name for your account'
    new_address = create_account(password)
    Chain().database.insert_account(name, new_address)
class Account:
    '''An interface to a chain account.

    Attributes:
        name (str): account name.
        address (str): chain address.
    '''
    def __init__(self, name=None, address=None, password=None, chain=None):
        '''Look up an existing account by name, creating one if absent.

        Args:
            name (str): account name.
            address (str): chain address (not used for the lookup itself).
            password (str): password used when a new account must be created.
            chain: unused; kept for interface compatibility -- TODO confirm.
        '''
        _name, _address = account_exists(name=name)
        if not _address:
            # No stored address: create a fresh chain account and persist it.
            self.address = create_account(password)
            self.name = name
            Chain().database.insert_account(name, self.address)
            self._new_account = True
        else:
            self.name = _name
            self.address = _address
            self._new_account = False
@classmethod
def _from_db(self, name=None, address=None):
return
    # TODO: convenience helpers for interacting with the blockchain
    # (get balance, transact, etc.)
def balance(self):
return Eth.get_balance(self.address) | /saffron-cli-0.1.11.tar.gz/saffron-cli-0.1.11/saffron/accounts.py | 0.412294 | 0.18604 | accounts.py | pypi |
import sqlite3
import os, logging
from saffron.settings import lamden_db_file
from contextlib import suppress
import pickle
create_accounts = 'CREATE TABLE accounts (name text primary key, address text)'
create_contracts = '''
CREATE TABLE contracts (
name text primary key,
address text,
deployed boolean,
abi text,
metadata text,
gas_estimates blob,
method_identifiers blob,
instance blob
)'''
select_from = 'SELECT * FROM {table} WHERE {name} {address}'.format
log = logging.getLogger(__file__)
print(lamden_db_file)
connection = sqlite3.connect(lamden_db_file)
cursor = connection.cursor()
def init_dbs(sqls):
    """Best-effort schema bootstrap: run each CREATE TABLE statement.

    ``sqlite3.OperationalError`` (raised when a table already exists) is
    suppressed, so this is safe against both new and existing databases.
    """
    # graceful initialization tries to create new tables as a test to see if this is a new DB or not
    for s in sqls:
        with suppress(sqlite3.OperationalError):
            cursor.execute(s)


# Run at import time so the tables exist before any query helper is used.
init_dbs([create_contracts, create_accounts])
def exec_sql(sql):
    """Execute *sql* on the module cursor.

    Returns:
        The cursor (iterable over result rows), or None on any error.
    """
    try:
        return cursor.execute(sql)
    except Exception as e:
        # Deliberately best-effort, but log so failures are at least visible
        # (the original swallowed every error silently).
        log.warning('exec_sql failed: %s', e)
        return None
def name_or_address(name, address):
    """Build the WHERE-clause fragments for a name and/or address lookup.

    Args:
        name: account/contract name, or falsy to skip.
        address: chain address, or falsy to skip.

    Returns:
        tuple: (name_clause, address_clause); a clause is None when its
        value was not supplied.

    Raises:
        ValueError: if both values are missing.
    """
    if not name and not address:
        # Explicit error instead of a bare assert: asserts are stripped
        # under ``python -O`` and would let an invalid query through.
        raise ValueError('Supply either a name or an address')
    # NOTE(review): string interpolation is an SQL-injection risk for
    # untrusted input -- parameterized ("?") queries would be safer.
    name = ' name = "{}"'.format(name) if name else None
    address = ' address = "{}"'.format(address) if address else None
    return name, address
def contract_exists(name=None, address=None, table='contracts'):
    """Return the first matching row from *table*, or (None, None)."""
    clause_name, clause_addr = name_or_address(name, address)
    # XXX: is this a security risk if users are able to submit "name" or "address"
    # XXX: see ? syntax for sql queries for proper escaping
    query = select_from(table=table, name=clause_name, address=clause_addr)
    try:
        return next(exec_sql(query))
    except StopIteration:
        # Query ran but matched nothing.
        return None, None
    except Exception:
        # Best-effort lookup: treat any DB failure as "not found".
        return None, None
def account_exists(name=None, address=None, table='accounts'):
    """Return the first matching account row, or (None, None)."""
    clause_name, clause_addr = name_or_address(name, address)
    query = select_from(table=table, name=clause_name, address=clause_addr)
    try:
        return next(exec_sql(query))
    except StopIteration:
        # Query ran but matched nothing.
        return None, None
    except Exception:
        # Best-effort lookup: treat any DB failure as "not found".
        return None, None
def init_account(name=None, address=None, table='accounts'):
    """Construct an Account wrapper for the given name/address.

    Args:
        name: account name, or None.
        address: chain address, or None.
        table: unused; kept for interface compatibility.

    Raises:
        ValueError: if the Account could not be initialized.
    """
    # NOTE(review): ``Account`` is not imported in this module -- presumably
    # it lives in saffron.accounts (circular import?); confirm before use.
    try:
        return Account(name=name, address=address)
    except Exception as exc:
        # Raise instead of *returning* the exception object; the original
        # also dropped into pdb.set_trace(), which must never ship.
        log.exception('Unable to initialize Account')
        raise ValueError('Unable to initialize Account with values: '
                         '{name} {address}'.format(name=name, address=address)) from exc
def insert_account(name, address):
    """Insert an account row; returns 'Account exists' on a duplicate name.

    Uses a parameterized query, so values are safely escaped by sqlite3.
    """
    # The original ``assert name, address`` only validated *name* -- the
    # address was merely the assertion message. Check both explicitly.
    if not (name and address):
        raise ValueError('name and address are required')
    try:
        cursor.execute('INSERT INTO accounts(name, address) VALUES (?, ?)', (name, address))
        connection.commit()
    except sqlite3.IntegrityError:
        return 'Account exists'
def update_contract(address, instance, name):
    """Mark the named contract as deployed, storing its address and
    pickled instance; returns the updated row(s) for that address."""
    for required in (address, instance, name):
        assert required
    cursor.execute(update_contracts_sql, (address, pickle.dumps(instance), name))
    return list(cursor.execute('select * from contracts where address=?', (address, )))
def insert_contract(name: str, abi, bytecode: str, gas_estimates, method_identifiers, cwd):
    '''Insert a compiled contract into the local DB, pickling complex fields.

    Args:
        name: unique contract name (primary key).
        abi: contract ABI; stored via str() -- NOTE(review): json.dumps
            would round-trip more reliably; confirm how readers parse it.
        bytecode: EVM bytecode; note it lands in the ``metadata`` column.
        gas_estimates: pickled into a BLOB.
        method_identifiers: pickled into a BLOB.
        cwd: unused -- TODO confirm whether callers still pass it.
    '''
    assert name
    assert abi
    assert bytecode
    assert gas_estimates
    assert method_identifiers
    gas = pickle.dumps(gas_estimates)
    methods = pickle.dumps(method_identifiers)
    # Parameterized INSERT: values are escaped by sqlite3.
    result = cursor.execute(insert_contract_sql, (name,
                                                  str(abi),
                                                  bytecode,
                                                  sqlite3.Binary(gas),
                                                  sqlite3.Binary(methods)))
    connection.commit()
    return result
# Columns line up positionally with insert_contract()'s bound values;
# note the contract *bytecode* is stored in the ``metadata`` column.
insert_contract_sql = '''
INSERT INTO contracts (
name,
abi,
metadata,
gas_estimates,
method_identifiers) VALUES (?,?,?,?,?)'''

# Marks a contract as deployed and stores its address + pickled instance.
update_contracts_sql = '''
UPDATE contracts
SET
address = ?,
instance = ?,
deployed = 'true'
where name = ? ;'''
input_json = '''{"language": "Solidity","sources": {
"{{name}}": {
"content": {{sol}}
}
},
"settings": {
"outputSelection": {
"*": {
"*": [ "metadata", "evm.bytecode", "abi", "evm.bytecode.opcodes", "evm.gasEstimates", "evm.methodIdentifiers" ]
}
}
}
}''' | /saffron-cli-0.1.11.tar.gz/saffron-cli-0.1.11/saffron/database.py | 0.409221 | 0.161684 | database.py | pypi |
from scipy.signal import butter, filtfilt, iirnotch, cheby2
from .PluginManager import PluginManager
class FiltersPlugin(PluginManager):
    def __init__(self, *args, **kwargs):
        # Plain pass-through to the PluginManager initializer.
        super().__init__(*args, **kwargs)
def _butter_lowpass(self, cutoff, order=5):
nyq = 0.5 * self.fs
normal_cutoff = cutoff / nyq
b, a = butter(order, normal_cutoff, btype='low', analog=False)
return b, a
def butter_lowpass_filter(self, cutoff, order=5, method=None):
b, a = self._butter_lowpass(cutoff, order=order)
if method:
self.data = method(b, a, self.data)
else:
self.data = filtfilt(b, a, self.data)
def _butter_highpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = butter(order, normal_cutoff, btype='high', analog=False)
return b, a
def butter_highpass_filter(data, cutoff, fs, order=5, method=None):
b, a = _butter_highpass(cutoff, fs, order=order)
if method:
y = method(b, a, data)
else:
y = filtfilt(b, a, data)
return y
def notch_filter(data, f0, fs, Q=30, method=None):
nyq = 0.5 * fs
w0 = f0 / nyq
b, a = iirnotch(w0, Q)
if method:
y = method(b, a, data)
else:
y = filtfilt(b, a, data)
return y
def _cheb2_notch(self, cutoff, order=5, rs=3, width=.1, btype='bandstop'):
nq = self.fs / 2
Wn_min, Wn_max = (cutoff - width) / nq, (cutoff + width) / nq
Wn = [Wn_min, Wn_max]
b, a = cheby2(
N=order,
rs=rs,
Wn=Wn,
btype=btype,
analog=False,
output='ba'
)
return b, a
def cheb2_notch_filter(
self,
cutoff,
order=5,
rs=3,
width=.1,
method=None,
btype='bandstop'
):
b, a = self._cheb2_notch(cutoff, order=5, rs=3, width=.1, btype=btype)
if method:
self.data = method(b, a, self.data)
else:
self.data = filtfilt(b, a, self.data)
def _butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5, method=None):
b, a = _butter_bandpass(lowcut, highcut, fs, order=order)
if method:
y = method(b, a, data)
else:
y = filtfilt(b, a, data)
return y
    def __str__(self):
        # Human-readable plugin name shown by the plugin framework.
        return 'Filters'
def __repr__(self):
return 'Filters' | /saffy-0.1.9-py3-none-any.whl/plugins/Filters.py | 0.525369 | 0.250557 | Filters.py | pypi |
import numpy as np
from obci_readmanager.signal_processing.read_manager import ReadManager
from saffy.plugins import *
plugins = PluginManager.__subclasses__()
class SignalManager(*plugins):
    def __init__(self, name='', *args, **kwargs):
        """Build a SignalManager from OpenBCI files or from a state dict.

        Keyword Args:
            filename: base path (without extension) of the .xml/.raw/.tag
                triplet read via ReadManager.
            generator: dict carrying precomputed fs/num_channels/
                channel_names/data/t/epochs/tags (used by ``copy``).
        """
        # NOTE(review): super(plugin, self) invokes the class *after* each
        # plugin in the MRO -- confirm this initializes what is intended.
        for plugin in SignalManager.__bases__:
            super(plugin, self).__init__(*args, **kwargs)
        self.name = name
        if 'filename' in kwargs:
            filename = kwargs['filename']
            mgr = ReadManager("%s.xml" % filename, "%s.raw" % filename, "%s.tag" % filename)
            self.fs = int(float(mgr.get_param("sampling_frequency")))
            self.num_channels = int(mgr.get_param("number_of_channels"))
            self.channel_names = mgr.get_param("channels_names")
            # Presumably channels x samples -- TODO confirm; some methods
            # below index self.data as 3-D (epochs x channels x samples).
            self.data = mgr.get_microvolt_samples()
            self.t = np.arange(self.data.shape[1]) / self.fs
            self.epochs = 1
            self.tags = []
        if 'generator' in kwargs:
            generator = kwargs['generator']
            self.fs = generator['fs']
            self.num_channels = generator['num_channels']
            self.channel_names = generator['channel_names']
            self.data = generator['data']
            self.t = generator['t']
            self.epochs = generator['epochs']
            self.tags = generator['tags']
    def set_tags_from_channel(self, channel_name):
        """Derive tag sample indices from a trigger channel.

        Samples whose normalized amplitude exceeds 0.9 become tags.
        """
        tag_channel = self.data[:, self.channel_names.index(channel_name)]
        tag_channel = tag_channel / np.max(tag_channel)
        # NOTE(review): the ``[1]`` assumes tag_channel is 2-D (i.e. self.data
        # is 3-D, epochs x channels x samples); for freshly loaded 2-D data
        # this would raise IndexError -- confirm the expected call order.
        self.tags = np.where(tag_channel > 0.9)[1]
    def set_epochs_from_tags(self, low, high):
        """Cut the signal into per-tag epochs spanning [low, high) seconds.

        Args:
            low: epoch start relative to each tag, in seconds (may be negative).
            high: epoch end relative to each tag, in seconds.
        """
        self.t = np.arange(low, high, 1 / self.fs)
        # Convert second offsets to sample offsets.
        low = int(low * self.fs)
        high = int(high * self.fs)
        length = high - low
        data = np.zeros((len(self.tags), self.num_channels, length))
        for idx, tag in enumerate(self.tags):
            # NOTE(review): assumes self.data is 3-D with a leading epoch axis
            # whose slice broadcasts into (num_channels, length) -- confirm.
            data[idx] = self.data[:, :, tag + low: tag + high]
        self.data = data
        # Tags are consumed once epochs have been cut.
        self.tags = []
def remove_channel(self, channel_name):
channel_id = self.channel_names.index(channel_name)
self.data = np.delete(self.data, channel_id, 1)
del self.channel_names[channel_id]
self.num_channels -= 1
def extract_time_range(self, low, high):
low_samp = low * self.fs
high_samp = high * self.fs
self.t = np.arange(low, high, 1 / self.fs)
self.data = self.data[:, :, low_samp: high_samp]
def copy(self, name=''):
return SignalManager(name=name, generator={
'fs': self.fs,
'num_channels': self.num_channels,
'channel_names': self.channel_names,
'epochs': self.epochs,
'data': self.data,
't': self.t,
'tags': self.tags
})
    @classmethod
    def register_plugin(cls, plugin):
        """Dynamically add *plugin* as an extra base class of SignalManager."""
        bases = (*cls.__bases__, plugin)
        # NOTE(review): the set() deduplicates but loses base-class order,
        # which can change the MRO non-deterministically -- confirm method
        # resolution order is not relied upon.
        bases = set(bases)
        cls.__bases__ = tuple(bases)
    def __str__(self):
        # Display name used in logs and printing.
        return 'Signal Manager'

    def __repr__(self):
        return 'Signal Manager'
if __name__ == '__main__':
SignalManager() | /saffy-0.1.9-py3-none-any.whl/Saffy/SignalManager.py | 0.461259 | 0.150996 | SignalManager.py | pypi |
from scipy.signal import butter, filtfilt, iirnotch, cheby2
from .PluginManager import PluginManager
class FiltersPlugin(PluginManager):
    def __init__(self, *args, **kwargs):
        # Plain pass-through to the PluginManager initializer.
        super().__init__(*args, **kwargs)
    def _butter_lowpass(self, cutoff, order=5):
        """Design a low-pass Butterworth filter; returns (b, a) coefficients."""
        nyq = 0.5 * self.fs
        normal_cutoff = cutoff / nyq
        b, a = butter(order, normal_cutoff, btype='low', analog=False)
        return b, a
    def butter_lowpass_filter(self, cutoff, order=5, method=None):
        """Low-pass filter ``self.data`` in place.

        *method* (if truthy) replaces the default zero-phase ``filtfilt``.
        """
        b, a = self._butter_lowpass(cutoff, order=order)
        if method:
            self.data = method(b, a, self.data)
        else:
            self.data = filtfilt(b, a, self.data)
def _butter_highpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = butter(order, normal_cutoff, btype='high', analog=False)
return b, a
def butter_highpass_filter(data, cutoff, fs, order=5, method=None):
b, a = _butter_highpass(cutoff, fs, order=order)
if method:
y = method(b, a, data)
else:
y = filtfilt(b, a, data)
return y
def notch_filter(data, f0, fs, Q=30, method=None):
nyq = 0.5 * fs
w0 = f0 / nyq
b, a = iirnotch(w0, Q)
if method:
y = method(b, a, data)
else:
y = filtfilt(b, a, data)
return y
    def _cheb2_notch(self, cutoff, order=5, rs=3, width=.1, btype='bandstop'):
        """Design a Chebyshev-II band-stop of +/- *width* Hz around *cutoff*."""
        nq = self.fs / 2
        Wn_min, Wn_max = (cutoff - width) / nq, (cutoff + width) / nq
        Wn = [Wn_min, Wn_max]
        b, a = cheby2(
            N=order,
            rs=rs,
            Wn=Wn,
            btype=btype,
            analog=False,
            output='ba'
        )
        return b, a
def cheb2_notch_filter(
self,
cutoff,
order=5,
rs=3,
width=.1,
method=None,
btype='bandstop'
):
b, a = self._cheb2_notch(cutoff, order=5, rs=3, width=.1, btype=btype)
if method:
self.data = method(b, a, self.data)
else:
self.data = filtfilt(b, a, self.data)
def _butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5, method=None):
b, a = _butter_bandpass(lowcut, highcut, fs, order=order)
if method:
y = method(b, a, data)
else:
y = filtfilt(b, a, data)
return y
    def __str__(self):
        # Human-readable plugin name shown by the plugin framework.
        return 'Filters'
def __repr__(self):
return 'Filters' | /saffy-0.1.9-py3-none-any.whl/Saffy/plugins/Filters.py | 0.525369 | 0.250557 | Filters.py | pypi |
```
from safrac import FractalGenerator
from safrac import FractalMask
from matplotlib import pyplot as plt
%matplotlib inline
f = FractalGenerator(1.3, lowcut=0.1, highcut=100)
x = f.generate((128, 128))
fig, ax = plt.subplots()
fig.set_size_inches(10, 10)
p = ax.imshow(x[:,:], aspect='equal', cmap='viridis', vmin=0, vmax=1)
m = FractalMask(1.3, lowcut=0.1, highcut=5.)
x = m.generate((256, 256, 256), 30, 200)
fig, ax = plt.subplots()
fig.set_size_inches(10, 10)
p = ax.imshow(x[:,:,210], aspect='equal', cmap='viridis', vmin=0, vmax=1)
```
| /safrac-21.3.17.tar.gz/safrac-21.3.17/examples/fractal example.ipynb | 0.441432 | 0.592696 | fractal example.ipynb | pypi |
[](https://pypi.python.org/pypi/safrs/)
[](https://pypi.python.org/pypi/safrs/)
[](https://www.gnu.org/licenses/gpl-3.0)

[](https://www.codacy.com/gh/thomaxxl/safrs/dashboard?utm_source=github.com&utm_medium=referral&utm_content=thomaxxl/safrs)
[](https://pepy.tech/project/safrs)
# SAFRS: Python OpenAPI & JSON:API Framework

<a class="mk-toclify" id="table-of-contents"></a>
- [Introduction](#overview)
- [Installation](#installation)
- [JSON:API Interface](#http-methods)
- [Resource Objects](#resource-objects)
- [Relationships](#relationships)
- [Methods](#methods)
- [Custom Methods](#custom-methods)
- [Class Methods](#class-methods)
- [Initialization](#initialization)
- [Endpoint Naming](#endpoint-naming)
- [Configuration](#configuration)
- [Exposing Existing Databases](#expose-existing)
- [More Examples and Use Cases](#more-examples-and-use-cases)
- [Advanced Functionality](#advanced-usage)
- [Filtering](#filtering)
- [Customization](#customization)
- [Custom Serialization](#custom-serialization)
- [Excluding Attributes and Relationships](#excluding-attrs-rels)
- [HTTP Decorators](#http-decorators)
- [API Methods](#api-methods)
- [Custom Swagger](#custom-swagger)
- [Classes Without SQLAlchemy Models](#Classes-Without-Models)
<a class="mk-toclify" id="overview"></a>
## Introduction
SAFRS exposes SQLAlchemy database models as a [JSON:API](https://jsonapi.org) webservice and generates the corresponding [swagger/OpenAPI](https://swagger.io/).
Documentation can be found in the [wiki](https://github.com/thomaxxl/safrs/wiki).
__A [LIVE DEMO](http://thomaxxl.pythonanywhere.com) is available__, where much of the basic functionality is implemented using a simple [example](examples/demo_pythonanywhere_com.py).
<a class="mk-toclify" id="installation"></a>
## Installation
SAFRS can be installed as a [pip package](https://pypi.python.org/pypi/safrs/) or by downloading the latest version from github, for example:
```bash
git clone https://github.com/thomaxxl/safrs
cd safrs
pip install .
```
Once the dependencies are installed, the [examples](examples) can be started, for example
```
python examples/demo_relationship.py "your-interface-ip"
```
<a class="mk-toclify" id="http-methods"></a>
## JSON:API Interface
Exposed resource objects can be queried using the [JSON:API format](http://jsonapi.org/format/). The API supports following HTTP operations:
- GET : Retrieve an object or a list of objects
- PATCH : Update an object.
- DELETE: Delete an object.
- POST : Create an object.
Please check the [JSON:API spec](http://jsonapi.org/format/) for more implementation details.
You can also try out the interface in the [live demo](http://thomaxxl.pythonanywhere.com/api).
<a class="mk-toclify" id="resource-objects"></a>
## Resource Objects
Database objects are implemented as subclasses of the SAFRSBase and SQLAlchemy model classes. The SQLAlchemy columns are serialized to JSON when the corresponding REST API is invoked.
Following example [app](examples/demo_relationship.py) illustrates how the API is built and documented:
```python
class User(SAFRSBase, db.Model):
"""
description: User description
"""
__tablename__ = "Users"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
email = db.Column(db.String)
```
The User class is implemented as a subclass of
- db.Model: SQLAlchemy base
- SAFRSBase: Implements JSON serialization for the object and generates (swagger) API documentation
This User object is then exposed through the web interface using the Api object
```python
api.expose_object(User)
```
The User object REST methods are available on /User, the swagger schema is available on /api/swagger.json and the UI is available on /api/:

<a class="mk-toclify" id="relationships"></a>
## Relationships
Database object such as the User class from the demo.py example can be extended to include relationships with other objects. The demo_relationship.py contains following extension of the User class where a relationship with the Book class is implemented:
```python
class User(SAFRSBase, db.Model):
'''
description: User description
'''
__tablename__ = 'Users'
id = db.Column(db.String, primary_key=True)
name = db.Column(db.String, default='')
email = db.Column(db.String, default='')
books = db.relationship('Book', back_populates="user")
...
```
A many-to-one database association is declared by the back_populates relationship argument.
The Book class is simply another subclass of SAFRSBase and db.Model, similar to the previous User class:
```python
class Book(SAFRSBase, db.Model):
'''
description: Book description
'''
__tablename__ = 'Books'
id = db.Column(db.String, primary_key=True)
name = db.Column(db.String, default='')
user_id = db.Column(db.String, db.ForeignKey('Users.id'))
user = db.relationship('User', back_populates='books')
```
The User.book relationship can be queried in the API through the following endpoints:

- POST adds an item to the relationship
- DELETE removes an item from the relationship
- GET retrieves a list of item ids
The relationship API endpoints work similarly for one-to-many relationships.
Relationship members can also be included in the response when querying an instance, by specifying the relationship names as a comma separated list in the `include` query argument.

For example, to retrieve all items in the `books_read` relationship from the People endpoint, you may add the `include=books_read` url parameter
http://thomaxxl.pythonanywhere.com/api/People/?include=books_read
To retrieve nested relationship items, you can specify the nested relationship name after the '.', to retrieve the authors of the books_read instances for instance, you can use
http://thomaxxl.pythonanywhere.com/api/People/?include=books_read.author
<a class="mk-toclify" id="methods"></a>
## Methods
<a class="mk-toclify" id="custom-methods"></a>
### Custom Methods
Safrs allows the user to implement custom methods on the exposed objects. This methods can be invoked through the json API by sending an HTTP POST request to the method endpoint
The following example implements a "send_mail" method:
```python
class User(SAFRSBase, db.Model):
'''
description: User description
'''
__tablename__ = 'Users'
id = Column(String, primary_key=True)
name = Column(String, default='')
email = Column(String, default='')
# Following method is exposed through the REST API
# This means it can be invoked with a HTTP POST
@jsonapi_rpc(http_methods=['POST','GET'])
def send_mail(self, email):
'''
description : Send an email
args:
email:
type : string
example : test email
'''
content = 'Mail to {} : {}\n'.format(self.name, email)
return { 'result' : 'sent {}'.format(content)}
```
This method shows up in the swagger interface:

The ```send_mail``` method is documented with the ```jsonapi_rpc``` decorator.
This decorator generates a schema based on the function documentation. This documentation contains yaml specification of the API which is used by the swagger UI.
[api_methods.py](safrs/api_methods.py) contains a couple of methods that can be used in your models.
The yaml specification has to be in the first part of the function and class comments. These parts are delimited by four dashes ("----") . The rest of the comment may contain additional documentation.
<a class="mk-toclify" id="class-methods"></a>
### Class Methods
Two class-level methods have been defined to facilitate object retrieval:
* **lookup** : retrieve a list of objects that match the argument list. For example, the following HTTP POST request to a container will retrieve a list of items where the name is "thomas"
```json
{
"method": "lookup",
"args": {
"name": "thomas"
}
}
```
* **get_list** : retrieve a list of the items with the specified ID's
<a class="mk-toclify" id="initialization"></a>
## Application Initialization
The API can be initialized like this:
```python
api = SafrsApi(app, host=HOST, port=PORT, prefix=API_PREFIX)
```
Then you can expose objects with `expose_object`
```python
api.expose_object(User)
```
An example that uses the flask app factory pattern is implemented in [examples/mini_app.py](examples/mini_app.py)
<a class="mk-toclify" id="endpoint-naming"></a>
## Endpoint Naming
As can be seen in the swagger UI:
- the endpoint collection path names are the SQLAlchemy \_\_tablename\_\_ properties (e.g. /Users )
- the parameter names are derived from the SAFRSBase class names (e.g. {UserId} )
- the the relationship names are the SAFRSBase class relationship names (e.g /books )
The URL path format is [configurable](#configuration)
<a class="mk-toclify" id="configuration"></a>
## Configuration
Some configuration parameters can be set in [config.py](safrs/config.py):
- USE_API_METHODS: set this to false in case you want to disable the `jsonapi_rpc` functionality
- INSTANCE_URL_FMT: This parameter declares the instance url path format
- RELATIONSHIP_URL_FMT: This parameter declares the relationship endpoint path format
<a class="mk-toclify" id="expose-existing"></a>
## Exposing Existing Databases
Safrs allows you to expose existing databases as JSON:API services with the [expose_existing.py](expose_existing/expose_existing.py) script, for example:
```bash
python3 expose_existing.py mysql+pymysql://root:password@localhost/sakila --host localhost
```
This script uses sqlacodegen to generate a source file containing the SQLAlchemy and `SAFRSBase` database models and starts the API webservice.
More details [here](docs/ExposeDB.md). This approach is used by the [ApiLogicServer](https://github.com/valhuber/ApiLogicServer) project.
<a class="mk-toclify" id="more-examples-and-use-cases"></a>
## More Examples and Use Cases
The [examples](examples) folder contains more example scripts:
- Using a sha hash as primary key (id)
- CORS usage
- Flask-Admin integration example, eg.:

A docker image can be found here:
[https://github.com/thomaxxl/safrs-example](https://github.com/thomaxxl/safrs-example)
<a class="mk-toclify" id="advanced-usage"></a>
## Advanced Functionality
<a class="mk-toclify" id="filtering"></a>
### Filtering
The swagger shows the jsonapi filters that can be used in the url query arguments. Items with an exact match of the specified attribute value can be fetched by specifying the corresponding key-value query parameter. For example, suppose the `User` class, exposed at `/Users` has a `name` attribute, to retrieve all instances with the name "John", you can use a `GET` request to
`/Users?filter[name]=John`.
It is also possible to use more generic filters by specifying a JSON string, for example `filter=[{"name":"timestamp","op":"gt","val":"2020-08-01"},{"name":"timestamp","op":"lt","val":"2020-08-02"}]`.
More info can be found in the [wiki](https://github.com/thomaxxl/safrs/wiki/API-Functionality#filtering).
<a class="mk-toclify" id="custom-serialization"></a>
### Custom Serialization
Serialization and deserialization are implemented by the SAFRSBase `to_dict` and `__init__` : you can extend these methods as usual.
For example, if you would like to add some attributes to the json payload of the User object, you can override the to_dict method:
```python
class User(SAFRSBase, db.Model):
'''
description: User description
'''
__tablename__ = 'Users'
id = db.Column(db.String, primary_key=True)
name = db.Column(db.String, default='')
email = db.Column(db.String, default='')
books = db.relationship('Book', back_populates="user")
def to_dict(self):
result = SAFRSBase.to_dict(self)
result['custom_field'] = 'custom'
return result
```
This will add the `custom_field` attribute to the result attributes:
```json
"attributes": {
"custom_field": "custom",
"email": "reader_email0",
"name": "Reader 0"
}
```
<a class="mk-toclify" id="customization"></a>
## Customization
<a class="mk-toclify" id="excluding-attrs-rels"></a>
### Excluding Attributes and Relationships
It is possible to specify attributes and relationships that should not be serialized by specifying the respective `exclude_attrs` and `exclude_rels` class attributes in your SAFRSBase instances.
Examples can be found [here](examples/demo_pythonanywhere_com.py#L81) and [here](examples/demo_http_get.py#L21)
<a class="mk-toclify" id="limit-http-verbs"></a>
### Limiting HTTP Methods
It is possible to limit the HTTP methods that are allowed by overriding the `http_methods` class attribute. An example can be found [here](examples/demo_http_get.py#L20)
<a class="mk-toclify" id="HTTP-decorators"></a>
### HTTP Decorators
The `decorators` class attribute list can be used to add custom decorators to the HTTP endpoints. An example of this functionality is implemented
in the [authentication examples](examples/authentication).
<a class="mk-toclify" id="api-methods"></a>
### API Methods
Some additional API RPC methods are implemented in [api_methods.py](safrs/api_methods.py), e.g. mysql regex search.
<a class="mk-toclify" id="custom-swagger"></a>
### Custom swagger
The swagger schema can be merged with a modified schema dictionary by supplying the to-be-merged dictionary as the `custom_swagger` argument to `SafrsApi`, e.g.
```python
custom_swagger = {"info": {"title" : "New Title" }} # Customized swagger title will be merged
api = SafrsApi(app, host=swagger_host, port=PORT, prefix=OAS_PREFIX, api_spec_url=OAS_PREFIX+'/swagger',
custom_swagger=custom_swagger, schemes=['http', 'https'], description=description)
```
<a class="mk-toclify" id="Classes-Without-Models"></a>
### Classes Without SQLAlchemy Models
You can implement a serializable class without a model but this requires some extra work because safrs needs to know which attributes and relationships to serialize. An example is implemented [here](examples/demo_stateless.py)
### More Customization
The documentation is being moved to the [wiki](https://github.com/thomaxxl/safrs/wiki)
<details>
<summary>About</summary>
SAFRS is an acronym for **S**ql**A**lchemy **F**lask-**R**estful **S**wagger. The purpose of this framework is to help python developers create a self-documenting JSON API for sqlalchemy database objects and relationships. These objects can be serialized to JSON and can be created, retrieved, updated and deleted through the JSON API.
Optionally, custom resource object methods can be exposed and invoked using JSON.
Class and method descriptions and examples can be provided in yaml syntax in the code comments. The description is parsed and shown in the swagger web interface.
The result is an easy-to-use [swagger/OpenAPI](https://swagger.io/) and [JSON:API](https://jsonapi.org) compliant API implementation.
</details>
<details>
<summary>Limitations & Todos</summary>
This code was developed for a specific use-case and may not be flexible enough for everyone's needs. A lot of the functionality is available but not documented for the sake of brevity.
Performance is reasonable for regular databases, but once you start exposing really big tables you may run into problems, for example: the `count()` for mysql innodb is slow on large(1M rows) tables, a workaround can be implemented by querying the `sys` tables or using werkzeug caching.
Feel free to open an issue or drop [me](mailto:thomas.pollet+no+spam+@gmail.com) an email if you run into problems or something isn't clear!
</details>
<details>
<summary>References</summary>
- [JSON:API specification](http://jsonapi.org/format/)
- [OpenApi (Swagger)](https://www.openapis.org/)
- [Flask](http://flask.pocoo.org/)
- [SQLAlchemy](https://www.sqlalchemy.org/)
</details>
<details>
<summary>Thanks</summary>
I developed this code when I worked at [Excellium Services](https://www.excellium-services.com/). They allowed me to open source it when I stopped working there.
</details>
from typing import Any, Dict, Optional
import requests
from jose import jwt
from sag_py_auth.models import AuthConfig
from sag_py_auth.token_types import JwkDict, JwksDict, TokenDict
cached_jwk: Optional[JwkDict] = None
def verify_and_decode_token(auth_config: AuthConfig, token_string: str) -> TokenDict:
    """Decode and verify the token.

    :param auth_config: issuer/audience the token must match
    :param token_string: the raw encoded JWT
    Returns: The decoded token claims
    """
    global cached_jwk
    # BUG FIX: the jwk was previously cached once and never refreshed, so after
    # the idp rotated its signing keys every later token failed verification
    # forever.  Refresh the cache whenever the token's key id (kid) no longer
    # matches the cached jwk (each jwk carries its "kid" field).
    token_key_id = jwt.get_unverified_header(token_string).get("kid")
    if not cached_jwk or cached_jwk.get("kid") != token_key_id:
        cached_jwk = _get_token_jwk(auth_config.issuer, token_string)
    # "decode" also verifies signature, issuer, audience, expiration and more
    token: TokenDict = jwt.decode(
        token=token_string, key=cached_jwk, audience=auth_config.audience, issuer=auth_config.issuer
    )
    return token
def _get_token_jwk(issuer: str, token_string: str) -> JwkDict:
    """Look up the idp key set that signed the given token.

    The matching key set is identified by the key id (kid) found in the
    (unverified) header of the token.

    Returns: The key set that belongs to the token
    """
    unverified_header = jwt.get_unverified_header(token_string)
    key_id = unverified_header["kid"]
    return _get_auth_provider_jwks(issuer)[key_id]
def _get_auth_provider_jwks(issuer: str) -> JwksDict:
    """Fetch all json web key sets (jwks) from the auth provider (idp).

    Json web tokens are verified entirely on the client side; the token is
    signed by the idp to prevent manipulation.  To verify the signature we
    request the public key / algorithm information from the idp.  One idp can
    publish multiple key sets, each identified by its key id (kid).

    Returns: All key sets of the idp, mapped by kid
    """
    certs_url = f"{issuer}/protocol/openid-connect/certs"
    response = requests.get(
        certs_url,
        headers={"content-type": "application/json"},
        timeout=30,  # seconds
    )
    payload = response.json()
    return {key_set["kid"]: key_set for key_set in payload["keys"]}
from logging import LogRecord
from typing import List
from sag_py_auth.token_types import TokenDict
class AuthConfig:
    """Issuer and audience that incoming tokens are validated against."""

    def __init__(self, issuer: str, audience: str) -> None:
        # Kept as plain public attributes; consumers read them directly.
        self.issuer, self.audience = issuer, audience
class Token:
    """The authentication token: a thin wrapper over the decoded claim dict."""

    def __init__(self, token_dict: TokenDict) -> None:
        self.token_dict: TokenDict = token_dict

    def get_field_value(self, field_name: str) -> str:
        """Gets the value of a specified token claim field

        Returns: The claim field value, or "" when the claim is absent
        """
        # dict.get replaces the previous try/except KeyError dance
        return self.token_dict.get(field_name, "")

    def get_roles(self, client: str) -> List[str]:
        """Gets all roles of a specific client

        Returns: The client roles ([] when client or claim is missing)
        """
        return self.token_dict.get("resource_access", {}).get(client, {}).get("roles", [])

    def has_role(self, client: str, role_name: str) -> bool:
        """Checks if a specific client of the token has a role

        Returns: True if the client has the role
        """
        return role_name in self.get_roles(client)

    def get_realm_roles(self) -> List[str]:
        """Gets all realm roles

        Returns: The realm roles ([] when the claim is missing)
        """
        return self.token_dict.get("realm_access", {}).get("roles", [])

    def has_realm_role(self, role_name: str) -> bool:
        """Checks if the token has a realm role

        Returns: True if the token has the realm role
        """
        return role_name in self.get_realm_roles()
class TokenRole:
    """A required auth role, expressed as a (client, role) pair."""

    def __init__(self, client: str, role: str) -> None:
        self.client: str = client
        self.role: str = role

    def __repr__(self) -> str:
        # "<client>.<role>", e.g. "my-client.admin"
        return "%s.%s" % (self.client, self.role)
class UserInfoLogRecord(LogRecord):
    # Log record subtype enriched with user context taken from the auth token.
    # Instances are expected to carry these two extra attributes:
    user_name: str
    authorized_party: str
import functools
import inspect
import logging
import time
from typing import Any, Callable, Dict, Optional, Tuple, TypeVar, cast
# With python 3.10 param spec can be used instead - as described here:
# https://stackoverflow.com/questions/66408662/type-annotations-for-decorators
F = TypeVar("F", bound=Callable[..., Any])
def log_execution_time(
    log_level: int = logging.INFO, logger_name: str = __name__, log_params: Tuple[str, ...] = ()
) -> Callable[[F], F]:
    """This decorator logs the execution time of sync and async methods

    Args:
        log_level (int, optional): The log level used for the log message. Defaults to logging.INFO.
        logger_name (str, optional): A logger used for the log message. Defaults to __name__.
        log_params (Tuple(str), optional): Parameters of the decorated function to be logged in 'extra'.

    Returns:
        F: The return value of the original function
    """

    def decorator(func: F) -> F:
        if inspect.iscoroutinefunction(func):

            # BUG FIX: functools.wraps preserves __name__, __doc__ etc. of the
            # decorated function (previously lost on both wrappers).
            @functools.wraps(func)
            async def wrapper_async(*args: Any, **kw: Any) -> Any:
                start_time = _get_current_time()
                result = await func(*args, **kw)
                end_time = _get_current_time()
                extra_params = _get_params_to_log(log_params, func, args, kw)
                _calculate_and_log_execution_time(
                    start_time, end_time, logger_name, log_level, func.__name__, extra_params
                )
                return result

            return cast(F, wrapper_async)
        else:

            @functools.wraps(func)
            def wrapper_sync(*args: Any, **kw: Any) -> Any:
                start_time = _get_current_time()
                result = func(*args, **kw)
                end_time = _get_current_time()
                extra_params = _get_params_to_log(log_params, func, args, kw)
                _calculate_and_log_execution_time(
                    start_time, end_time, logger_name, log_level, func.__name__, extra_params
                )
                return result

            return cast(F, wrapper_sync)

    return decorator
def _get_current_time() -> int:
return round(int(time.time() * 1000))
def _calculate_and_log_execution_time(
start_time: int, end_time: int, logger_name: str, log_level: int, func_name: str, extra_params: Dict[str, Any] = {}
) -> None:
execution_time = end_time - start_time
extra_args = {"function_name": func_name, "execution_time": execution_time}
extra_args.update(extra_params)
time_logger = logging.getLogger(logger_name)
time_logger.log(log_level, "%s took %s ms.", func_name, execution_time, extra=extra_args)
def _get_params_to_log(
    log_params: Tuple[str, ...], func: F, func_args: Tuple[Any, ...], func_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
    """Filter the decorated function's arguments down to *log_params*.

    Args:
        log_params (tuple[str]): A tuple of parameter names to filter by.
        func (F): The decorated function whose arguments are considered.
        func_args (tuple): Positional arguments of the decorated function.
        func_kwargs (Dict): Keyword arguments of the decorated function.

    Returns:
        dict: parameter-name -> value for every name listed in log_params
    """
    params_dict_log: Dict[str, Any] = {}
    if log_params:
        # BUG FIX: co_varnames lists the parameters first, then every *local*
        # variable, so the previous length-equality guard against len(func_args)
        # silently skipped positional arguments for any function with locals.
        # Slice to the real parameter names; zip stops at the shorter sequence.
        parameter_names = func.__code__.co_varnames[: func.__code__.co_argcount]
        params_dict_log.update(zip(parameter_names, func_args))  # transform args to kwargs
        params_dict_log.update(func_kwargs)
    return {k: v for k, v in params_dict_log.items() if k in log_params}
class Constants:
    """
    Collection of various constants which are meant to be static but still changeable
    from the calling application at startup if necessary.
    The class should not be instantiated directly but used via the module level `constants` variable.
    """

    # timeout in seconds for TCP connections
    SOCKET_TIMEOUT = 5.0
    # interval in seconds to check the internal queue for new messages to be cached in the database
    QUEUE_CHECK_INTERVAL = 2.0
    # interval in seconds to send cached events from the database to Logstash
    QUEUED_EVENTS_FLUSH_INTERVAL = 10.0
    # count of cached events to send cached events from the database to Logstash; events are sent
    # to Logstash whenever QUEUED_EVENTS_FLUSH_COUNT or QUEUED_EVENTS_FLUSH_INTERVAL is reached,
    # whatever happens first
    QUEUED_EVENTS_FLUSH_COUNT = 50
    # maximum number of events to be sent to Logstash in one batch (i.e. using a single connection)
    QUEUED_EVENTS_BATCH_SIZE = 50
    # standard LogRecord attributes that are skipped when collecting extra
    # fields for the formatter
    # NOTE(review): the comment that previously stood here ("maximum number of
    # events to be updated within one SQLite statement") described a different,
    # apparently removed constant — confirm against the formatter code.
    FORMATTER_RECORD_FIELD_SKIP_LIST = {
        "args",
        "asctime",
        "created",
        "exc_info",
        "exc_text",
        "filename",
        "funcName",
        "id",
        "levelname",
        "levelno",
        "lineno",
        "message",
        "color_message",
        "module",
        "msecs",
        "msg",
        "name",
        "pathname",
        "process",
        "processName",
        "relativeCreated",
        "stack_info",
        "thread",
        "threadName",
    }
    # fields to be set on the top-level of a Logstash event/message, do not modify this
    # unless you know what you are doing
    FORMATTER_LOGSTASH_MESSAGE_FIELD_LIST = {"logsource", "program", "type", "tags", "@metadata"}
    # enable rate limiting for error messages (e.g. network errors) emitted by the logger
    # used in LogProcessingWorker, i.e. when transmitting log messages to the Logstash server.
    # Use a string like '5 per minute' or None to disable (default), for details see
    # http://limits.readthedocs.io/en/stable/string-notation.html
    ERROR_LOG_RATE_LIMIT = None
# Shared module-level instance; import and mutate this at startup to tweak settings.
constants = Constants()  # pylint: disable=invalid-name
import json
import logging
from abc import ABC, abstractmethod
from typing import Iterator, Union
import requests
from requests.auth import HTTPBasicAuth
logger = logging.getLogger(__name__)
class TimeoutNotSet:
    """Sentinel type distinguishing "no timeout argument given" from an explicit None."""
    pass
class Transport(ABC):
    """Abstract base class of all transport protocols.

    :param host: The name of the host.
    :type host: str
    :param port: The TCP/UDP port.
    :type port: int
    :param timeout: The connection timeout (TimeoutNotSet is mapped to None).
    :type timeout: None or float
    :param ssl_enable: Activates TLS.
    :type ssl_enable: bool
    :param use_logging: Use logging for debugging.
    :type use_logging: bool
    """

    def __init__(
        self,
        host: str,
        port: int,
        timeout: Union[None, float],
        ssl_enable: bool,
        use_logging: bool,
    ):
        self._host = host
        self._port = port
        # Normalise the sentinel to "no timeout"
        if timeout is TimeoutNotSet:
            self._timeout = None
        else:
            self._timeout = timeout
        self._ssl_enable = ssl_enable
        self._use_logging = use_logging
        super().__init__()

    @abstractmethod
    def send(self, events: list, **kwargs):
        pass

    @abstractmethod
    def close(self):
        pass
class HttpTransport(Transport):
    """The :class:`HttpTransport <HttpTransport>` implements a client for the
    logstash plugin `inputs_http`.
    For more details visit:
    https://www.elastic.co/guide/en/logstash/current/plugins-inputs-http.html

    :param host: The hostname of the logstash HTTP server.
    :type host: str
    :param port: The TCP port of the logstash HTTP server.
    :type port: int
    :param timeout: The connection timeout. (Default: None)
    :type timeout: float
    :param ssl_enable: Activates TLS. (Default: True)
    :type ssl_enable: bool
    :param use_logging: Use logging for debugging.
    :type use_logging: bool
    :param username: Username for basic authorization. (Default: "")
    :type username: str
    :param password: Password for basic authorization. (Default: "")
    :type password: str
    :param index_name: A string with the prefix of the elasticsearch index that will be created.
    :type index_name: str
    :param max_content_length: The max content of an HTTP request in bytes.
        (Default: 100MB)
    :type max_content_length: int
    """

    def __init__(
        self,
        host: str,
        port: int,
        timeout: Union[None, float] = TimeoutNotSet,  # type: ignore
        ssl_enable: bool = True,
        use_logging: bool = False,
        **kwargs,
    ):
        super().__init__(host, port, timeout, ssl_enable, use_logging)
        # optional extras arrive via kwargs so the positional interface stays
        # compatible with the Transport base class
        self._username = kwargs.get("username", None)
        self._password = kwargs.get("password", None)
        self._index_name = kwargs.get("index_name", None)
        self._max_content_length = kwargs.get("max_content_length", 100 * 1024 * 1024)
        # requests.Session, created lazily in send() and closed afterwards
        self.__session = None

    @property
    def url(self) -> str:
        """The URL of the logstash pipeline based on the hostname, the index, the port and
        the TLS usage.

        :return: The URL of the logstash HTTP pipeline.
        :rtype: str
        """
        protocol = "http"
        if self._ssl_enable:
            protocol = "https"
        if self._index_name is not None:
            return f"{protocol}://{self._host}:{self._port}/{self._index_name}"
        return f"{protocol}://{self._host}:{self._port}"

    def __batches(self, events: list) -> Iterator[list]:
        """Generate dynamic sized batches based on the max content length.

        :param events: A list of events (JSON strings).
        :type events: list
        :return: A iterator which generates batches of events.
        :rtype: Iterator[list]
        """
        current_batch = []
        event_iter = iter(events)
        while True:
            try:
                current_event = next(event_iter)
            except StopIteration:
                current_event = None
                # input exhausted: flush whatever is still buffered, then stop
                if not current_batch:
                    return
                yield current_batch
            if current_event is None:
                return
            # an event bigger than the limit can never fit into any batch
            # NOTE(review): this compares the string length, while the batch
            # check below measures encoded bytes — confirm intended.
            if len(current_event) > self._max_content_length:
                msg = "The event size <%s> is greater than the max content length <%s>. Skipping event."
                if self._use_logging:
                    logger.warning(msg, len(current_event), self._max_content_length)
                continue
            obj = json.loads(current_event)
            # size the batch would have (in encoded bytes) with this event added
            content_length = len(json.dumps(current_batch + [obj]).encode("utf8"))
            if content_length > self._max_content_length:
                # flush the full batch and start a new one with this event
                batch = current_batch
                current_batch = [obj]
                yield batch
            else:
                current_batch += [obj]

    def __auth(self) -> HTTPBasicAuth:
        """The authentication method for the logstash pipeline. If the username
        or the password is not set correctly it will return None.

        :return: A HTTP basic auth object or None.
        :rtype: HTTPBasicAuth
        """
        if self._username is None or self._password is None:
            return None
        return HTTPBasicAuth(self._username, self._password)

    def close(self) -> None:
        """Close the HTTP session."""
        if self.__session is not None:
            self.__session.close()

    def send(self, events: list, **kwargs):
        """Send events to the logstash pipeline.

        Max Events: `logstash_async.Constants.QUEUED_EVENTS_BATCH_SIZE`
        Max Content Length: `HttpTransport._max_content_length`

        The method receives a list of events from the worker. It tries to send
        as much of the events as possible in one request. If the total size of
        the received events is greater than the maximal content length the
        events will be divide into batches.

        :param events: A list of events
        :type events: list
        """
        self.__session = requests.Session()
        for batch in self.__batches(events):
            if self._use_logging:
                logger.debug("Batch length: %s, Batch size: %s", len(batch), len(json.dumps(batch).encode("utf8")))
            response = self.__session.post(
                self.url,
                headers={"Content-Type": "application/json"},
                json=batch,
                timeout=self._timeout,
                auth=self.__auth(),
            )
            if response.status_code != 200:
                # close the session before surfacing the HTTP error
                self.close()
                response.raise_for_status()
        self.close()
import os
import hashlib
import requests
import shutil
from typing import Optional, List
from pathlib import Path
from os.path import join
from saga.Commit import Commit
from saga.path_utils import (
copy_dir_to_dir,
relative_paths_in_dir,
changed_files
)
from saga.file_types.file_utils import parse_file
class Repository(object):
    """File-system backed repository: all state lives under ``<base>/.saga/``."""

    def __init__(self, directory: Path):
        self.base_directory = directory

    def debug(self):
        """Print every branch with its commit hashes from root to head."""
        for branch in self.branches:
            commit_hash = self.head_commit_from_branch(branch)
            string = ""
            while commit_hash:
                string = f"{commit_hash} - {string}"
                commit = self.get_commit(commit_hash)
                # follow the first parent only; stop at the root commit
                if commit is None or not any(commit.parent_commit_hashes):
                    commit_hash = None
                else:
                    commit_hash = commit.parent_commit_hashes[0]
            print(f"{branch}: {string}")

    @property
    def saga_directory(self) -> Path:
        # TODO: move all of these to paths
        return self.base_directory / ".saga/"

    @property
    def commit_directory(self) -> Path:
        return self.saga_directory / "commits"

    @property
    def state_directory(self) -> Path:
        return self.saga_directory / "states"

    @property
    def index_directory(self) -> Path:
        return self.saga_directory / "index"

    @property
    def branch_directory(self) -> Path:
        return self.saga_directory / "branches"

    @property
    def branches(self) -> List[str]:
        # each branch is stored as one file named after the branch
        return [
            str(path.parts[-1]) for path in self.branch_directory.iterdir()
        ]

    @property
    def head_location(self) -> Path:
        return self.saga_directory / "head"

    @property
    def remote_location(self) -> Path:
        return self.saga_directory / "remote"

    @property
    def head(self) -> str:
        """Name of the branch HEAD points at; creates/persists "master" by default."""
        if self.head_location.exists():
            with open(self.head_location, "r") as f:
                return f.readline()
        else:
            with open(self.head_location, "w+") as f:
                f.write("master")
            return "master"

    @property
    def index_hash(self):
        """
        Returns the hash of a list of the files + contents, currently
        in the index folder.
        """
        file_hashes = []
        # we sort to have the same files every time
        for file_name in sorted(relative_paths_in_dir(self.index_directory)):
            index_file_name = join(self.index_directory, file_name)
            if os.path.isfile(index_file_name):
                # BUG FIX: the file handle was previously never closed (leak);
                # a context manager closes it deterministically
                with open(index_file_name, "rb") as f:
                    file_bytes = f.read()
                m = hashlib.sha256()
                # we encode the file contents
                # and we encode the file path
                m.update(file_bytes)
                m.update(file_name.encode("utf-8"))
                file_hashes.append(m.hexdigest())
        m = hashlib.sha256()
        # TODO: this should be a merkle tree eventually
        # (or maybe an merkel-mountain-range), to reap the benefits
        m.update(",".join(file_hashes).encode('utf-8'))
        return m.hexdigest()

    @property
    def head_commit_hash(self):
        """
        Returns the commit hash on HEAD
        """
        return self.head_commit_from_branch(self.head)

    @property
    def remote_repository(self):
        """
        Returns the URL of the currently tracked remote repository
        """
        if self.remote_location.exists():
            with open(self.remote_location, "r") as f:
                return f.readline()
        else:
            with open(self.remote_location, "w+") as f:
                f.write("http://localhost:3000")
            return "http://localhost:3000"

    def head_commit_from_branch(self, branch_name: str) -> Optional[str]:
        """Return the head commit hash of *branch_name*, or None for unknown branches."""
        branch_file = self.branch_directory / branch_name
        if not branch_file.exists():
            return None
        with open(branch_file, 'r') as f:
            return f.readline()

    @property
    def state_hash(self):
        """
        Returns the state hash of the most recent commit
        """
        head_commit = self.get_commit(self.head_commit_hash)
        return head_commit.state_hash

    @property
    def curr_state_directory(self) -> Path:
        """
        Returns the path of the head state directory
        """
        return self.state_directory / self.state_hash

    @property
    def uncommited_in_index(self):
        # True when the index content differs from the last committed state
        index_state_hash = self.index_hash
        commit_state_hash = self.state_hash
        return index_state_hash != commit_state_hash

    def get_commit(self, commit_hash: str) -> Optional[Commit]:
        """Load a commit by hash; returns None when the commit does not exist.

        BUG FIX: the file was previously opened unconditionally, which made
        the trailing ``return None`` unreachable — a missing commit raised
        FileNotFoundError instead of returning None as the annotation promises.
        """
        commit_path = self.commit_directory / commit_hash
        if not commit_path.exists():
            return None
        with commit_path.open("rb") as f:
            return Commit.from_bytes(f.read())

    def add_commit(
        self,
        state_hash: str,
        previous_commit_hashes: List[str],
        commit_message: str
    ) -> Optional[Commit]:
        """
        Adds a commit to the head branch
        """
        commit = Commit(state_hash, previous_commit_hashes, commit_message)
        commit_hash = commit.hash
        commit_bytes = commit.to_bytes()
        with open(self.commit_directory / commit_hash, "wb+") as f:
            f.write(commit_bytes)
        return commit

    def update_head_commit(self, commit: Commit):
        """Point the current head branch at *commit*."""
        head = self.branch_directory / self.head
        with open(head, "w") as f:
            f.write(commit.hash)

    def set_head(self, branch_name: str):
        """Switch HEAD to *branch_name* (does not touch the working tree)."""
        with open(self.head_location, "w") as f:
            f.write(branch_name)

    def restore_state(self, state_hash: str):
        """Make the working directory and the index match *state_hash*.

        NOTE(review): this operates on the process working directory (".")
        rather than self.base_directory — confirm callers always chdir first.
        """
        # copy the state directory to the current directory
        copy_dir_to_dir(join(".saga/states", state_hash), ".")
        # remove all the files that shouldn't be in this state
        inserted, changed, removed = changed_files(
            ".",
            join(".saga/states", state_hash)
        )
        for path in inserted:
            if os.path.isdir(path):
                os.rmdir(path)
            else:
                os.remove(path)
        # make the index the proper state
        shutil.rmtree(self.index_directory)
        os.mkdir(self.index_directory)
        copy_dir_to_dir(join(".saga/states", state_hash), self.index_directory)

    def set_remote(self, remote_repository):
        """Persist and announce a new remote repository URL."""
        with self.remote_location.open("w+") as f:
            f.write(remote_repository)
        print(f"Set new remote repository to {self.remote_repository}")

    def get_state_hash(self, branch):
        """Return the state hash of the head commit of *branch*."""
        commit_hash = self.head_commit_from_branch(branch)
        commit = self.get_commit(commit_hash)
        return commit.state_hash
def lcs_similarity(A, B, similarity_function):
    """Longest-common-subsequence using a pluggable similarity metric.

    Returns a list of (path_a, path_b, similarity) triples in ascending
    order; each index is wrapped in a single-element list so callers can
    extend the paths into nested structures.
    """
    len_a, len_b = len(A), len(B)
    # table[i][j] = best accumulated similarity over the first i elements
    # of A and the first j elements of B (borders are the 0 base case)
    table = [[0] * (len_b + 1) for _ in range(len_a + 1)]
    for i in range(1, len_a + 1):
        for j in range(1, len_b + 1):
            paired = similarity_function(A[i - 1], B[j - 1]) + table[i - 1][j - 1]
            table[i][j] = max(paired, table[i - 1][j], table[i][j - 1])
    # Walk back from the bottom-right corner, emitting matched index pairs
    # whenever pairing the two elements strictly beats skipping either one.
    matches = []
    i, j = len_a, len_b
    while i > 0 and j > 0:
        pair_score = similarity_function(A[i - 1], B[j - 1])
        if pair_score + table[i - 1][j - 1] > max(table[i - 1][j], table[i][j - 1]):
            matches.append(([i - 1], [j - 1], pair_score))
            i -= 1
            j -= 1
        elif table[i - 1][j] > table[i][j - 1]:
            i -= 1
        else:
            j -= 1
    matches.reverse()
    return matches
def lcs(A, B):
    """Standard LCS: exact-equality similarity (1 for equal items, else 0)."""
    return lcs_similarity(A, B, lambda a, b: 1 if a == b else 0)
def list_from_path(matrix, path):
    """Follow *path* (a sequence of indexes) into nested lists and return the element."""
    node = matrix
    for index in path:
        node = node[index]
    return node
def same_paths(dict_a, dict_b):
    """Return the keys present in both dicts whose values compare equal."""
    shared = set()
    for key, value in dict_a.items():
        if key in dict_b and dict_b[key] == value:
            shared.add(key)
    return shared
def similarity_function(A, B):
    """Similarity in [0, 1] between two JSON-like values.

    Exact type() equality is required up front (so e.g. bool vs int scores 0).
    Strings and lists score via LCS; dicts via the fraction of unchanged keys.
    """
    if type(A) != type(B):
        return 0
    elif A is None:
        return 1
    elif type(A) in (int, float, bool):
        return 1 if A == B else 0
    elif type(A) == str:
        if len(A) == 0 and len(B) == 0:
            return 1
        indexes = lcs(A, B)
        if any(indexes):
            return len(indexes) / max(len(A), len(B))
        return 0
    elif type(A) == list:
        if len(A) == 0 and len(B) == 0:
            return 1
        # None entries carry no content, so drop them before matching
        A = [a for a in A if a is not None]
        B = [b for b in B if b is not None]
        indexes = lcs_similarity(A, B, similarity_function)
        if any(indexes):
            total = sum(sim for _, _, sim in indexes)
            return total / max(len(A), len(B))
        return 0
    elif type(A) == dict:
        # BUG FIX: two empty dicts are identical and previously scored 0,
        # inconsistent with the explicit empty-equals-empty handling of the
        # str and list branches above.
        if len(A) == 0 and len(B) == 0:
            return 1
        # fraction of keys that exist in both dicts with equal values
        unchanged = same_paths(A, B)
        if any(unchanged):
            return len(unchanged) / max(len(A), len(B))
        return 0
    else:
        print("Unknown type {}".format(type(A)))
        return 0
def lcs_with_sim(A, B):
    """LCS over A and B using the recursive mixed-type similarity metric."""
    return lcs_similarity(A, B, similarity_function)
# returns a mapping from "dimension down" to "matches at that level"
def lcs_multi_dimension(A, B):
    """Match A and B level by level: top-level LCS first, then LCS inside
    every matched pair of sub-lists, recursing until no list pairs remain.

    Keys of the result are dimensions (1 = top level); values are lists of
    (path_a, path_b, similarity) triples at that depth.
    """
    dimension_matches = dict()
    dimension_matches[1] = lcs_similarity(A, B, similarity_function)
    dimension = 1
    matches = dimension_matches[1]
    while any(matches):
        dimension += 1
        dimension_matches[dimension] = []
        for path_a, path_b, _ in dimension_matches[dimension - 1]:
            list_a = list_from_path(A, path_a)
            list_b = list_from_path(B, path_b)
            # only list pairs can be descended into; anything else ends
            # the descent for this whole dimension
            if type(list_a) != list or type(list_b) != list:
                matches = []
                break
            matches = lcs_similarity(list_a, list_b, similarity_function)
            for idx_a, idx_b, sim in matches:
                # extend the parent paths with the nested match indexes
                dimension_matches[dimension].append(
                    (path_a + idx_a, path_b + idx_b, sim)
                )
    return dimension_matches
def get_matching_path(dim_matches, path_is_A, path):
    """Find the counterpart of *path* among the matches at its dimension.

    *path_is_A* says which side of the match *path* belongs to.
    Returns (other_path, similarity), or (None, None) when not matched.
    """
    for path_a, path_b, sim in dim_matches[len(path)]:
        own, other = (path_a, path_b) if path_is_A else (path_b, path_a)
        if own == path:
            return other, sim
    return None, None
class SagaError(BaseException):
    """Raised when an action failed and at least one compensation also failed.

    Attributes:
        action: the exception raised by the failing action
        compensations: all exceptions raised while running compensations
    """

    def __init__(self, exception, compensation_exceptions):
        """
        :param exception: BaseException that caused this SagaError
        :param compensation_exceptions: list[BaseException] raised during compensations
        """
        self.action = exception
        self.compensations = compensation_exceptions
class Action(object):
    """Groups an action with its corresponding compensation. For internal use."""

    def __init__(self, action, compensation):
        """
        :param action: Callable executed as the action
        :param compensation: Callable that reverses the effects of action
        """
        self.__act_kwargs = None
        self.__do = action
        self.__undo = compensation

    def act(self, **kwargs):
        """Execute the action, remembering *kwargs* for a later compensation.

        :param kwargs: return values of the previously executed action, if any
        :return: dict optional return value of this action
        """
        self.__act_kwargs = kwargs
        return self.__do(**kwargs)

    def compensate(self):
        """Execute the compensation, replaying the action's kwargs when present.

        :return: None
        """
        if self.__act_kwargs:
            self.__undo(**self.__act_kwargs)
        else:
            self.__undo()
class Saga(object):
    """Runs a sequence of Actions, compensating on failure.

    If one action raises, the compensations for the failed action and for all
    previously executed actions run in reverse order.  Each action may return
    a dict, which is forwarded as kwargs to the next action.  Exceptions that
    occur while compensating are collected and raised wrapped in a SagaError
    once every compensation has been attempted.
    """

    def __init__(self, actions):
        """
        :param actions: list[Action]
        """
        self.actions = actions

    def execute(self):
        """Execute this Saga.

        :raises SagaError: when an action fails
        :raises TypeError: when an action returns neither dict nor None
        :return: None
        """
        kwargs = {}
        for position, current_action in enumerate(self.actions):
            try:
                kwargs = current_action.act(**kwargs) or {}
            except BaseException as error:
                raise SagaError(error, self.__run_compensations(position))
            if type(kwargs) is not dict:
                raise TypeError('action return type should be dict or None but is {}'.format(type(kwargs)))

    def __get_action(self, index):
        """Look up an action by position.

        :param index: int
        :return: Action
        """
        return self.actions[index]

    def __run_compensations(self, last_action_index):
        """Compensate actions last_action_index..0, collecting any failures.

        :param last_action_index: int
        :return: list[BaseException]
        """
        failures = []
        for position in range(last_action_index, -1, -1):
            try:
                self.__get_action(position).compensate()
            except BaseException as error:
                failures.append(error)
        return failures
class SagaBuilder(object):
    """Fluent builder collecting (action, compensation) pairs for a Saga."""

    def __init__(self):
        self.actions = []

    @staticmethod
    def create():
        """Start a fresh builder."""
        return SagaBuilder()

    def action(self, action, compensation):
        """Append an action together with its compensation.

        :param action: Callable to be executed
        :param compensation: Callable that reverses the effects of action
        :return: SagaBuilder (self, for chaining)
        """
        self.actions.append(Action(action, compensation))
        return self

    def build(self):
        """Create a Saga over all actions collected so far.

        :return: Saga
        """
        return Saga(self.actions)
from typing import Text, Any, Dict, List, Union, Optional
import errno
import os
import io
def home_dir():
    """Return the current user's home directory as a pathlib.Path."""
    from pathlib import Path
    return Path.home()
def exists(file):
    """
    Check whether a file or directory exists; related helpers:
    os.path.isfile()
    os.path.isdir()
    :param file: path to test
    :return: True when the path exists
    """
    return os.path.exists(file)
def create_dir(dir_path):
    # type: (Text) -> None
    """Creates a directory and its super paths.

    Succeeds even if the path already exists as a directory.  Note: unlike the
    previous errno-based version (which swallowed EEXIST even when a *file*
    blocked the path), makedirs(exist_ok=True) fails loudly in that case.
    """
    os.makedirs(dir_path, exist_ok=True)
def create_dir_for_file(file_path):
    # type: (Text) -> None
    """Ensure that every missing parent directory of *file_path* exists."""
    parent = os.path.dirname(file_path)
    try:
        os.makedirs(parent)
    except OSError as err:
        # The parent already exists: nothing to do.
        if err.errno != errno.EEXIST:
            raise
def list_directory(path):
    # type: (Text) -> List[Text]
    """Returns all files and folders excluding hidden files.

    If the path points to a file, returns the file. This is a recursive
    implementation returning files in any depth of the path.

    Raises ``ValueError`` for non-string input or when the path does
    not exist.
    """
    # Plain isinstance(str) check: six.string_types is (str,) on Python 3,
    # so the third-party six dependency is unnecessary here.
    if not isinstance(path, str):
        raise ValueError("Resourcename must be a string type")
    if os.path.isfile(path):
        return [path]
    elif os.path.isdir(path):
        results = []
        for base, dirs, files in os.walk(path):
            # remove hidden files (names starting with a dot)
            goodfiles = filter(lambda x: not x.startswith('.'), files)
            results.extend(os.path.join(base, f) for f in goodfiles)
        return results
    else:
        raise ValueError("Could not locate the resource '{}'."
                         "".format(os.path.abspath(path)))
def list_files(path):
    # type: (Text) -> List[Text]
    """Return only the files (no directories) found by ``list_directory``.

    If the path points to a file, returns that file in a one-element list.
    """
    return [entry for entry in list_directory(path) if os.path.isfile(entry)]
def list_subdirectories(path):
    # type: (Text) -> List[Text]
    """Return the immediate sub-directories of *path*.

    Hidden entries are excluded by the ``*`` glob pattern.  If *path*
    points to a file, an empty list is returned.
    """
    import glob
    candidates = glob.glob(os.path.join(path, '*'))
    return [entry for entry in candidates if os.path.isdir(entry)]
def write_to_file(filename, text, auto_create_dir=False):
    # type: (Text, Text, bool) -> None
    """Write *text* to *filename* using UTF-8 encoding.

    A leading ``~`` in *filename* is expanded to the user's home
    directory.  With *auto_create_dir* set, missing parent directories
    are created first.
    """
    import os.path
    filename = os.path.expanduser(filename)
    if auto_create_dir:
        create_dir_for_file(filename)
    with io.open(filename, 'w', encoding="utf-8") as handle:
        handle.write(str(text))
def read_file(filename, encoding="utf-8-sig"):
    """Return the full text content of *filename*.

    The default ``utf-8-sig`` encoding transparently strips a UTF-8 BOM
    if one is present.
    """
    with io.open(filename, encoding=encoding) as handle:
        return handle.read()
def remove_dir(target_dir):
    """Recursively delete *target_dir*; do nothing if it does not exist."""
    import shutil
    if not os.path.exists(target_dir):
        return
    shutil.rmtree(target_dir)
def lines(filename):
    """Return the file's content as a list of lines (newlines kept)."""
    with open(filename) as handle:
        return handle.readlines()
def list_with_suffix(dir, suffix):
    """Recursively collect files under *dir* whose names end with *suffix*."""
    import os
    matches = []
    for root, _dirs, names in os.walk(dir):
        matches.extend(
            os.path.join(root, name) for name in names if name.endswith(suffix)
        )
    return matches
def list_match(dir, pat):
    """Recursively collect files under *dir* whose basename matches *pat*.

    >>> io_utils.list_match('.', 'mod_*.json')

    :param dir: directory to walk
    :param pat: an ``fnmatch``-style glob pattern
    :return: list of matching paths
    """
    import os
    from fnmatch import fnmatch
    hits = []
    for root, _dirs, names in os.walk(dir):
        hits.extend(
            os.path.join(root, name) for name in names if fnmatch(name, pat)
        )
    return hits
def list_files_with_basename(dir_pattern):
# Return just the base names of every path matching the glob
# *dir_pattern*.  ntpath.basename is used (rather than os.path) so that
# Windows-style back-slash separators are also stripped on any platform.
import glob
import ntpath
files = glob.glob(dir_pattern)
bases = []
for f in files:
bases.append(ntpath.basename(f))
return bases | /sagas-0.6.0-py3-none-any.whl/io_utils.py | 0.718496 | 0.209126 | io_utils.py | pypi
import re
from pkg_resources import resource_filename
class DictionaryLoadError(Exception):
"""Raised when a dictionary or table file cannot be parsed or loaded."""
# Upper bounds (in characters) on the prefix and suffix segments that
# segment_indexes() will consider when splitting a word.
MAX_PREFIX_LENGTH = 4
MAX_SUFFIX_LENGTH = 6
def segment_indexes(wordlen, max_prefix=None, max_suffix=None):
    """Generate possible segment indexes.

    A word can be divided into three parts: prefix+stem+suffix. The
    prefix and suffix are optional, but the stem is mandatory (at least
    one character).

    In this function we generate all the possible ways of breaking down
    a word of the given length. The generator returns a pair of values,
    representing the stem index and the suffix index.

    :param wordlen: length of the word being segmented
    :param max_prefix: maximum prefix length (defaults to MAX_PREFIX_LENGTH)
    :param max_suffix: maximum suffix length (defaults to MAX_SUFFIX_LENGTH)
    """
    if max_prefix is None:
        max_prefix = MAX_PREFIX_LENGTH
    if max_suffix is None:
        max_suffix = MAX_SUFFIX_LENGTH
    for prelen in range(max_prefix + 1):
        stemlen = wordlen - prelen
        suflen = 0
        while stemlen >= 1 and suflen <= max_suffix:
            # Stem occupies [prelen, prelen + stemlen); the suffix starts
            # immediately after the stem.
            yield (prelen, prelen + stemlen)
            stemlen -= 1
            suflen += 1
def _data_file_path(filename):
""" Return the path to the given data file """
# NOTE(review): pkg_resources is deprecated in favor of
# importlib.resources; left unchanged to preserve behavior.
return resource_filename(__name__, filename)
def load_dict(filename, encoding='latin1'):
"""Load and return the given dictionary.

Parses a Buckwalter-style dictionary file: ';; <lemmaID>' lines start a
new lemma, ';' lines are comments, and every other line is a tab-separated
entry (entry, vocalization, category, gloss+POS).  Returns a mapping from
entry string to a list of (entry, voc, cat, gloss, POS, lemmaID) tuples.
Raises DictionaryLoadError on duplicate lemma IDs or undeducible POS.
"""
dict = {}
seen = set()
lemmas = 0
entries = 0
lemmaID = ""
# p_AZ: gloss starts with a capital letter (proper-noun heuristic);
# p_iy: vocalization ends in 'iy~' (nisba adjective-like nouns).
p_AZ = re.compile('^[A-Z]')
p_iy = re.compile('iy~$')
infile = open(_data_file_path(filename), 'r', encoding=encoding)
print("loading %s ... " % (filename), end='')
for line in infile:
if line.startswith(';; '): # a new lemma
m = re.search('^;; (.*)$', line)
lemmaID = m.group(1)
if lemmaID in seen:
raise DictionaryLoadError(
"lemmaID %s in %s isn't unique!" % \
(lemmaID, filename))
else:
seen.add(lemmaID)
lemmas += 1;
elif line.startswith(';'): # a comment
continue
else: # an entry
line = line.strip(' \n')
(entry, voc, cat, glossPOS) = re.split('\t', line)
# An explicit <pos>...</pos> tag in the gloss wins; otherwise the
# POS is deduced from the category prefix below.
m = re.search('<pos>(.+?)</pos>', glossPOS)
if m:
POS = m.group(1)
gloss = glossPOS
else:
gloss = glossPOS
#voc = "%s (%s)" % (buckwalter.buck2uni(voc), voc)
if cat.startswith('Pref-0') or cat.startswith('Suff-0'):
POS = "" # null prefix or suffix
elif cat.startswith('F'):
POS = "%s/FUNC_WORD" % voc
elif cat.startswith('IV'):
POS = "%s/VERB_IMPERFECT" % voc
elif cat.startswith('PV'):
POS = "%s/VERB_PERFECT" % voc
elif cat.startswith('CV'):
POS = "%s/VERB_IMPERATIVE" % voc
elif cat.startswith('N') and p_AZ.search(gloss):
POS = "%s/NOUN_PROP" % voc # educated guess
# (99% correct)
elif cat.startswith('N') and p_iy.search(voc):
POS = "%s/NOUN" % voc # (was NOUN_ADJ:
# some of these are really ADJ's
# and need to be tagged manually)
elif cat.startswith('N'):
POS = "%s/NOUN" % voc
else:
raise DictionaryLoadError(
"no POS can be deduced in %s: %s" % \
(filename, line))
# Strip the <pos> tag out of the stored gloss.
gloss = re.sub('<pos>.+?</pos>', '', gloss)
gloss = gloss.strip()
dict.setdefault(entry, []).append(
(entry, voc, cat, gloss, POS, lemmaID))
entries += 1
infile.close()
# Prefix/suffix tables have no ';; ' lemma lines, so lemmaID stays "".
if not lemmaID == "":
print("loaded %d lemmas and %d entries" % (lemmas, entries))
else:
print("loaded %d entries" % (entries))
return dict
def load_table(filename, encoding='latin1'):
"""Load and return the given table"""
p = re.compile('\s+')
table = {}
infile = open(_data_file_path(filename), 'r', encoding=encoding)
for line in infile:
if line.startswith(';'): continue # comment line
line = line.strip()
p.sub(' ', line)
table[line] = 1
infile.close()
return table | /sagas-0.6.0-py3-none-any.whl/pyaramorph/util.py | 0.523908 | 0.363421 | util.py | pypi |
import streamlit as st
import pandas as pd
import urllib.request
import json
from sagas.conf.conf import cf
from delegates.geo_helper import get_states
# Memoized data loaders: Streamlit caches results keyed by arguments.
fetch_csv = st.cache(pd.read_csv)
fetch_json = st.cache(pd.read_json)
# NOTE: the bare triple-quoted strings below are rendered by Streamlit's
# "magic" as markdown — they are output, not docstrings.
"""
# Hello, world!
This is an example [`streamlit`](https://streamlit.io/) app running on [Glitch](https://glitch.com/).
This Glitch app install streamlit, and does a `streamlit run app.py` on port 3000 to start the app. You can edit `app.py` (where this code lies) to test this out!
"""
"""
---
## Streamlit Playground
"""
with st.echo():
print("You can see this code! 😮")
st.json({
'foo': 'bar',
'baz': 'boz',
'stuff': [
'stuff 1',
'stuff 2',
'stuff 3',
'stuff 5',
],
})
st.graphviz_chart('''
digraph {
rankdir=LR;
a -> c;
b -> c;
c -> d;
d -> e;
d -> f;
f -> g;
}''')
if st.button("balloon me"):
st.balloons()
"""
---
## Opioid Sales
Source: Washington Posts's [ARCOS API](https://arcos-api.ext.nile.works/__swagger__/) - Original article [here](https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/)
"""
# states is a dict of postal state code to human-readable state name
# e.g. {"CA":"California", "NY":"New York"}, etc.
states = get_states()
state = st.selectbox("State", options=list(states.keys()), format_func=lambda k: states.get(k), )
# raw_pharmacy_data = fetch_json(
# "https://arcos-api.ext.nile.works/v1/total_pharmacies_state?state={state}&key=WaPo".format(state=state))
## only state==CA
# Local snapshot of the API response is used instead of the live endpoint.
raw_pharmacy_data = fetch_json(f'{cf.conf_dir}/ai/streamlit/total_pharmacies_state.json')
st.markdown("""### Opioid Sales in {state}
""".format(state=states.get(state)))
# Bare expression: Streamlit magic renders the dataframe as a table.
raw_pharmacy_data
st.vega_lite_chart(raw_pharmacy_data, {
'mark': 'bar',
'encoding': {
'x': {'field': 'buyer_county', 'type': 'nominal', "sort": "y"},
'y': {'field': 'total_dosage_unit', 'type': 'quantitative', 'aggregate': 'sum'}
},
}, height=1000)
"**Note**: there seems to be a bug with vegalite in Streamlit where the height is fixed at 200px... I might be wrong about this, but I'll file a bug report if I can't fix it"
"""
---
## Apple Stock Price 2007-2012
Source: Mike Bostock [https://observablehq.com/@mbostock/vega-lite-line-chart](https://observablehq.com/@mbostock/vega-lite-line-chart)
"""
# aapl = fetch_csv(
# 'https://gist.githubusercontent.com/mbostock/14613fb82f32f40119009c94f5a46d72/raw/d0d70ffb7b749714e4ba1dece761f6502b2bdea2/aapl.csv')
aapl = fetch_csv(f'{cf.conf_dir}/ai/streamlit/aapl.csv')
aapl
st.vega_lite_chart(aapl, {
'mark': "line",
'encoding': {
'x': {'field': 'date', 'type': 'temporal'},
'y': {'field': 'close', 'type': 'quantitative'},
},
}, height=1000)
"""
---
## High Schools in Los Angeles
Source: Los Angeles Times [highschool-homicide-analysis
](https://github.com/datadesk/highschool-homicide-analysis)
"""
# raw_la_schools = fetch_csv(
# "https://github.com/datadesk/highschool-homicide-analysis/raw/master/output/la-high-schools.csv")
raw_la_schools = fetch_csv(f'{cf.conf_dir}/ai/streamlit/la-high-schools.csv')
# deck_gl expects columns named lat/lon.
map_df = pd.DataFrame()
map_df["lat"] = raw_la_schools["Latitude"]
map_df["lon"] = raw_la_schools["Longitude"]
dot_size = st.slider("Dot size", min_value=1, max_value=20, value=5)
st.deck_gl_chart(viewport={
'latitude': 33.94,
'longitude': -118.18,
'zoom': 10,
'pitch': 50,
},
layers=[{
'data': map_df,
'radiusScale': dot_size,
'type': 'ScatterplotLayer'
}])
st.write("Dot size: ", dot_size)
"""
---
## How to use this Glitch app
You can remix the Glitch project and you'll have your own project running streamlit.
"""
"""
---
## Limitations
There are some limitations to Glitch, [for example](https://glitch.com/help/restrictions/):
>Projects sleep after 5 minutes if they are not used
> Projects have a limit of 200MB of disk space in the container
(and installing `streamlit` already takes up ~180MB, it seems like)
> Projects have a limit of 512MB of RAM.
> Projects are limited to 4000 requests per hour
A few other points to consider:
- There's no GPU access, so you probably can't do a ton of crazy ML/AI stuff.
- Glitch autosaves your work while you are typing - which means, when you're editing your streamlit app, it will continuously restart over and over again while you're coding - which is a little annoying... You can turn off auto-run to avoid this
- Glitch uses Python 3.5 (as of now), so some Altair charts [may not render correctly](https://github.com/altair-viz/altair/issues/972) (which is what `st.line_chart` and `st.bar_chart` uses under the hood)
But hey, it's free, you can use this to do quick tests, and it requires no setup!
""" | /sagas-0.6.0-py3-none-any.whl/delegates/sl_hello.py | 0.588298 | 0.330255 | sl_hello.py | pypi
import streamlit as st
import pandas as pd
import numpy as np
import altair as alt
from sagas.conf.conf import cf
# Column name of the pickup timestamp in the raw CSV (lower-cased below).
DATE_TIME = "date/time"
DATA_URL = (
# "http://s3-us-west-2.amazonaws.com/streamlit-demo-data/uber-raw-data-sep14.csv.gz"
# Local snapshot of the September 2014 Uber pickups dataset.
f"{cf.conf_dir}/ai/code/uber-raw-data-sep14.csv.gz"
)
st.title("Uber Pickups in New York City")
st.markdown(
"""
This is a demo of a Streamlit app that shows the Uber pickups
geographical distribution in New York City. Use the slider
to pick a specific hour and look at how the charts change.
[See source code](https://github.com/streamlit/demo-uber-nyc-pickups/blob/master/app.py)
""")
@st.cache(persist=True)
def load_data(nrows):
    """Read the first *nrows* pickup records and normalize them.

    Column names are lower-cased and the date/time column is parsed
    into pandas datetimes.  Results are memoized by Streamlit.
    """
    frame = pd.read_csv(DATA_URL, nrows=nrows)

    def _lower(name):
        return str(name).lower()

    frame.rename(_lower, axis="columns", inplace=True)
    frame[DATE_TIME] = pd.to_datetime(frame[DATE_TIME])
    return frame
data = load_data(100000)
# Restrict the working set to pickups within the selected hour.
hour = st.slider("Hour to look at", 0, 23)
data = data[data[DATE_TIME].dt.hour == hour]
st.subheader("Geo data between %i:00 and %i:00" % (hour, (hour + 1) % 24))
# Center the map on the mean pickup location.
midpoint = (np.average(data["lat"]), np.average(data["lon"]))
st.deck_gl_chart(
viewport={
"latitude": midpoint[0],
"longitude": midpoint[1],
"zoom": 11,
"pitch": 50,
},
layers=[
{
"type": "HexagonLayer",
"data": data,
"radius": 100,
"elevationScale": 4,
"elevationRange": [0, 1000],
"pickable": True,
"extruded": True,
}
],
)
st.subheader("Breakdown by minute between %i:00 and %i:00" % (hour, (hour + 1) % 24))
filtered = data[
(data[DATE_TIME].dt.hour >= hour) & (data[DATE_TIME].dt.hour < (hour + 1))
]
# Per-minute pickup counts within the selected hour (60 one-minute bins).
hist = np.histogram(filtered[DATE_TIME].dt.minute, bins=60, range=(0, 60))[0]
chart_data = pd.DataFrame({"minute": range(60), "pickups": hist})
st.write(alt.Chart(chart_data, height=150)
.mark_area(
interpolate='step-after',
line=True
).encode(
x=alt.X("minute:Q", scale=alt.Scale(nice=False)),
y=alt.Y("pickups:Q"),
tooltip=['minute', 'pickups']
))
if st.checkbox("Show raw data", False):
st.subheader("Raw data by minute between %i:00 and %i:00" % (hour, (hour + 1) % 24))
st.write(data) | /sagas-0.6.0-py3-none-any.whl/delegates/sl_uber_nyc.py | 0.590897 | 0.339663 | sl_uber_nyc.py | pypi
from sage.all import AA, PolynomialRing, QQ, QQbar, SR, gcd, prod, pi
from sage_acsv.kronecker import _kronecker_representation
from sage_acsv.helpers import ACSVException, RationalFunctionReduce, DetHessianWithLog, OutputFormat
from sage_acsv.debug import Timer, acsv_logger
MAX_MIN_CRIT_RETRIES = 3
def diagonal_asy(F, r=None, linear_form=None, return_points=False, output_format=None, as_symbolic=False):
r"""Asymptotics in a given direction r of the multivariate rational function F.
INPUT:
* ``F`` -- The rational function ``G/H`` in ``d`` variables. This function is
assumed to have a combinatorial expansion.
* ``r`` -- A vector of length d of positive integers.
* ``linear_form`` -- (Optional) A linear combination of the input
variables that separates the critical point solutions.
* ``return_points`` -- If ``True``, also returns the coordinates of
minimal critical points. By default ``False``.
* ``output_format`` -- (Optional) A string or :class:`.OutputFormat` specifying
the way the asymptotic growth is returned. Allowed values currently are:
- ``"tuple"`` or ``None``, the default: the growth is returned as a list of
tuples of the form ``(a, n^b, pi^c, d)`` such that the `r`-diagonal of `F`
is the sum of ``a^n n^b pi^c d + O(a^n n^{b-1})`` over these tuples.
- ``"symbolic"``: the growth is returned as an expression from the symbolic
ring ``SR`` in the variable ``n``.
- ``"asymptotic"``: the growth is returned as an expression from an appropriate
``AsymptoticRing`` in the variable ``n``.
* ``as_symbolic`` -- deprecated in favor of the equivalent
``output_format="symbolic"``. Will be removed in a future release.
OUTPUT:
A representation of the asymptotic main term, either as a list of tuples,
or as a symbolic expression.
NOTE:
The code randomly generates a linear form, which for generic rational functions
separates the solutions of an intermediate polynomial system with high probability.
This separation step can fail, but (assuming `F` has a finite number of critical
points) the code can be rerun until a separating form is found.
Examples::
sage: from sage_acsv import diagonal_asy
sage: var('x,y,z,w')
(x, y, z, w)
sage: diagonal_asy(1/(1-x-y))
[(4, 1/sqrt(n), 1/sqrt(pi), 1)]
sage: diagonal_asy(1/(1-(1+x)*y), r = [1,2], return_points=True)
([(4, 1/sqrt(n), 1/sqrt(pi), 1)], [[1, 1/2]])
sage: diagonal_asy(1/(1-(x+y+z)+(3/4)*x*y*z), output_format="symbolic")
0.840484893481498?*24.68093482214177?^n/(pi*n)
sage: diagonal_asy(1/(1-(x+y+z)+(3/4)*x*y*z))
[(24.68093482214177?, 1/n, 1/pi, 0.840484893481498?)]
sage: var('n')
n
sage: asy = diagonal_asy(
....: 1/(1 - w*(1 + x)*(1 + y)*(1 + z)*(x*y*z + y*z + y + z + 1))
....: )
sage: sum([
....: a.radical_expression()^n * b * c * d.radical_expression()
....: for (a, b, c, d) in asy
....: ])
1/4*(12*sqrt(2) + 17)^n*sqrt(17/2*sqrt(2) + 12)/(pi^(3/2)*n^(3/2))
Not specifying any ``output_format`` falls back to the default tuple
representation::
sage: from sage_acsv import diagonal_asy, OutputFormat
sage: var('x')
x
sage: diagonal_asy(1/(1 - 2*x))
[(2, 1, 1, 1)]
sage: diagonal_asy(1/(1 - 2*x), output_format="tuple")
[(2, 1, 1, 1)]
Passing ``"symbolic"`` lets the function return an element of the
symbolic ring in the variable ``n`` that describes the asymptotic growth::
sage: growth = diagonal_asy(1/(1 - 2*x), output_format="symbolic"); growth
2^n
sage: growth.parent()
Symbolic Ring
The argument ``"asymptotic"`` constructs an asymptotic expansion over
an appropriate ``AsymptoticRing`` in the variable ``n``, including the
appropriate error term::
sage: assume(SR.an_element() > 0) # required to make coercions involving SR work properly
sage: growth = diagonal_asy(1/(1 - x - y), output_format="asymptotic"); growth
1/sqrt(pi)*4^n*n^(-1/2) + O(4^n*n^(-3/2))
sage: growth.parent()
Asymptotic Ring <SR^n * n^QQ * Arg_SR^n> over Symbolic Ring
The function times individual steps of the algorithm, timings can
be displayed by increasing the printed verbosity level of our debug logger::
sage: import logging
sage: from sage_acsv.debug import acsv_logger
sage: acsv_logger.setLevel(logging.INFO)
sage: diagonal_asy(1/(1 - x - y))
INFO:sage_acsv:... Executed Kronecker in ... seconds.
INFO:sage_acsv:... Executed Minimal Points in ... seconds.
INFO:sage_acsv:... Executed Final Asymptotics in ... seconds.
[(4, 1/sqrt(n), 1/sqrt(pi), 1)]
sage: acsv_logger.setLevel(logging.WARNING)
Tests:
Check that passing a non-supported ``output_format`` errors out::
sage: diagonal_asy(1/(1 - x - y), output_format='hello world')
Traceback (most recent call last):
...
ValueError: 'hello world' is not a valid OutputFormat
sage: diagonal_asy(1/(1 - x - y), output_format=42)
Traceback (most recent call last):
...
ValueError: 42 is not a valid OutputFormat
"""
G, H = F.numerator(), F.denominator()
# Default direction: the main diagonal (1, ..., 1).
if r is None:
n = len(H.variables())
r = [1 for _ in range(n)]
# Initialize variables
vs = list(H.variables())
# Auxiliary variables: t parametrizes the segment to the origin used for
# the minimality check, lambda_ is the critical-point multiplier, and u_
# receives the separating linear form for the Kronecker representation.
RR, (t, lambda_, u_) = PolynomialRing(QQ, 't, lambda_, u_').objgens()
expanded_R, _ = PolynomialRing(QQ, len(vs)+3, vs + [t, lambda_, u_]).objgens()
vs = [expanded_R(v) for v in vs]
t, lambda_, u_ = expanded_R(t), expanded_R(lambda_), expanded_R(u_)
vsT = vs + [t, lambda_]
all_variables = (vs, lambda_, t, u_)
d = len(vs)
rd = r[-1]
# Make sure G and H are coprime, and that H does not vanish at 0
G, H = RationalFunctionReduce(G, H)
G, H = expanded_R(G), expanded_R(H)
if H.subs({v: 0 for v in H.variables()}) == 0:
raise ValueError("Denominator vanishes at 0.")
# In case form doesn't separate, we want to try again
for _ in range(MAX_MIN_CRIT_RETRIES):
try:
# Find minimal critical points in Kronecker Representation
min_crit_pts = MinimalCriticalCombinatorial(
G, H, all_variables,
r=r,
linear_form=linear_form
)
break
except Exception as e:
if isinstance(e, ACSVException) and e.retry:
acsv_logger.warning(
"Randomly generated linear form was not suitable, "
f"encountered error: {e}\nRetrying..."
)
continue
else:
raise e
else:
# NOTE(review): all retries exhausted — the function silently returns
# None here; consider raising instead. Left unchanged.
return
timer = Timer()
# Find det(zH_z Hess) where Hess is the Hessian of z_1...z_n * log(g(z_1, ..., z_n))
Det = DetHessianWithLog(H, vsT[0:-2], r)
# Find exponential growth
T = prod([vs[i]**r[i] for i in range(d)])
# Find constants appearing in asymptotics in terms of original variables
A = SR(-G / vs[-1] / H.derivative(vs[-1]))
B = SR(1 / Det / rd**(d-1) / 2**(d-1))
C = SR(1 / T)
# Compute constants at contributing singularities
asm_quantities = [
[QQbar(q.subs([SR(v) == V for (v, V) in zip(vs, cp)])) for q in [A, B, C]]
for cp in min_crit_pts
]
# NOTE(review): this binding of n appears unused (n is re-declared below
# for the tuple/symbolic output formats). Left unchanged.
n = SR.var('n')
# Each contributing point yields (growth base, power of n, constant).
asm_vals = [
(c, QQ(1 - d)/2, a * b.sqrt())
for (a, b, c) in asm_quantities
]
timer.checkpoint("Final Asymptotics")
if as_symbolic:
from warnings import warn
warn(
"The as_symbolic argument has been deprecated in favor of output_format='symbolic' "
"and will be removed in a future release.",
DeprecationWarning,
stacklevel=2,
)
if output_format is None:
output_format = OutputFormat.SYMBOLIC
if output_format is None:
output_format = OutputFormat.TUPLE
else:
output_format = OutputFormat(output_format)
if output_format in (OutputFormat.TUPLE, OutputFormat.SYMBOLIC):
n = SR.var('n')
result = [
(base, n**exponent, pi**exponent, constant)
for (base, exponent, constant) in asm_vals
]
if output_format == OutputFormat.SYMBOLIC:
result = sum([a**n * b * c * d for (a, b, c, d) in result])
elif output_format == OutputFormat.ASYMPTOTIC:
from sage.all import AsymptoticRing
AR = AsymptoticRing('SR^n * n^QQ', SR)
n = AR.gen()
result = sum([
base**n * n**exponent * pi**exponent * constant
+ (base**n * n**(exponent - 1)).O()
for (base, exponent, constant) in asm_vals
])
else:
raise NotImplementedError(f"Missing implementation for {output_format}")
if return_points:
return result, min_crit_pts
return result
def MinimalCriticalCombinatorial(G, H, variables, r=None, linear_form=None):
r"""Compute minimal critical points of a combinatorial multivariate
rational function F=G/H admitting a finite number of critical points.
Typically, this function is called as a subroutine of :func:`.diagonal_asy`.
INPUT:
* ``G, H`` -- Coprime polynomials with `F = G/H`
* ``variables`` -- Tuple of variables of ``G`` and ``H``, followed
by ``lambda_, t, u_``
* ``r`` -- (Optional) Length `d` vector of positive integers
* ``linear_form`` -- (Optional) A linear combination of the input
variables that separates the critical point solutions
OUTPUT:
List of minimal critical points of `F` in the direction `r`,
as a list of tuples of algebraic numbers.
NOTE:
The code randomly generates a linear form, which for generic rational functions
separates the solutions of an intermediate polynomial system with high probability.
This separation step can fail, but (assuming F has a finite number of critical points)
the code can be rerun until a separating form is found.
Examples::
sage: from sage_acsv import MinimalCriticalCombinatorial
sage: R.<x, y, w, lambda_, t, u_> = QQ[]
sage: pts = MinimalCriticalCombinatorial(
....: 1,
....: 1 - w*(y + x + x^2*y + x*y^2),
....: ([w, x, y], lambda_, t, u_)
....: )
sage: sorted(pts)
[[-1/4, -1, -1], [1/4, 1, 1]]
"""
timer = Timer()
# Fetch the variables we need
vs, lambda_, t, u_ = variables
vsT = vs + [t, lambda_]
# If direction r is not given, default to the diagonal
if r is None:
r = [1 for i in range(len(vs))]
# Create the critical point equations system
vsH = H.variables()
system = [
vsH[i]*H.derivative(vsH[i]) - r[i]*lambda_
for i in range(len(vsH))
] + [H, H.subs({z: z*t for z in vsH})]
# Compute the Kronecker representation of our system
timer.checkpoint()
P, Qs = _kronecker_representation(system, u_, vsT, lambda_, linear_form)
timer.checkpoint("Kronecker")
Qt = Qs[-2] # Qs ordering is H.variables() + [t, lambda_]
Pd = P.derivative()
# Solutions to Pt are solutions to the system where t is not 1
one_minus_t = gcd(Pd - Qt, P)
Pt, _ = P.quo_rem(one_minus_t)
rts_t_zo = list(
filter(
lambda k: (Qt/Pd).subs(u_=k) > 0 and (Qt/Pd).subs(u_=k) < 1,
Pt.roots(AA, multiplicities=False)
)
)
non_min = [[(q/Pd).subs(u_=u) for q in Qs[0:-2]] for u in rts_t_zo]
# Filter the real roots for minimal points with positive coords
pos_minimals = []
for u in one_minus_t.roots(AA, multiplicities=False):
is_min = True
v = [(q/Pd).subs(u_=u) for q in Qs[0:-2]]
if any([k <= 0 for k in v]):
continue
for pt in non_min:
if all([a == b for (a, b) in zip(v, pt)]):
is_min = False
break
if is_min:
pos_minimals.append(u)
# Remove non-smooth points and points with zero coordinates (where lambda=0)
for i in range(len(pos_minimals)):
x = (Qs[-1]/Pd).subs(u_=pos_minimals[i])
if x == 0:
acsv_logger.warning(
f"Removing critical point {pos_minimals[i]} because it either "
"has a zero coordinate or is not smooth."
)
pos_minimals.pop(i)
# Verify necessary assumptions
if len(pos_minimals) == 0:
raise ACSVException("No smooth minimal critical points found.")
elif len(pos_minimals) > 1:
raise ACSVException(
"More than one minimal point with positive real coordinates found."
)
# Find all minimal critical points
minCP = [(q/Pd).subs(u_=pos_minimals[0]) for q in Qs[0:-2]]
minimals = []
for u in one_minus_t.roots(QQbar, multiplicities=False):
v = [(q/Pd).subs(u_=u) for q in Qs[0:-2]]
if all([a.abs() == b.abs() for (a, b) in zip(minCP, v)]):
minimals.append(u)
# Get minimal point coords, and make exact if possible
minimal_coords = [[(q/Pd).subs(u_=u) for q in Qs[0:-2]] for u in minimals]
[[a.exactify() for a in b] for b in minimal_coords]
timer.checkpoint("Minimal Points")
return [[(q/Pd).subs(u_=u) for q in Qs[0:-2]] for u in minimals] | /sage_acsv-0.1.1.tar.gz/sage_acsv-0.1.1/sage_acsv/asymptotics.py | 0.904669 | 0.572693 | asymptotics.py | pypi |
from enum import Enum
from sage.all import QQ, ceil, gcd, matrix, randint
class OutputFormat(Enum):
"""Output options for displaying the asymptotic behavior determined
by :func:`.diagonal_asy`.
See also:
- :func:`.diagonal_asy`
"""
# Expansion in an AsymptoticRing, including the error term.
ASYMPTOTIC = "asymptotic"
# A single expression in the Symbolic Ring.
SYMBOLIC = "symbolic"
# List of (base, n-power, pi-power, constant) tuples (the default).
TUPLE = "tuple"
def RationalFunctionReduce(G, H):
    r"""Cancel the common factor of ``G`` and ``H``.

    INPUT:

    * ``G``, ``H`` -- polynomials

    OUTPUT:

    The pair ``(G/d, H/d)`` where ``d`` is the GCD of ``G`` and ``H``.
    """
    common = gcd(G, H)
    return G / common, H / common
def GenerateLinearForm(system, vsT, u_, linear_form=None):
    r"""Produce a linear form used to separate the solutions of ``system``.

    INPUT:

    * ``system`` -- A polynomial system of equations
    * ``vsT`` -- A list of variables in the system
    * ``u_`` -- A variable not in the system
    * ``linear_form`` -- (Optional) A precomputed linear form in the
      variables of the system. If passed, the returned form is
      based on the given linear form and not randomly generated.

    OUTPUT:

    A linear form; random integer coefficients (bounded using the system's
    degrees and coefficient sizes) make it separating with high probability.
    """
    if linear_form is not None:
        return u_ - linear_form

    coeff_bound = ceil(max(
        max(abs(c) for c in poly.coefficients()) for poly in system
    ))
    degree_bound = max(poly.degree() for poly in system)
    bound = coeff_bound * degree_bound + 31
    return u_ - sum(randint(-bound, bound) * z for z in vsT)
def DetHessianWithLog(H, vs, r):
r"""Computes the determinant of `z_d H_{z_d} Hess`, where `Hess` is
the Hessian of a given map.
The map underlying `Hess` is defined as
`(z_1, \ldots, z_{d-1}) \mapsto z_1 \cdots z_{d-1} \log(g(z_1, \ldots, z_{d-1}))`,
with `g` defined from IFT via `H(z_a1,...,z_{d-1},g(z_1,...,z_{d-1}))` at
a critical point in direction `r`.
INPUT:
* ``H`` -- a polynomial (the denominator of the rational GF `F` in ACSV)
* ``vs`` -- list of variables ``z_1, ..., z_d``
* ``r`` -- direction vector of length `d` with positive integers
OUTPUT:
The determinant as a rational function in the variables ``vs``.
"""
z_d = vs[-1]
d = len(vs)
# Build d x d matrix of U[i,j] = z_i * z_j * H'_{z_i * z_j}
# (normalized by z_d * H_{z_d}).
U = matrix(
[
[
v1 * v2 * H.derivative(v1, v2)/(z_d * H.derivative(z_d))
for v2 in vs
] for v1 in vs
]
)
# V[k] = r_k / r_d encodes the direction, normalized by its last entry.
V = [QQ(r[k] / r[-1]) for k in range(d)]
# Build (d-1) x (d-1) Matrix for Hessian
Hess = [
[
V[i] * V[j] + U[i][j] - V[j] * U[i][-1] - V[i]*U[j][-1]
+ V[i] * V[j] * U[-1][-1]
for j in range(d-1)
] for i in range(d-1)
]
# Diagonal entries receive an extra V[i] term.
for i in range(d-1):
Hess[i][i] = Hess[i][i] + V[i]
# Return determinant
return matrix(Hess).determinant()
class ACSVException(Exception):
"""Exception type used throughout sage_acsv.

The ``retry`` flag signals that the failure was caused by a randomly
chosen linear form, so the caller may simply try again with a new one.
"""
def __init__(self, message, retry=False):
super().__init__(message)
self._message = message
self._retry = retry
def __str__(self):
return self._message
@property
def retry(self):
# True when re-running with a fresh random linear form may succeed.
return self._retry | /sage_acsv-0.1.1.tar.gz/sage_acsv-0.1.1/sage_acsv/helpers.py | 0.929224 | 0.789619 | helpers.py | pypi
from sage.all import PolynomialRing, QQ, gcd
from sage.rings.polynomial.multi_polynomial_ideal import MPolynomialIdeal
from sage_acsv.helpers import ACSVException, GenerateLinearForm
from sage_acsv.debug import acsv_logger
def _kronecker_representation(system, u_, vs, lambda_=None, linear_form=None):
r"""Computes the Kronecker Representation of a system of polynomials.
This method is intended for internal use and requires a consistent
setup of parameters. Use the :func:`.kronecker` wrapper function
to avoid doing the setup yourself.
INPUT:
* ``system`` -- A system of polynomials in ``d`` variables
* ``u_`` -- Variable not contained in the variables in system
* ``vs`` -- Variables of the system
* ``lambda_``: (Optional) Parameter introduced for critical point computation
* ``linear_form`` -- (Optional) A linear combination of the input
variables that separates the critical point solutions
OUTPUT:
A polynomial ``P`` and ``d`` polynomials ``Q1, ..., Q_d`` such
that ``z_i = Q_i(u)/P'(u)`` for ``u`` ranging over the roots of ``P``
Examples::
sage: from sage_acsv.kronecker import kronecker
sage: var('x, y')
(x, y)
sage: kronecker(
....: [x**3+y**3-10, y**2-2],
....: [x, y],
....: linear_form=x + y
....: ) # indirect doctest
(u_^6 - 6*u_^4 - 20*u_^3 + 36*u_^2 - 120*u_ + 100,
[60*u_^3 - 72*u_^2 + 360*u_ - 600, 12*u_^4 - 72*u_^2 + 240*u_])
"""
# Generate a linear form
linear_form = GenerateLinearForm(system, vs, u_, linear_form)
expanded_R = u_.parent()
# degrevlex ordering speeds up the initial Groebner basis computation;
# the basis is converted to lex afterwards via FGLM.
rabinowitsch_R = PolynomialRing(
QQ,
list(expanded_R.gens()),
len(expanded_R.gens()),
order="degrevlex"
)
u_ = rabinowitsch_R(u_)
if lambda_:
lambda_ = rabinowitsch_R(lambda_)
rabinowitsch_system = [rabinowitsch_R(f) for f in system]
rabinowitsch_system.append(rabinowitsch_R(linear_form))
# Compute Grobner basis for ordered system of polynomials
ideal = MPolynomialIdeal(rabinowitsch_R, rabinowitsch_system)
try:
ideal = MPolynomialIdeal(rabinowitsch_R, ideal.groebner_basis())
except Exception:
raise ACSVException("Trouble computing Groebner basis. System may be too large.")
# Radical removes multiplicities so P is squarefree below.
ideal = ideal.radical()
gb = ideal.transformed_basis('fglm')
rabinowitsch_R = rabinowitsch_R.change_ring(order="lex")
u_ = rabinowitsch_R(u_)
if lambda_:
lambda_ = rabinowitsch_R(lambda_)
# P is the unique basis element involving only u_ (no system variables).
Ps = [
p for p in gb
if len(p.variables()) != 0
and not any([z in vs for z in p.variables()])
]
if len(Ps) != 1:
acsv_logger.debug(
f"Rabinowitsch system: {rabinowitsch_system}\n"
f"Ps: {Ps}\n"
f"basis: {gb}"
)
raise ACSVException(
"No P polynomial found for Kronecker Representation.",
retry=True
)
u_ = Ps[0].variables()[0]
R = PolynomialRing(QQ, u_)
P = R(Ps[0])
# Make P squarefree by dividing out gcd(P, P').
P, _ = P.quo_rem(gcd(P, P.derivative(u_)))
Pd = P.derivative(u_)
# Find Q_i for each variable
Qs = []
for z in vs:
z = rabinowitsch_R(z)
# Each variable must appear in exactly one (degree-one) basis element,
# otherwise the linear form failed to separate the solutions.
eqns = [f for f in gb if z in f.variables()]
if len(eqns) != 1:
acsv_logger.debug(
f"equations: {eqns}\n"
f"z: {z}\n"
f"vs: {vs}"
)
raise ACSVException(
"Linear form does not separate the roots.",
retry=True
)
eq = eqns[0].polynomial(z)
if eq.degree() != 1:
acsv_logger.debug(
f"eq: {eq}\n"
f"z: {z}"
)
raise ACSVException(
"Linear form does not separate the roots.",
retry=True
)
_, rem = (Pd * eq.roots()[0][0]).quo_rem(P)
Qs.append(rem)
# Forget base ring, move to univariate polynomial ring in u over a field
Qs = [R(Q) for Q in Qs]
return P, Qs
def kronecker(system, vs, linear_form=None):
r"""Computes the Kronecker Representation of a system of polynomials
INPUT:
* ``system`` -- A system of polynomials in ``d`` variables
* ``vs`` -- Variables of the system
* ``linear_form`` -- (Optional) A linear combination of the
input variables that separates the critical point solutions
OUTPUT:
A polynomial ``P`` and ``d`` polynomials ``Q1, ..., Q_d`` such that
``z_i = Q_i(u)/P'(u)`` for ``u`` ranging over the roots of ``P``.
Examples::
sage: from sage_acsv import kronecker
sage: var('x,y')
(x, y)
sage: kronecker([x**3+y**3-10, y**2-2], [x,y], x+y)
(u_^6 - 6*u_^4 - 20*u_^3 + 36*u_^2 - 120*u_ + 100,
[60*u_^3 - 72*u_^2 + 360*u_ - 600, 12*u_^4 - 72*u_^2 + 240*u_])
"""
# The first ring exists only to create the fresh generator u_; it is then
# adjoined to the user's variables in the combined ring R below.
R, u_ = PolynomialRing(QQ, 'u_').objgen()
R = PolynomialRing(QQ, len(vs) + 1, vs + [u_])
system = [R(f) for f in system]
vs = [R(v) for v in vs]
u_ = R(u_)
return _kronecker_representation(system, u_, vs, linear_form=linear_form) | /sage_acsv-0.1.1.tar.gz/sage_acsv-0.1.1/sage_acsv/kronecker.py | 0.908741 | 0.599808 | kronecker.py | pypi
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.